repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
ubic135/odoo-design | refs/heads/master | addons/purchase/tests/test_purchase_to_invoice.py | 10 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger
from datetime import datetime
class TestPurchase(TestMail):
    """Workflow test: purchase order -> confirmation -> supplier invoice
    -> invoice validation, performed with an Invoicing & Payments user.
    """

    # Mute the noisy ORM/model loggers so expected access warnings during
    # setup do not clutter the test output.
    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
    def setUp(self):
        # No extra fixtures needed beyond the mail test environment.
        super(TestPurchase, self).setUp()

    def test_purchase_to_invoice(self):
        """ Testing for invoice create,validate and pay with invoicing and payment user."""
        cr, uid = self.cr, self.uid

        # Useful model proxies (old-style registry API).
        data_obj = self.registry('ir.model.data')
        user_obj = self.registry('res.users')
        partner_obj = self.registry('res.partner')
        purchase_obj = self.registry('purchase.order')
        purchase_order_line = self.registry('purchase.order.line')
        invoice_obj = self.registry('account.invoice')

        # Useful record ids, resolved from XML-IDs.
        group_id = data_obj.get_object_reference(cr, uid, 'account', 'group_account_invoice')[1]
        # NOTE(review): 'product_category_5' is a product *category* XML-ID,
        # yet its id is used below as the order line's product_id — verify
        # against the demo data that this resolves to a usable product.
        product_ref = data_obj.get_object_reference(cr, uid, 'product', 'product_category_5')
        product_id = product_ref and product_ref[1] or False
        # NOTE(review): company_id is looked up but never used afterwards.
        company_id = data_obj.get_object_reference(cr, uid, 'base', 'main_company')[1]
        location_id = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_3')[1]

        # Create a new user belonging to the Invoicing & Payments group.
        user_id = user_obj.create(cr, uid, {
            'name': 'Test User',
            'login': 'test@test.com',
            'company_id': 1,
            'groups_id': [(6, 0, [group_id])]
        })
        assert user_id, "User will not created."

        # Create the supplier partner for the purchase order.
        partner_id = partner_obj.create(cr, uid, {
            'name': 'Test Customer',
            'email': 'testcustomer@test.com',
        })

        # Create a purchase order with one line; pricelist 1 is the demo
        # default purchase pricelist.
        order_id = purchase_obj.create(cr, uid, {
            'partner_id': partner_id,
            'location_id': location_id,
            'pricelist_id': 1,
        })
        # NOTE(review): the returned line id is never used afterwards.
        order_line = purchase_order_line.create(cr, uid, {
            'order_id': order_id,
            'product_id': product_id,
            'product_qty': 100.0,
            'product_uom': 1,
            'price_unit': 89.0,
            'name': 'Service',
            'date_planned': '2014-05-31',
        })
        assert order_id, "purchase order will not created."

        # Confirm the order through the workflow, then create its invoice.
        context = {"active_model": 'purchase.order', "active_ids": [order_id], "active_id":order_id}
        purchase_obj.wkf_confirm_order(cr, uid, [order_id], context=context)
        invoice_id = purchase_obj.action_invoice_create(cr, uid, [order_id], context=context)
        assert invoice_id,"No any invoice is created for this purchase order"

        # Validate the invoice with the invoicing-and-payments user.
        res = invoice_obj.invoice_validate(cr, uid, [invoice_id], context=context)
        self.assertTrue(res, 'Invoice will not validated')
|
aringh/odl | refs/heads/master | odl/contrib/mrc/mrc.py | 1 | # Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Specification and reader for the MRC2014 file format."""
from __future__ import print_function, division, absolute_import
from builtins import int, object
from collections import OrderedDict
from itertools import permutations
import numpy as np
import struct
import warnings
from odl.contrib.mrc.uncompr_bin import (
FileReaderRawBinaryWithHeader, FileWriterRawBinaryWithHeader,
header_fields_from_table)
__all__ = ('FileReaderMRC', 'FileWriterMRC', 'mrc_header_from_params')
# The standard MRC2014 header layout (1024 bytes total).  This reST grid
# table is machine-parsed by `header_fields_from_table` using the column
# titles mapped in `MRC_SPEC_KEYS`; byte ranges are 1-based and inclusive.
MRC_2014_SPEC_TABLE = """
+---------+--------+----------+--------+-------------------------------+
|Long word|Byte |Data type |Name |Description |
+=========+========+==========+========+===============================+
|1 |1-4 |Int32 |NX |Number of columns |
+---------+--------+----------+--------+-------------------------------+
|2 |5-8 |Int32 |NY |Number of rows |
+---------+--------+----------+--------+-------------------------------+
|3 |9-12 |Int32 |NZ |Number of sections |
+---------+--------+----------+--------+-------------------------------+
|4 |13-16 |Int32 |MODE |Data type |
+---------+--------+----------+--------+-------------------------------+
|8 |29-32 |Int32 |MX |Number of intervals along X of |
| | | | |the "unit cell" |
+---------+--------+----------+--------+-------------------------------+
|9 |33-36 |Int32 |MY |Number of intervals along Y of |
| | | | |the "unit cell" |
+---------+--------+----------+--------+-------------------------------+
|10 |37-40 |Int32 |MZ |Number of intervals along Z of |
| | | | |the "unit cell" |
+---------+--------+----------+--------+-------------------------------+
|11-13 |41-52 |Float32 |CELLA |Cell dimension in angstroms |
| | | | |(whole volume) |
+---------+--------+----------+--------+-------------------------------+
|17 |65-68 |Int32 |MAPC |axis corresponding to columns |
| | | | |(1,2,3 for X,Y,Z) |
+---------+--------+----------+--------+-------------------------------+
|18 |69-72 |Int32 |MAPR |axis corresponding to rows |
| | | | |(1,2,3 for X,Y,Z) |
+---------+--------+----------+--------+-------------------------------+
|19 |73-76 |Int32 |MAPS |axis corresponding to sections |
| | | | |(1,2,3 for X,Y,Z) |
+---------+--------+----------+--------+-------------------------------+
|20 |77-80 |Float32 |DMIN |Minimum density value |
+---------+--------+----------+--------+-------------------------------+
|21 |81-84 |Float32 |DMAX |Maximum density value |
+---------+--------+----------+--------+-------------------------------+
|22 |85-88 |Float32 |DMEAN |Mean density value |
+---------+--------+----------+--------+-------------------------------+
|23 |89-92 |Int32 |ISPG |Space group number 0, 1, or 401|
+---------+--------+----------+--------+-------------------------------+
|24 |93-96 |Int32 |NSYMBT |Number of bytes in extended |
| | | | |header |
+---------+--------+----------+--------+-------------------------------+
|27 |105-108 |String |EXTTYPE |Extended header type |
+---------+--------+----------+--------+-------------------------------+
|28 |109-112 |Int32 |NVERSION|Format version identification |
| | | | |number |
+---------+--------+----------+--------+-------------------------------+
|50-52 |197-208 |Int32 |ORIGIN |Origin in X, Y, Z used in |
| | | | |transform |
+---------+--------+----------+--------+-------------------------------+
|53 |209-212 |String |MAP |Character string 'MAP' to |
| | | | |identify file type |
+---------+--------+----------+--------+-------------------------------+
|54 |213-216 |String |MACHST |Machine stamp |
+---------+--------+----------+--------+-------------------------------+
|55 |217-220 |Float32 |RMS |RMS deviation of map from mean |
| | | | |density |
+---------+--------+----------+--------+-------------------------------+
|56 |221-224 |Int32 |NLABL |Number of labels being used |
+---------+--------+----------+--------+-------------------------------+
|57-256 |225-1024|String(80)|LABEL |10 80-character text labels |
+---------+--------+----------+--------+-------------------------------+
"""
# Size in bytes of the fixed (non-extended) MRC header.
MRC_HEADER_SIZE = 1024

# Mapping from the generic keys used by `header_fields_from_table` to the
# column titles of the spec tables in this module.
MRC_SPEC_KEYS = {
    'id': 'Long word',
    'byte_range': 'Byte',
    'dtype': 'Data type',
    'name': 'Name',
    'description': 'Description'}

# Data-type names appearing in the spec tables -> Numpy dtypes used to
# decode the corresponding header fields.
MRC_DTYPE_TO_NPY_DTYPE = {
    'Float32': np.dtype('float32'),
    'Int32': np.dtype('int32'),
    'String': np.dtype('S1')}

# MRC `MODE` header values -> Numpy dtype of the data block.
MRC_MODE_TO_NPY_DTYPE = {
    0: np.dtype('int8'),
    1: np.dtype('int16'),
    2: np.dtype('float32'),
    6: np.dtype('uint16')}

# Inverse mapping, used when writing a header for given data.
NPY_DTYPE_TO_MRC_MODE = {v: k for k, v in MRC_MODE_TO_NPY_DTYPE.items()}

# Unit conversion factors; MRC stores cell sizes in angstroms.
ANGSTROM_IN_METERS = 1e-10
MICRON_IN_METERS = 1e-6
def print_mrc2014_spec():
    """Print the MRC2014 specification table.

    The specification table is as follows:
    """
    print(MRC_2014_SPEC_TABLE)


# Append the table itself so that it shows up in help(print_mrc2014_spec).
print_mrc2014_spec.__doc__ += MRC_2014_SPEC_TABLE
# Layout of the first section of the `FEI1`-type extended header.  Each
# image (slice) has one such section; byte ranges are absolute in the file,
# i.e. they already include the 1024-byte fixed header.
MRC_FEI_EXT_HEADER_SECTION = """
+---------+---------+---------+---------------+------------------------------+
|Long word|Byte |Data type|Name |Description |
+=========+=========+=========+===============+==============================+
|1 |1025-1028|Float32 |A_TILT |Alpha tilt, in degrees |
+---------+---------+---------+---------------+------------------------------+
|2 |1029-1032|Float32 |B_TILT |Beta tilt, in degrees |
+---------+---------+---------+---------------+------------------------------+
|3 |1033-1036|Float32 |X_STAGE |Stage x position. Normally in |
| | | | |SI units (meters), but some |
| | | | |older files may be in |
| | | | |micrometers.(values larger |
| | | | |than 1) |
+---------+---------+---------+---------------+------------------------------+
|4 |1037-1040|Float32 |Y_STAGE |Stage y position |
+---------+---------+---------+---------------+------------------------------+
|5 |1041-1044|Float32 |Z_STAGE |Stage z position |
+---------+---------+---------+---------------+------------------------------+
|6 |1045-1048|Float32 |X_SHIFT |Stage x shift. For units see |
| | | | |remarks on X_STAGE |
+---------+---------+---------+---------------+------------------------------+
|7 |1049-1052|Float32 |Y_SHIFT |Stage y shift |
+---------+---------+---------+---------------+------------------------------+
|8 |1053-1056|Float32 |DEFOCUS |Defocus as read from the |
| | | | |microscope. For units see |
| | | | |remarks on X_STAGE. |
+---------+---------+---------+---------------+------------------------------+
|9 |1057-1060|Float32 |EXP_TIME |Exposure time in seconds |
+---------+---------+---------+---------------+------------------------------+
|10 |1061-1064|Float32 |MEAN_INT |Mean value of the image |
+---------+---------+---------+---------------+------------------------------+
|11 |1065-1068|Float32 |TILT_AXIS |Orientation of the tilt axis |
| | | | |in the image in degrees. |
| | | | |Vertical to the top is 0 |
| | | | |degrees, the direction of |
| | | | |positive rotation is |
| | | | |anti-clockwise. |
+---------+---------+---------+---------------+------------------------------+
|12 |1069-1072|Float32 |PIXEL_SIZE |Pixel size of the images in SI|
| | | | |units (meters) |
+---------+---------+---------+---------------+------------------------------+
|13 |1073-1076|Float32 |MAGNIFICATION |Magnification used for |
| | | | |recording the images |
+---------+---------+---------+---------------+------------------------------+
|14 |1077-1080|Float32 |HT |Value of the high tension in |
| | | | |SI units (volts) |
+---------+---------+---------+---------------+------------------------------+
|15 |1081-1084|Float32 |BINNING |The binning of the CCD or STEM|
| | | | |acquisition |
+---------+---------+---------+---------------+------------------------------+
|16 |1085-1088|Float32 |APPLIED_DEFOCUS|The intended application |
| | | | |defocus in SI units (meters), |
| | | | |as defined for example in the |
| | | | |tomography parameters view |
+---------+---------+---------+---------------+------------------------------+
"""

# Size in bytes of one FEI extended header section (one per image).
MRC_FEI_SECTION_SIZE = 128
# Fixed number of sections in an FEI extended header (128 * 1024 bytes).
MRC_FEI_NUM_SECTIONS = 1024
def print_fei_ext_header_spec():
    """Print the specification table of an FEI extended header section.

    The specification table is as follows:
    """
    print(MRC_FEI_EXT_HEADER_SECTION)


# Append the table so that it shows up in help(print_fei_ext_header_spec).
print_fei_ext_header_spec.__doc__ += MRC_FEI_EXT_HEADER_SECTION
class MRCHeaderProperties(object):

    """Mixin class adding MRC header-based properties to I/O classes.

    Expects the host class to provide a ``header`` attribute: a mapping
    from lowercase field name to a dict with a ``'value'`` entry.
    """

    # Expose the spec printers on the class for interactive discovery.
    print_mrc2014_spec = staticmethod(print_mrc2014_spec)
    print_fei_ext_header_spec = staticmethod(print_fei_ext_header_spec)

    @property
    def header_size(self):
        """Total size of `file`'s header (including extended) in bytes.

        The size of the header is determined from `header`. If this is not
        possible (i.e., before the header has been read), the extended part
        is counted as 0.
        If the header contains an ``'nsymbt'`` entry (size of the extra
        header in bytes), its value is added to the regular header size.
        """
        standard_header_size = MRC_HEADER_SIZE
        try:
            extra_header_size = int(self.header['nsymbt']['value'])
        except KeyError:
            # Header not read yet, or field absent: fixed part only.
            extra_header_size = 0
        return standard_header_size + extra_header_size

    @property
    def data_shape(self):
        """Shape tuple of the whole data block as determined from `header`.

        If no header is available (i.e., before it has been initialized),
        or any of the header entries ``'nx', 'ny', 'nz'`` is missing,
        -1 is returned, which makes reshaping a no-op.
        Otherwise, the returned shape is ``(nx, ny, nz)``.

        Note: this is the shape of the data as defined by the header.
        For a non-trivial axis ordering, the shape of actual data will
        be different.

        See Also
        --------
        data_storage_shape
        data_axis_order
        """
        if not self.header:
            return -1
        try:
            nx = self.header['nx']['value']
            ny = self.header['ny']['value']
            nz = self.header['nz']['value']
        except KeyError:
            return -1
        else:
            return tuple(int(n) for n in (nx, ny, nz))

    @property
    def data_storage_shape(self):
        """Shape tuple of the data as stored in the file.

        If no header is available (i.e., before it has been initialized),
        or any of the header entries ``'nx', 'ny', 'nz'`` is missing,
        -1 is returned, which makes reshaping a no-op.
        Otherwise, the returned shape is a permutation of `data_shape`,
        i.e., ``(nx, ny, nz)``, according to `data_axis_order` in the
        following way::

            data_shape[i] == data_storage_shape[data_axis_order[i]]

        See Also
        --------
        data_shape
        data_axis_order
        """
        if self.data_shape == -1:
            return -1
        else:
            # argsort inverts the permutation stored in data_axis_order.
            return tuple(self.data_shape[ax]
                         for ax in np.argsort(self.data_axis_order))

    @property
    def data_dtype(self):
        """Data type of the data block as determined from `header`.

        If no header is available (i.e., before it has been initialized),
        or the header entry ``'mode'`` is missing, the data type gained
        from the ``dtype`` argument in the initializer is returned.
        Otherwise, it is determined from ``mode``.
        """
        if not self.header:
            return self._init_data_dtype
        try:
            mode = int(self.header['mode']['value'])
        except KeyError:
            return self._init_data_dtype
        else:
            try:
                return MRC_MODE_TO_NPY_DTYPE[mode]
            except KeyError:
                raise ValueError('data mode {} not supported'.format(mode))

    @property
    def data_kind(self):
        """String ``'volume'``, ``'projections'`` or ``'unknown'``.

        The value is determined from the ``'ispg'`` header entry.
        """
        ispg = self.header['ispg']['value']
        if ispg == 0:
            return 'projections'
        elif ispg == 1:
            return 'volume'
        else:
            # NOTE(review): the spec also allows ISPG 401 (volume stack),
            # which is reported as 'unknown' here — confirm this is intended.
            return 'unknown'

    @property
    def data_axis_order(self):
        """Permutation of ``(0, 1, 2)``.

        The value is determined from the ``'mapc', 'mapr', 'maps'``
        header entries and determines the order of the axes of a
        dataset (`data_shape`) when stored in a file
        (`data_storage_shape`)::

            data_shape[i] == data_storage_shape[data_axis_order[i]]

        For example, if ``data_axis_order == (2, 0, 1)`` then the
        data axis 2 comes first in storage, axis 0 comes second and
        axis 1 comes last.

        If no header is available, (i.e., before it has been initialized),
        or one of the header entries ``'mapc', 'mapr', 'maps'`` is missing,
        the identity permutation ``(0, 1, 2)`` is returned.

        See Also
        --------
        data_shape
        data_storage_shape
        """
        if not self.header:
            return (0, 1, 2)
        try:
            mapc = self.header['mapc']['value']
            mapr = self.header['mapr']['value']
            maps = self.header['maps']['value']
        except KeyError:
            return (0, 1, 2)
        else:
            # Header stores 1-based axis indices; convert to 0-based.
            axis_order = tuple(int(m) - 1 for m in [mapc, mapr, maps])
            if (sorted(axis_order) != [0, 1, 2]):
                # Ignore invalid entries in the header, e.g. 0, 0, 0.
                # Some MRC files out there are like that.
                warnings.warn('invalid axis mapping {}, using (0, 1, 2)'
                              ''.format(tuple(m + 1 for m in axis_order)),
                              RuntimeWarning)
                axis_order = (0, 1, 2)
            return axis_order

    @property
    def cell_sides_angstrom(self):
        """Array of sizes of a unit cell in Angstroms.

        The value is determined from the ``'cella'`` entry in `header`,
        which stores the extent of the whole volume; dividing by
        `data_shape` yields the per-cell size.
        """
        return np.asarray(
            self.header['cella']['value'], dtype=float) / self.data_shape

    @property
    def cell_sides(self):
        """Array of sizes of a unit cell in meters.

        The value is determined from the ``'cella'`` entry in `header`.
        """
        return self.cell_sides_angstrom * ANGSTROM_IN_METERS

    @property
    def mrc_version(self):
        """Version tuple of the MRC file.

        The value is determined from the ``'nversion'`` header entry,
        which encodes ``10 * major + minor`` (e.g. 20140 -> (2014, 0)).
        """
        nversion = int(self.header['nversion']['value'])
        return nversion // 10, nversion % 10

    @property
    def extended_header_size(self):
        """Size of the extended header in bytes.

        The value is determined from the header entry ``'nsymbt'``.
        """
        return int(self.header['nsymbt']['value'])

    @property
    def extended_header_type(self):
        """Type of the extended header.

        The value is determined from the header entry ``'exttype'``.
        See `the specification homepage
        <http://www.ccpem.ac.uk/mrc_format/mrc2014.php>`_ for possible
        values.
        """
        # 'exttype' is stored as an array of single characters; join them.
        return ''.join(self.header['exttype']['value'].astype(str))

    @property
    def labels(self):
        """Return the 10-tuple of text labels from `header`.

        The value is determined from the header entries ``'nlabl'`` and
        ``'label'``.
        """
        label_array = self.header['label']['value']
        labels = tuple(''.join(row.astype(str)) for row in label_array)
        try:
            nlabels = int(self.header['nlabl']['value'])
        except KeyError:
            nlabels = len(labels)

        # Check if there are nontrivial labels after the number given in
        # the header. If yes, ignore the 'nlabl' information and return
        # all labels.
        if any(label.strip() for label in labels[nlabels:]):
            return labels
        else:
            return labels[:nlabels]
class FileReaderMRC(MRCHeaderProperties, FileReaderRawBinaryWithHeader):

    """Reader for the MRC file format.

    By default, the MRC2014 format is used, see `print_mrc2014_spec` for
    details. See also [Che+2015] or the `explanations on the CCP4 homepage
    <http://www.ccpem.ac.uk/mrc_format/mrc2014.php>`_ for the
    text of the specification.

    References
    ----------
    [Che+2015] Cheng, A et al. *MRC2014: Extensions to the MRC format header
    for electron cryo-microscopy and tomography*. Journal of Structural
    Biology, 129 (2015), pp 146--150.
    """

    def __init__(self, file, header_fields=None):
        """Initialize a new instance.

        Parameters
        ----------
        file : file-like or str
            Stream or filename from which to read the data. The stream
            is allowed to be already opened in ``'rb'`` mode.
        header_fields : sequence of dicts, optional
            Definition of the fields in the header (per row), each
            containing key-value pairs for the following keys:

            - ``'name'`` : Label for the field.
            - ``'offset'`` : Start of the field in bytes.
            - ``'size'`` : Size of the field in bytes.
            - ``'dtype'`` : Data type in Numpy- or Numpy-readable format.
            - ``'dshape'`` (optional) : The array of values is reshaped to
              this shape.
            - ``'description'`` (optional) : A human-readable description
              of the field.

            For the default ``None``, the MRC2014 format is used, see
            `print_mrc2014_spec`.
        """
        if header_fields is None:
            header_fields = header_fields_from_table(
                spec_table=MRC_2014_SPEC_TABLE,
                keys=MRC_SPEC_KEYS,
                dtype_map=MRC_DTYPE_TO_NPY_DTYPE)
        # `MRCHeaderProperties` has no `__init__`, so this calls
        # `FileReaderRawBinaryWithHeader.__init__`
        super(FileReaderMRC, self).__init__(file, header_fields)

    def read_extended_header(self, groupby='field', force_type=''):
        """Read the extended header according to `extended_header_type`.

        Currently, only the FEI extended header format is supported.
        See `print_fei_ext_header_spec` or `this homepage`_ for the format
        specification.

        The extended header usually has one header section per
        image (slice), in case of the FEI header 128 bytes each, with
        a total of 1024 sections.

        Parameters
        ----------
        groupby : {'field', 'section'}, optional
            How to group the values in the extended header sections.

            ``'field'`` : make an array per section field, e.g.::

                'defocus': [dval1, dval2, ..., dval1024],
                'exp_time': [tval1, tval2, ..., tval1024],
                ...

            ``'section'`` : make a dictionary for each section, e.g.::

                {'defocus': dval1, 'exp_time': tval1},
                {'defocus': dval2, 'exp_time': tval2},
                ...

            If the number of images is smaller than 1024, the last values
            are all set to zero.
        force_type : string, optional
            If given, this value overrides the `extended_header_type`
            from `header`.
            Currently supported: ``'FEI1'``

        Returns
        -------
        ext_header : `OrderedDict` or tuple
            For ``groupby == 'field'``, a dictionary with the field names
            as keys, like in the example.
            For ``groupby == 'section'``, a tuple of dictionaries as
            shown above.
            The returned data structures store no offsets, in contrast
            to the regular header.

        References
        ----------
        .. _this homepage:
           http://www.2dx.unibas.ch/documentation/mrc-software/fei-\
extended-mrc-format-not-used-by-2dx
        """
        ext_header_type = str(force_type).upper() or self.extended_header_type
        if ext_header_type != 'FEI1':
            raise ValueError("extended header type '{}' not supported"
                             "".format(self.extended_header_type))

        groupby, groupby_in = str(groupby).lower(), groupby
        ext_header_len = int(self.header['nsymbt']['value'])
        if ext_header_len % MRC_FEI_SECTION_SIZE:
            raise ValueError('extended header length {} from header is '
                             'not divisible by extended header section size '
                             '{}'.format(ext_header_len, MRC_FEI_SECTION_SIZE))

        num_sections = ext_header_len // MRC_FEI_SECTION_SIZE
        if num_sections != MRC_FEI_NUM_SECTIONS:
            raise ValueError('calculated number of sections ({}) not equal to '
                             'expected number of sections ({})'
                             ''.format(num_sections, MRC_FEI_NUM_SECTIONS))

        section_fields = header_fields_from_table(
            MRC_FEI_EXT_HEADER_SECTION, keys=MRC_SPEC_KEYS,
            dtype_map=MRC_DTYPE_TO_NPY_DTYPE)

        # Make a list for each field and append the values for that
        # field. Then create an array from that list and store it
        # under the field name.
        ext_header = OrderedDict()
        for field in section_fields:
            value_list = []
            field_offset = field['offset']
            field_dtype = field['dtype']
            field_dshape = field['dshape']

            # Number of scalar items in the field and its byte size,
            # plus the corresponding `struct` format string.
            num_items = int(np.prod(field_dshape))
            size_bytes = num_items * field_dtype.itemsize
            fmt = '{}{}'.format(num_items, field_dtype.char)

            for section in range(num_sections):
                # Seek to the field's position in this section, read the
                # bytestring, unpack it and append the value to the list.
                # NOTE(review): `field_offset` appears to already include
                # the 1024-byte fixed header (byte ranges in the FEI table
                # start at 1025) — confirm against
                # `header_fields_from_table`.
                start = section * MRC_FEI_SECTION_SIZE + field_offset
                self.file.seek(start)
                packed_value = self.file.read(size_bytes)
                value_list.append(struct.unpack(fmt, packed_value))

            ext_header[field['name']] = np.array(value_list, dtype=field_dtype)

        if groupby == 'field':
            return ext_header
        elif groupby == 'section':
            # Transpose the data and return as tuple.
            return tuple({key: ext_header[key][i] for key in ext_header}
                         for i in range(num_sections))
        else:
            raise ValueError("`groupby` '{}' not understood"
                             "".format(groupby_in))

    def read_data(self, dstart=None, dend=None, swap_axes=True):
        """Read the data from `file` and return it as Numpy array.

        Parameters
        ----------
        dstart : int, optional
            Offset in bytes of the data field. By default, it is equal
            to ``header_size``. Backwards indexing with negative values
            is also supported.
            Use a value larger than the header size to extract a data
            subset.
        dend : int, optional
            End position in bytes until which data is read (exclusive).
            Backwards indexing with negative values is also supported.
            Use a value different from the file size to extract a data
            subset.
        swap_axes : bool, optional
            If ``True``, use `data_axis_order` to swap the axes in the
            returned array. In that case, the shape of the array may no
            longer agree with `data_storage_shape`.

        Returns
        -------
        data : `numpy.ndarray`
            The data read from `file`.
        """
        data = super(FileReaderMRC, self).read_data(dstart, dend)
        # Fortran (column-major) order per the MRC convention.
        data = data.reshape(self.data_shape, order='F')
        if swap_axes:
            data = np.transpose(data, axes=self.data_axis_order)
            # NOTE(review): for a non-trivial `data_axis_order` this assert
            # can only hold if the permutation maps `data_shape` onto
            # itself; `write_data` uses `np.argsort(data_axis_order)`
            # instead — verify which direction is intended.
            assert data.shape == self.data_shape
        return data
class FileWriterMRC(MRCHeaderProperties, FileWriterRawBinaryWithHeader):

    """Writer for the MRC file format.

    See [Che+2015] or the `explanations on the CCP4 homepage
    <http://www.ccpem.ac.uk/mrc_format/mrc2014.php>`_ for the
    text of the specification.

    References
    ----------
    [Che+2015] Cheng, A et al. *MRC2014: Extensions to the MRC format header
    for electron cryo-microscopy and tomography*. Journal of Structural
    Biology, 129 (2015), pp 146--150.
    """

    def write_data(self, data, dstart=None, swap_axes=True):
        """Write ``data`` to `file`.

        Parameters
        ----------
        data : `array-like`
            Data that should be written to `file`.
        dstart : non-negative int, optional
            Offset in bytes of the start position of the written data.
            If provided, reshaping and axis swapping of ``data`` is
            skipped.
            For ``None``, `header_size` is used.
        swap_axes : bool, optional
            If ``True``, use the ``'mapc', 'mapr', 'maps'`` header entries
            to swap the axes in the ``data`` before writing. Use ``False``
            only if the data is already consistent with the final axis
            order.
        """
        if dstart is None:
            shape = self.data_shape
            dstart = int(self.header_size)
        elif dstart < 0:
            raise ValueError('`dstart` must be non-negative, got {}'
                             ''.format(dstart))
        else:
            # Explicit dstart: write the flat data as-is (shape -1 makes
            # the reshape below a no-op).
            shape = -1
            dstart = int(dstart)

        if dstart < self.header_size:
            raise ValueError('invalid `dstart`, resulting in absolute '
                             '`dstart` < `header_size` ({} < {})'
                             ''.format(dstart, self.header_size))

        data = np.asarray(data, dtype=self.data_dtype).reshape(shape)
        if swap_axes:
            # Need to argsort here since `data_axis_order` tells
            # "which axis comes from where", which is the inverse of what the
            # `transpose` function needs.
            data = np.transpose(data, axes=np.argsort(self.data_axis_order))
            assert data.shape == self.data_storage_shape

        # Flatten in Fortran (column-major) order per the MRC convention.
        data = data.reshape(-1, order='F')
        self.file.seek(dstart)
        data.tofile(self.file)
def mrc_header_from_params(shape, dtype, kind, **kwargs):
    """Create a minimal MRC2014 header from the given parameters.

    Parameters
    ----------
    shape : 3-sequence of ints
        3D shape of the stored data. The values are used as
        ``'nx', 'ny', 'nz'`` header entries, in this order. Note that
        this is different from the actual data storage shape for
        non-trivial ``axis_order``.
    dtype : {'int8', 'int16', 'float32', 'uint16'}
        Data type specifier as understood by `numpy.dtype`. It is
        translated to a ``'mode'`` header entry. See `this page
        <http://www.ccpem.ac.uk/mrc_format/mrc2014.php>`_ for valid
        modes.
    kind : {'volume', 'projections'}
        Interpretation of the 3D data, either as single 3D volume or as
        a stack of 2D projections. The value is used for the ``'ispg'``
        header entry.
    extent : 3-sequence of floats, optional
        Size of the 3D volume in meters. The values are used for
        the ``'cella'`` header entry.
        Default: ``shape``, resulting in ``(1, 1, 1)`` unit cells
    axis_order : permutation of ``(0, 1, 2)``, optional
        Order of the data axes as they should appear in the stored file.
        The values are used for the ``'mapc', 'mapr', 'maps'`` header
        entries.
        Default: ``(0, 1, 2)``
    dmin, dmax : float, optional
        Minimum and maximum values of the data, used for header entries
        ``'dmin'`` and ``'dmax'``, resp.
        Default: 1.0, 0.0. These values indicate according to [Che+2015]
        that the values are considered as undetermined.
    dmean, rms : float, optional
        Mean and variance of the data, used for header entries ``'dmean'``
        and ``'rms'``, resp.
        Default: ``min(dmin, dmax) - 1, -1.0``. These values indicate
        according to [Che+2015] that the values are considered as
        undetermined.
    mrc_version : 2-tuple of int, optional
        Version identifier for the MRC file, used for the ``'nversion'``
        header entry.
        Default: ``(2014, 0)``
    text_labels : sequence of strings, optional
        Maximal 10 strings with 80 characters each, used for the
        ``'nlabl'`` and ``'label'`` header entries.
        Default: ``[]``

    Returns
    -------
    header : `OrderedDict`
        Header stored in an ordered dictionary, where each entry has the
        following form::

            'name': {'value': value_as_array,
                     'offset': offset_in_bytes,
                     'description': description_string}

        All ``'value'``'s are `numpy.ndarray`'s with at least one
        dimension.

    References
    ----------
    [Che+2015] Cheng, A et al. *MRC2014: Extensions to the MRC format header
    for electron cryo-microscopy and tomography*. Journal of Structural
    Biology, 129 (2015), pp 146--150.
    """
    # --- Positional args --- #
    shape = [int(n) for n in shape]
    kind, kind_in = str(kind).lower(), kind
    if kind not in ('volume', 'projections'):
        raise ValueError("`kind` '{}' not understood".format(kind_in))

    # --- Keyword args --- #
    # NOTE(review): unknown keyword arguments are silently ignored; consider
    # raising TypeError on leftover kwargs in a future, breaking change.
    extent = kwargs.pop('extent', shape)
    axis_order = kwargs.pop('axis_order', (0, 1, 2))
    if tuple(axis_order) not in permutations((0, 1, 2)):
        raise ValueError('`axis_order` must be a permutation of (0, 1, 2), '
                         'got {}'.format(axis_order))

    dmin = kwargs.pop('dmin', 1.0)
    dmax = kwargs.pop('dmax', 0.0)
    dmean = kwargs.pop('dmean', min(dmin, dmax) - 1.0)
    rms = kwargs.pop('rms', -1.0)

    mrc_version = kwargs.pop('mrc_version', (2014, 0))
    if len(mrc_version) != 2:
        raise ValueError('`mrc_version` must be a sequence of length 2, got '
                         '{}'.format(mrc_version))

    # Text labels: fill each label up with whitespace to 80 characters.
    # The remaining labels are left as 80 * '\x00'.
    text_labels_in = kwargs.pop('text_labels', [])
    nlabl = len(text_labels_in)
    if nlabl > 10:
        raise ValueError('expected maximum of 10 labels, got {} labels'
                         ''.format(nlabl))
    text_labels = [str(label).ljust(80) for label in text_labels_in]
    if any(len(label) > 80 for label in text_labels):
        raise ValueError('labels cannot have more than 80 characters each')

    # --- Convert values to header-friendly form --- #
    # All header values are Numpy arrays with at least one dimension.
    # `np.frombuffer` replaces the deprecated (and Python-3-broken)
    # `np.fromstring` / `np.fromiter`-over-bytes constructions; `.copy()`
    # keeps the arrays writable like before.
    nx, ny, nz = [np.array(n, dtype='int32').reshape([1]) for n in shape]
    mode = np.array(NPY_DTYPE_TO_MRC_MODE[np.dtype(dtype)],
                    dtype='int32').reshape([1])
    cella = np.array(extent).reshape([3]).astype('float32')
    # Header stores 1-based axis indices.
    mapc, mapr, maps = [np.array(m, dtype='int32').reshape([1]) + 1
                        for m in axis_order]
    dmin, dmax, dmean, rms = [np.array(x, dtype='float32').reshape([1])
                              for x in (dmin, dmax, dmean, rms)]
    ispg = np.array(1 if kind == 'volume' else 0, dtype='int32', ndmin=1)
    nsymbt = np.array([0], dtype='int32')
    # EXTTYPE and MACHST are fixed-width 4-byte character fields.
    exttype = np.frombuffer(b'    ', dtype='S1').copy()
    nversion = np.array(10 * mrc_version[0] + mrc_version[1],
                        dtype='int32').reshape([1])
    origin = np.zeros(3, dtype='int32')
    # TODO: no idea how to properly choose the machine stamp
    machst = np.frombuffer(b'DD  ', dtype='S1').copy()
    nlabl = np.array(nlabl, dtype='int32').reshape([1])
    label = np.zeros((10, 80), dtype='S1')  # ensure correct size
    for i, label_i in enumerate(text_labels):
        label[i] = np.frombuffer(label_i.encode('ascii'), dtype='S1')

    # Values keyed by lowercase header field name.  An explicit mapping
    # replaces the previous `eval(field['name'])` lookup, which was fragile
    # (required exact local variable names, shadowed the `map` builtin)
    # and unsafe as an idiom.
    field_values = {
        'nx': nx, 'ny': ny, 'nz': nz,
        'mode': mode,
        # MX/MY/MZ (intervals per axis) mirror NX/NY/NZ for a minimal header.
        'mx': nx, 'my': ny, 'mz': nz,
        'cella': cella,
        'mapc': mapc, 'mapr': mapr, 'maps': maps,
        'dmin': dmin, 'dmax': dmax, 'dmean': dmean,
        'ispg': ispg,
        'nsymbt': nsymbt,
        'exttype': exttype,
        'nversion': nversion,
        'origin': origin,
        'map': np.frombuffer(b'MAP ', dtype='S1').copy(),
        'machst': machst,
        'rms': rms,
        'nlabl': nlabl,
        'label': label,
    }

    # --- Build the header --- #
    # Reuse the specification table so that offsets and field order agree
    # with what the readers expect.
    header_fields = header_fields_from_table(
        MRC_2014_SPEC_TABLE, MRC_SPEC_KEYS, MRC_DTYPE_TO_NPY_DTYPE)
    header = OrderedDict()
    for field in header_fields:
        header[field['name']] = {'offset': field['offset'],
                                 'value': field_values[field['name']]}
    return header
|
gf712/AbPyTools | refs/heads/master | tests/features_tests_composition.py | 1 | import unittest
from abpytools import ChainCollection
from abpytools.features.composition import *
class SequenceCompositionTest(unittest.TestCase):
    """Tests for the amino-acid composition feature functions, run against
    a fixed light-chain antibody sequence loaded from a JSON fixture.
    """

    @classmethod
    def setUpClass(cls):
        # Load the fixture once for the whole class; the tests only read
        # from it, so sharing the collection is safe.
        cls.chain = ChainCollection.load_from_file(path='./tests/Data/chain_collection_1_light.json')
        cls.chain.load()
        # All tests operate on the first (and only relevant) sequence.
        cls.sequence = cls.chain.sequences[0]

    def test_chou(self):
        # Spot-check three entries of Chou's pseudo amino acid composition
        # vector for the fixture sequence.
        chou_pseudo_aa = chou_pseudo_aa_composition(self.sequence)
        self.assertAlmostEqual(chou_pseudo_aa[0][10], 1)
        self.assertAlmostEqual(chou_pseudo_aa[0][25], 504)
        self.assertAlmostEqual(chou_pseudo_aa[0][-1], 585.75)

    def test_aa_composition(self):
        # Absolute count of phenylalanine (F) residues.
        composition = aa_composition(self.sequence)
        self.assertEqual(composition['F'], 3)

    def test_aa_frequency(self):
        # Relative frequency of F; tolerance set well below the expected
        # precision of the computation.
        composition = aa_frequency(self.sequence)
        self.assertAlmostEqual(composition['F'], 0.02803738317757, delta=10e-9)

    def test_distance_to_first(self):
        # Position of the first tryptophan (W) relative to sequence start.
        distance = distance_to_first(self.sequence)
        self.assertEqual(distance['W'], 34)

    def test_amino_acid_distribution(self):
        # Distribution combines per-residue counts with first-occurrence
        # distances; spot-check the valine (V) entry.
        composition = aa_composition(self.sequence)
        distance = distance_to_first(self.sequence)
        distribution = aa_distribution(self.sequence, composition, distance)
        self.assertEqual(distribution['V'], 1290.5)

    def test_triad_method(self):
        # The conjoint-triad feature vector has 7^3 = 343 entries.
        triad = triad_method(self.sequence)
        self.assertEqual(len(triad[0]), 343)
        self.assertEqual(triad[0][255], 0.2)
|
jcoady9/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/ctypes/test/test_slicing.py | 65 | import unittest
from ctypes import *
import _ctypes_test
class SlicesTestCase(unittest.TestCase):
    """Verify that ctypes arrays and pointers support list-style slicing."""

    def test_getslice_cint(self):
        # Slice *reads* on a c_int array must match the same reads on a
        # plain list with the same contents, including extended slices.
        a = (c_int * 100)(*range(1100, 1200))
        b = list(range(1100, 1200))
        self.assertEqual(a[0:2], b[0:2])
        self.assertEqual(a[0:2:], b[0:2:])
        self.assertEqual(len(a), len(b))
        self.assertEqual(a[5:7], b[5:7])
        self.assertEqual(a[5:7:], b[5:7:])
        self.assertEqual(a[-1], b[-1])
        self.assertEqual(a[:], b[:])
        self.assertEqual(a[::], b[::])
        self.assertEqual(a[10::-1], b[10::-1])
        self.assertEqual(a[30:20:-1], b[30:20:-1])
        self.assertEqual(a[:12:6], b[:12:6])
        self.assertEqual(a[2:6:4], b[2:6:4])

        # Simple slice assignment from an arbitrary iterable.
        a[0:5] = range(5, 10)
        self.assertEqual(a[0:5], list(range(5, 10)))
        self.assertEqual(a[0:5:], list(range(5, 10)))
        self.assertEqual(a[4::-1], list(range(9, 4, -1)))

    def test_setslice_cint(self):
        # Slice *writes*, including stepped and reversed extended slices.
        a = (c_int * 100)(*range(1100, 1200))
        b = list(range(1100, 1200))
        a[32:47] = list(range(32, 47))
        self.assertEqual(a[32:47], list(range(32, 47)))
        a[32:47] = range(132, 147)
        self.assertEqual(a[32:47:], list(range(132, 147)))
        a[46:31:-1] = range(232, 247)
        self.assertEqual(a[32:47:1], list(range(246, 231, -1)))
        a[32:47] = range(1132, 1147)
        self.assertEqual(a[:], b)
        a[32:47:7] = range(3)
        b[32:47:7] = range(3)
        self.assertEqual(a[:], b)
        a[33::-3] = range(12)
        b[33::-3] = range(12)
        self.assertEqual(a[:], b)

        from operator import setitem

        # Assigning the wrong element type or length must fail cleanly.
        # TypeError: int expected instead of str instance
        self.assertRaises(TypeError, setitem, a, slice(0, 5), "abcde")
        # TypeError: int expected instead of str instance
        self.assertRaises(TypeError, setitem, a, slice(0, 5),
                          ["a", "b", "c", "d", "e"])
        # TypeError: int expected instead of float instance
        self.assertRaises(TypeError, setitem, a, slice(0, 5),
                          [1, 2, 3, 4, 3.14])
        # ValueError: Can only assign sequence of same size
        self.assertRaises(ValueError, setitem, a, slice(0, 5), range(32))

    def test_char_ptr(self):
        s = b"abcdefghijklmnopqrstuvwxyz"

        dll = CDLL(_ctypes_test.__file__)
        dll.my_strdup.restype = POINTER(c_char)
        dll.my_free.restype = None
        res = dll.my_strdup(s)
        # A POINTER(c_char) has no length, so only slices with explicit
        # bounds are legal; unbounded ones must raise ValueError below.
        self.assertEqual(res[:len(s)], s)
        self.assertEqual(res[:3], s[:3])
        self.assertEqual(res[:len(s):], s)
        self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
        self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
        self.assertEqual(res[0:-1:-1], s[0::-1])

        import operator
        self.assertRaises(ValueError, operator.getitem,
                          res, slice(None, None, None))
        self.assertRaises(ValueError, operator.getitem,
                          res, slice(0, None, None))
        self.assertRaises(ValueError, operator.getitem,
                          res, slice(None, 5, -1))
        self.assertRaises(ValueError, operator.getitem,
                          res, slice(-5, None, None))

        self.assertRaises(TypeError, operator.setitem,
                          res, slice(0, 5), "abcde")
        dll.my_free(res)

        # Re-read the same C buffer through a byte pointer: values come
        # back as integers instead of a bytes object.
        dll.my_strdup.restype = POINTER(c_byte)
        res = dll.my_strdup(s)
        self.assertEqual(res[:len(s)], list(range(ord("a"), ord("z")+1)))
        self.assertEqual(res[:len(s):], list(range(ord("a"), ord("z")+1)))
        dll.my_free(res)

    def test_char_ptr_with_free(self):
        dll = CDLL(_ctypes_test.__file__)
        s = b"abcdefghijklmnopqrstuvwxyz"

        class allocated_c_char_p(c_char_p):
            pass

        dll.my_free.restype = None

        def errcheck(result, func, args):
            # Copy the value out, free the C allocation, return the copy.
            retval = result.value
            dll.my_free(result)
            return retval

        dll.my_strdup.restype = allocated_c_char_p
        dll.my_strdup.errcheck = errcheck
        try:
            res = dll.my_strdup(s)
            self.assertEqual(res, s)
        finally:
            del dll.my_strdup.errcheck

    def test_char_array(self):
        s = b"abcdefghijklmnopqrstuvwxyz\0"

        p = (c_char * 27)(*s)
        self.assertEqual(p[:], s)
        self.assertEqual(p[::], s)
        self.assertEqual(p[::-1], s[::-1])
        self.assertEqual(p[5::-2], s[5::-2])
        self.assertEqual(p[2:5:-3], s[2:5:-3])

    try:
        c_wchar
    except NameError:
        pass
    else:
        # Only defined when this build has wide-character support.
        def test_wchar_ptr(self):
            s = "abcdefghijklmnopqrstuvwxyz\0"
            dll = CDLL(_ctypes_test.__file__)
            dll.my_wcsdup.restype = POINTER(c_wchar)
            dll.my_wcsdup.argtypes = POINTER(c_wchar),
            dll.my_free.restype = None
            res = dll.my_wcsdup(s)
            self.assertEqual(res[:len(s)], s)
            self.assertEqual(res[:len(s):], s)
            self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
            self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])

            import operator
            self.assertRaises(TypeError, operator.setitem,
                              res, slice(0, 5), "abcde")
            dll.my_free(res)

            # Re-read the buffer through an integer pointer whose width
            # matches c_wchar on this platform.
            if sizeof(c_wchar) == sizeof(c_short):
                dll.my_wcsdup.restype = POINTER(c_short)
            elif sizeof(c_wchar) == sizeof(c_int):
                dll.my_wcsdup.restype = POINTER(c_int)
            elif sizeof(c_wchar) == sizeof(c_long):
                dll.my_wcsdup.restype = POINTER(c_long)
            else:
                return
            res = dll.my_wcsdup(s)
            tmpl = list(range(ord("a"), ord("z")+1))
            self.assertEqual(res[:len(s)-1], tmpl)
            self.assertEqual(res[:len(s)-1:], tmpl)
            self.assertEqual(res[len(s)-2:-1:-1], tmpl[::-1])
            self.assertEqual(res[len(s)-2:5:-7], tmpl[:5:-7])
            dll.my_free(res)
################################################################

# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
Eureka22/ASM_xf | refs/heads/master | PythonD/lib/python2.4/test/test_exceptions.py | 8 | # Python test set -- part 5, built-in exceptions
from test.test_support import TestFailed, TESTFN, unlink
from types import ClassType
import warnings
import sys, traceback, os
print '5. Built-in exceptions'
# XXX This is not really enough, each *operation* should be tested!

# Reloading the built-in exceptions module failed prior to Py2.2, while it
# should act the same as reloading built-in sys.
try:
    import exceptions
    reload(exceptions)
except ImportError, e:
    raise TestFailed, e
def test_raise_catch(exc):
    # Raise *exc* both ways Python 2 allows -- the "raise exc, value"
    # form and the instance form -- catch it, and print its str() so the
    # golden-output file can match it.
    try:
        raise exc, "spam"
    except exc, err:
        buf = str(err)
    try:
        raise exc("spam")
    except exc, err:
        buf = str(err)
    print buf
def r(thing):
    # Exercise *thing* through test_raise_catch, then print its name
    # (classic classes expose it via __name__).
    test_raise_catch(thing)
    if isinstance(thing, ClassType):
        print thing.__name__
    else:
        print thing
# Trigger each built-in exception once with a representative operation.
r(AttributeError)
import sys
try: x = sys.undefined_attribute
except AttributeError: pass

r(EOFError)
import sys
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
# Temporarily feed stdin from an empty file so raw_input() hits EOF.
try:
    try:
        sys.stdin = fp
        x = raw_input()
    except EOFError:
        pass
finally:
    sys.stdin = savestdin
    fp.close()

r(IOError)
try: open('this file does not exist', 'r')
except IOError: pass

r(ImportError)
try: import undefined_module
except ImportError: pass

r(IndexError)
x = []
try: a = x[10]
except IndexError: pass

r(KeyError)
x = {}
try: a = x['key']
except KeyError: pass

r(KeyboardInterrupt)
print '(not testable in a script)'

r(MemoryError)
print '(not safe to test)'

r(NameError)
try: x = undefined_variable
except NameError: pass

r(OverflowError)
# XXX
# Obscure: in 2.2 and 2.3, this test relied on changing OverflowWarning
# into an error, in order to trigger OverflowError.  In 2.4, OverflowWarning
# should no longer be generated, so the focus of the test shifts to showing
# that OverflowError *isn't* generated.  OverflowWarning should be gone
# in Python 2.5, and then the filterwarnings() call, and this comment,
# should go away.
warnings.filterwarnings("error", "", OverflowWarning, __name__)
x = 1
for dummy in range(128):
    x += x  # this simply shouldn't blow up

r(RuntimeError)
print '(not used any more?)'

r(SyntaxError)
try: exec '/\n'
except SyntaxError: pass

# make sure the right exception message is raised for each of these
# code fragments:
def ckmsg(src, msg):
    # Compile *src* and check that the resulting SyntaxError carries
    # exactly the message *msg*.
    try:
        compile(src, '<fragment>', 'exec')
    except SyntaxError, e:
        print e.msg
        if e.msg == msg:
            print "ok"
        else:
            print "expected:", msg
    else:
        print "failed to get expected SyntaxError"
# 'continue' inside a 'finally' clause is rejected at compile time.
s = '''\
while 1:
    try:
        pass
    finally:
        continue
'''
if sys.platform.startswith('java'):
    # Jython rejects this at a different stage, so just echo the
    # expected output directly.
    print "'continue' not supported inside 'finally' clause"
    print "ok"
else:
    ckmsg(s, "'continue' not supported inside 'finally' clause")
s = '''\
try:
    continue
except:
    pass
'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")

r(IndentationError)

r(TabError)
# can only be tested under -tt, and is the only test for -tt
#try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n", '<string>', 'exec')
#except TabError: pass
#else: raise TestFailed

r(SystemError)
print '(hard to reproduce)'

r(SystemExit)
import sys
try: sys.exit(0)
except SystemExit: pass

r(TypeError)
try: [] + ()
except TypeError: pass

r(ValueError)
try: x = chr(10000)
except ValueError: pass

r(ZeroDivisionError)
try: x = 1/0
except ZeroDivisionError: pass

r(Exception)
try: x = 1/0
except Exception, e: pass

# test that setting an exception at the C level works even if the
# exception object can't be constructed.
class BadException:
    # Deliberately unconstructible exception class: instantiating it
    # raises, so C code that sets it as the current exception must cope.
    def __init__(self):
        raise RuntimeError, "can't instantiate BadException"
def test_capi1():
    # raise_exception(BadException, 1) instantiates BadException with an
    # argument; construction fails, so a TypeError must surface here,
    # with this frame at the top of the traceback.
    import _testcapi
    try:
        _testcapi.raise_exception(BadException, 1)
    except TypeError, err:
        exc, err, tb = sys.exc_info()
        co = tb.tb_frame.f_code
        assert co.co_name == "test_capi1"
        assert co.co_filename.endswith('test_exceptions'+os.extsep+'py')
    else:
        print "Expected exception"
def test_capi2():
    # With 0 arguments the RuntimeError from BadException.__init__ itself
    # propagates; the traceback's top frame is __init__ and its caller
    # frame is this function.
    import _testcapi
    try:
        _testcapi.raise_exception(BadException, 0)
    except RuntimeError, err:
        exc, err, tb = sys.exc_info()
        co = tb.tb_frame.f_code
        assert co.co_name == "__init__"
        assert co.co_filename.endswith('test_exceptions'+os.extsep+'py')

        co2 = tb.tb_frame.f_back.f_code
        assert co2.co_name == "test_capi2"
    else:
        print "Expected exception"
# The _testcapi module is CPython-specific, so skip the C API checks
# when running under Jython.
if not sys.platform.startswith('java'):
    test_capi1()
    test_capi2()

unlink(TESTFN)
|
ruzhytskyi/Koans | refs/heads/master | python3/koans/about_extra_credit.py | 130 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# EXTRA CREDIT:
#
# Create a program that will play the Greed Game.
# Rules for the game are in GREED_RULES.TXT.
#
# You already have a DiceSet class and score function you can use.
# Write a player class and a Game class to complete the project. This
# is a free form assignment, so approach it however you desire.
from runner.koan import *
class AboutExtraCredit(Koan):
    # Write tests here. If you need extra test classes add them to the
    # test suite in runner/path_to_enlightenment.py
    def test_extra_credit_task(self):
        # Intentionally empty: this koan is a free-form exercise for the
        # student to implement (see GREED_RULES.TXT).
        pass
|
javrasya/python-indent-parser | refs/heads/master | setup.py | 1 | __author__ = 'ahmetdal'
from setuptools import setup
# Best-effort read of the README for the package long description;
# fall back to an empty string when the file is missing.
try:
    long_description = open('README.md').read()
except IOError:
    long_description = ''
setup(
name='python-indent-parser',
version='1.0.0',
description='Python Indent Parser',
author='Ahmet DAL',
author_email='ceahmetdal@gmail.com',
url='https://github.com/javrasya/python-indent-parser',
keywords=["python", "indent", "indent_parser"],
install_requires=[
],
packages=['src', 'resources'],
include_package_data=True,
zip_safe=False,
license='GPL',
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7'
]
) |
akimboio/sleepy | refs/heads/master | sleepy/base.py | 1 | """
Sleepy
A RESTful API framework built on top of Django that simplifies common RESTful
idioms. Originally created at akimbo.
:author: Adam Haney
:contact: adam.haney@akimbo.io
:license: (c) 2013 Akimbo
"""
__author__ = "Adam Haney"
__license__ = "Copyright (c) 2013 Akimbo"

# HTTP methods that must never modify server state; the writable set
# extends it with the mutating verbs.
HTTP_READ_ONLY_METHODS = ['GET', 'HEAD', 'OPTIONS']
HTTP_METHODS = HTTP_READ_ONLY_METHODS + ['POST', 'PUT', 'DELETE']

import json

import django.http
from django.conf import settings

from responses import api_error

# CORS configuration, overridable from the Django settings module;
# the defaults allow any origin.
CORS_SHARING_ALLOWED_ORIGINS = getattr(
    settings,
    'CORS_SHARING_ALLOWED_ORIGINS',
    ['*']
)

CORS_SHARING_ALLOWED_METHODS = getattr(
    settings,
    'CORS_SHARING_ALLOWED_METHODS',
    ['GET', 'POST', 'PUT', 'DELETE']
)

CORS_SHARING_ALLOWED_HEADERS = getattr(
    settings,
    'CORS_SHARING_ALLOWED_HEADERS',
    ['Content-type', 'Authorization']
)
class Base:
    """
    The base class all http handlers will inherit from. This functor
    like object handles the scaffolding of __call__ requests to make
    sure if a GET, POST, PUT or DELETE request is made that it is
    routed to the appropriate method in a child class or an error is
    thrown. It also provides the functions used to output django
    responses in json format.
    """

    def __init__(self):
        # SLEEPY_READ_ONLY is an optional Django setting; when absent
        # the API accepts writes.
        try:
            self.read_only = settings.SLEEPY_READ_ONLY
        except AttributeError:
            self.read_only = False

    def _origin_is_allowed(self, origin):
        """
        This helper method validates the url given to us in an 'Origin:'
        request header.
        """
        if ('*' in CORS_SHARING_ALLOWED_ORIGINS or
            origin in CORS_SHARING_ALLOWED_ORIGINS):
            return True
        return False

    def __call__(self, request, *args, **kwargs):
        """
        Dispatch *request* to the handler method named after its HTTP
        verb (GET/POST/...), applying read-only enforcement, CORS
        handling, and optional HTML pretty-printing of JSON responses.
        """
        # Add the request user to the class, this allows certain django
        # decorators to work
        if hasattr(request, 'user'):
            self.user = request.user

        # Check if we're in read only mode
        if (self.read_only is True
            and request.method not in HTTP_READ_ONLY_METHODS):
            return api_error("the API is in read only mode for maintenance")

        if request.method == "PUT":
            # Django does not populate a PUT dict, so parse the raw body.
            query_dict = django.http.QueryDict(request.body)
            request.PUT = {
                k: v
                for k, v
                in query_dict.items()}
            kwargs.update(request.PUT)

        # Add request parameters to kwargs
        kwargs.update(request.REQUEST)

        # Build our response object
        response = django.http.HttpResponse()

        # See if we have an 'Origin:' header in the request. If so, this is
        # a CORS (cross-origin resource sharing) request.
        # See http://enable-cors.org/
        origin_is_allowed = False
        if 'HTTP_ORIGIN' in request.META:
            # Make sure the given origin is allowed
            if not self._origin_is_allowed(request.META['HTTP_ORIGIN']):
                # If the origin is not allowed to make the request then we
                # return an empty 200 response. This will make the cross
                # origin request fail on the client side.
                return response
            origin_is_allowed = True

        # If we had an 'Origin:' header with a valid origin and the request
        # used the OPTIONS method, then we'll add the proper Access-Control
        # headers to the response.
        if origin_is_allowed and request.method == 'OPTIONS':
            response['Access-Control-Allow-Origin'] = (
                request.META['HTTP_ORIGIN']
            )
            response['Access-Control-Allow-Methods'] = (
                ",".join(CORS_SHARING_ALLOWED_METHODS)
            )
            response['Access-Control-Allow-Headers'] = (
                ",".join(CORS_SHARING_ALLOWED_HEADERS)
            )
            # Allows cross-origin cookie access
            response['Access-Control-Allow-Credentials'] = 'true'
            # Allow the client to cache the pre-flight response for up to a day
            response['Access-Control-Max-Age'] = 86400
            return response

        if hasattr(self, request.method):
            response = getattr(self, request.method)(request, *args, **kwargs)
            # Explicitly check here because errors further down are
            # harder to diagnose.
            # BUG FIX: the original tested `type(response) is None`, which
            # is never true (type() always returns a type object); the
            # return value itself must be compared against None.
            if response is None:
                raise TypeError(
                    "{0} returned None, should have returned a response object".format(
                        request.method
                    )
                )
        # Use introspection to handle HEAD requests
        elif request.method == 'HEAD' and hasattr(self, 'GET'):
            response = self.GET(request, *args, **kwargs)
            response.content = ""
        else:
            response = api_error(
                "Resource does not support {0} for this method".format(
                    request.method
                )
            )
            response.status_code = 405

        # if suppress_response_codes is set make all response codes 200
        if "suppress_response_codes" in request.REQUEST:
            response.status_code = 200

        # If we are responding to a valid CORS request we must add the
        # Access-Control-Allow-Origin header
        if origin_is_allowed:
            response['Access-Control-Allow-Origin'] = request.META['HTTP_ORIGIN']
            response['Access-Control-Allow-Credentials'] = 'true'

        # At this point if we have a json response and a param of format
        # with the value of html, convert the response to an html response
        # with the content in the body of the page
        if request.REQUEST.get("format") == "html" and response['Content-Type'] == "application/json":
            json_formatted = json.dumps(json.loads(response.content), indent=4)
            response = django.http.HttpResponse("<html><body><pre>{0}</pre></body></html>".format(
                json_formatted,
            ))

        # Return the response
        return response
|
anomen-s/programming-challenges | refs/heads/master | gopas.cz/python_adv/1_3_variables.py | 1 | print('-------- variable scope')
a = 10


def func1():
    # Reads the module-level ``a`` through the global scope.
    print(a)
    # cannot switch to other variable from other scope
    # FAILS: a = 100
    b = 100  # local only; demonstrates that assignment creates a local


def func2():
    # ``global`` lets this function rebind the module-level ``a``.
    global a
    print(a)
    a = 100


# Demo: func1 only reads the global; func2 rebinds it to 100.
print(a)
func1()
print(a)
func2()
print(a)
print('-------- default parameters')


def funkce2(a, b=10, c=[]):
    # c is initialized only once, so changes to c are visible by subsequent calls
    # (the classic mutable-default-argument behaviour this demo illustrates).
    c.append(a)
    print(a + b + len(c))


# Each call grows the shared default list, so the printed sum keeps rising.
funkce2(1, 2)
funkce2(10, 2)
funkce2(20, 2)
funkce2(31, 2)
print(funkce2.__defaults__)
print('-------- variable parameters')


def funcVarList(a, *b):
    # Extra positional arguments are collected into the tuple ``b``.
    print(a)
    print(b)


funcVarList(1, 2, 3, 4)


def funcVarDict(a, **b):
    # Extra keyword arguments are collected into the dict ``b``.
    print(a)
    print(b)


funcVarDict('keywords args 1', x1=2, x4=3, y5=4)

# A dict can be unpacked into keyword arguments with **.
D = {'a1': '4', 'y5': '5'}
funcVarDict('keywords args 2', **D)
print('-------- hash (dict keys)')
print(hash((3, 4)))
# list is not hashable
# NOTE(review): the next line raises TypeError and terminates the script;
# presumably intentional, to demonstrate the failure -- confirm.
print(hash([3, 4]))
|
paleolimbot/qosm | refs/heads/master | qosmpy/qosmlogging.py | 1 | '''
Created on Jan 5, 2016
@author: dewey
'''
import os
import time

# Log files live one directory above this package: one current log and
# one rotated "previous" log.
FILE = os.path.join(os.path.dirname(__file__), "../qosm.log.txt")
PREVIOUS_FILE = os.path.join(os.path.dirname(__file__), "../qosm.previous.log.txt")

# Logging stays disabled until initialize_logging() flips this flag.
_yesdoitlogme = False
def initialize_logging():
    """Enable logging and rotate the log files.

    Deletes the stale "previous" log, moves the current log into the
    "previous" slot, and starts a fresh log stamped with the current time.
    """
    global _yesdoitlogme
    _yesdoitlogme = True
    if os.path.isfile(PREVIOUS_FILE):
        os.unlink(PREVIOUS_FILE)
    if os.path.isfile(FILE):
        os.rename(FILE, PREVIOUS_FILE)
    # Use a context manager so the handle is closed even if the write fails.
    with open(FILE, "w") as f:
        f.write(time.strftime("%c") + ": Logging started\n")
def log(message):
    """Append *message* to the log file, prefixed with a timestamp.

    No-op until initialize_logging() has been called. I/O errors are
    deliberately swallowed: logging must never crash the application.
    """
    if not _yesdoitlogme:
        return
    try:
        # The context manager guarantees the handle is closed even when
        # a write fails partway through.
        with open(FILE, "a") as f:
            f.write(time.strftime("%c") + ": ")
            f.write(message)
            f.write("\n")
    except IOError:
        pass  # silently fail on logging error
codecollision/DropboxToFlickr | refs/heads/master | django/views/decorators/vary.py | 307 | try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
    """
    Decorate a view so that *headers* are appended to the ``Vary`` header
    of its response. Usage::

        @vary_on_headers('Cookie', 'Accept-language')
        def index(request):
            ...

    Header names are not case-sensitive.
    """
    def _decorator(view):
        def _wrapped(*args, **kwargs):
            resp = view(*args, **kwargs)
            patch_vary_headers(resp, headers)
            return resp
        wrapper = wraps(view, assigned=available_attrs(view))
        return wrapper(_wrapped)
    return _decorator
def vary_on_cookie(func):
    """
    Decorate a view so that ``Cookie`` is appended to the ``Vary`` header
    of its response, marking the page's contents as cookie-dependent::

        @vary_on_cookie
        def index(request):
            ...
    """
    def _wrapped(*args, **kwargs):
        resp = func(*args, **kwargs)
        patch_vary_headers(resp, ('Cookie',))
        return resp
    return wraps(func, assigned=available_attrs(func))(_wrapped)
|
hsorby/neon | refs/heads/develop | src/opencmiss/neon/ui/zincwidgets/fieldconditions.py | 3 | '''
Copyright 2016 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from math import *
from opencmiss.zinc.field import Field
def FieldIsRealValued(field):
    '''
    Conditional function returning true if the field has real values
    '''
    value_type = field.getValueType()
    return value_type == Field.VALUE_TYPE_REAL
def FieldIsScalar(field):
    '''
    Conditional function returning true if the field is real with 1 component
    '''
    if field.getValueType() != Field.VALUE_TYPE_REAL:
        return False
    return field.getNumberOfComponents() == 1
def FieldIsCoordinateCapable(field):
    '''
    Conditional function returning true if the field can be used as a
    coordinate field, i.e. is real valued with up to 3 components
    '''
    if field.getValueType() != Field.VALUE_TYPE_REAL:
        return False
    return field.getNumberOfComponents() <= 3
def FieldIsOrientationScaleCapable(field):
    '''
    Conditional function returning true if the field can be used to orient or
    scale glyphs. Valid component counts and their meaning:
    1 = scalar (no vector, isotropic scaling);
    2 = one 2-D vector (2nd axis is normal in plane, 3rd is out of 2-D plane);
    3 = one 3-D vector (orthogonal 2nd and 3rd axes are arbitrarily chosen);
    4 = two 2-D vectors (3rd axis taken as out of 2-D plane);
    6 = two 3-D vectors (3rd axis found from cross product);
    9 = three 3-D vectors = complete definition of 3 axes.
    '''
    if field.getValueType() != Field.VALUE_TYPE_REAL:
        return False
    return field.getNumberOfComponents() in (1, 2, 3, 4, 6, 9)
def FieldIsStreamVectorCapable(field):
    '''
    Conditional function returning true if the field can be used as a
    streamline stream vector field.
    For a 3-D domain with a 3-D coordinate field: 3, 6 or 9 components;
    extra components set the lateral axes for extruded profiles.
    For a 2-D domain the stream vector may have 2 components.
    '''
    if field.getValueType() != Field.VALUE_TYPE_REAL:
        return False
    return field.getNumberOfComponents() in (2, 3, 6, 9)
def FieldIsRCAndThreeComponents(field):
    '''
    Conditional function returning true if the field is in the rectangular
    cartesian coordinate system and has exactly 3 components.
    '''
    # BUG FIX: the original compared the bound method object itself
    # (field.getNumberOfComponents == 3), which is always False; the
    # method must be *called* to obtain the component count.
    return (field.getCoordinateSystemType() == Field.COORDINATE_SYSTEM_TYPE_RECTANGULAR_CARTESIAN) and \
        (field.getNumberOfComponents() == 3)
def FieldIsRCAndCoordinateCapable(field):
    '''
    Conditional function returning true if the field is rectangular cartesian
    and coordinate-capable.
    '''
    if field.getCoordinateSystemType() != Field.COORDINATE_SYSTEM_TYPE_RECTANGULAR_CARTESIAN:
        return False
    return FieldIsCoordinateCapable(field)
def FieldIsMeshLocation(field):
    '''
    Conditional function returning true if the field is mesh location
    '''
    value_type = field.getValueType()
    return value_type == Field.VALUE_TYPE_MESH_LOCATION
def FieldIsEigenvalues(field):
    '''
    Conditional function returning true if the field casts to an
    eigenvalues field.
    '''
    return field.castEigenvalues().isValid()
def FieldIsFiniteElement(field):
    '''
    Conditional function returning true if the field casts to a
    finite element field.
    '''
    return field.castFiniteElement().isValid()
def FieldIsSquareMatrix(field):
    '''
    Conditional function returning true if the field is real valued and its
    component count is a perfect square greater than 1 (i.e. a square matrix).
    '''
    component_count = field.getNumberOfComponents()
    if component_count <= 1:
        return False
    root = sqrt(component_count)
    if floor(root) == root:
        return field.getValueType() == Field.VALUE_TYPE_REAL
    return False
def FieldIsDeterminantEligible(field):
    '''
    Conditional function returning true if a determinant can be taken of the
    field: real valued, with 4 (2x2) or 9 (3x3) components.
    '''
    if field.getValueType() != Field.VALUE_TYPE_REAL:
        return False
    return field.getNumberOfComponents() in (4, 9)
|
arnehilmann/yumrepos | refs/heads/master | setup.py | 6 | #!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script allows to support installation via:
# pip install git+git://<project>@<branch>
#
# This script is designed to be used in combination with `pip install` ONLY
#
# DO NOT RUN MANUALLY
#
import os
import subprocess
import sys
import glob
import shutil
from sys import version_info
# Absolute directory containing this setup.py; all artifact moves and the
# final nested setup.py invocation are rooted here.
script_dir = os.path.dirname(os.path.realpath(__file__))

exit_code = 0

# Make sure PyBuilder is available: probe `pyb --version` and install it
# on demand when the executable is missing.
try:
    subprocess.check_call(["pyb", "--version"])
except FileNotFoundError as e:
    try:
        subprocess.check_call([sys.executable, "-m", "pip.__main__", "install", "pybuilder"])
    except subprocess.CalledProcessError as e:
        sys.exit(e.returncode)
except subprocess.CalledProcessError as e:
    sys.exit(e.returncode)

try:
    # Build the distribution with PyBuilder, then move its output next to
    # this script so the generated setup.py can run from here.
    subprocess.check_call(["pyb", "clean", "install_build_dependencies", "package", "-o"])
    dist_dir = glob.glob(os.path.join(script_dir, "target", "dist", "*"))[0]
    for src_file in glob.glob(os.path.join(dist_dir, "*")):
        file_name = os.path.basename(src_file)
        target_file_name = os.path.join(script_dir, file_name)
        if os.path.exists(target_file_name):
            if os.path.isdir(target_file_name):
                # BUG FIX: os.removedirs() only removes *empty* directories
                # (and then prunes empty parents); a previously copied
                # package directory has contents, so remove it recursively.
                shutil.rmtree(target_file_name)
            else:
                os.remove(target_file_name)
        shutil.move(src_file, script_dir)

    # Re-run the PyBuilder-generated setup.py with the original arguments.
    setup_args = sys.argv[1:]
    subprocess.check_call([sys.executable, "setup.py"] + setup_args, cwd=script_dir)
except subprocess.CalledProcessError as e:
    exit_code = e.returncode

sys.exit(exit_code)
|
AlexTMjugador/mtasa-blue | refs/heads/master | vendor/google-breakpad/src/tools/gyp/test/subdirectory/gyptest-subdir2-deep.py | 501 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a project rooted several layers under src_dir works.
"""
import TestGyp

test = TestGyp.TestGyp()

# Generate build files for a project rooted two directories below src.
test.run_gyp('prog3.gyp', chdir='src/subdir/subdir2')

# Relocating ensures the generated files don't depend on absolute paths.
test.relocate('src', 'relocate/src')

test.build('prog3.gyp', test.ALL, chdir='relocate/src/subdir/subdir2')

test.run_built_executable('prog3',
                          chdir='relocate/src/subdir/subdir2',
                          stdout="Hello from prog3.c\n")

test.pass_test()
|
prakashmishra1598/gdeploy | refs/heads/3.0 | gdeployfeatures/peer/peer.py | 1 | """
Add functions corresponding to each of the actions in the json file.
The function should be named as follows <feature name>_<action_name>
"""
from gdeploylib import defaults, Global
def peer_probe(section_dict):
    # Build the de-duplicated, sorted list of hosts to probe (section
    # hosts plus brick hosts) and hand off to the probe playbook.
    if not check_for_host_names():
        return False, False
    Global.ignore_errors = section_dict.get('ignore_peer_errors')
    to_be_probed = Global.hosts + Global.brick_hosts
    # Sorting makes the probe order deterministic regardless of config order.
    to_be_probed = sorted(set(to_be_probed))
    section_dict['to_be_probed'] = to_be_probed
    Global.logger.info("Peer probing hosts %s"%to_be_probed)
    return section_dict, defaults.PROBE_YML
def peer_detach(section_dict):
    # Detach every configured host via the detach playbook.
    Global.ignore_errors = section_dict.get('ignore_peer_errors')
    section_dict['hosts'] = Global.hosts
    if not check_for_host_names():
        return False, False
    # De-duplicate purely for the log message below.
    p = list(set(Global.hosts))
    Global.logger.info("Detaching peers %s"%p)
    return section_dict, defaults.DETACH_YML
def peer_ignore(section_dict):
    # 'ignore' action: nothing to run, so return no playbook name.
    return section_dict, ''
def check_for_host_names():
    # Validate that the section listed at least one host; print and log
    # the problem otherwise so the peer section can be skipped.
    if not Global.hosts:
        msg = "Although peer manage option is provided, " \
              "no hosts are provided in the section. " \
              "Skipping section `peer"
        print "Error: " + msg
        Global.logger.error(msg)
        return False
    return True
|
cloud9UG/odoo | refs/heads/8.0 | addons/warning/__init__.py | 446 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import warning
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jadonk/debexpo | refs/heads/master | debexpo/controllers/my.py | 2 | # -*- coding: utf-8 -*-
#
# my.py — My Controller
#
# This file is part of debexpo - https://alioth.debian.org/projects/debexpo/
#
# Copyright © 2008 Jonny Lamb <jonny@debian.org>
# Copyright © 2010 Jan Dittberner <jandd@debian.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Holds the MyController.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb, Copyright © 2010 Jan Dittberner'
__license__ = 'MIT'
import logging
import tempfile
from debexpo.lib.base import *
from debexpo.lib import constants, form
from debexpo.lib.schemas import DetailsForm, GpgForm, PasswordForm, OtherDetailsForm, MetricsForm, DmupForm
from debexpo.lib.utils import get_gnupg
from debexpo.model import meta
from debexpo.model.users import User
from debexpo.model.user_countries import UserCountry
from debexpo.model.sponsor_metrics import SponsorMetrics, SponsorMetricsTags, SponsorTags
from debexpo.model.data_store import DataStore
from sqlalchemy.orm import joinedload
import debexpo.lib.utils
log = logging.getLogger(__name__)
class MyController(BaseController):
"""
Controller for handling /my.
"""
requires_auth = True
    def __init__(self):
        """
        Class constructor. Sets common class and template attributes.
        """
        # Expose the Pylons config to templates through the context object.
        c.config = config
        self.user = None
        # GnuPG wrapper used for importing/removing user keys.
        self.gnupg = get_gnupg()
    def _details(self):
        """
        Handles a user submitting the details form.

        Validates name/email against DetailsForm, saves them on the
        current user, and redirects back to /my; on validation failure
        the page is re-rendered with the errors filled in.
        """
        log.debug('Validating details form')

        try:
            fields = form.validate(DetailsForm, user_id=self.user.id)
        except Exception, e:
            log.error('Failed validation')
            # Re-render the page with the validation errors filled in.
            return form.htmlfill(self.index(get=True), e)

        log.debug('Validation successful')

        self.user.name = fields['name']
        self.user.email = fields['email']

        meta.session.commit()
        log.debug('Saved name and email and redirecting')

        redirect(url('my'))
    @validate(schema=GpgForm(), form='index')
    def _gpg(self):
        """
        Handles a user submitting the GPG form.

        Deletes and/or replaces the user's stored GPG key, keeping the
        server-side keyring in sync with the database record.
        """
        log.debug('GPG form validated successfully')

        # Should the key be deleted?
        if self.form_result['delete_gpg'] and self.user.gpg is not None:
            keyid = self.user.gpg_id
            log.debug('Deleting current GPG key %s' % (keyid))
            result = self.gnupg.remove_signature(keyid)
            if not result.success:
                log.error("gpg failed to delete keyring: %s" % (result.out))
                abort(500)
            self.user.gpg = None
            self.user.gpg_id = None

        # Should the key be updated.
        if 'gpg' in self.form_result and self.form_result['gpg'] is not None:
            log.debug('Setting a new GPG key')
            # The uploaded key block is stored verbatim; its key id is
            # parsed out and the key is imported into the keyring.
            self.user.gpg = self.form_result['gpg'].value
            (key, uids) = self.gnupg.parse_key_block(self.user.gpg)
            self.user.gpg_id = key.id
            result = self.gnupg.add_signature(data=self.user.gpg)
            log.debug(result.out)
            if result.code != 0:
                log.error("gpg failed to import keyring: %s" % (result.err))
                abort(500)

        meta.session.commit()
        log.debug('Saved key changes and redirecting')

        redirect(url('my'))
    @validate(schema=PasswordForm(), form='index')
    def _password(self):
        """
        Handles a user submitting the password form.
        """
        log.debug('Password form validated successfully')

        # Simply set password.
        # NOTE(review): presumably PasswordForm has already checked the old
        # password and that both new-password fields match -- verify in
        # debexpo.lib.schemas.
        self.user.password = debexpo.lib.utils.hash_it(self.form_result['password_new'])
        meta.session.commit()
        log.debug('Saved new password and redirecting')

        redirect(url('my'))
@validate(schema=OtherDetailsForm(), form='index')
def _other_details(self):
    """
    Handles a user submitting the other details form.

    Saves the country, IRC nick and Jabber address, and (on
    Debian-specific setups) the maintainer/normal status toggle.
    """
    log.debug('Other details form validated successfully')
    # A country ID of -1 means the country shouldn't be set.
    if self.form_result['country'] == -1:
        self.user.country = None
    else:
        self.user.country_id = self.form_result['country']
    self.user.ircnick = self.form_result['ircnick']
    self.user.jabber = self.form_result['jabber']
    # Only set these values if the checkbox was shown in the form.
    if config['debexpo.debian_specific'] == 'true':
        # Developers keep their status; only non-developers may toggle
        # between maintainer and normal.
        if self.user.status != constants.USER_STATUS_DEVELOPER:
            if self.form_result['status']:
                self.user.status = constants.USER_STATUS_MAINTAINER
            else:
                self.user.status = constants.USER_STATUS_NORMAL
    meta.session.commit()
    log.debug('Saved other details and redirecting')
    redirect(url('my'))
@validate(schema=MetricsForm(), form='index')
def _metrics(self):
    """
    Handles a user submitting the metrics form.

    Stores the sponsor's contact preference, accepted package types,
    guideline settings and per-tag weights for the logged-in user,
    then redirects to the "my" page.
    """
    log.debug('Metrics form validated successfully')
    # Require an authenticated session; remember where to return to
    # after login.
    if 'user_id' not in session:
        log.debug('Requires authentication')
        session['path_before_login'] = request.path_info
        session.save()
        redirect(url('login'))
    sm = SponsorMetrics(user_id=session['user_id'])
    sm.contact = int(self.form_result['preferred_contact_method'])
    #XXX TODO: WTF?! Find out why on earth package_types is no string
    sm.types = str(self.form_result['package_types'])
    sm.guidelines_text = self.form_result['packaging_guideline_text']
    sm.social_requirements = self.form_result['social_requirements']
    sm.availability = self.form_result['availability']
    # Normalise the guidelines choice to one of the known constants,
    # defaulting to NONE for anything unrecognised.
    if self.form_result['packaging_guidelines'] == constants.SPONSOR_GUIDELINES_TYPE_URL:
        sm.guidelines = constants.SPONSOR_GUIDELINES_TYPE_URL
    elif self.form_result['packaging_guidelines'] == constants.SPONSOR_GUIDELINES_TYPE_TEXT:
        sm.guidelines = constants.SPONSOR_GUIDELINES_TYPE_TEXT
    else:
        sm.guidelines = constants.SPONSOR_GUIDELINES_TYPE_NONE
    # Record a weight for every known tag that appeared in the form.
    for tag in meta.session.query(SponsorTags).all():
        if tag.tag in self.form_result:
            log.debug("Weighten tag %s to %s" % (tag.tag, self.form_result[tag.tag]))
            metrics = SponsorMetricsTags(tag=tag.tag, user_id=session['user_id'], weight=self.form_result[tag.tag])
            sm.tags.append(metrics)
    # merge(): update the existing metrics row if present, else insert.
    meta.session.merge(sm)
    meta.session.commit()
    redirect(url('my'))
@validate(schema=DmupForm(), form='index')
def _dmup(self):
    """
    Handles a user submitting the DMUP form.

    Marks the DMUP as accepted by the current user.
    NOTE(review): there is no code path here to revoke acceptance --
    confirm that is intentional.
    """
    log.debug('DMUP acceptance form validated successfully')
    # set the new value to the 'dmup' boolean in the User object
    self.user.dmup = True
    meta.session.commit()
    log.debug('Changed DMUP acceptance status and redirecting')
    redirect(url('my'))
def index(self, get=False):
    """
    Controller entry point. Displays forms to change user details.

    ``get``
        Whether to ignore request.method and assume it's a GET. This is useful
        for validators to re-display the form if there's something wrong.
    """
    # Get User object.
    log.debug('Getting user object for user_id = "%s"' % session['user_id'])
    self.user = meta.session.query(User).get(session['user_id'])
    if self.user is None:
        # Cannot find user from user_id.
        log.debug('Cannot find user from user_id')
        redirect(url('login'))
    log.debug('User object successfully selected')
    # A form has been submit.
    if request.method == 'POST' and get is False:
        log.debug('A form has been submit')
        # FIX: request.params['form'] was previously read both inside the
        # try block and again in the "except KeyError" handler's log line,
        # so a POST with no "form" parameter re-raised KeyError from the
        # handler itself.  Using .get() makes the fallback to the main
        # page reliable.
        form_name = request.params.get('form')
        handler = { 'details' : self._details,
                    'gpg' : self._gpg,
                    'password' : self._password,
                    'other_details' : self._other_details,
                    'metrics' : self._metrics,
                    'dmup' : self._dmup,
                    }.get(form_name)
        if handler is not None:
            return handler()
        log.error('Could not find form name "%s"; defaulting to main page' % (form_name))
    log.debug('Populating template context')
    # The template will need to look at the user details.
    c.user = self.user
    # Create the countries values.
    countries = { -1: '' }
    for country in meta.session.query(UserCountry).all():
        countries[country.id] = country.name
    c.countries = countries
    if self.user.country is None:
        c.current_country = -1
    else:
        c.current_country = self.user.country.id
    # Toggle whether Debian developer/maintainer forms should be shown.
    if self.user.status == constants.USER_STATUS_DEVELOPER:
        c.debian_developer = True
        c.debian_maintainer = False
    else:
        c.debian_developer = False
        if self.user.status == constants.USER_STATUS_MAINTAINER:
            c.debian_maintainer = True
        else:
            c.debian_maintainer = False
    # Enable the form to show information on the user's GPG key.
    if self.user.gpg is not None:
        c.currentgpg = c.user.gpg_id
    else:
        c.currentgpg = None
    if self.user.status == constants.USER_STATUS_DEVELOPER:
        # Fill in various sponsor metrics
        c.constants = constants
        c.contact_methods = [
            (constants.SPONSOR_CONTACT_METHOD_NONE, _('None')),
            (constants.SPONSOR_CONTACT_METHOD_EMAIL, _('Email')),
            (constants.SPONSOR_CONTACT_METHOD_IRC, _('IRC')),
            (constants.SPONSOR_CONTACT_METHOD_JABBER, _('Jabber')),
        ]
        c.metrics = meta.session.query(SponsorMetrics)\
            .options(joinedload(SponsorMetrics.user))\
            .options(joinedload(SponsorMetrics.tags))\
            .filter_by(user_id = session['user_id'])\
            .first()
        c.technical_tags = meta.session.query(SponsorTags).filter_by(tag_type=constants.SPONSOR_METRICS_TYPE_TECHNICAL).all()
        c.social_tags = meta.session.query(SponsorTags).filter_by(tag_type=constants.SPONSOR_METRICS_TYPE_SOCIAL).all()
        if not c.metrics:
            # Set some sane defaults
            log.debug("Generating new defaults for sponsor metrics")
            c.metrics = SponsorMetrics()
            c.metrics.availability = constants.SPONSOR_METRICS_PRIVATE
            c.metrics.guidelines = constants.SPONSOR_GUIDELINES_TYPE_NONE
    # Enable the form to show the current DMUP acceptance status
    c.current_dmup = self.user.dmup
    log.debug('Rendering page')
    return render('/my/index.mako')
def download_dmup(self):
    """
    Serves a file containing the agreement to the DMUP.

    Renders the agreement template with the current user's name and
    returns it as a plain-text attachment.
    """
    # Force a plain-text download rather than inline rendering.
    response.content_type = 'text/plain'
    response.headers['Content-Disposition'] = 'attachment; filename="dmup_agreement.txt"'
    user = meta.session.query(User).get(session['user_id'])
    with open('debexpo/public/dmup_agreement_form.txt', 'r') as agreement:
        body = agreement.read().format(name=user.name)
    log.debug('Serving DMUP agreement file')
    return body
|
cloudera/hue | refs/heads/master | desktop/libs/kafka/src/kafka/settings.py | 2 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Hue app registration settings for the Kafka app.
DJANGO_APPS = ["kafka"]  # Django application(s) this module provides
NICE_NAME = "Kafka"  # human-readable app name
REQUIRES_HADOOP = False  # the app does not need a Hadoop cluster
MENU_INDEX = -1  # NOTE(review): presumably hides/deprioritizes the menu entry -- confirm
IS_URL_NAMESPACED = True  # URLs are mounted under the app's namespace
|
appleseedhq/gaffer | refs/heads/master | python/GafferSceneTest/CopyPrimitiveVariablesTest.py | 1 | ##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class CopyPrimitiveVariablesTest( GafferSceneTest.SceneTestCase ) :
    """Tests for GafferScene.CopyPrimitiveVariables."""

    def testConstantVariables( self ) :
        """Copying constant variables: pass-through when unfiltered or when
        no variables are requested, and wildcard name matching."""
        sphere = GafferScene.Sphere()
        sphere["name"].setValue( "object" )
        cube = GafferScene.Cube()
        cube["name"].setValue( "object" )
        cubeVariables = GafferScene.PrimitiveVariables()
        cubeVariables["in"].setInput( cube["out"] )
        cubeVariables["primitiveVariables"].addChild( Gaffer.NameValuePlug( "ten", IECore.IntData( 10 ) ) )
        cubeVariables["primitiveVariables"].addChild( Gaffer.NameValuePlug( "twenty", IECore.IntData( 20 ) ) )
        copy = GafferScene.CopyPrimitiveVariables()
        copy["in"].setInput( sphere["out"] )
        copy["source"].setInput( cubeVariables["out"] )
        # Not filtered to anything, so should be a perfect pass through.
        self.assertScenesEqual( sphere["out"], copy["out"] )
        self.assertSceneHashesEqual( sphere["out"], copy["out"] )
        # Add a filter, should still be a pass through because we haven't
        # asked for any variables to be copied.
        objectFilter = GafferScene.PathFilter()
        objectFilter["paths"].setValue( IECore.StringVectorData( [ "/object" ] ) )
        copy["filter"].setInput( objectFilter["out"] )
        self.assertScenesEqual( sphere["out"], copy["out"] )
        # Copy something that doesn't exist. This isn't an error, because the
        # variables are treated as match patterns.
        copy["primitiveVariables"].setValue( "these don't exist" )
        self.assertScenesEqual( sphere["out"], copy["out"] )
        # Copy things that do exist, and check that it has worked.
        copy["primitiveVariables"].setValue( "ten twenty" )
        self.assertEqual(
            set( copy["out"].object( "/object" ).keys() ),
            set( sphere["out"].object( "/object" ).keys() ) | { "ten", "twenty" },
        )
        self.assertEqual(
            copy["out"].object( "/object" )["ten"],
            cubeVariables["out"].object( "/object" )["ten"],
        )
        self.assertEqual(
            copy["out"].object( "/object" )["twenty"],
            cubeVariables["out"].object( "/object" )["twenty"],
        )
        # Check that wildcards work
        copy["primitiveVariables"].setValue( "twen*" )
        self.assertEqual(
            set( copy["out"].object( "/object" ).keys() ),
            set( sphere["out"].object( "/object" ).keys() ) | { "twenty" },
        )

    def testInterpolatedVariables( self ) :
        """Copying interpolated variables requires matching topology;
        mismatches must raise."""
        littleSphere = GafferScene.Sphere()
        bigSphere = GafferScene.Sphere()
        bigSphere["radius"].setValue( 10 )
        sphereFilter = GafferScene.PathFilter()
        sphereFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
        copy = GafferScene.CopyPrimitiveVariables()
        copy["in"].setInput( littleSphere["out"] )
        copy["source"].setInput( bigSphere["out"] )
        copy["filter"].setInput( sphereFilter["out"] )
        self.assertScenesEqual( copy["out"], littleSphere["out"] )
        copy["primitiveVariables"].setValue( "*" )
        self.assertScenesEqual( copy["out"], bigSphere["out"] )
        # If the spheres have differing topologies, then we can't copy
        # and should get an error.
        bigSphere["divisions"][0].setValue( 100 )
        # NOTE(review): assertRaisesRegexp is a deprecated alias (removed in
        # Python 3.12); assertRaisesRegex is the modern spelling.
        with self.assertRaisesRegexp( RuntimeError, 'Cannot copy .* from "/sphere" to "/sphere" because source and destination primitives have different topology' ) :
            copy["out"].object( "/sphere" )

    def testMismatchedHierarchy( self ) :
        """A source without a matching location leaves the input unchanged."""
        sphere = GafferScene.Sphere()
        cube = GafferScene.Cube()
        sphereFilter = GafferScene.PathFilter()
        sphereFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
        copy = GafferScene.CopyPrimitiveVariables()
        copy["in"].setInput( sphere["out"] )
        copy["source"].setInput( cube["out"] )
        copy["filter"].setInput( sphereFilter["out"] )
        copy["primitiveVariables"].setValue( "*" )
        self.assertEqual( copy["out"].object( "/sphere" ), copy["in"].object( "/sphere" ) )

    def testSourceLocation( self ) :
        """The sourceLocation plug selects which source location is copied
        from; a non-existent location is a no-op."""
        sphere1 = GafferScene.Sphere()
        sphere2 = GafferScene.Sphere()
        sphere2["radius"].setValue( 2 )
        sphere3 = GafferScene.Sphere()
        sphere3["radius"].setValue( 3 )
        group = GafferScene.Group()
        group["in"][0].setInput( sphere2["out"] )
        group["in"][1].setInput( sphere3["out"] )
        sphereFilter = GafferScene.PathFilter()
        sphereFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
        copy = GafferScene.CopyPrimitiveVariables()
        copy["in"].setInput( sphere1["out"] )
        copy["source"].setInput( group["out"] )
        copy["filter"].setInput( sphereFilter["out"] )
        copy["primitiveVariables"].setValue( "P" )
        copy["sourceLocation"].setValue( "/group/sphere" )
        self.assertEqual( copy["out"].object( "/sphere" )["P"], group["out"].object( "/group/sphere" )["P"] )
        copy["sourceLocation"].setValue( "/group/sphere1" )
        self.assertEqual( copy["out"].object( "/sphere" )["P"], group["out"].object( "/group/sphere1" )["P"] )
        # Copying from a non-existing location should be a no-op
        copy["sourceLocation"].setValue( "/road/to/nowhere" )
        self.assertScenesEqual( copy["out"], sphere1["out"] )

    def testBoundUpdate( self ) :
        """Bounds propagate only when "P" is copied and adjustBounds is on;
        otherwise input bounds pass through untouched (hashes included)."""
        sphere1 = GafferScene.Sphere()
        sphere2 = GafferScene.Sphere()
        sphere2["radius"].setValue( 2 )
        group = GafferScene.Group()
        group["in"][0].setInput( sphere1["out"] )
        sphereFilter = GafferScene.PathFilter()
        sphereFilter["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
        copy = GafferScene.CopyPrimitiveVariables()
        copy["in"].setInput( group["out"] )
        copy["source"].setInput( sphere2["out"] )
        copy["filter"].setInput( sphereFilter["out"] )
        copy["sourceLocation"].setValue( "/sphere" )
        copy["primitiveVariables"].setValue( "P" )
        # We're copying "P", so the bounds need updating.
        self.assertEqual( copy["out"].object( "/group/sphere" )["P"], sphere2["out"].object( "/sphere" )["P"] )
        self.assertSceneValid( copy["out"] )
        self.assertEqual( copy["out"].bound( "/" ), sphere2["out"].bound( "/" ) )
        self.assertEqual( copy["out"].bound( "/group" ), sphere2["out"].bound( "/" ) )
        self.assertEqual( copy["out"].bound( "/group/sphere" ), sphere2["out"].bound( "/" ) )
        # If we turn off "adjustBounds", we want a perfect pass through of the input
        # bounds.
        copy["adjustBounds"].setValue( False )
        self.assertScenesEqual( copy["out"], group["out"], checks = { "bound" } )
        self.assertSceneHashesEqual( copy["out"], group["out"], checks = { "bound" } )
        # If "adjustBounds" is on, but "P" isn't being copied, we also want
        # a perfect pass through of the input bounds. We don't want to pay for
        # unnecessary bounds propagation.
        copy["adjustBounds"].setValue( True )
        copy["primitiveVariables"].setValue( "uv" )
        self.assertScenesEqual( copy["out"], group["out"], checks = { "bound" } )
        self.assertSceneHashesEqual( copy["out"], group["out"], checks = { "bound" } )
# Allow running this test file directly with the Python interpreter.
if __name__ == "__main__":
    unittest.main()
|
dvliman/jaikuengine | refs/heads/master | .google_appengine/google/appengine/api/dosinfo.py | 7 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DOS configuration tools.
Library for parsing dos.yaml files and working with these in memory.
"""
import re
import google
import ipaddr
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
# Blacklist entry descriptions are limited to at most 499 characters.
_DESCRIPTION_REGEX = r'^.{0,499}$'

# Attribute keys used in dos.yaml documents.
BLACKLIST = 'blacklist'
DESCRIPTION = 'description'
SUBNET = 'subnet'
class SubnetValidator(validation.Validator):
    """Checks that a subnet can be parsed and is a valid IPv4 or IPv6 subnet."""

    def Validate(self, value, unused_key=None):
        """Validates a subnet.

        Args:
          value: Subnet string, e.g. '1.2.3.4/24'.
          unused_key: Unused; present to satisfy the Validator interface.

        Returns:
          The validated subnet string, unchanged.

        Raises:
          validation.MissingAttribute: If value is None.
          validation.ValidationError: If value is not a string, cannot be
            parsed by ipaddr, or has a non-integer prefix length.
        """
        if value is None:
            raise validation.MissingAttribute('subnet must be specified')
        # basestring: this module targets Python 2.
        if not isinstance(value, basestring):
            raise validation.ValidationError('subnet must be a string, not \'%r\'' %
                                             type(value))
        try:
            ipaddr.IPNetwork(value)
        except ValueError:
            raise validation.ValidationError('%s is not a valid IPv4 or IPv6 subnet' %
                                             value)
        # ipaddr also accepts quad-dotted netmasks; explicitly reject those
        # so only integer prefix lengths (e.g. /24) are allowed.
        parts = value.split('/')
        if len(parts) == 2 and not re.match('^[0-9]+$', parts[1]):
            raise validation.ValidationError('Prefix length of subnet %s must be an '
                                             'integer (quad-dotted masks are not '
                                             'supported)' % value)
        return value
class MalformedDosConfiguration(Exception):
    """Configuration file for DOS API is malformed."""


class BlacklistEntry(validation.Validated):
    """A blacklist entry describes a blocked IP address or subnet."""

    # 'subnet' is required; 'description' is optional and length-limited.
    ATTRIBUTES = {
        DESCRIPTION: validation.Optional(_DESCRIPTION_REGEX),
        SUBNET: SubnetValidator(),
    }


class DosInfoExternal(validation.Validated):
    """Describes the format of a dos.yaml file."""

    # The file consists of a single optional 'blacklist:' list of entries.
    ATTRIBUTES = {
        BLACKLIST: validation.Optional(validation.Repeated(BlacklistEntry)),
    }
def LoadSingleDos(dos_info, open_fn=None):
    """Load a dos.yaml file or string and return a DosInfoExternal object.

    Args:
      dos_info: The contents of a dos.yaml file as a string, or an open file
        object.
      open_fn: Function for opening files. Unused.

    Returns:
      A DosInfoExternal instance which represents the contents of the parsed
      yaml file.

    Raises:
      MalformedDosConfiguration: The yaml file contains multiple blacklist
        sections.
      yaml_errors.EventError: An error occured while parsing the yaml file.
    """
    object_builder = yaml_object.ObjectBuilder(DosInfoExternal)
    builder_handler = yaml_builder.BuilderHandler(object_builder)
    yaml_listener.EventListener(builder_handler).Parse(dos_info)
    results = builder_handler.GetResults()
    # An empty document yields no results; treat it as an empty config.
    if not results:
        return DosInfoExternal()
    if len(results) > 1:
        raise MalformedDosConfiguration('Multiple blacklist: sections '
                                        'in configuration.')
    return results[0]
|
nishad-jobsglobal/odoo-marriot | refs/heads/master | addons/report/tests/test_reports.py | 385 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
import openerp.tests
_logger = logging.getLogger(__name__)
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestReports(openerp.tests.TransactionCase):
    """Smoke-test rendering of all generic QWeb reports."""

    def test_reports(self):
        """Render every generic QWeb report against up to 10 records of
        its target model; reports with a dedicated parser are skipped."""
        registry, cr, uid = self.registry, self.cr, self.uid
        r_model = registry('ir.actions.report.xml')
        domain = [('report_type', 'like', 'qweb')]
        for r in r_model.browse(cr, uid, r_model.search(cr, uid, domain)):
            report_model = 'report.%s' % r.report_name
            try:
                registry(report_model)
            except KeyError:
                # Only test the generic reports here
                _logger.info("testing report %s", r.report_name)
                report_model = registry(r.model)
                report_model_ids = report_model.search(cr, uid, [], limit=10)
                if not report_model_ids:
                    _logger.info("no record found skipping report %s", r.report_name)
                    # FIX: previously this only logged "skipping" but fell
                    # through and called get_html with an empty id list.
                    continue
                if not r.multi:
                    report_model_ids = report_model_ids[:1]
                # Test report generation
                registry('report').get_html(cr, uid, report_model_ids, r.report_name)
            else:
                # A dedicated report parser exists; not a generic report.
                continue
|
vribeiro1/plainsboro_221 | refs/heads/master | plainsboro/doctor_subscriptions/tests/test_view_doctor_profile.py | 1 | from django.contrib.auth.models import User
from django.test import TestCase
from plainsboro.doctor_subscriptions.forms import EditProfileForm
from plainsboro.doctor_subscriptions.models import Doctor
class DoctorSignIn(TestCase):
    """Tests for the doctor login view at /login/."""

    def setUp(self):
        # NOTE(review): User.objects.create stores the raw string in the
        # password field (no create_user/set_password hashing) -- confirm the
        # login view compares passwords in a way that makes
        # test_valid_sign_in meaningful.
        self.user = User.objects.create(
            username='hao123',
            first_name='Hao',
            last_name='123',
            email='hao123@hao123.com',
            password='12345678'
        )
        self.doctor = Doctor.objects.create(
            user=self.user,
            slug='hao123',
            address='Rua Hao, 123',
            neighborhood='hao123',
            city='Campinas',
            phone='+55 11 123123123',
            specialization='Oftalmologista'
        )
        self.valid_user = dict(username='hao123',
                               password='12345678')
        self.invalid_user = dict(username=self.user.username,
                                 password='xxxxxx')

    def test_get(self):
        """GET /login/ returns 200."""
        response = self.client.get('/login/')
        self.assertEqual(200, response.status_code)

    def test_template(self):
        """The login template is used."""
        response = self.client.get('/login/')
        self.assertTemplateUsed(response,
                                'doctor_subscriptions/login.html')

    # NOTE(review): method name has a typo ("exits" for "exists").
    def test_user_exits(self):
        self.assertTrue(User.objects.exists())

    def test_doctor_exists(self):
        self.assertTrue(Doctor.objects.exists())

    def test_valid_sign_in(self):
        """Posting valid credentials shows the success message."""
        response = self.client.post('/login/',
                                    self.valid_user,
                                    follow=True)
        self.assertContains(response, 'Usuário logado com sucesso!')

    def test_invalid_sign_in(self):
        """Posting a wrong password shows the error message."""
        response = self.client.post('/login/',
                                    self.invalid_user,
                                    follow=True)
        self.assertContains(response, 'Username ou senha inválidos!')
class DoctorProfileTest(TestCase):
    """Tests for the public doctor profile page."""

    def setUp(self):
        self.user = User.objects.create(
            first_name='Hao',
            last_name='123',
            username='hao123',
            email='hao123@hao123.com',
            password='12345678'
        )
        self.doctor = Doctor.objects.create(
            user=self.user,
            slug='hao123',
            address='Rua Hao, 123',
            neighborhood='hao123',
            city='Campinas',
            phone='+55 11 123123123',
            specialization='Oftalmologista'
        )
        # The profile URL is keyed on the doctor's slug.
        self.response = self.client.get('/doctor_profile/hao123/')

    def test_get(self):
        """The profile page returns 200."""
        self.assertEqual(200, self.response.status_code)

    def test_template(self):
        """The profile template is used."""
        self.assertTemplateUsed(self.response,
                                'doctor_subscriptions/doctor_profile.html')
class EditDoctorProfileTest(TestCase):
    """Tests for the doctor profile edit form."""

    def setUp(self):
        self.user = User.objects.create(
            first_name='Hao',
            last_name='123',
            username='hao123',
            email='hao123@hao123.com',
            password='12345678'
        )
        self.doctor = Doctor.objects.create(
            user=self.user,
            slug='hao123',
            address='Rua Hao, 123',
            neighborhood='hao123',
            city='Campinas',
            phone='+55 11 123123123',
            specialization='Oftalmologista'
        )
        # A complete, valid payload for the edit form.
        self.data = dict(
            address='Rua Princeton-Plainsboro, 221',
            neighborhood='Princeton',
            city='Princeton',
            phone='+55 11 123123123',
            specialization='Medicina Diagnostica')
        self.response = self.client.get('/doctor_profile/hao123/edit_profile/')

    def test_get(self):
        """GET on the edit page returns 200."""
        self.assertEqual(200, self.response.status_code)

    def test_template(self):
        """The edit-profile template is used."""
        self.assertTemplateUsed(self.response,
                                'doctor_subscriptions/edit_profile.html')

    def test_html(self):
        """The rendered form contains the expected tags/counts."""
        tags = (('<form', 1),
                ('<input', 7),
                ('type="text"', 5),
                ('type="submit"', 1))
        for tag, qtde in tags:
            with self.subTest():
                self.assertContains(self.response, tag, qtde)

    def test_csrf(self):
        self.assertContains(self.response, 'csrfmiddlewaretoken')

    def test_has_form(self):
        form = self.response.context['form']
        self.assertIsInstance(form, EditProfileForm)

    def test_error_message(self):
        """Submitting an empty form shows the error message."""
        data = dict(address='', neighborhood='', city='',
                    phone='', specialization='')
        response = self.client.post('/doctor_profile/hao123/edit_profile/',
                                    data, follow=True)
        self.assertContains(response, 'O formulário contem erros.')

    def test_success_message(self):
        """Submitting valid data shows the success message."""
        response = self.client.post('/doctor_profile/hao123/edit_profile/',
                                    self.data,
                                    follow=True)
        self.assertContains(response, 'Perfil atualizado com sucesso!')

    def test_edit_profile(self):
        """Posted values are persisted on the Doctor record.

        NOTE(review): 'address' is posted but not asserted below --
        confirm whether that omission is intentional.
        """
        self.client.post('/doctor_profile/hao123/edit_profile/',
                         self.data, follow=True)
        user = User.objects.get(username=self.user.username)
        doctor = Doctor.objects.get(user=user)
        assert_list = [
            (self.data['neighborhood'], doctor.neighborhood),
            (self.data['city'], doctor.city),
            (self.data['phone'], doctor.phone),
            (self.data['specialization'], doctor.specialization),
        ]
        for attr, expected in assert_list:
            with self.subTest():
                self.assertEqual(attr, expected)
|
pabloborrego93/edx-platform | refs/heads/master | common/djangoapps/student/management/commands/assigngroups.py | 170 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from student.models import UserTestGroup
import random
import sys
import datetime
from textwrap import dedent
import json
from pytz import UTC
def group_from_value(groups, v):
    """Map a random value in [0, 1] onto a weighted group.

    Given ``groups`` such as ``(('a', 0.3), ('b', 0.4), ('c', 0.3))`` and a
    value ``v``, return the group whose cumulative-probability interval
    contains ``v`` (here 'a' for v < 0.3, 'b' for 0.3 <= v < 0.7, 'c'
    otherwise).  Probabilities are expected to sum to 1; the last group is
    returned for any ``v`` beyond the cumulative total (round-off safety).

    Raises:
        ValueError: if ``groups`` is empty (previously this surfaced as a
            confusing NameError).
    """
    if not groups:
        raise ValueError("groups must be a non-empty sequence")
    # 'total' replaces the original accumulator named 'sum', which
    # shadowed the builtin.
    total = 0
    for (group, probability) in groups:
        total = total + probability
        if total > v:
            return group
    return group  # For round-off errors: fall back to the last group
class Command(BaseCommand):
help = dedent("""\
Assign users to test groups. Takes a list of groups:
a:0.3,b:0.4,c:0.3 file.txt "Testing something"
Will assign each user to group a, b, or c with
probability 0.3, 0.4, 0.3. Probabilities must
add up to 1.
Will log what happened to file.txt.
""")
def handle(self, *args, **options):
if len(args) != 3:
print "Invalid number of options"
sys.exit(-1)
# Extract groups from string
group_strs = [x.split(':') for x in args[0].split(',')]
groups = [(group, float(value)) for group, value in group_strs]
print "Groups", groups
## Confirm group probabilities add up to 1
total = sum(zip(*groups)[1])
print "Total:", total
if abs(total - 1) > 0.01:
print "Total not 1"
sys.exit(-1)
## Confirm groups don't already exist
for group in dict(groups):
if UserTestGroup.objects.filter(name=group).count() != 0:
print group, "already exists!"
sys.exit(-1)
group_objects = {}
f = open(args[1], "a+")
## Create groups
for group in dict(groups):
utg = UserTestGroup()
utg.name = group
utg.description = json.dumps({"description": args[2]},
{"time": datetime.datetime.now(UTC).isoformat()})
group_objects[group] = utg
group_objects[group].save()
## Assign groups
users = list(User.objects.all())
count = 0
for user in users:
if count % 1000 == 0:
print count
count = count + 1
v = random.uniform(0, 1)
group = group_from_value(groups, v)
group_objects[group].users.add(user)
f.write(u"Assigned user {name} ({id}) to {group}\n".format(
name=user.username,
id=user.id,
group=group
).encode('utf-8'))
## Save groups
for group in group_objects:
group_objects[group].save()
f.close()
# python manage.py assigngroups summary_test:0.3,skip_summary_test:0.7 log.txt "Do previews of future materials help?"
# python manage.py assigngroups skip_capacitor:0.3,capacitor:0.7 log.txt "Do we show capacitor in linearity tutorial?"
|
luisgg/iteexe | refs/heads/master | twisted/spread/ui/gtkutil.py | 21 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import gtk
import string
from twisted.spread import pb
from twisted import copyright
from twisted.python import reflect
from twisted.cred.credentials import UsernamePassword
# X11 core fonts used throughout the UI (gtk 1.x API): regular, bold and
# oblique variants of 12pt Courier.
normalFont = gtk.load_font("-adobe-courier-medium-r-normal-*-*-120-*-*-m-*-iso8859-1")
boldFont = gtk.load_font("-adobe-courier-bold-r-normal-*-*-120-*-*-m-*-iso8859-1")
errorFont = gtk.load_font("-adobe-courier-medium-o-normal-*-*-120-*-*-m-*-iso8859-1")
def selectAll(widget, event):
    """Select the entire contents of *widget* (used on focus-in)."""
    widget.select_region(0, -1)


def cbutton(name, callback):
    """Create a GtkButton labelled *name* wired to *callback* on click."""
    b = gtk.GtkButton(name)
    b.connect('clicked', callback)
    return b
class ButtonBar:
    """Mixin that builds a row of buttons from prefixed method names."""

    # Optional explicit list of button-method suffixes; when None the class
    # is introspected for methods starting with the given prefix.
    barButtons = None

    def getButtonList(self, prefix='button_', container=None):
        """Return one button per ``prefix``-named method (underscores in
        the name become spaces), optionally packing them into *container*."""
        result = []
        buttons = self.barButtons or \
                  reflect.prefixedMethodNames(self.__class__, prefix)
        for b in buttons:
            bName = string.replace(b, '_', ' ')
            result.append(cbutton(bName, getattr(self, prefix + b)))
        if container:
            map(container.add, result)
        return result
def scrollify(widget):
    """Wrap *widget* in a scrolled window (vertical scrollbar always on,
    horizontal never)."""
    #widget.set_word_wrap(gtk.TRUE)
    scrl = gtk.GtkScrolledWindow()
    scrl.add(widget)
    scrl.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
    # scrl.set_update_policy(gtk.POLICY_AUTOMATIC)
    return scrl


def defocusify(widget):
    """Remove *widget* from the keyboard focus chain."""
    widget.unset_flags(gtk.CAN_FOCUS)
class GetString(gtk.GtkWindow):
    """Small prompt window with one text entry and an action button.

    Subclasses must override clicked() to consume the entered text.
    """

    def __init__(self, im, desc):
        gtk.GtkWindow.__init__(self, gtk.WINDOW_TOPLEVEL)
        self.set_title(desc)
        # NOTE(review): 'im' is only stored here; presumably the owning
        # interaction object used by subclasses -- confirm.
        self.im = im
        button = cbutton(desc, self.clicked)
        self.entry = gtk.GtkEntry()
        # Pressing Enter in the entry behaves like clicking the button.
        self.entry.connect('activate', self.clicked)
        hb = gtk.GtkHBox()
        hb.add(self.entry)
        hb.add(button)
        self.add(hb)
        self.show_all()

    def clicked(self, btn):
        # Abstract: subclasses handle the entered value here.
        raise NotImplementedError
class Login(gtk.GtkWindow):
_resetTimeout = None
def __init__(self, callback,
referenceable=None,
initialUser="guest", initialPassword="guest",
initialHostname="localhost",initialPortno=str(pb.portno),
initialService="", initialPerspective=""):
gtk.GtkWindow.__init__(self,gtk.WINDOW_TOPLEVEL)
version_label = gtk.GtkLabel("Twisted v%s" % copyright.version)
self.pbReferenceable = referenceable
self.pbCallback = callback
# version_label.show()
self.username = gtk.GtkEntry()
self.password = gtk.GtkEntry()
self.service = gtk.GtkEntry()
self.perspective = gtk.GtkEntry()
self.hostname = gtk.GtkEntry()
self.port = gtk.GtkEntry()
self.password.set_visibility(gtk.FALSE)
self.username.set_text(initialUser)
self.password.set_text(initialPassword)
self.service.set_text(initialService)
self.perspective.set_text(initialPerspective)
self.hostname.set_text(initialHostname)
self.port.set_text(str(initialPortno))
userlbl=gtk.GtkLabel("Username:")
passlbl=gtk.GtkLabel("Password:")
servicelbl=gtk.GtkLabel("Service:")
perspeclbl=gtk.GtkLabel("Perspective:")
hostlbl=gtk.GtkLabel("Hostname:")
portlbl=gtk.GtkLabel("Port #:")
self.allLabels = [
userlbl, passlbl, servicelbl, perspeclbl, hostlbl, portlbl
]
self.logstat = gtk.GtkLabel("Protocol PB-%s" % pb.Broker.version)
self.okbutton = cbutton("Log In", self.login)
self.okbutton["can_default"] = 1
self.okbutton["receives_default"] = 1
okbtnbx = gtk.GtkHButtonBox()
okbtnbx.add(self.okbutton)
vbox = gtk.GtkVBox()
vbox.add(version_label)
table = gtk.GtkTable(2,6)
row=0
for label, entry in [(userlbl, self.username),
(passlbl, self.password),
(hostlbl, self.hostname),
(servicelbl, self.service),
(perspeclbl, self.perspective),
(portlbl, self.port)]:
table.attach(label, 0, 1, row, row+1)
table.attach(entry, 1, 2, row, row+1)
row = row+1
vbox.add(table)
vbox.add(self.logstat)
vbox.add(okbtnbx)
self.add(vbox)
self.username.grab_focus()
self.okbutton.grab_default()
for fld in self.username, self.password, self.hostname, self.service, self.perspective:
fld.signal_connect('activate',self.login)
fld.signal_connect('focus_in_event',selectAll)
self.signal_connect('destroy',gtk.mainquit,None)
def loginReset(self):
print 'doing login reset'
self.logstat.set_text("Idle.")
self._resetTimeout = None
return 0
def loginReport(self, txt):
print 'setting login report',repr(txt)
self.logstat.set_text(txt)
if not (self._resetTimeout is None):
gtk.timeout_remove(self._resetTimeout)
self._resetTimeout = gtk.timeout_add(59000, self.loginReset)
def login(self, btn):
host = self.hostname.get_text()
port = self.port.get_text()
service = self.service.get_text()
perspective = self.perspective.get_text()
# Maybe we're connecting to a unix socket, so don't make any
# assumptions
try:
port = int(port)
except:
pass
user = self.username.get_text()
pswd = self.password.get_text()
self.loginReport("connecting...")
# putting this off to avoid a stupid bug in gtk where it won't redraw
# if you input_add a connecting socket (!??)
self.user_tx = user
self.pswd_tx = pswd
self.host_tx = host
self.port_tx = port
self.service_tx = service
self.perspective_tx = perspective or user
afterOneTimeout(10, self.__actuallyConnect)
    def __actuallyConnect(self):
        """Start the PB connection using the values stashed by login().

        NOTE(review): internet.TCPClient creates a service object that is
        neither started nor retained here -- verify the connection is
        actually initiated (or that this relies on factory side effects).
        """
        from twisted.application import internet
        f = pb.PBClientFactory()
        internet.TCPClient(self.host_tx, self.port_tx, f)
        creds = UsernamePassword(self.user_tx, self.pswd_tx)
        # 30s timeout on the login deferred; failures go to couldNotConnect.
        f.login(creds, self.pbReferenceable
            ).addCallbacks(self.pbCallback, self.couldNotConnect
            ).setTimeout(30
            )
    def couldNotConnect(self, msg):
        """Errback: surface the connection failure in the status label."""
        self.loginReport("couldn't connect: %s" % str(msg))
class _TimerOuter:
    """One-shot timer helper.

    gtk repeats a timeout until its callback returns a false value; this
    wrapper cancels itself on the first firing so *cmd* runs exactly once.
    """
    def __init__(self, timeout, cmd, args):
        self.args = args
        self.cmd = cmd
        self.tid = gtk.timeout_add(timeout, self.doIt)
    def doIt(self):
        # Cancel the recurring timeout before invoking the callback.
        gtk.timeout_remove(self.tid)
        # apply() is deprecated (and removed in Python 3); use star-args.
        self.cmd(*self.args)
def afterOneTimeout(timeout, cmd, *args):
    """Run cmd(*args) once after *timeout* milliseconds on the gtk loop."""
    _TimerOuter(timeout, cmd, args)
|
Gamebasis/3DGamebasisServer | refs/heads/master | GameData/blender-2.71-windows64/2.71/python/lib/email/errors.py | 120 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""email package exception classes."""
class MessageError(Exception):
    """Base class for errors in the email package."""
# Parsing errors form their own sub-hierarchy under MessageError.
class MessageParseError(MessageError):
    """Base class for message parsing errors."""
class HeaderParseError(MessageParseError):
    """Error while parsing headers."""
class BoundaryError(MessageParseError):
    """Couldn't find terminating boundary."""
# Also derives from TypeError so callers may catch it under either type.
class MultipartConversionError(MessageError, TypeError):
    """Conversion to a multipart is prohibited."""
class CharsetError(MessageError):
    """An illegal charset was given."""
# These are parsing defects which the parser was able to work around.
class MessageDefect(ValueError):
    """Base class for a message defect."""
    def __init__(self, line=None):
        # Forward the offending line (when given) to ValueError so that
        # str(defect) shows it; always remember it on the instance.
        extra = () if line is None else (line,)
        super().__init__(*extra)
        self.line = line
class NoBoundaryInMultipartDefect(MessageDefect):
    """A message claimed to be a multipart but had no boundary parameter."""
class StartBoundaryNotFoundDefect(MessageDefect):
    """The claimed start boundary was never found."""
class CloseBoundaryNotFoundDefect(MessageDefect):
    """A start boundary was found, but not the corresponding close boundary."""
class FirstHeaderLineIsContinuationDefect(MessageDefect):
    """A message had a continuation line as its first header line."""
class MisplacedEnvelopeHeaderDefect(MessageDefect):
    """A 'Unix-from' header was found in the middle of a header block."""
class MissingHeaderBodySeparatorDefect(MessageDefect):
    """Found line with no leading whitespace and no colon before blank line."""
# XXX: backward compatibility, just in case (it was never emitted).
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
class MultipartInvariantViolationDefect(MessageDefect):
    """A message claimed to be a multipart but no subparts were found."""
class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
    """An invalid content transfer encoding was set on the multipart itself."""
class UndecodableBytesDefect(MessageDefect):
    """Header contained bytes that could not be decoded"""
# base64 payload defects: raised by the decoder working around bad input.
class InvalidBase64PaddingDefect(MessageDefect):
    """base64 encoded sequence had an incorrect length"""
class InvalidBase64CharactersDefect(MessageDefect):
    """base64 encoded sequence had characters not in base64 alphabet"""
# These errors are specific to header parsing.
class HeaderDefect(MessageDefect):
    """Base class for a header defect."""
    def __init__(self, *args, **kw):
        # No extra behaviour yet; exists as an extension hook for
        # header-specific subclasses.
        super().__init__(*args, **kw)
class InvalidHeaderDefect(HeaderDefect):
    """Header is not valid, message gives details."""
class HeaderMissingRequiredValue(HeaderDefect):
    """A header that must have a value had none"""
class NonPrintableDefect(HeaderDefect):
"""ASCII characters outside the ascii-printable range found"""
def __init__(self, non_printables):
super().__init__(non_printables)
self.non_printables = non_printables
def __str__(self):
return ("the following ASCII non-printables found in header: "
"{}".format(self.non_printables))
# Obsolete syntax per RFC 5322 section 4 ("obs-" grammar productions).
class ObsoleteHeaderDefect(HeaderDefect):
    """Header uses syntax declared obsolete by RFC 5322"""
class NonASCIILocalPartDefect(HeaderDefect):
    """local_part contains non-ASCII characters"""
    # This defect only occurs during unicode parsing, not when
    # parsing messages decoded from binary.
|
ahmadiga/min_edx | refs/heads/master | common/djangoapps/service_status/test.py | 132 | """Test for async task service status"""
from django.utils import unittest
from django.test.client import Client
from django.core.urlresolvers import reverse
import json
class CeleryConfigTest(unittest.TestCase):
    """
    Test that we can get a response from Celery
    """
    # NOTE(review): Python 2 era code -- uses `unicode` and the deprecated
    # `django.utils.unittest`; confirm the target runtime before porting.
    def setUp(self):
        """
        Create a django test client
        """
        super(CeleryConfigTest, self).setUp()
        self.client = Client()
        # URL is resolved by name so this test survives URLconf changes.
        self.ping_url = reverse('status.service.celery.ping')
    def test_ping(self):
        """
        Try to ping celery.
        """
        # Access the service status page, which starts a delayed
        # asynchronous task
        response = self.client.get(self.ping_url)
        # HTTP response should be successful
        self.assertEqual(response.status_code, 200)
        # Expect to get a JSON-serialized dict with
        # task and time information
        result_dict = json.loads(response.content)
        # Was it successful?
        self.assertTrue(result_dict['success'])
        # We should get a "pong" message back
        self.assertEqual(result_dict['value'], "pong")
        # We don't know the other dict values exactly,
        # but we can assert that they take the right form
        self.assertIsInstance(result_dict['task_id'], unicode)
        self.assertIsInstance(result_dict['time'], float)
        self.assertTrue(result_dict['time'] > 0.0)
|
orekyuu/intellij-community | refs/heads/master | python/lib/Lib/xml/sax/saxlib.py | 130 | """
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id: saxlib.py,v 1.12 2002/05/10 14:49:21 akuchling Exp $
"""
version = '2.0beta'
# A number of interfaces used to live in saxlib, but are now in
# various other modules for Python 2 compatibility. If nobody uses
# them here any longer, the references can be removed
from handler import ErrorHandler, ContentHandler, DTDHandler, EntityResolver
from xmlreader import XMLReader, InputSource, Locator, IncrementalParser
from _exceptions import *
from handler import \
feature_namespaces,\
feature_namespace_prefixes,\
feature_string_interning,\
feature_validation,\
feature_external_ges,\
feature_external_pes,\
all_features,\
property_lexical_handler,\
property_declaration_handler,\
property_dom_node,\
property_xml_string,\
all_properties
#============================================================================
#
# MAIN INTERFACES
#
#============================================================================
# ===== XMLFILTER =====
class XMLFilter(XMLReader):
    """Interface for a SAX2 parser filter.

    A parser filter is an XMLReader that obtains its events from another
    XMLReader (which may itself be a filter) instead of a primary source
    such as a document or other non-SAX data source. Filters may rewrite
    the event stream before handing it on to their handlers."""

    def __init__(self, parent = None):
        """Create a filter instance, optionally wiring up its parent
        reader at construction time."""
        XMLReader.__init__(self)
        self._parent = parent

    def setParent(self, parent):
        """Set the parent XMLReader of this filter; the argument may
        not be None."""
        self._parent = parent

    def getParent(self):
        """Return the parent of this filter."""
        return self._parent
# ===== ATTRIBUTES =====
class Attributes:
    """Interface for a list of XML attributes.
    Contains a list of XML attributes, accessible by name."""
    # Pure interface class: every method raises NotImplementedError and
    # must be overridden by concrete implementations.
    def getLength(self):
        "Returns the number of attributes in the list."
        raise NotImplementedError("This method must be implemented!")
    def getType(self, name):
        "Returns the type of the attribute with the given name."
        raise NotImplementedError("This method must be implemented!")
    def getValue(self, name):
        "Returns the value of the attribute with the given name."
        raise NotImplementedError("This method must be implemented!")
    def getValueByQName(self, name):
        """Returns the value of the attribute with the given raw (or
        qualified) name."""
        raise NotImplementedError("This method must be implemented!")
    def getNameByQName(self, name):
        """Returns the namespace name of the attribute with the given
        raw (or qualified) name."""
        raise NotImplementedError("This method must be implemented!")
    def getNames(self):
        """Returns a list of the names of all attributes
        in the list."""
        raise NotImplementedError("This method must be implemented!")
    def getQNames(self):
        """Returns a list of the raw qualified names of all attributes
        in the list."""
        raise NotImplementedError("This method must be implemented!")
    # Mapping-protocol conveniences mirroring the named accessors above.
    def __len__(self):
        "Alias for getLength."
        raise NotImplementedError("This method must be implemented!")
    def __getitem__(self, name):
        "Alias for getValue."
        raise NotImplementedError("This method must be implemented!")
    def keys(self):
        "Returns a list of the attribute names in the list."
        raise NotImplementedError("This method must be implemented!")
    def has_key(self, name):
        "True if the attribute is in the list, false otherwise."
        raise NotImplementedError("This method must be implemented!")
    def get(self, name, alternative=None):
        """Return the value associated with attribute name; if it is not
        available, then return the alternative."""
        raise NotImplementedError("This method must be implemented!")
    def copy(self):
        "Return a copy of the Attributes object."
        raise NotImplementedError("This method must be implemented!")
    def items(self):
        "Return a list of (attribute_name, value) pairs."
        raise NotImplementedError("This method must be implemented!")
    def values(self):
        "Return a list of all attribute values."
        raise NotImplementedError("This method must be implemented!")
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== DECLHANDLER =====
class DeclHandler:
    """Optional SAX2 handler for DTD declaration events.
    Note that some DTD declarations are already reported through the
    DTDHandler interface. All events reported to this handler will
    occur between the startDTD and endDTD events of the
    LexicalHandler.
    To set the DeclHandler for an XMLReader, use the setProperty method
    with the identifier http://xml.org/sax/handlers/DeclHandler."""
    # All callbacks below are deliberate no-ops (docstring-only bodies);
    # subclasses override only the events they care about.
    def attributeDecl(self, elem_name, attr_name, type, value_def, value):
        """Report an attribute type declaration.
        Only the first declaration will be reported. The type will be
        one of the strings "CDATA", "ID", "IDREF", "IDREFS",
        "NMTOKEN", "NMTOKENS", "ENTITY", "ENTITIES", or "NOTATION", or
        a list of names (in the case of enumerated definitions).
        elem_name is the element type name, attr_name the attribute
        type name, type a string representing the attribute type,
        value_def a string representing the default declaration
        ('#IMPLIED', '#REQUIRED', '#FIXED' or None). value is a string
        representing the attribute's default value, or None if there
        is none."""
    def elementDecl(self, elem_name, content_model):
        """Report an element type declaration.
        Only the first declaration will be reported.
        content_model is the string 'EMPTY', the string 'ANY' or the content
        model structure represented as tuple (separator, tokens, modifier)
        where separator is the separator in the token list (that is, '|' or
        ','), tokens is the list of tokens (element type names or tuples
        representing parentheses) and modifier is the quantity modifier
        ('*', '?' or '+')."""
    def internalEntityDecl(self, name, value):
        """Report an internal entity declaration.
        Only the first declaration of an entity will be reported.
        name is the name of the entity. If it is a parameter entity,
        the name will begin with '%'. value is the replacement text of
        the entity."""
    def externalEntityDecl(self, name, public_id, system_id):
        """Report a parsed entity declaration. (Unparsed entities are
        reported to the DTDHandler.)
        Only the first declaration for each entity will be reported.
        name is the name of the entity. If it is a parameter entity,
        the name will begin with '%'. public_id and system_id are the
        public and system identifiers of the entity. public_id will be
        None if none were declared."""
# ===== LEXICALHANDLER =====
class LexicalHandler:
    """Optional SAX2 handler for lexical events.
    This handler is used to obtain lexical information about an XML
    document, that is, information about how the document was encoded
    (as opposed to what it contains, which is reported to the
    ContentHandler), such as comments and CDATA marked section
    boundaries.
    To set the LexicalHandler of an XMLReader, use the setProperty
    method with the property identifier
    'http://xml.org/sax/handlers/LexicalHandler'. There is no
    guarantee that the XMLReader will support or recognize this
    property."""
    # All callbacks below are deliberate no-ops (docstring-only bodies);
    # subclasses override only the events they care about.
    def comment(self, content):
        """Reports a comment anywhere in the document (including the
        DTD and outside the document element).
        content is a string that holds the contents of the comment."""
    def startDTD(self, name, public_id, system_id):
        """Report the start of the DTD declarations, if the document
        has an associated DTD.
        A startEntity event will be reported before declaration events
        from the external DTD subset are reported, and this can be
        used to infer from which subset DTD declarations derive.
        name is the name of the document element type, public_id the
        public identifier of the DTD (or None if none were supplied)
        and system_id the system identfier of the external subset (or
        None if none were supplied)."""
    def endDTD(self):
        "Signals the end of DTD declarations."
    def startEntity(self, name):
        """Report the beginning of an entity.
        The start and end of the document entity is not reported. The
        start and end of the external DTD subset is reported with the
        pseudo-name '[dtd]'.
        Skipped entities will be reported through the skippedEntity
        event of the ContentHandler rather than through this event.
        name is the name of the entity. If it is a parameter entity,
        the name will begin with '%'."""
    def endEntity(self, name):
        """Reports the end of an entity. name is the name of the
        entity, and follows the same conventions as for
        startEntity."""
    def startCDATA(self):
        """Reports the beginning of a CDATA marked section.
        The contents of the CDATA marked section will be reported
        through the characters event."""
    def endCDATA(self):
        "Reports the end of a CDATA marked section."
#============================================================================
#
# SAX 1.0 COMPATIBILITY CLASSES
# Note that these are all deprecated.
#
#============================================================================
# ===== ATTRIBUTELIST =====
class AttributeList:
    """Interface for an attribute list. This interface provides
    information about a list of attributes for an element (only
    specified or defaulted attributes will be reported). Note that the
    information returned by this object will be valid only during the
    scope of the DocumentHandler.startElement callback, and the
    attributes will not necessarily be provided in the order declared
    or specified."""
    # Deprecated SAX 1.0 interface: methods are documentation-only stubs
    # (they implicitly return None).
    def getLength(self):
        "Return the number of attributes in list."
    def getName(self, i):
        "Return the name of an attribute in the list."
    def getType(self, i):
        """Return the type of an attribute in the list. (Parameter can be
        either integer index or attribute name.)"""
    def getValue(self, i):
        """Return the value of an attribute in the list. (Parameter can be
        either integer index or attribute name.)"""
    # Mapping-protocol conveniences mirroring the named accessors above.
    def __len__(self):
        "Alias for getLength."
    def __getitem__(self, key):
        "Alias for getName (if key is an integer) and getValue (if string)."
    def keys(self):
        "Returns a list of the attribute names."
    def has_key(self, key):
        "True if the attribute is in the list, false otherwise."
    def get(self, key, alternative=None):
        """Return the value associated with attribute name; if it is not
        available, then return the alternative."""
    def copy(self):
        "Return a copy of the AttributeList."
    def items(self):
        "Return a list of (attribute_name,value) pairs."
    def values(self):
        "Return a list of all attribute values."
# ===== DOCUMENTHANDLER =====
class DocumentHandler:
    """Handle general document events. This is the main client
    interface for SAX: it contains callbacks for the most important
    document events, such as the start and end of elements. You need
    to create an object that implements this interface, and then
    register it with the Parser. If you do not want to implement
    the entire interface, you can derive a class from HandlerBase,
    which implements the default functionality. You can find the
    location of any document event using the Locator interface
    supplied by setDocumentLocator()."""
    # Deprecated SAX 1.0 interface: callbacks are documentation-only
    # no-op stubs; subclasses override the events they need.
    def characters(self, ch, start, length):
        "Handle a character data event."
    def endDocument(self):
        "Handle an event for the end of a document."
    def endElement(self, name):
        "Handle an event for the end of an element."
    def ignorableWhitespace(self, ch, start, length):
        "Handle an event for ignorable whitespace in element content."
    def processingInstruction(self, target, data):
        "Handle a processing instruction event."
    def setDocumentLocator(self, locator):
        "Receive an object for locating the origin of SAX document events."
    def startDocument(self):
        "Handle an event for the beginning of a document."
    def startElement(self, name, atts):
        "Handle an event for the beginning of an element."
# ===== HANDLERBASE =====
class HandlerBase(EntityResolver, DTDHandler, DocumentHandler,
                  ErrorHandler):
    """Default, do-nothing base class for SAX 1.0 handlers.

    Implements the default behaviour of four SAX interfaces in one
    class -- EntityResolver, DTDHandler, DocumentHandler and
    ErrorHandler -- so applications may subclass it and override only
    the callbacks they need. Using this class is optional; the four
    interfaces may also be implemented directly."""
# ===== PARSER =====
class Parser:
    """Basic interface for SAX (Simple API for XML) parsers.

    Every SAX parser implements this interface: clients register
    handlers for the different event categories and then start a parse
    from a URI, a character stream, or a byte stream. SAX parsers
    should also be constructible with zero arguments."""

    def __init__(self):
        # Default, do-nothing handlers; replaced via the setters below.
        self.doc_handler = DocumentHandler()
        self.dtd_handler = DTDHandler()
        self.ent_handler = EntityResolver()
        self.err_handler = ErrorHandler()

    def parse(self, systemId):
        "Parse an XML document from a system identifier."

    def parseFile(self, fileobj):
        "Parse an XML document from a file-like object."

    def setDocumentHandler(self, handler):
        "Register an object to receive basic document-related events."
        self.doc_handler = handler

    def setDTDHandler(self, handler):
        "Register an object to receive basic DTD-related events."
        self.dtd_handler = handler

    def setEntityResolver(self, resolver):
        "Register an object to resolve external entities."
        self.ent_handler = resolver

    def setErrorHandler(self, handler):
        "Register an object to receive error-message events."
        self.err_handler = handler

    def setLocale(self, locale):
        """Allow an application to set the locale for errors and warnings.

        SAX parsers are not required to provide localisation for errors
        and warnings; if they cannot support the requested locale they
        must raise a SAX exception. Applications may request a locale
        change in the middle of a parse."""
        raise SAXNotSupportedException("Locale support not implemented")
|
UAVenture/mavlink | refs/heads/master | pymavlink/generator/mavgen_python.py | 20 | #!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a python implementation
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
import sys, textwrap, os
from . import mavparse, mavtemplate
t = mavtemplate.MAVTemplate()
def generate_preamble(outf, msgs, basename, args, xml):
    """Write the fixed preamble of the generated module to *outf*.

    Emits the generated file's docstring, imports, wire-protocol
    constants and the MAVLink_header/MAVLink_message base classes,
    substituting the ${...} placeholders from *args* and *xml*.
    """
    print("Generating preamble")
    # The template below IS the head of every generated dialect module;
    # edit with care -- its indentation is part of the generated code.
    t.write(outf, """
'''
MAVLink protocol implementation (auto-generated by mavgen.py)
Generated from: ${FILELIST}
Note: this file has been auto-generated. DO NOT EDIT
'''
import struct, array, time, json, os, sys, platform
from ...generator.mavcrc import x25crc
WIRE_PROTOCOL_VERSION = "${WIRE_PROTOCOL_VERSION}"
DIALECT = "${DIALECT}"
native_supported = platform.system() != 'Windows' # Not yet supported on other dialects
native_force = 'MAVNATIVE_FORCE' in os.environ # Will force use of native code regardless of what client app wants
native_testing = 'MAVNATIVE_TESTING' in os.environ # Will force both native and legacy code to be used and their results compared
if native_supported:
    try:
        import mavnative
    except ImportError:
        print("ERROR LOADING MAVNATIVE - falling back to python implementation")
        native_supported = False
# some base types from mavlink_types.h
MAVLINK_TYPE_CHAR = 0
MAVLINK_TYPE_UINT8_T = 1
MAVLINK_TYPE_INT8_T = 2
MAVLINK_TYPE_UINT16_T = 3
MAVLINK_TYPE_INT16_T = 4
MAVLINK_TYPE_UINT32_T = 5
MAVLINK_TYPE_INT32_T = 6
MAVLINK_TYPE_UINT64_T = 7
MAVLINK_TYPE_INT64_T = 8
MAVLINK_TYPE_FLOAT = 9
MAVLINK_TYPE_DOUBLE = 10
class MAVLink_header(object):
    '''MAVLink message header'''
    def __init__(self, msgId, mlen=0, seq=0, srcSystem=0, srcComponent=0):
        self.mlen = mlen
        self.seq = seq
        self.srcSystem = srcSystem
        self.srcComponent = srcComponent
        self.msgId = msgId
    def pack(self):
        return struct.pack('BBBBBB', ${PROTOCOL_MARKER}, self.mlen, self.seq,
                          self.srcSystem, self.srcComponent, self.msgId)
class MAVLink_message(object):
    '''base MAVLink message class'''
    def __init__(self, msgId, name):
        self._header = MAVLink_header(msgId)
        self._payload = None
        self._msgbuf = None
        self._crc = None
        self._fieldnames = []
        self._type = name
    def get_msgbuf(self):
        if isinstance(self._msgbuf, bytearray):
            return self._msgbuf
        return bytearray(self._msgbuf)
    def get_header(self):
        return self._header
    def get_payload(self):
        return self._payload
    def get_crc(self):
        return self._crc
    def get_fieldnames(self):
        return self._fieldnames
    def get_type(self):
        return self._type
    def get_msgId(self):
        return self._header.msgId
    def get_srcSystem(self):
        return self._header.srcSystem
    def get_srcComponent(self):
        return self._header.srcComponent
    def get_seq(self):
        return self._header.seq
    def __str__(self):
        ret = '%s {' % self._type
        for a in self._fieldnames:
            v = getattr(self, a)
            ret += '%s : %s, ' % (a, v)
        ret = ret[0:-2] + '}'
        return ret
    def __ne__(self, other):
        return not self.__eq__(other)
    def __eq__(self, other):
        if other == None:
            return False
        if self.get_type() != other.get_type():
            return False
        # We do not compare CRC because native code doesn't provide it
        #if self.get_crc() != other.get_crc():
        #    return False
        if self.get_seq() != other.get_seq():
            return False
        if self.get_srcSystem() != other.get_srcSystem():
            return False
        if self.get_srcComponent() != other.get_srcComponent():
            return False
        for a in self._fieldnames:
            if getattr(self, a) != getattr(other, a):
                return False
        return True
    def to_dict(self):
        d = dict({})
        d['mavpackettype'] = self._type
        for a in self._fieldnames:
            d[a] = getattr(self, a)
        return d
    def to_json(self):
        return json.dumps(self.to_dict())
    def pack(self, mav, crc_extra, payload):
        self._payload = payload
        self._header = MAVLink_header(self._header.msgId, len(payload), mav.seq,
                                      mav.srcSystem, mav.srcComponent)
        self._msgbuf = self._header.pack() + payload
        crc = x25crc(self._msgbuf[1:])
        if ${crc_extra}: # using CRC extra
            crc.accumulate_str(chr(crc_extra))
        self._crc = crc.crc
        self._msgbuf += struct.pack('<H', self._crc)
        return self._msgbuf
""", {'FILELIST' : ",".join(args),
      'PROTOCOL_MARKER' : xml.protocol_marker,
      'DIALECT' : os.path.splitext(os.path.basename(basename))[0],
      'crc_extra' : xml.crc_extra,
      'WIRE_PROTOCOL_VERSION' : xml.wire_protocol_version })
def generate_enums(outf, enums):
    """Write the enum constants and the runtime ``enums`` dict to *outf*.

    For every enum entry this emits a module-level constant plus an
    EnumEntry record (with per-parameter descriptions) in the generated
    ``enums`` lookup table.
    """
    print("Generating enums")
    outf.write('''
# enums
class EnumEntry(object):
    def __init__(self, name, description):
        self.name = name
        self.description = description
        self.param = {}
enums = {}
''')
    # Wrap long descriptions so continuation lines stay inside the
    # trailing comment of the generated constant.
    wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" # ")
    for e in enums:
        outf.write("\n# %s\n" % e.name)
        outf.write("enums['%s'] = {}\n" % e.name)
        for entry in e.entry:
            outf.write("%s = %u # %s\n" % (entry.name, entry.value, wrapper.fill(entry.description)))
            outf.write("enums['%s'][%d] = EnumEntry('%s', '''%s''')\n" % (e.name,
                int(entry.value), entry.name,
                entry.description))
            for param in entry.param:
                outf.write("enums['%s'][%d].param[%d] = '''%s'''\n" % (e.name,
                    int(entry.value),
                    int(param.index),
                    param.description))
def generate_message_ids(outf, msgs):
    """Write the MAVLINK_MSG_ID_* constant for each message definition."""
    print("Generating message IDs")
    # BAD_DATA gets the sentinel id -1; real messages follow in order.
    chunks = ["\n# message IDs\n", "MAVLINK_MSG_ID_BAD_DATA = -1\n"]
    chunks.extend("MAVLINK_MSG_ID_%s = %u\n" % (msg.name.upper(), msg.id)
                  for msg in msgs)
    outf.write("".join(chunks))
def generate_classes(outf, msgs):
    """Write one MAVLink_<name>_message class per message to *outf*.

    Each generated class carries the static metadata (id, field order,
    struct format, crc_extra) plus an __init__ taking the message fields
    and a pack() method serialising them.
    """
    print("Generating class definitions")
    # Wrapper indents the message description inside the generated
    # class docstring.
    wrapper = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ")
    for m in msgs:
        classname = "MAVLink_%s_message" % m.name.lower()
        fieldname_str = ", ".join(map(lambda s: "'%s'" % s, m.fieldnames))
        ordered_fieldname_str = ", ".join(map(lambda s: "'%s'" % s, m.ordered_fieldnames))
        outf.write("""
class %s(MAVLink_message):
        '''
        %s
        '''
        id = MAVLINK_MSG_ID_%s
        name = '%s'
        fieldnames = [%s]
        ordered_fieldnames = [ %s ]
        format = '%s'
        native_format = bytearray('%s', 'ascii')
        orders = %s
        lengths = %s
        array_lengths = %s
        crc_extra = %s
        def __init__(self""" % (classname, wrapper.fill(m.description.strip()),
                m.name.upper(),
                m.name.upper(),
                fieldname_str,
                ordered_fieldname_str,
                m.fmtstr,
                m.native_fmtstr,
                m.order_map,
                m.len_map,
                m.array_len_map,
                m.crc_extra))
        if len(m.fields) != 0:
            outf.write(", " + ", ".join(m.fieldnames))
        outf.write("):\n")
        outf.write("                MAVLink_message.__init__(self, %s.id, %s.name)\n" % (classname, classname))
        outf.write("                self._fieldnames = %s.fieldnames\n" % (classname))
        for f in m.fields:
            outf.write("                self.%s = %s\n" % (f.name, f.name))
        outf.write("""
        def pack(self, mav):
                return MAVLink_message.pack(self, mav, %u, struct.pack('%s'""" % (m.crc_extra, m.fmtstr))
        # Non-char arrays must be flattened into individual struct args.
        for field in m.ordered_fields:
            if (field.type != "char" and field.array_length > 1):
                for i in range(field.array_length):
                    outf.write(", self.{0:s}[{1:d}]".format(field.name,i))
            else:
                outf.write(", self.{0:s}".format(field.name))
        outf.write("))\n")
def native_mavfmt(field):
    '''Return the struct format character for *field*'s MAVLink type,
    in the form expected by the mavnative extension.

    Identical to the scalar mapping in mavfmt() except that the special
    uint8_t_mavlink_version type maps to 'v'. Raises KeyError for an
    unknown type name.
    '''
    # Renamed from `map`, which shadowed the builtin of the same name.
    type_map = {
        'float': 'f',
        'double': 'd',
        'char': 'c',
        'int8_t': 'b',
        'uint8_t': 'B',
        'uint8_t_mavlink_version': 'v',
        'int16_t': 'h',
        'uint16_t': 'H',
        'int32_t': 'i',
        'uint32_t': 'I',
        'int64_t': 'q',
        'uint64_t': 'Q',
    }
    return type_map[field.type]
def mavfmt(field):
    '''Return the Python struct format string for *field*.

    Scalar types map to a single struct character; array fields are
    prefixed with their length, with 'char' arrays packed as fixed-size
    strings (e.g. '10s'). Raises KeyError for an unknown type name.
    '''
    # Renamed from `map`, which shadowed the builtin of the same name.
    type_map = {
        'float': 'f',
        'double': 'd',
        'char': 'c',
        'int8_t': 'b',
        'uint8_t': 'B',
        'uint8_t_mavlink_version': 'B',
        'int16_t': 'h',
        'uint16_t': 'H',
        'int32_t': 'i',
        'uint32_t': 'I',
        'int64_t': 'q',
        'uint64_t': 'Q',
    }
    if field.array_length:
        if field.type == 'char':
            # char arrays pack as one fixed-length byte string
            return str(field.array_length) + 's'
        return str(field.array_length) + type_map[field.type]
    return type_map[field.type]
def generate_mavlink_class(outf, msgs, xml):
print("Generating MAVLink class")
outf.write("\n\nmavlink_map = {\n");
for m in msgs:
outf.write(" MAVLINK_MSG_ID_%s : MAVLink_%s_message,\n" % (m.name.upper(), m.name.lower()))
outf.write("}\n\n")
t.write(outf, """
class MAVError(Exception):
'''MAVLink error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = msg
class MAVString(str):
'''NUL terminated string'''
def __init__(self, s):
str.__init__(self)
def __str__(self):
i = self.find(chr(0))
if i == -1:
return self[:]
return self[0:i]
class MAVLink_bad_data(MAVLink_message):
'''
a piece of bad data in a mavlink stream
'''
def __init__(self, data, reason):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_BAD_DATA, 'BAD_DATA')
self._fieldnames = ['data', 'reason']
self.data = data
self.reason = reason
self._msgbuf = data
def __str__(self):
'''Override the __str__ function from MAVLink_messages because non-printable characters are common in to be the reason for this message to exist.'''
return '%s {%s, data:%s}' % (self._type, self.reason, [('%x' % ord(i) if isinstance(i, str) else '%x' % i) for i in self.data])
class MAVLink(object):
'''MAVLink protocol handling class'''
def __init__(self, file, srcSystem=0, srcComponent=0, use_native=False):
self.seq = 0
self.file = file
self.srcSystem = srcSystem
self.srcComponent = srcComponent
self.callback = None
self.callback_args = None
self.callback_kwargs = None
self.send_callback = None
self.send_callback_args = None
self.send_callback_kwargs = None
self.buf = bytearray()
self.expected_length = 8
self.have_prefix_error = False
self.robust_parsing = False
self.protocol_marker = ${protocol_marker}
self.little_endian = ${little_endian}
self.crc_extra = ${crc_extra}
self.sort_fields = ${sort_fields}
self.total_packets_sent = 0
self.total_bytes_sent = 0
self.total_packets_received = 0
self.total_bytes_received = 0
self.total_receive_errors = 0
self.startup_time = time.time()
if native_supported and (use_native or native_testing or native_force):
print("NOTE: mavnative is currently beta-test code")
self.native = mavnative.NativeConnection(MAVLink_message, mavlink_map)
else:
self.native = None
if native_testing:
self.test_buf = bytearray()
def set_callback(self, callback, *args, **kwargs):
self.callback = callback
self.callback_args = args
self.callback_kwargs = kwargs
def set_send_callback(self, callback, *args, **kwargs):
self.send_callback = callback
self.send_callback_args = args
self.send_callback_kwargs = kwargs
def send(self, mavmsg):
'''send a MAVLink message'''
buf = mavmsg.pack(self)
self.file.write(buf)
self.seq = (self.seq + 1) % 256
self.total_packets_sent += 1
self.total_bytes_sent += len(buf)
if self.send_callback:
self.send_callback(mavmsg, *self.send_callback_args, **self.send_callback_kwargs)
def bytes_needed(self):
'''return number of bytes needed for next parsing stage'''
if self.native:
ret = self.native.expected_length - len(self.buf)
else:
ret = self.expected_length - len(self.buf)
if ret <= 0:
return 1
return ret
def __parse_char_native(self, c):
'''this method exists only to see in profiling results'''
m = self.native.parse_chars(c)
return m
def __callbacks(self, msg):
'''this method exists only to make profiling results easier to read'''
if self.callback:
self.callback(msg, *self.callback_args, **self.callback_kwargs)
def parse_char(self, c):
'''input some data bytes, possibly returning a new message'''
self.buf.extend(c)
self.total_bytes_received += len(c)
if self.native:
if native_testing:
self.test_buf.extend(c)
m = self.__parse_char_native(self.test_buf)
m2 = self.__parse_char_legacy()
if m2 != m:
print("Native: %s\\nLegacy: %s\\n" % (m, m2))
raise Exception('Native vs. Legacy mismatch')
else:
m = self.__parse_char_native(self.buf)
else:
m = self.__parse_char_legacy()
if m != None:
self.total_packets_received += 1
self.__callbacks(m)
return m
def __parse_char_legacy(self):
'''input some data bytes, possibly returning a new message (uses no native code)'''
if len(self.buf) >= 1 and self.buf[0] != ${protocol_marker}:
magic = self.buf[0]
self.buf = self.buf[1:]
if self.robust_parsing:
m = MAVLink_bad_data(chr(magic), "Bad prefix")
self.expected_length = 8
self.total_receive_errors += 1
return m
if self.have_prefix_error:
return None
self.have_prefix_error = True
self.total_receive_errors += 1
raise MAVError("invalid MAVLink prefix '%s'" % magic)
self.have_prefix_error = False
if len(self.buf) >= 2:
if sys.version_info[0] < 3:
(magic, self.expected_length) = struct.unpack('BB', str(self.buf[0:2])) # bytearrays are not supported in py 2.7.3
else:
(magic, self.expected_length) = struct.unpack('BB', self.buf[0:2])
self.expected_length += 8
if self.expected_length >= 8 and len(self.buf) >= self.expected_length:
mbuf = array.array('B', self.buf[0:self.expected_length])
self.buf = self.buf[self.expected_length:]
self.expected_length = 8
if self.robust_parsing:
try:
m = self.decode(mbuf)
except MAVError as reason:
m = MAVLink_bad_data(mbuf, reason.message)
self.total_receive_errors += 1
else:
m = self.decode(mbuf)
return m
return None
def parse_buffer(self, s):
'''input some data bytes, possibly returning a list of new messages'''
m = self.parse_char(s)
if m is None:
return None
ret = [m]
while True:
m = self.parse_char("")
if m is None:
return ret
ret.append(m)
return ret
def decode(self, msgbuf):
'''decode a buffer as a MAVLink message'''
# decode the header
try:
magic, mlen, seq, srcSystem, srcComponent, msgId = struct.unpack('cBBBBB', msgbuf[:6])
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
if ord(magic) != ${protocol_marker}:
raise MAVError("invalid MAVLink prefix '%s'" % magic)
if mlen != len(msgbuf)-8:
raise MAVError('invalid MAVLink message length. Got %u expected %u, msgId=%u' % (len(msgbuf)-8, mlen, msgId))
if not msgId in mavlink_map:
raise MAVError('unknown MAVLink message ID %u' % msgId)
# decode the payload
type = mavlink_map[msgId]
fmt = type.format
order_map = type.orders
len_map = type.lengths
crc_extra = type.crc_extra
# decode the checksum
try:
crc, = struct.unpack('<H', msgbuf[-2:])
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink CRC: %s' % emsg)
crcbuf = msgbuf[1:-2]
if ${crc_extra}: # using CRC extra
crcbuf.append(crc_extra)
crc2 = x25crc(crcbuf)
if crc != crc2.crc:
raise MAVError('invalid MAVLink CRC in msgID %u 0x%04x should be 0x%04x' % (msgId, crc, crc2.crc))
try:
t = struct.unpack(fmt, msgbuf[6:-2])
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink payload type=%s fmt=%s payloadLength=%u: %s' % (
type, fmt, len(msgbuf[6:-2]), emsg))
tlist = list(t)
# handle sorted fields
if ${sort_fields}:
t = tlist[:]
if sum(len_map) == len(len_map):
# message has no arrays in it
for i in range(0, len(tlist)):
tlist[i] = t[order_map[i]]
else:
# message has some arrays
tlist = []
for i in range(0, len(order_map)):
order = order_map[i]
L = len_map[order]
tip = sum(len_map[:order])
field = t[tip]
if L == 1 or isinstance(field, str):
tlist.append(field)
else:
tlist.append(t[tip:(tip + L)])
# terminate any strings
for i in range(0, len(tlist)):
if isinstance(tlist[i], str):
tlist[i] = str(MAVString(tlist[i]))
t = tuple(tlist)
# construct the message object
try:
m = type(*t)
except Exception as emsg:
raise MAVError('Unable to instantiate MAVLink message of type %s : %s' % (type, emsg))
m._msgbuf = msgbuf
m._payload = msgbuf[6:-2]
m._crc = crc
m._header = MAVLink_header(msgId, mlen, seq, srcSystem, srcComponent)
return m
""", xml)
def generate_methods(outf, msgs):
print("Generating methods")
def field_descriptions(fields):
ret = ""
for f in fields:
ret += " %-18s : %s (%s)\n" % (f.name, f.description.strip(), f.type)
return ret
wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" ")
for m in msgs:
comment = "%s\n\n%s" % (wrapper.fill(m.description.strip()), field_descriptions(m.fields))
selffieldnames = 'self, '
for f in m.fields:
if f.omit_arg:
selffieldnames += '%s=%s, ' % (f.name, f.const_value)
else:
selffieldnames += '%s, ' % f.name
selffieldnames = selffieldnames[:-2]
sub = {'NAMELOWER' : m.name.lower(),
'SELFFIELDNAMES' : selffieldnames,
'COMMENT' : comment,
'FIELDNAMES' : ", ".join(m.fieldnames)}
t.write(outf, """
def ${NAMELOWER}_encode(${SELFFIELDNAMES}):
'''
${COMMENT}
'''
return MAVLink_${NAMELOWER}_message(${FIELDNAMES})
""", sub)
t.write(outf, """
def ${NAMELOWER}_send(${SELFFIELDNAMES}):
'''
${COMMENT}
'''
return self.send(self.${NAMELOWER}_encode(${FIELDNAMES}))
""", sub)
def generate(basename, xml):
'''generate complete python implemenation'''
if basename.endswith('.py'):
filename = basename
else:
filename = basename + '.py'
msgs = []
enums = []
filelist = []
for x in xml:
msgs.extend(x.message)
enums.extend(x.enum)
filelist.append(os.path.basename(x.filename))
for m in msgs:
if xml[0].little_endian:
m.fmtstr = '<'
else:
m.fmtstr = '>'
m.native_fmtstr = m.fmtstr
for f in m.ordered_fields:
m.fmtstr += mavfmt(f)
m.native_fmtstr += native_mavfmt(f)
m.order_map = [ 0 ] * len(m.fieldnames)
m.len_map = [ 0 ] * len(m.fieldnames)
m.array_len_map = [ 0 ] * len(m.fieldnames)
for i in range(0, len(m.fieldnames)):
m.order_map[i] = m.ordered_fieldnames.index(m.fieldnames[i])
m.array_len_map[i] = m.ordered_fields[i].array_length
for i in range(0, len(m.fieldnames)):
n = m.order_map[i]
m.len_map[n] = m.fieldlengths[i]
print("Generating %s" % filename)
outf = open(filename, "w")
generate_preamble(outf, msgs, basename, filelist, xml[0])
generate_enums(outf, enums)
generate_message_ids(outf, msgs)
generate_classes(outf, msgs)
generate_mavlink_class(outf, msgs, xml[0])
generate_methods(outf, msgs)
outf.close()
print("Generated %s OK" % filename)
|
heurezjusz/Athena | refs/heads/master | athenet/algorithm/random.py | 2 | import numpy
def get_random_indicators(layers):
return [numpy.random.rand(layer.W.size).reshape(layer.W.shape)
for layer in layers]
|
CVL-GitHub/karaage | refs/heads/master | karaage/tests/test_people.py | 2 | # Copyright 2009-2010, 2013-2015 VPAC
# Copyright 2010-2011, 2014 The University of Melbourne
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import datetime
import re
from django.core.urlresolvers import reverse
from django.conf import settings
from django.core import mail
from karaage.people.models import Person
from karaage.institutes.models import Institute, InstituteDelegate
from karaage.projects.models import Project
from karaage.machines.models import Account, MachineCategory
from karaage.tests.integration import IntegrationTestCase
class FakeRequest(object):
def __init__(self, person):
self.user = person
class PersonTestCase(IntegrationTestCase):
fixtures = [
'test_karaage.json',
]
def setUp(self):
super(PersonTestCase, self).setUp()
self._datastore = self.mc_ldap_datastore
def do_permission_tests(self, test_object, users):
for user_id in users:
# print("can user '%d' access '%s'?"%(user_id, test_object))
person = Person.objects.get(id=user_id)
request = FakeRequest(person)
result = test_object.can_view(request)
expected_result = users[user_id]
# print("---> got:'%s' expected:'%s'"%(result, expected_result))
self.assertEqual(
result, expected_result,
"%r.can_view(%r) returned %r but we expected %r"
% (test_object, person, result, expected_result))
# print()
def test_permissions(self):
test_object = Project.objects.get(pid="TestProject1")
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate,
# project leader
2: False, # person 2 cannot view
3: True, # person 3 can view: project member
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=1)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: self, project member,
# person's institute delegate
2: False, # person 2 cannot view
3: False, # person 3 cannot view
4: True, # person 4 can view: is_staff, institute delegate
})
test_object = Person.objects.get(id=2)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate
2: True, # person 2 can view: self
3: False, # person 3 cannot view
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=3)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate,
# project leader
2: False, # person 2 cannot view
3: True, # person 3 can view: self, project member
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=4)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate
2: False, # person 2 cannot view
3: False, # person 3 cannot view
4: True, # person 4 can view: self, is_staff
})
# add user 2 to project
# test that members can see other people in own project
# print("------------------------------------------------------------")
project = Project.objects.get(pid="TestProject1")
project.group.members = [2, 3]
test_object = Project.objects.get(pid="TestProject1")
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate
2: True, # person 2 can view: project member
3: True, # person 3 can view: project member
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=1)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: self, project member,
# delegate of institute
2: False, # person 2 cannot view
3: False, # person 3 cannot view
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=2)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate,
# project leader
2: True, # person 2 can view: self
3: True, # person 3 can view: project member
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=3)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate,
# project leader
2: True, # person 2 can view: project member
3: True, # person 3 can view: self, project member
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=4)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate
2: False, # person 2 cannot view
3: False, # person 3 cannot view
4: True, # person 4 can view: self, is_staff
})
# change institute of all people
# Test institute leader can access people in project despite not being
# institute leader for these people.
# print("------------------------------------------------------------")
Person.objects.all().update(institute=2)
# Institute.objects.filter(pk=2).update(delegate=2,active_delegate=2)
InstituteDelegate.objects.get_or_create(
institute=Institute.objects.get(id=2),
person=Person.objects.get(id=2),
defaults={'send_email': False})
project = Project.objects.get(pid="TestProject1")
project.leaders = [2]
test_object = Project.objects.get(pid="TestProject1")
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate
2: True, # person 2 can view: project member, person's
# institute delegate, project leader
3: True, # person 3 can view: project member
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=1)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: self, project member
2: True, # person 2 can view: person's institute delegate
3: False, # person 3 cannot view
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=2)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: project's institute leader
2: True, # person 2 can view: self, person's institute delegate,
# project leader
3: True, # person 3 can view: project member
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=3)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: project's institute leader
2: True, # person 2 can view: project member, person's institute
# delegate, project leader
3: True, # person 3 can view: self, project member
4: True, # person 4 can view: is_staff
})
test_object = Person.objects.get(id=4)
self.do_permission_tests(test_object, {
1: True, # person 1 can view: person's institute delegate
2: True, # person 2 can view: person's institute delegate
3: False, # person 3 cannot view
4: True, # person 4 can view: self, is_staff
})
def test_admin_create_user_with_account(self):
users = Person.objects.count()
project = Project.objects.get(pid='TestProject1')
p_users = project.group.members.count()
logged_in = self.client.login(username='kgsuper', password='aq12ws')
self.assertEqual(logged_in, True)
response = self.client.get(reverse('kg_person_add'))
self.assertEqual(response.status_code, 200)
form_data = {
'title': 'Mr',
'short_name': 'Sam',
'full_name': 'Sam Morrison',
'position': 'Sys Admin',
'institute': 1,
'department': 'eddf',
'email': 'sam@vpac.org',
'country': 'AU',
'telephone': '4444444',
'username': 'samtest',
'password1': 'Exaiquouxei0',
'password2': 'Exaiquouxei0',
'project': 1,
'needs_account': True,
'machine_category': 1,
}
response = self.client.post(reverse('kg_person_add'), form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Person.objects.count(), users + 1)
users = users + 1
person = Person.objects.get(pk=users)
self.assertEqual(person.is_active, True)
self.assertEqual(person.username, 'samtest')
self.assertEqual(Account.objects.count(), 2)
self.assertEqual(project.group.members.count(), p_users + 1)
luser = self._datastore._accounts().get(uid='samtest')
self.assertEqual(luser.givenName, 'Sam')
self.assertEqual(luser.homeDirectory, '/vpac/TestProject1/samtest')
def test_admin_create_user(self):
users = Person.objects.count()
project = Project.objects.get(pid='TestProject1')
project.group.members.count()
logged_in = self.client.login(username='kgsuper', password='aq12ws')
self.assertEqual(logged_in, True)
response = self.client.get(reverse('kg_person_add'))
self.assertEqual(response.status_code, 200)
form_data = {
'title': 'Mr',
'short_name': 'Sam',
'full_name': 'Sam Morrison2',
'position': 'Sys Admin',
'institute': 1,
'department': 'eddf',
'email': 'sam@vpac.org',
'country': 'AU',
'telephone': '4444444',
'username': 'samtest2',
'password1': 'Exaiquouxei0',
'password2': 'Exaiquouxei0',
'needs_account': False,
}
response = self.client.post(reverse('kg_person_add'), form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Person.objects.count(), users + 1)
users = users + 1
person = Person.objects.get(pk=users)
self.assertEqual(person.is_active, True)
self.assertEqual(person.username, 'samtest2')
# Try adding it again - Should fail
response = self.client.post(reverse('kg_person_add'), form_data)
self.assertEqual(response.status_code, 200)
def test_admin_update_person(self):
logged_in = self.client.login(username='kgsuper', password='aq12ws')
self.assertEqual(logged_in, True)
person = Person.objects.get(username='kgtestuser3')
luser = self._datastore._accounts().get(uid='kgtestuser3')
self.assertEqual(person.mobile, '')
self.assertEqual(luser.gidNumber, 500)
self.assertEqual(luser.o, 'Example')
self.assertEqual(luser.gecos, 'Test User3 (Example)')
response = self.client.get(
reverse('kg_person_edit', args=['kgtestuser3']))
self.assertEqual(response.status_code, 200)
form_data = {
'title': 'Mr',
'short_name': 'Test',
'full_name': 'Test User3',
'position': 'Sys Admin',
'institute': 2,
'department': 'eddf',
'email': 'sam@vpac.org',
'country': 'AU',
'telephone': '4444444',
'mobile': '555666',
}
response = self.client.post(
reverse('kg_person_edit', args=['kgtestuser3']), form_data)
self.assertEqual(response.status_code, 302)
person = Person.objects.get(username='kgtestuser3')
luser = self._datastore._accounts().get(uid='kgtestuser3')
self.assertEqual(person.mobile, '555666')
self.assertEqual(luser.gidNumber, 501)
self.assertEqual(luser.o, 'OtherInst')
self.assertEqual(luser.gecos, 'Test User3 (OtherInst)')
def test_delete_activate_person(self):
self.client.login(username='kgsuper', password='aq12ws')
person = Person.objects.get(username='kgtestuser3')
self.assertEqual(person.is_active, True)
self.assertEqual(person.projects.count(), 1)
self.assertEqual(person.account_set.count(), 1)
self.assertEqual(person.account_set.all()[0].date_deleted, None)
luser = self._datastore._accounts().get(uid='kgtestuser3')
self.assertEqual(luser.givenName, 'Test')
response = self.client.get(
reverse('kg_person_delete', args=[person.username]))
self.assertEqual(response.status_code, 200)
# Test deleting
response = self.client.post(
reverse('kg_person_delete', args=[person.username]))
self.assertEqual(response.status_code, 302)
person = Person.objects.get(username='kgtestuser3')
self.assertEqual(person.is_active, False)
self.assertEqual(person.projects.count(), 0)
self.assertEqual(person.account_set.count(), 1)
self.assertEqual(person.account_set.all()[0].date_deleted,
datetime.date.today())
self.assertRaises(
self._datastore._account.DoesNotExist,
self._datastore._accounts().get,
uid='kgtestuser3')
# Test activating
response = self.client.post(
reverse('kg_person_activate', args=[person.username]))
self.assertEqual(response.status_code, 302)
person = Person.objects.get(username='kgtestuser3')
self.assertEqual(person.is_active, True)
def stest_delete_account(self):
person = Person.objects.get(pk=Person.objects.count())
ua = person.account_set.all()[0]
self.assertEqual(person.is_active, True)
self.assertEqual(person.account_set.count(), 1)
self.assertEqual(ua.date_deleted, None)
response = self.client.post(
'/%susers/accounts/delete/%i/' % (settings.BASE_URL, ua.id))
self.assertEqual(response.status_code, 302)
person = Person.objects.get(pk=Person.objects.count())
ua = person.account_set.all()[0]
self.assertEqual(ua.date_deleted, datetime.date.today())
self.assertEqual(person.project_set.count(), 0)
def stest_default_projects(self):
person = Person.objects.get(pk=Person.objects.count())
ua = person.account_set.all()[0]
self.assertEqual(person.project_set.count(), 1)
self.assertEqual(person.project_set.all()[0], ua.default_project)
project = Project.objects.create(
pid='test2',
name='test project',
leader=person,
start_date=datetime.date.today(),
machine_category=MachineCategory.objects.get(name='VPAC'),
institute=Institute.objects.get(name='VPAC'),
is_active=True,
is_approved=True,
)
project.users.add(person)
self.assertEqual(person.project_set.count(), 2)
# change default
response = self.client.post(
reverse('kg_account_set_default', args=[ua.id, project.pid]))
self.assertEqual(response.status_code, 302)
person = Person.objects.get(pk=Person.objects.count())
ua = person.account_set.all()[0]
project = Project.objects.get(pid='test2')
self.assertEqual(person.project_set.count(), 2)
self.assertEqual(project, ua.default_project)
def stest_add_user_to_project(self):
person = Person.objects.get(pk=Person.objects.count())
person.account_set.all()[0]
self.assertEqual(person.project_set.count(), 1)
Project.objects.create(
pid='test2',
name='test project 5',
leader=Person.objects.get(username='leader'),
start_date=datetime.date.today(),
machine_category=MachineCategory.objects.get(name='VPAC'),
institute=Institute.objects.get(name='VPAC'),
is_active=True,
is_approved=True,
)
response = self.client.post(
reverse('kg_person_detail', args=[person.username]),
{'project': 'test2', 'project-add': 'true'})
self.assertEqual(response.status_code, 200)
self.assertEqual(person.project_set.count(), 2)
def test_password_reset_by_self(self):
logged_in = self.client.login(
username='kgtestuser1', password='aq12ws')
self.assertEqual(logged_in, True)
# send request
url = reverse("kg_profile_reset")
done_url = reverse("kg_profile_reset_done")
response = self.client.post(url, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain[0][0],
'http://testserver' + done_url)
# check email
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.subject, "TestOrg Password change")
url = re.search("(?P<url>https?://[^\s]+)", message.body).group("url")
self.assertTrue(
url.startswith("https://example.com/users/persons/reset/"))
url = url[25:]
# get password reset page
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# send new password
form_data = {
'new_password1': 'q1w2e3r4',
'new_password2': 'q1w2e3r4',
}
done_url = reverse("password_reset_complete")
response = self.client.post(url, form_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain[0][0],
'http://testserver' + done_url)
# test new password
logged_in = self.client.login(
username='kgtestuser1', password='q1w2e3r4')
self.assertEqual(logged_in, True)
def test_password_reset_by_admin(self):
logged_in = self.client.login(username='kgsuper', password='aq12ws')
self.assertEqual(logged_in, True)
# send request
url = reverse("kg_person_reset", args=["kgtestuser1"])
done_url = reverse("kg_person_reset_done", args=["kgtestuser1"])
response = self.client.post(url, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain[0][0],
'http://testserver' + done_url)
self.client.logout()
# check email
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.subject, "TestOrg Password change")
url = re.search("(?P<url>https?://[^\s]+)", message.body).group("url")
self.assertTrue(
url.startswith("https://example.com/users/persons/reset/"))
url = url[25:]
# get password reset page
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# send new password
form_data = {
'new_password1': 'q1w2e3r4',
'new_password2': 'q1w2e3r4',
}
done_url = reverse("password_reset_complete")
response = self.client.post(url, form_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain[0][0],
'http://testserver' + done_url)
# test new password
logged_in = self.client.login(
username='kgtestuser1', password='q1w2e3r4')
self.assertEqual(logged_in, True)
|
freundTech/deepl-cli | refs/heads/master | deepl/__init__.py | 1 | from .translator import translate
__all__ = ['translate'] |
gilneidp/FinalProject | refs/heads/master | ALL_FILES/pox/misc/gephi_topo.py | 40 | # Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Detects topology and streams it to Gephi
Gephi is a pretty awesome graph visualization/manipulation package. It has
a plugin for streaming graphs back and forth between it and something else.
We use that (by opening a listening socket -- port 8282 by default) and
sending detected switches, links, and (optionally) hosts.
Based on POXDesk's tinytopo module.
Requires discovery. host_tracker is optional.
pox.py openflow.discovery misc.gephi_topo host_tracker forwarding.l2_learning
"""
from pox.core import core
from pox.lib.util import dpid_to_str
from pox.lib.ioworker.workers import *
from pox.lib.ioworker import *
import json
log = core.getLogger()
class ServerWorker (TCPServerWorker, RecocoIOWorker):
pass
clients = set()
class GephiWorker (RecocoIOWorker):
def __init__ (self, *args, **kw):
super(GephiWorker, self).__init__(*args, **kw)
self._connecting = True
self.data = b''
def _handle_close (self):
log.info("Client disconnect")
super(GephiWorker, self)._handle_close()
clients.discard(self)
def _handle_connect (self):
log.info("Client connect")
super(GephiWorker, self)._handle_connect()
core.GephiTopo.send_full(self)
clients.add(self)
def _handle_rx (self):
self.data += self.read()
while '\n' in self.data:
# We don't currently do anything with this
msg,self.data = self.data.split('\n',1)
# This SHOULD be an HTTP request.
#print msg
pass
def an (n, **kw):
kw['label'] = str(n)
return {'an':{str(n):kw}}
def ae (a, b):
a = str(a)
b = str(b)
if a > b:
a,b=b,a
return {'ae':{a+"_"+b:{'source':a,'target':b,'directed':False}}}
def de (a, b):
a = str(a)
b = str(b)
if a > b:
a,b=b,a
return {'de':{a+"_"+b:{}}}
def dn (n):
return {'dn':{str(n):{}}}
def clear ():
return {'dn':{'filter':'ALL'}}
class GephiTopo (object):
def __init__ (self):
core.listen_to_dependencies(self)
self.switches = set()
self.links = set()
self.hosts = {} # mac -> dpid
def _handle_core_ComponentRegistered (self, event):
if event.name == "host_tracker":
event.component.addListenerByName("HostEvent",
self.__handle_host_tracker_HostEvent)
def send (self, data):
for c in clients:
c.send(json.dumps(data) + '\r\n')
def send_full (self, client):
out = []
out.append(clear())
for s in self.switches:
out.append(an(s, kind='switch'))
for e in self.links:
out.append(ae(e[0],e[1]))
for h,s in self.hosts.iteritems():
out.append(an(h, kind='host'))
if s in self.switches:
out.append(ae(h,s))
out = '\r\n'.join(json.dumps(o) for o in out)
client.send(out + '\r\n')
def __handle_host_tracker_HostEvent (self, event):
# Name is intentionally mangled to keep listen_to_dependencies away
h = str(event.entry.macaddr)
s = dpid_to_str(event.entry.dpid)
if event.leave:
if h in self.hosts:
if s in self.switches:
self.send(de(h,s))
self.send(dn(h))
del self.hosts[h]
else:
if h not in self.hosts:
self.hosts[h] = s
self.send(an(h, kind='host'))
if s in self.switches:
self.send(ae(h, s))
else:
log.warn("Missing switch")
def _handle_openflow_ConnectionUp (self, event):
s = dpid_to_str(event.dpid)
if s not in self.switches:
self.send(an(s))
self.switches.add(s)
def _handle_openflow_ConnectionDown (self, event):
s = dpid_to_str(event.dpid)
if s in self.switches:
self.send(dn(s))
self.switches.remove(s)
def _handle_openflow_discovery_LinkEvent (self, event):
s1 = event.link.dpid1
s2 = event.link.dpid2
s1 = dpid_to_str(s1)
s2 = dpid_to_str(s2)
if s1 > s2: s1,s2 = s2,s1
assert s1 in self.switches
assert s2 in self.switches
if event.added and (s1,s2) not in self.links:
self.links.add((s1,s2))
self.send(ae(s1,s2))
# Do we have abandoned hosts?
for h,s in self.hosts.iteritems():
if s == s1: self.send(ae(h,s1))
elif s == s2: self.send(ae(h,s2))
elif event.removed and (s1,s2) in self.links:
self.links.remove((s1,s2))
self.send(de(s1,s2))
def launch (port = 8282):
core.registerNew(GephiTopo)
# In theory, we're supposed to be running a web service, but instead
# we just spew Gephi graph streaming junk at everyone who connects. :)
global loop
loop = RecocoIOLoop()
#loop.more_debugging = True
loop.start()
w = ServerWorker(child_worker_type=GephiWorker, port = int(port))
loop.register_worker(w)
|
ChinaMassClouds/copenstack-server | refs/heads/master | openstack/src/horizon-2014.2/openstack_dashboard/dashboards/project/volumes/snapshots/urls.py | 7 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.volumes.snapshots import views
urlpatterns = patterns('',
url(r'^(?P<snapshot_id>[^/]+)$',
views.DetailView.as_view(),
name='detail'),
url(r'^(?P<snapshot_id>[^/]+)/update/$',
views.UpdateView.as_view(),
name='update'),
)
|
newerthcom/savagerebirth | refs/heads/master | libs/python-2.72/Lib/plat-mac/Carbon/CoreGraphics.py | 81 | # Generated from 'CGContext.h'
def FOUR_CHAR_CODE(x): return x
kCGLineJoinMiter = 0
kCGLineJoinRound = 1
kCGLineJoinBevel = 2
kCGLineCapButt = 0
kCGLineCapRound = 1
kCGLineCapSquare = 2
kCGPathFill = 0
kCGPathEOFill = 1
kCGPathStroke = 2
kCGPathFillStroke = 3
kCGPathEOFillStroke = 4
kCGTextFill = 0
kCGTextStroke = 1
kCGTextFillStroke = 2
kCGTextInvisible = 3
kCGTextFillClip = 4
kCGTextStrokeClip = 5
kCGTextFillStrokeClip = 6
kCGTextClip = 7
kCGEncodingFontSpecific = 0
kCGEncodingMacRoman = 1
kCGInterpolationDefault = 0
kCGInterpolationNone = 1
kCGInterpolationLow = 2
kCGInterpolationHigh = 3
|
ayys/alms | refs/heads/master | settings/views.py | 2 | # -*- coding: utf-8 -*-
from settings.models import Globals, addGlobalContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render
def home(request):
config = Globals()
context = {'config': config}
if request.method == "POST":
settings = request.POST
org_long_name = settings.get("org_long_name", None)
org_short_name = settings.get("org_short_name", None)
org_motto = settings.get("org_motto", None)
late_fees_price = settings.get("late_fees_price", None)
max_books_borrow_days = settings.get("max_books_borrow_days", None)
membership_valid_days = settings.get("membership_valid_days", None)
config = Globals()
if org_long_name is not None:
config.add("alms", "org_long_name", org_long_name)
if org_short_name is not None:
config.add("alms", "org_short_name", org_short_name)
if org_motto is not None:
config.add("alms", "org_motto", org_motto)
if late_fees_price is not None:
config.add("misc", "late_fees_price", late_fees_price)
if max_books_borrow_days is not None:
config.add("books", "borrow_max_days", max_books_borrow_days)
if membership_valid_days is not None:
config.add("misc", "membership_valid_days", membership_valid_days)
return HttpResponseRedirect(reverse("settingsHome"))
return render(request,
"settings/home.html",
addGlobalContext(context))
|
CamelBackNotation/CarnotKE | refs/heads/master | jyhton/bugtests/test387.py | 13 | import support
import test387p.test387m
import sys
if not 'test387p.difflib' in sys.modules:
raise support.TestError, 'Cached module for sibling module import miss should exist in sys.modules'
if not sys.modules['test387p.difflib'] is None:
raise support.TestError, 'Cached module for sibling module import miss should be None in sys.modules'
|
septag/termite | refs/heads/master | deps/bgfx/3rdparty/scintilla/qt/ScintillaEditPy/sepbuild.py | 65 | import distutils.sysconfig
import getopt
import glob
import os
import platform
import shutil
import subprocess
import stat
import sys
sys.path.append(os.path.join("..", "ScintillaEdit"))
import WidgetGen
scintillaDirectory = "../.."
scintillaScriptsDirectory = os.path.join(scintillaDirectory, "scripts")
sys.path.append(scintillaScriptsDirectory)
from FileGenerator import GenerateFile
# Decide up front which platform, treat anything other than Windows or OS X as Linux
PLAT_WINDOWS = platform.system() == "Windows"
PLAT_DARWIN = platform.system() == "Darwin"
PLAT_LINUX = not (PLAT_DARWIN or PLAT_WINDOWS)
def IsFileNewer(name1, name2):
""" Returns whether file with name1 is newer than file with name2. Returns 1
if name2 doesn't exist. """
if not os.path.exists(name1):
return 0
if not os.path.exists(name2):
return 1
mod_time1 = os.stat(name1)[stat.ST_MTIME]
mod_time2 = os.stat(name2)[stat.ST_MTIME]
return (mod_time1 > mod_time2)
def textFromRun(args):
proc = subprocess.Popen(args, shell=isinstance(args, str), stdout=subprocess.PIPE)
(stdoutdata, stderrdata) = proc.communicate()
if proc.returncode:
raise OSError(proc.returncode)
return stdoutdata
def runProgram(args, exitOnFailure):
print(" ".join(args))
retcode = subprocess.call(" ".join(args), shell=True, stderr=subprocess.STDOUT)
if retcode:
print("Failed in " + " ".join(args) + " return code = " + str(retcode))
if exitOnFailure:
sys.exit()
def usage():
print("sepbuild.py [-h|--help][-c|--clean][-u|--underscore-names]")
print("")
print("Generate PySide wappers and build them.")
print("")
print("options:")
print("")
print("-c --clean remove all object and generated files")
print("-b --pyside-base Location of the PySide+Qt4 sandbox to use")
print("-h --help display this text")
print("-d --debug=yes|no force debug build (or non-debug build)")
print("-u --underscore-names use method_names consistent with GTK+ standards")
modifyFunctionElement = """ <modify-function signature="%s">%s
</modify-function>"""
injectCode = """
<inject-code class="target" position="beginning">%s
</inject-code>"""
injectCheckN = """
if (!cppArg%d) {
PyErr_SetString(PyExc_ValueError, "Null string argument");
return 0;
}"""
def methodSignature(name, v, options):
    """Build the C++ method signature string used in the typesystem XML."""
    argList = []
    firstType = WidgetGen.cppAlias(v["Param1Type"])
    if firstType == "int":
        firstType = "sptr_t"
    if firstType:
        argList.append(firstType)
    secondType = WidgetGen.cppAlias(v["Param2Type"])
    if secondType == "int":
        secondType = "sptr_t"
    # "stringresult" parameters are output buffers, not signature arguments.
    if secondType and v["Param2Type"] != "stringresult":
        argList.append(secondType)
    methodName = WidgetGen.normalisedName(name, options, v["FeatureType"])
    # Getters are generated as const member functions.
    constSuffix = " const" if v["FeatureType"] == "get" else ""
    return methodName + "(" + ", ".join(argList) + ")" + constSuffix
def printTypeSystemFile(f, options):
    """Produce the <modify-function> XML lines that inject null-string checks
    for every non-deprecated fun/get/set feature of the interface."""
    out = []
    for name in f.order:
        feature = f.features[name]
        if feature["Category"] == "Deprecated":
            continue
        if feature["FeatureType"] not in ("fun", "get", "set"):
            continue
        checks = ""
        if feature["Param1Type"] == "string":
            checks += injectCheckN % 0
        if feature["Param2Type"] == "string":
            if feature["Param1Type"] == "":
                # Only argument 2 is present -> it becomes the first C++ argument.
                checks += injectCheckN % 0
            else:
                checks += injectCheckN % 1
        if checks:
            inject = injectCode % checks
            out.append(modifyFunctionElement % (methodSignature(name, feature, options), inject))
    return out
def doubleBackSlashes(s):
    """Escape each backslash so qmake reads Windows paths without warnings."""
    return "\\\\".join(s.split("\\"))
class SepBuilder:
    """Discover the local build configuration (platform, Python, Qt, PySide,
    Shiboken, Scintilla version) and drive generation plus compilation of the
    ScintillaEditPy Python bindings."""

    def __init__(self):
        # Discover configuration parameters
        self.ScintillaEditIncludes = [".", "../ScintillaEdit", "../ScintillaEditBase", "../../include"]
        if PLAT_WINDOWS:
            self.MakeCommand = "nmake"
            self.MakeTarget = "release"
        else:
            self.MakeCommand = "make"
            self.MakeTarget = ""
        if PLAT_DARWIN:
            self.QMakeOptions = "-spec macx-g++"
        else:
            self.QMakeOptions = ""
        # Default to debug build if running in a debug build interpreter
        # (sys.getobjects only exists on Py_DEBUG / trace-refs interpreters).
        self.DebugBuild = hasattr(sys, 'getobjects')
        # Python
        self.PyVersion = "%d.%d" % sys.version_info[:2]
        self.PyVersionSuffix = distutils.sysconfig.get_config_var("VERSION")
        self.PyIncludes = distutils.sysconfig.get_python_inc()
        self.PyPrefix = distutils.sysconfig.get_config_var("prefix")
        self.PyLibDir = distutils.sysconfig.get_config_var(
            ("LIBDEST" if sys.platform == 'win32' else "LIBDIR"))
        # Scintilla
        # version.txt holds the version as bare digits, e.g. "365" -> "3.6.5".
        with open("../../version.txt") as f:
            version = f.read()
        self.ScintillaVersion = version[0] + '.' + version[1] + '.' + version[2]
        # Find out what qmake is called
        self.QMakeCommand = "qmake"
        if not PLAT_WINDOWS:
            # On Unix qmake may not be present but qmake-qt4 may be so check
            pathToQMake = textFromRun("which qmake-qt4 || which qmake").rstrip()
            self.QMakeCommand = os.path.basename(pathToQMake)
        # Qt default location from qmake
        self._SetQtIncludeBase(textFromRun(self.QMakeCommand + " -query QT_INSTALL_HEADERS").rstrip())
        # PySide default location
        # No standard for installing PySide development headers and libs on Windows so
        # choose /usr to be like Linux
        self._setPySideBase('\\usr' if PLAT_WINDOWS else '/usr')
        self.ProInclude = "sepbuild.pri"
        self.qtStyleInterface = True

    def _setPySideBase(self, base):
        # Record the sandbox root, then locate the PySide/Shiboken pieces via
        # pkg-config where available, falling back to conventional paths
        # under the recorded root.
        self.PySideBase = base

        def _try_pkgconfig(var, package, *relpath):
            # Ask pkg-config for `var` of `package`; fall back to
            # PySideBase/<relpath...> when pkg-config is missing or fails
            # (textFromRun raises OSError in both cases).
            try:
                return textFromRun(["pkg-config", "--variable=" + var, package]).rstrip()
            except OSError:
                return os.path.join(self.PySideBase, *relpath)

        self.PySideTypeSystem = _try_pkgconfig("typesystemdir", "pyside",
            "share", "PySide", "typesystems")
        self.PySideIncludeBase = _try_pkgconfig("includedir", "pyside",
            "include", "PySide")
        self.ShibokenIncludeBase = _try_pkgconfig("includedir", "shiboken",
            "include", "shiboken")
        self.PySideIncludes = [
            self.ShibokenIncludeBase,
            self.PySideIncludeBase,
            os.path.join(self.PySideIncludeBase, "QtCore"),
            os.path.join(self.PySideIncludeBase, "QtGui")]
        self.PySideLibDir = _try_pkgconfig("libdir", "pyside", "lib")
        self.ShibokenLibDir = _try_pkgconfig("libdir", "shiboken", "lib")
        self.AllIncludes = os.pathsep.join(self.QtIncludes + self.ScintillaEditIncludes + self.PySideIncludes)
        self.ShibokenGenerator = "shiboken"
        # Is this still needed? It doesn't work with latest shiboken sources
        #if PLAT_DARWIN:
        #    # On OS X, can not automatically find Shiboken dylib so provide a full path
        #    self.ShibokenGenerator = os.path.join(self.PySideLibDir, "generatorrunner", "shiboken")

    def generateAPI(self, args):
        """Run WidgetGen in ../ScintillaEdit, then regenerate the typesystem
        XML in this directory from its template."""
        os.chdir(os.path.join("..", "ScintillaEdit"))
        if not self.qtStyleInterface:
            args.insert(0, '--underscore-names')
        WidgetGen.main(args)
        f = WidgetGen.readInterface(False)
        os.chdir(os.path.join("..", "ScintillaEditPy"))
        options = {"qtStyle": self.qtStyleInterface}
        GenerateFile("typesystem_ScintillaEdit.xml.template", "typesystem_ScintillaEdit.xml",
            "<!-- ", True, printTypeSystemFile(f, options))

    def runGenerator(self):
        """Invoke shiboken (or generatorrunner) to emit the binding sources.

        Prefers an executable found under PySideBase/bin; otherwise relies on
        a "shiboken" binary on PATH. Exits the script on failure."""
        generatorrunner = "shiboken"
        for name in ('shiboken', 'generatorrunner'):
            if PLAT_WINDOWS:
                name += '.exe'
            name = os.path.join(self.PySideBase, "bin", name)
            if os.path.exists(name):
                generatorrunner = name
                break
        args = [
            generatorrunner,
            "--generator-set=" + self.ShibokenGenerator,
            "global.h ",
            "--avoid-protected-hack",
            "--enable-pyside-extensions",
            "--include-paths=" + self.AllIncludes,
            "--typesystem-paths=" + self.PySideTypeSystem,
            "--output-directory=.",
            "typesystem_ScintillaEdit.xml"]
        print(" ".join(args))
        retcode = subprocess.call(" ".join(args), shell=True, stderr=subprocess.STDOUT)
        if retcode:
            print("Failed in generatorrunner", retcode)
            sys.exit()

    def writeVariables(self):
        # Write variables needed into file to be included from project so it does not have to discover much
        with open(self.ProInclude, "w") as f:
            f.write("SCINTILLA_VERSION=" + self.ScintillaVersion + "\n")
            f.write("PY_VERSION=" + self.PyVersion + "\n")
            f.write("PY_VERSION_SUFFIX=" + self.PyVersionSuffix + "\n")
            f.write("PY_PREFIX=" + doubleBackSlashes(self.PyPrefix) + "\n")
            f.write("PY_INCLUDES=" + doubleBackSlashes(self.PyIncludes) + "\n")
            f.write("PY_LIBDIR=" + doubleBackSlashes(self.PyLibDir) + "\n")
            f.write("PYSIDE_INCLUDES=" + doubleBackSlashes(self.PySideIncludeBase) + "\n")
            f.write("PYSIDE_LIB=" + doubleBackSlashes(self.PySideLibDir) + "\n")
            f.write("SHIBOKEN_INCLUDES=" + doubleBackSlashes(self.ShibokenIncludeBase) + "\n")
            f.write("SHIBOKEN_LIB=" + doubleBackSlashes(self.ShibokenLibDir) + "\n")
            if self.DebugBuild:
                f.write("CONFIG += debug\n")
            else:
                f.write("CONFIG += release\n")

    def make(self):
        """Run qmake then the platform make tool; abort the script on failure."""
        runProgram([self.QMakeCommand, self.QMakeOptions], exitOnFailure=True)
        runProgram([self.MakeCommand, self.MakeTarget], exitOnFailure=True)

    def cleanEverything(self):
        """Remove generated sources, build products and log files."""
        self.generateAPI(["--clean"])
        runProgram([self.MakeCommand, "distclean"], exitOnFailure=False)
        filesToRemove = [self.ProInclude, "typesystem_ScintillaEdit.xml",
            "../../bin/ScintillaEditPy.so", "../../bin/ScintillaConstants.py"]
        for file in filesToRemove:
            try:
                os.remove(file)
            except OSError:
                pass
        for logFile in glob.glob("*.log"):
            try:
                os.remove(logFile)
            except OSError:
                pass
        shutil.rmtree("debug", ignore_errors=True)
        shutil.rmtree("release", ignore_errors=True)
        shutil.rmtree("ScintillaEditPy", ignore_errors=True)

    def buildEverything(self):
        """Parse command-line options, then either clean or run the full
        generate -> shiboken -> make -> install pipeline."""
        cleanGenerated = False
        opts, args = getopt.getopt(sys.argv[1:], "hcdub",
            ["help", "clean", "debug=",
            "underscore-names", "pyside-base="])
        for opt, arg in opts:
            if opt in ("-h", "--help"):
                usage()
                sys.exit()
            elif opt in ("-c", "--clean"):
                cleanGenerated = True
            elif opt in ("-d", "--debug"):
                # Bare -d (empty arg) also selects a debug build.
                self.DebugBuild = (arg == '' or arg.lower() == 'yes')
                if self.DebugBuild and sys.platform == 'win32':
                    self.MakeTarget = 'debug'
            elif opt in ("-b", '--pyside-base'):
                # Re-point both Qt headers and PySide at the given sandbox.
                self._SetQtIncludeBase(os.path.join(os.path.normpath(arg), 'include'))
                self._setPySideBase(os.path.normpath(arg))
            elif opt in ("-u", "--underscore-names"):
                self.qtStyleInterface = False
        if cleanGenerated:
            self.cleanEverything()
        else:
            self.writeVariables()
            self.generateAPI([""])
            self.runGenerator()
            self.make()
            self.copyScintillaConstants()

    def copyScintillaConstants(self):
        # Copy ScintillaConstants.py to ../../bin, but only when the copy is
        # missing or older than the freshly generated source.
        orig = 'ScintillaConstants.py'
        dest = '../../bin/' + orig
        if IsFileNewer(dest, orig):
            return
        f = open(orig, 'r')
        contents = f.read()
        f.close()
        f = open(dest, 'w')
        f.write(contents)
        f.close()

    def _SetQtIncludeBase(self, base):
        """Record the Qt include root and ensure the matching Qt bin/
        directory is first on PATH so the right qmake is found later."""
        self.QtIncludeBase = base
        self.QtIncludes = [self.QtIncludeBase] + [os.path.join(self.QtIncludeBase, sub) for sub in ["QtCore", "QtGui"]]
        # Set path so correct qmake is found
        path = os.environ.get('PATH', '').split(os.pathsep)
        qt_bin_dir = os.path.join(os.path.dirname(base), 'bin')
        if qt_bin_dir not in path:
            path.insert(0, qt_bin_dir)
        os.environ['PATH'] = os.pathsep.join(path)
# Script entry point: discover the configuration and build (or clean).
if __name__ == "__main__":
    SepBuilder().buildEverything()
|
michaelgallacher/intellij-community | refs/heads/master | python/testData/quickFixes/PyRemoveParameterQuickFixTest/docstring.py | 80 |
def foo(r<caret>):
"""
:param r: some parameter
:type r: int
:return:
"""
def a():
pass
x = 1
x = 2 |
jchen7960/python_framework | refs/heads/master | pages/TestAuthenticateMerchantPage.py | 1 | __author__ = 'Yunxi Lin'
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
#from pages.BasePage import BasePage
from locators.TestAuthenticateMerchantPageLocators import TestAuthenticateMerchantPageLocators
from common.CommonMethods import CommonMethods
class TestAuthenticateMerchantPage(object):  # BasePage
    """Page object for the merchant-authentication test page."""

    def __init__(self, driver):
        """Bind the WebDriver instance and the shared helper methods."""
        self.driver = driver
        self.cm = CommonMethods()

    def select_merchant(self, merchant_id):
        """Select the given merchant id in the page's drop-down."""
        locator = TestAuthenticateMerchantPageLocators.DROP_DOWN_ELEMENT
        self.cm.select_dropdown_by_value(self.driver, merchant_id, *locator)

    def submit_merchant(self):
        """Click the submit button."""
        self.cm.click(self.driver, *TestAuthenticateMerchantPageLocators.SUBMIT_BTN)

    def get_driver(self):
        """Return the WebDriver bound to this page object."""
        return self.driver
oliverhr/odoo | refs/heads/8.0-pos-pademobile-payment | addons/stock/stock.py | 3 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from dateutil import relativedelta
import json
import time
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare, float_round
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp.exceptions import Warning
from openerp import SUPERUSER_ID, api
import openerp.addons.decimal_precision as dp
from openerp.addons.procurement import procurement
import logging
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
    """International Commercial Terms: standard trade terms used to split
    transport costs and responsibilities between buyer and seller."""
    _name = "stock.incoterms"
    _description = "Incoterms"
    _columns = {
        'name': fields.char('Name', required=True, help="Incoterms are series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
        # Three-letter standard code, e.g. FOB, CIF.
        'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"),
        # Inactive incoterms are hidden rather than deleted.
        'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."),
    }
    _defaults = {
        'active': True,
    }
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
    """Inventory location, stored as a hierarchy (parent_left/parent_right
    nested-set) so that "child_of" searches and warehouse lookups are cheap."""
    _name = "stock.location"
    _description = "Inventory Locations"
    _parent_name = "location_id"
    _parent_store = True
    _parent_order = 'name'
    _order = 'parent_left'
    _rec_name = 'complete_name'

    def _location_owner(self, cr, uid, location, context=None):
        ''' Return the company owning the location if any '''
        # Only internal locations are owned by a company; all other usages
        # (customer, supplier, virtual...) return False.
        return location and (location.usage == 'internal') and location.company_id or False

    def _complete_name(self, cr, uid, ids, name, args, context=None):
        """ Forms complete name of location from parent location to child location.
        @return: Dictionary of values
        """
        res = {}
        for m in self.browse(cr, uid, ids, context=context):
            res[m.id] = m.name
            # Walk up the hierarchy, prepending each ancestor's name.
            parent = m.location_id
            while parent:
                res[m.id] = parent.name + ' / ' + res[m.id]
                parent = parent.location_id
        return res

    def _get_sublocations(self, cr, uid, ids, context=None):
        """ return all sublocations of the given stock locations (included) """
        if context is None:
            context = {}
        # Disable active_test so inactive sublocations are included too.
        context_with_inactive = context.copy()
        context_with_inactive['active_test'] = False
        return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive)

    def _name_get(self, cr, uid, location, context=None):
        # Build "parent/child" display name, stopping at 'view' locations.
        name = location.name
        while location.location_id and location.usage != 'view':
            location = location.location_id
            name = location.name + '/' + name
        return name

    def name_get(self, cr, uid, ids, context=None):
        """Return (id, display name) pairs using the slash-joined hierarchy."""
        res = []
        for location in self.browse(cr, uid, ids, context=context):
            res.append((location.id, self._name_get(cr, uid, location, context=context)))
        return res

    _columns = {
        'name': fields.char('Location Name', required=True, translate=True),
        'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
        'usage': fields.selection([
                        ('supplier', 'Supplier Location'),
                        ('view', 'View'),
                        ('internal', 'Internal Location'),
                        ('customer', 'Customer Location'),
                        ('inventory', 'Inventory'),
                        ('procurement', 'Procurement'),
                        ('production', 'Production'),
                        ('transit', 'Transit Location')],
                'Location Type', required=True,
                help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
                       \n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
                       \n* Internal Location: Physical locations inside your own warehouses,
                       \n* Customer Location: Virtual location representing the destination location for products sent to your customers
                       \n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
                       \n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
                       \n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
                       \n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations
                      """, select=True),
        # Recomputed whenever a location or any ancestor is renamed/moved.
        'complete_name': fields.function(_complete_name, type='char', string="Location Name",
                            store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}),
        'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
        'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
        'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"),
        'comment': fields.text('Additional Information'),
        # Optional physical coordinates inside the warehouse.
        'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"),
        'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
        'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
        # Nested-set bounds maintained by the ORM (_parent_store).
        'parent_left': fields.integer('Left Parent', select=1),
        'parent_right': fields.integer('Right Parent', select=1),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between companies'),
        'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'),
        'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
        'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
        'loc_barcode': fields.char('Location Barcode'),
    }
    _defaults = {
        'active': True,
        'usage': 'internal',
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
        'posx': 0,
        'posy': 0,
        'posz': 0,
        'scrap_location': False,
    }
    _sql_constraints = [('loc_barcode_company_uniq', 'unique (loc_barcode,company_id)', 'The barcode for a location must be unique per company !')]

    def create(self, cr, uid, default, context=None):
        """Default the barcode to the complete name when none is provided."""
        if not default.get('loc_barcode', False):
            default.update({'loc_barcode': default.get('complete_name', False)})
        return super(stock_location, self).create(cr, uid, default, context=context)

    def get_putaway_strategy(self, cr, uid, location, product, context=None):
        ''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.'''
        putaway_obj = self.pool.get('product.putaway')
        loc = location
        # Walk up the hierarchy until a putaway strategy yields a location.
        while loc:
            if loc.putaway_strategy_id:
                res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context)
                if res:
                    return res
            loc = loc.location_id

    def _default_removal_strategy(self, cr, uid, context=None):
        # Fallback removal method when neither product category nor any
        # ancestor location defines one.
        return 'fifo'

    def get_removal_strategy(self, cr, uid, location, product, context=None):
        ''' Returns the removal strategy to consider for the given product and location.
            Precedence: product category > location hierarchy > 'fifo' default.
            :param location: browse record (stock.location)
            :param product: browse record (product.product)
            :rtype: char
        '''
        if product.categ_id.removal_strategy_id:
            return product.categ_id.removal_strategy_id.method
        loc = location
        while loc:
            if loc.removal_strategy_id:
                return loc.removal_strategy_id.method
            loc = loc.location_id
        return self._default_removal_strategy(cr, uid, context=context)

    def get_warehouse(self, cr, uid, location, context=None):
        """
            Returns warehouse id of warehouse that contains location
            (found via nested-set containment of the warehouse view location)
            :param location: browse record (stock.location)
        """
        wh_obj = self.pool.get("stock.warehouse")
        whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left),
                                ('view_location_id.parent_right', '>=', location.parent_left)], context=context)
        return whs and whs[0] or False
#----------------------------------------------------------
# Routes
#----------------------------------------------------------
class stock_location_route(osv.osv):
    """Inventory route: an ordered bundle of push rules (stock.location.path)
    and pull rules (procurement.rule) applicable to products, product
    categories and/or warehouses."""
    _name = 'stock.location.route'
    _description = "Inventory Routes"
    _order = 'sequence'

    _columns = {
        'name': fields.char('Route Name', required=True),
        'sequence': fields.integer('Sequence'),
        'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Pull Rules', copy=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."),
        'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True),
        # Flags controlling where the route can be selected from.
        'product_selectable': fields.boolean('Applicable on Product'),
        'product_categ_selectable': fields.boolean('Applicable on Product Category'),
        'warehouse_selectable': fields.boolean('Applicable on Warehouse'),
        'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'),
        'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplier Warehouse'),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this route is shared between all companies'),
    }

    _defaults = {
        'sequence': lambda self, cr, uid, ctx: 0,
        'active': True,
        'product_selectable': True,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c),
    }

    def write(self, cr, uid, ids, vals, context=None):
        '''when a route is deactivated, deactivate also its pull and push rules'''
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context)
        if 'active' in vals:
            # Propagate the new active flag, touching only rules whose flag differs.
            push_ids = []
            pull_ids = []
            for route in self.browse(cr, uid, ids, context=context):
                if route.push_ids:
                    push_ids += [r.id for r in route.push_ids if r.active != vals['active']]
                if route.pull_ids:
                    pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']]
            if push_ids:
                self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context)
            if pull_ids:
                self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context)
        return res
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
"""
Quants are the smallest unit of stock physical instances
"""
_name = "stock.quant"
_description = "Quants"
def _get_quant_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for q in self.browse(cr, uid, ids, context=context):
res[q.id] = q.product_id.code or ''
if q.lot_id:
res[q.id] = q.lot_id.name
res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name
return res
    def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None):
        """Function-field compute: value each quant at its product cost.

        Property fields (e.g. standard_price) are company-dependent, so quants
        belonging to another company are re-browsed with force_company set to
        read the values of the owning company.
        @return: dict mapping quant id -> float value
        """
        context = dict(context or {})
        res = {}
        uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        for quant in self.browse(cr, uid, ids, context=context):
            # Reset any force_company left over from the previous iteration.
            context.pop('force_company', None)
            if quant.company_id.id != uid_company_id:
                #if the company of the quant is different than the current user company, force the company in the context
                #then re-do a browse to read the property fields for the good company.
                context['force_company'] = quant.company_id.id
                quant = self.browse(cr, uid, quant.id, context=context)
            res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context)
        return res
def _get_inventory_value(self, cr, uid, quant, context=None):
return quant.product_id.standard_price * quant.qty
_columns = {
'name': fields.function(_get_quant_name, type='char', string='Identifier'),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True, auto_join=True),
'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True),
'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True),
'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True),
'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved for", readonly=True, select=True),
'lot_id': fields.many2one('stock.production.lot', 'Lot', readonly=True, select=True, ondelete="restrict"),
'cost': fields.float('Unit Cost'),
'owner_id': fields.many2one('res.partner', 'Owner', help="This is the owner of the quant", readonly=True, select=True),
'create_date': fields.datetime('Creation Date', readonly=True),
'in_date': fields.datetime('Incoming Date', readonly=True, select=True),
'history_ids': fields.many2many('stock.move', 'stock_quant_move_rel', 'quant_id', 'move_id', 'Moves', help='Moves that operate(d) on this quant', copy=False),
'company_id': fields.many2one('res.company', 'Company', help="The company to which the quants belong", required=True, readonly=True, select=True),
'inventory_value': fields.function(_calc_inventory_value, string="Inventory Value", type='float', readonly=True),
# Used for negative quants to reconcile after compensated by a new positive one
'propagated_from_id': fields.many2one('stock.quant', 'Linked Quant', help='The negative quant this is coming from', readonly=True, select=True),
'negative_move_id': fields.many2one('stock.move', 'Move Negative Quant', help='If this is a negative quant, this will be the move that caused this negative quant.', readonly=True),
'negative_dest_location_id': fields.related('negative_move_id', 'location_dest_id', type='many2one', relation='stock.location', string="Negative Destination Location", readonly=True,
help="Technical field used to record the destination location of a move that created a negative quant"),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.quant', context=c),
}
    def init(self, cr):
        # Create a composite index tailored to quants_get searches; the ORM
        # cannot express multi-column indexes, so it is created by hand once.
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_quant_product_location_index',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX stock_quant_product_location_index ON stock_quant (product_id, location_id, company_id, qty, in_date, reservation_id)')
    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
        ''' Overwrite the read_group in order to sum the function field 'inventory_value' in group by.
        Function fields are not stored, so SQL aggregation cannot compute them;
        each group's value is summed in Python over its member records instead.
        '''
        res = super(stock_quant, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
        if 'inventory_value' in fields:
            for line in res:
                if '__domain' in line:
                    lines = self.search(cr, uid, line['__domain'], context=context)
                    inv_value = 0.0
                    for line2 in self.browse(cr, uid, lines, context=context):
                        inv_value += line2.inventory_value
                    line['inventory_value'] = inv_value
        return res
def action_view_quant_history(self, cr, uid, ids, context=None):
'''
This function returns an action that display the history of the quant, which
mean all the stock moves that lead to this quant creation with this quant quantity.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_move_form2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context={})[0]
move_ids = []
for quant in self.browse(cr, uid, ids, context=context):
move_ids += [move.id for move in quant.history_ids]
result['domain'] = "[('id','in',[" + ','.join(map(str, move_ids)) + "])]"
return result
    def quants_reserve(self, cr, uid, quants, move, link=False, context=None):
        '''This function reserves quants for the given move (and optionally given link). If the total of quantity reserved is enough, the move's state
        is also set to 'assigned'

        :param quants: list of tuple(quant browse record or None, qty to reserve). If None is given as first tuple element, the item will be ignored. Negative quants should not be received as argument
        :param move: browse record
        :param link: browse record (stock.move.operation.link)
        :raises osv.except_osv: when a negative quantity or a negative quant is passed
        '''
        toreserve = []
        reserved_availability = move.reserved_availability
        #split quants if needed
        for quant, qty in quants:
            if qty <= 0.0 or (quant and quant.qty <= 0.0):
                raise osv.except_osv(_('Error!'), _('You can not reserve a negative quantity or a negative quant.'))
            if not quant:
                continue
            # Split so the quant holds exactly `qty`; the remainder becomes a new quant.
            self._quant_split(cr, uid, quant, qty, context=context)
            toreserve.append(quant.id)
            reserved_availability += quant.qty
        #reserve quants
        if toreserve:
            # Written as SUPERUSER_ID: reservation must succeed regardless of user ACLs.
            self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context)
            #if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
            if move.picking_id:
                self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
        #check if move'state needs to be set as 'assigned'
        rounding = move.product_id.uom_id.rounding
        if float_compare(reserved_availability, move.product_qty, precision_rounding=rounding) == 0 and move.state in ('confirmed', 'waiting'):
            self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context)
        elif float_compare(reserved_availability, 0, precision_rounding=rounding) > 0 and not move.partially_available:
            self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context)
    def quants_move(self, cr, uid, quants, move, location_to, location_from=False, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, context=None):
        """Moves all given stock.quant in the given destination location.  Unreserve from current move.

        :param quants: list of tuple(browse record(stock.quant) or None, quantity to move)
        :param move: browse record (stock.move)
        :param location_to: browse record (stock.location) depicting where the quants have to be moved
        :param location_from: optional browse record (stock.location) explaining where the quant has to be taken (may differ from the move source location in case a removal strategy applied). This parameter is only used to pass to _quant_create if a negative quant must be created
        :param lot_id: ID of the lot that must be set on the quants to move
        :param owner_id: ID of the partner that must own the quants to move
        :param src_package_id: ID of the package that contains the quants to move
        :param dest_package_id: ID of the package that must be set on the moved quant
        """
        quants_reconcile = []
        to_move_quants = []
        self._check_location(cr, uid, location_to, context=context)
        for quant, qty in quants:
            if not quant:
                #If quant is None, we will create a quant to move (and potentially a negative counterpart too)
                quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context)
            else:
                self._quant_split(cr, uid, quant, qty, context=context)
            to_move_quants.append(quant)
            quants_reconcile.append(quant)
        if to_move_quants:
            # Moves (other than this one) whose reservation is taken need their state recomputed.
            to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id]
            self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, context=context)
            self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context)
        if location_to.usage == 'internal':
            # Do manual search for quant to avoid full table scan (order by id)
            cr.execute("""
                SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND
                ((stock_location.parent_left >= %s AND stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1
            """, (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id))
            if cr.fetchone():
                # A negative quant exists under the destination: try to cancel it out.
                for quant in quants_reconcile:
                    self._quant_reconcile_negative(cr, uid, quant, move, context=context)
def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
context=context or {}
vals = {'location_id': location_dest_id.id,
'history_ids': [(4, move.id)],
'reservation_id': False}
if not context.get('entire_pack'):
vals.update({'package_id': dest_package_id})
self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context)
def quants_get_prefered_domain(self, cr, uid, location, product, qty, domain=None, prefered_domain_list=[], restrict_lot_id=False, restrict_partner_id=False, context=None):
''' This function tries to find quants in the given location for the given domain, by trying to first limit
the choice on the quants that match the first item of prefered_domain_list as well. But if the qty requested is not reached
it tries to find the remaining quantity by looping on the prefered_domain_list (tries with the second item and so on).
Make sure the quants aren't found twice => all the domains of prefered_domain_list should be orthogonal
'''
if domain is None:
domain = []
quants = [(None, qty)]
#don't look for quants in location that are of type production, supplier or inventory.
if location.usage in ['inventory', 'production', 'supplier']:
return quants
res_qty = qty
if not prefered_domain_list:
return self.quants_get(cr, uid, location, product, qty, domain=domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
for prefered_domain in prefered_domain_list:
res_qty_cmp = float_compare(res_qty, 0, precision_rounding=product.uom_id.rounding)
if res_qty_cmp > 0:
#try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the prefered order
quants.pop()
tmp_quants = self.quants_get(cr, uid, location, product, res_qty, domain=domain + prefered_domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
for quant in tmp_quants:
if quant[0]:
res_qty -= quant[1]
quants += tmp_quants
return quants
def quants_get(self, cr, uid, location, product, qty, domain=None, restrict_lot_id=False, restrict_partner_id=False, context=None):
"""
Use the removal strategies of product to search for the correct quants
If you inherit, put the super at the end of your method.
:location: browse record of the parent location where the quants have to be found
:product: browse record of the product to find
:qty in UoM of product
"""
result = []
domain = domain or [('qty', '>', 0.0)]
if restrict_partner_id:
domain += [('owner_id', '=', restrict_partner_id)]
if restrict_lot_id:
domain += [('lot_id', '=', restrict_lot_id)]
if location:
removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, location, product, context=context)
result += self.apply_removal_strategy(cr, uid, location, product, qty, domain, removal_strategy, context=context)
return result
def apply_removal_strategy(self, cr, uid, location, product, quantity, domain, removal_strategy, context=None):
if removal_strategy == 'fifo':
order = 'in_date, id'
return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
elif removal_strategy == 'lifo':
order = 'in_date desc, id desc'
return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
raise osv.except_osv(_('Error!'), _('Removal strategy %s not implemented.' % (removal_strategy,)))
    def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False,
                      force_location_from=False, force_location_to=False, context=None):
        '''Create a quant in the destination location and create a negative quant in the source location if it's an internal location.

        :param qty: quantity of the quant, in the product's UoM
        :param move: browse record of the stock.move that generates the quant
        :param force_location_from: optional browse record overriding the move's source location
        :param force_location_to: optional browse record overriding the move's destination location
        :return: browse record of the (positive) quant created
        '''
        if context is None:
            context = {}
        price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context)
        location = force_location_to or move.location_dest_id
        rounding = move.product_id.uom_id.rounding
        vals = {
            'product_id': move.product_id.id,
            'location_id': location.id,
            'qty': float_round(qty, precision_rounding=rounding),
            'cost': price_unit,
            'history_ids': [(4, move.id)],
            'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
            'company_id': move.company_id.id,
            'lot_id': lot_id,
            'owner_id': owner_id,
            'package_id': dest_package_id,
        }
        if move.location_id.usage == 'internal':
            #if we were trying to move something from an internal location and reach here (quant creation),
            #it means that a negative quant has to be created as well.
            # The counterpart mirrors the positive quant but lives in the source
            # location with a negated quantity and the source package.
            negative_vals = vals.copy()
            negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id
            negative_vals['qty'] = float_round(-qty, precision_rounding=rounding)
            negative_vals['cost'] = price_unit
            negative_vals['negative_move_id'] = move.id
            negative_vals['package_id'] = src_package_id
            negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context)
            # Link the positive quant to its negative counterpart for later reconciliation.
            vals.update({'propagated_from_id': negative_quant_id})
        #create the quant as superuser, because we want to restrict the creation of quant manually: we should always use this method to create quants
        quant_id = self.create(cr, SUPERUSER_ID, vals, context=context)
        return self.browse(cr, uid, quant_id, context=context)
def _quant_split(self, cr, uid, quant, qty, context=None):
context = context or {}
rounding = quant.product_id.uom_id.rounding
if float_compare(abs(quant.qty), abs(qty), precision_rounding=rounding) <= 0: # if quant <= qty in abs, take it entirely
return False
qty_round = float_round(qty, precision_rounding=rounding)
new_qty_round = float_round(quant.qty - qty, precision_rounding=rounding)
# Fetch the history_ids manually as it will not do a join with the stock moves then (=> a lot faster)
cr.execute("""SELECT move_id FROM stock_quant_move_rel WHERE quant_id = %s""", (quant.id,))
res = cr.fetchall()
new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': new_qty_round, 'history_ids': [(4, x[0]) for x in res]}, context=context)
self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty_round}, context=context)
return self.browse(cr, uid, new_quant, context=context)
def _get_latest_move(self, cr, uid, quant, context=None):
move = False
for m in quant.history_ids:
if not move or m.date > move.date:
move = m
return move
@api.cr_uid_ids_context
def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None):
path = []
for move in solving_quant.history_ids:
path.append((4, move.id))
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context)
    def _quant_reconcile_negative(self, cr, uid, quant, move, context=None):
        """
        When new quant arrive in a location, try to reconcile it with
        negative quants. If it's possible, apply the cost of the new
        quant to the counter-part of the negative quant.
        """
        solving_quant = quant
        # Candidate negative quants must match the incoming quant's lot, owner
        # and package, and must not be the counterpart this quant was
        # propagated from (it would cancel itself out).
        dom = [('qty', '<', 0)]
        if quant.lot_id:
            dom += [('lot_id', '=', quant.lot_id.id)]
        dom += [('owner_id', '=', quant.owner_id.id)]
        dom += [('package_id', '=', quant.package_id.id)]
        dom += [('id', '!=', quant.propagated_from_id.id)]
        quants = self.quants_get(cr, uid, quant.location_id, quant.product_id, quant.qty, dom, context=context)
        product_uom_rounding = quant.product_id.uom_id.rounding
        for quant_neg, qty in quants:
            # quants_get may yield a (None, qty) tail for an unfulfilled remainder.
            if not quant_neg or not solving_quant:
                continue
            # Positive quants that were created as counterparts of this negative one.
            to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context)
            if not to_solve_quant_ids:
                continue
            solving_qty = qty
            solved_quant_ids = []
            for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context):
                if float_compare(solving_qty, 0, precision_rounding=product_uom_rounding) <= 0:
                    continue
                solved_quant_ids.append(to_solve_quant.id)
                self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context)
                solving_qty -= min(solving_qty, to_solve_quant.qty)
            # Split both sides so only the matched quantity is reconciled.
            remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context)
            remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context)
            #if the reconciliation was not complete, we need to link together the remaining parts
            if remaining_neg_quant:
                remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context)
                if remaining_to_solve_quant_ids:
                    self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context)
            if solving_quant.propagated_from_id and solved_quant_ids:
                self.write(cr, SUPERUSER_ID, solved_quant_ids, {'propagated_from_id': solving_quant.propagated_from_id.id}, context=context)
            #delete the reconciled quants, as it is replaced by the solved quants
            self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context)
            if solved_quant_ids:
                #price update + accounting entries adjustments
                self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context)
                #merge history (and cost?)
                self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context)
            self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context)
            # Continue reconciling with whatever part of the incoming quant is left.
            solving_quant = remaining_solving_quant
    def _price_update(self, cr, uid, ids, newprice, context=None):
        """Set the unit cost of the given quants to *newprice* (superuser write:
        quant cost is a technical field end users may not edit directly)."""
        self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context)
    def quants_unreserve(self, cr, uid, move, context=None):
        """Release every quant reserved by *move* and flag its picking so the
        suggested pack operations get recomputed."""
        related_quants = [x.id for x in move.reserved_quant_ids]
        if related_quants:
            #if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
            if move.picking_id:
                self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
            if move.partially_available:
                # Once its quants are freed the move is not even partially reserved.
                self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context)
            self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context)
def _quants_get_order(self, cr, uid, location, product, quantity, domain=[], orderby='in_date', context=None):
''' Implementation of removal strategies
If it can not reserve, it will return a tuple (None, qty)
'''
if context is None:
context = {}
domain += location and [('location_id', 'child_of', location.id)] or []
domain += [('product_id', '=', product.id)]
if context.get('force_company'):
domain += [('company_id', '=', context.get('force_company'))]
else:
domain += [('company_id', '=', self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id)]
res = []
offset = 0
while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0:
quants = self.search(cr, uid, domain, order=orderby, limit=10, offset=offset, context=context)
if not quants:
res.append((None, quantity))
break
for quant in self.browse(cr, uid, quants, context=context):
rounding = product.uom_id.rounding
if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
res += [(quant, abs(quant.qty))]
quantity -= abs(quant.qty)
elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
res += [(quant, quantity)]
quantity = 0
break
offset += 10
return res
def _check_location(self, cr, uid, location, context=None):
if location.usage == 'view':
raise osv.except_osv(_('Error'), _('You cannot move to a location of type view %s.') % (location.name))
return True
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
    _name = "stock.picking"
    _inherit = ['mail.thread']  # chatter: message_post / followers support
    _description = "Picking List"
    # Most urgent first, then earliest scheduled date, newest record last.
    _order = "priority desc, date asc, id desc"
def _set_min_date(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context)
def _set_priority(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'priority': value}, context=context)
    def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
        """ Finds minimum and maximum dates for picking.
        Also aggregates the highest priority among the picking's moves.
        @return: Dictionary of values
        """
        res = {}
        for id in ids:
            # Defaults for pickings that have no move lines at all.
            res[id] = {'min_date': False, 'max_date': False, 'priority': '1'}
        if not ids:
            return res
        # Single aggregated SQL query instead of browsing every move.
        cr.execute("""select
                picking_id,
                min(date_expected),
                max(date_expected),
                max(priority)
            from
                stock_move
            where
                picking_id IN %s
            group by
                picking_id""", (tuple(ids),))
        for pick, dt1, dt2, prio in cr.fetchall():
            res[pick]['min_date'] = dt1
            res[pick]['max_date'] = dt2
            res[pick]['priority'] = prio
        return res
    def create(self, cr, user, vals, context=None):
        """Override: when no explicit name (or the '/' placeholder) is given,
        assign the next number from the picking type's sequence."""
        context = context or {}
        if ('name' not in vals) or (vals.get('name') in ('/', False)):
            # The picking type may come from the vals or from the form's default context.
            ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False))
            sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id
            vals['name'] = self.pool.get('ir.sequence').get_id(cr, user, sequence_id, 'id', context=context)
        return super(stock_picking, self).create(cr, user, vals, context)
    def _state_get(self, cr, uid, ids, field_name, arg, context=None):
        '''The state of a picking depends on the state of its related stock.move
        draft: the picking has no line or any one of the lines is draft
        done, draft, cancel: all lines are done / draft / cancel
        confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial)
        '''
        res = {}
        for pick in self.browse(cr, uid, ids, context=context):
            # No lines, or at least one draft line => picking is still draft.
            if (not pick.move_lines) or any([x.state == 'draft' for x in pick.move_lines]):
                res[pick.id] = 'draft'
                continue
            if all([x.state == 'cancel' for x in pick.move_lines]):
                res[pick.id] = 'cancel'
                continue
            # All lines finished (cancelled lines are ignored) => done.
            if all([x.state in ('cancel', 'done') for x in pick.move_lines]):
                res[pick.id] = 'done'
                continue
            # Rank the remaining (active) move states by progress.
            order = {'confirmed': 0, 'waiting': 1, 'assigned': 2}
            order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'}
            lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')]
            if pick.move_type == 'one':
                # 'All at once': the least advanced move dictates the picking state.
                res[pick.id] = order_inv[min(lst)]
            else:
                #we are in the case of partial delivery, so if all move are assigned, picking
                #should be assign too, else if one of the move is assigned, or partially available, picking should be
                #in partially available state, otherwise, picking is in waiting or confirmed state
                res[pick.id] = order_inv[max(lst)]
                if not all(x == 2 for x in lst):
                    if any(x == 2 for x in lst):
                        res[pick.id] = 'partially_available'
                    else:
                        #if all moves aren't assigned, check if we have one product partially available
                        for move in pick.move_lines:
                            if move.partially_available:
                                res[pick.id] = 'partially_available'
                                break
        return res
def _get_pickings(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id:
res.add(move.picking_id.id)
return list(res)
    def _get_pickings_dates_priority(self, cr, uid, ids, context=None):
        """store trigger (called on stock.move ids): return the pickings whose
        min_date/max_date/priority must be recomputed."""
        res = set()
        for move in self.browse(cr, uid, ids, context=context):
            # Recompute when the move's expected date falls outside the picking's
            # current (min_date, max_date) window, or when the move became more
            # urgent than the picking's current priority.
            if move.picking_id and (not (move.picking_id.min_date < move.date_expected < move.picking_id.max_date) or move.priority > move.picking_id.priority):
                res.add(move.picking_id.id)
        return list(res)
def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
if pick.pack_operation_ids:
res[pick.id] = True
return res
def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
for move in pick.move_lines:
if move.reserved_quant_ids:
res[pick.id] = True
continue
return res
    def check_group_lot(self, cr, uid, context=None):
        """ This function will return true if we have the setting to use lots activated. """
        # Membership in stock.group_production_lot is how the lot/serial
        # tracking feature is toggled; `context` is unused but kept for API symmetry.
        return self.pool.get('res.users').has_group(cr, uid, 'stock.group_production_lot')
    def check_group_pack(self, cr, uid, context=None):
        """ This function will return true if we have the setting to use package activated. """
        # Membership in stock.group_tracking_lot is how the package feature is
        # toggled; `context` is unused but kept for API symmetry.
        return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot')
def action_assign_owner(self, cr, uid, ids, context=None):
for picking in self.browse(cr, uid, ids, context=context):
packop_ids = [op.id for op in picking.pack_operation_ids]
self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context)
_columns = {
'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False),
'note': fields.text('Notes'),
'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies goods to be deliver partially or all at once"),
'state': fields.function(_state_get, type="selection", copy=False,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type'], 20),
'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)},
selection=[
('draft', 'Draft'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Ready to Transfer'),
('done', 'Transferred'),
], string='Status', readonly=True, select=True, track_visibility='onchange',
help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Partially Available: some products are available and reserved\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority',
store={'stock.move': (_get_pickings_dates_priority, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, select=1, help="Priority for this picking. Setting manually a value here would set it as priority for all the moves",
track_visibility='onchange', required=True),
'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date,
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'),
'max_date': fields.function(get_min_max_date, multi="min_max_date",
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"),
'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'),
'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=True),
'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Quant already reserved ?', help='technical field used to know if there is already at least one quant reserved on moves of a given picking'),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'),
'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Pack Operation Exists?', help='technical field for attrs in view'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True),
'picking_type_code': fields.related('picking_type_id', 'code', type='char', string='Picking Type Code', help="Technical field used to display the correct label on print button in the picking view"),
'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"),
# Used to search on pickings
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which mean we might need to recompute the package operations', copy=False),
'location_id': fields.related('move_lines', 'location_id', type='many2one', relation='stock.location', string='Location', readonly=True),
'location_dest_id': fields.related('move_lines', 'location_dest_id', type='many2one', relation='stock.location', string='Destination Location', readonly=True),
'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', readonly=True,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10),
'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10),
}),
}
_defaults = {
'name': '/',
'state': 'draft',
'move_type': 'direct',
'priority': '1', # normal
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c),
'recompute_pack_op': True,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'),
]
def do_print_picking(self, cr, uid, ids, context=None):
'''This function prints the picking list'''
context = dict(context or {}, active_ids=ids)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context)
def action_confirm(self, cr, uid, ids, context=None):
todo = []
todo_force_assign = []
for picking in self.browse(cr, uid, ids, context=context):
if picking.location_id.usage in ('supplier', 'inventory', 'production'):
todo_force_assign.append(picking.id)
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
if todo_force_assign:
self.force_assign(cr, uid, todo_force_assign, context=context)
return True
    def action_assign(self, cr, uid, ids, context=None):
        """ Check availability of picking moves.
        This has the effect of changing the state and reserve quants on available moves, and may
        also impact the state of the picking as it is computed based on move's states.
        @return: True
        :raises osv.except_osv: when a picking has no move left to check
        """
        for pick in self.browse(cr, uid, ids, context=context):
            # A draft picking must be confirmed before reservation can happen.
            if pick.state == 'draft':
                self.action_confirm(cr, uid, [pick.id], context=context)
            #skip the moves that don't need to be checked
            move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')]
            if not move_ids:
                raise osv.except_osv(_('Warning!'), _('Nothing to check the availability for.'))
            self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context)
        return True
def force_assign(self, cr, uid, ids, context=None):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context)
#pack_operation might have changed and need to be recomputed
self.write(cr, uid, ids, {'recompute_pack_op': True}, context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
return True
def action_done(self, cr, uid, ids, context=None):
"""Changes picking state to done by processing the Stock Moves of the Picking
Normally that happens when the button "Done" is pressed on a Picking view.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context))
elif move.state in ('assigned', 'confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
#on picking deletion, cancel its move then unlink them too
move_obj = self.pool.get('stock.move')
context = context or {}
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [move.id for move in pick.move_lines]
move_obj.action_cancel(cr, uid, move_ids, context=context)
move_obj.unlink(cr, uid, move_ids, context=context)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        """Override: keep pack operations and remaining quantities in sync with
        the move lines they are derived from."""
        if vals.get('move_lines') and not vals.get('pack_operation_ids'):
            # pack operations are directly dependant of move lines, it needs to be recomputed
            pack_operation_obj = self.pool['stock.pack.operation']
            existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', ids)], context=context)
            if existing_package_ids:
                pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
        res = super(stock_picking, self).write(cr, uid, ids, vals, context=context)
        #if we changed the move lines or the pack operations, we need to recompute the remaining quantities of both
        if 'move_lines' in vals or 'pack_operation_ids' in vals:
            self.do_recompute_remaining_quantities(cr, uid, ids, context=context)
        return res
def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None):
""" Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines.
"""
if not backorder_moves:
backorder_moves = picking.move_lines
backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')]
if 'do_only_split' in context and context['do_only_split']:
backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])]
if backorder_move_ids:
backorder_id = self.copy(cr, uid, picking.id, {
'name': '/',
'move_lines': [],
'pack_operation_ids': [],
'backorder_id': picking.id,
})
backorder = self.browse(cr, uid, backorder_id, context=context)
self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context)
move_obj = self.pool.get("stock.move")
move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context)
self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.action_confirm(cr, uid, [backorder_id], context=context)
return backorder_id
return False
    @api.cr_uid_ids_context
    def recheck_availability(self, cr, uid, picking_ids, context=None):
        """Re-run reservation on the pickings, then rebuild their suggested
        pack operations."""
        self.action_assign(cr, uid, picking_ids, context=context)
        self.do_prepare_partial(cr, uid, picking_ids, context=context)
    def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None):
        """This method searches for the higher level packages that can be moved as a single operation, given a list of quants
        to move and their suggested destination, and returns the list of matching packages.

        A package qualifies only when ALL the quants it contains (recursively)
        are part of the quants to move AND share the same suggested destination.
        """
        # Try to find as much as possible top-level packages that can be moved
        pack_obj = self.pool.get("stock.quant.package")
        quant_obj = self.pool.get("stock.quant")
        top_lvl_packages = set()
        # NOTE(review): under Python 2, dict.keys() is a list, so the
        # membership test below is O(n) per quant — acceptable here.
        quants_to_compare = quants_suggested_locations.keys()
        for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])):
            loop = True
            test_pack = pack
            good_pack = False
            pack_destination = False
            # Walk up the package hierarchy as long as every contained quant qualifies.
            while loop:
                pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context)
                all_in = True
                for quant in quant_obj.browse(cr, uid, pack_quants, context=context):
                    # If the quant is not in the quants to compare and not in the common location
                    if not quant in quants_to_compare:
                        all_in = False
                        break
                    else:
                        #if putaway strat apply, the destination location of each quant may be different (and thus the package should not be taken as a single operation)
                        if not pack_destination:
                            pack_destination = quants_suggested_locations[quant]
                        elif pack_destination != quants_suggested_locations[quant]:
                            all_in = False
                            break
                if all_in:
                    good_pack = test_pack
                    if test_pack.parent_id:
                        test_pack = test_pack.parent_id
                    else:
                        #stop the loop when there's no parent package anymore
                        loop = False
                else:
                    #stop the loop when the package test_pack is not totally reserved for moves of this picking
                    #(some quants may be reserved for other picking or not reserved at all)
                    loop = False
            if good_pack:
                top_lvl_packages.add(good_pack)
        return list(top_lvl_packages)
def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None):
""" returns a list of dict, ready to be used in create() of stock.pack.operation.
:param picking: browse record (stock.picking)
:param quants: browse record list (stock.quant). List of quants associated to the picking
:param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking
"""
def _picking_putaway_apply(product):
location = False
# Search putaway strategy
if product_putaway_strats.get(product.id):
location = product_putaway_strats[product.id]
else:
location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context)
product_putaway_strats[product.id] = location
return location or picking.location_dest_id.id
# If we encounter an UoM that is smaller than the default UoM or the one already chosen, use the new one instead.
product_uom = {} # Determines UoM used in pack operations
location_dest_id = None
location_id = None
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if not product_uom.get(move.product_id.id):
product_uom[move.product_id.id] = move.product_id.uom_id
if move.product_uom.id != move.product_id.uom_id.id and move.product_uom.factor > product_uom[move.product_id.id].factor:
product_uom[move.product_id.id] = move.product_uom
if not move.scrapped:
if location_dest_id and move.location_dest_id.id != location_dest_id:
raise Warning(_('The destination location must be the same for all the moves of the picking.'))
location_dest_id = move.location_dest_id.id
if location_id and move.location_id.id != location_id:
raise Warning(_('The source location must be the same for all the moves of the picking.'))
location_id = move.location_id.id
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
vals = []
qtys_grouped = {}
#for each quant of the picking, find the suggested location
quants_suggested_locations = {}
product_putaway_strats = {}
for quant in quants:
if quant.qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(quant.product_id)
quants_suggested_locations[quant] = suggested_location_id
#find the packages we can movei as a whole
top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context)
# and then create pack operations for the top-level packages found
for pack in top_lvl_packages:
pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context)
pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context)
vals.append({
'picking_id': picking.id,
'package_id': pack.id,
'product_qty': 1.0,
'location_id': pack.location_id.id,
'location_dest_id': quants_suggested_locations[pack_quants[0]],
'owner_id': pack.owner_id.id,
})
#remove the quants inside the package so that they are excluded from the rest of the computation
for quant in pack_quants:
del quants_suggested_locations[quant]
# Go through all remaining reserved quants and group by product, package, lot, owner, source location and dest location
for quant, dest_location_id in quants_suggested_locations.items():
key = (quant.product_id.id, quant.package_id.id, quant.lot_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += quant.qty
else:
qtys_grouped[key] = quant.qty
# Do the same for the forced quantities (in cases of force_assign or incomming shipment for example)
for product, qty in forced_qties.items():
if qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(product)
key = (product.id, False, False, picking.owner_id.id, picking.location_id.id, suggested_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += qty
else:
qtys_grouped[key] = qty
# Create the necessary operations for the grouped quants and remaining qtys
uom_obj = self.pool.get('product.uom')
prevals = {}
for key, qty in qtys_grouped.items():
product = self.pool.get("product.product").browse(cr, uid, key[0], context=context)
uom_id = product.uom_id.id
qty_uom = qty
if product_uom.get(key[0]):
uom_id = product_uom[key[0]].id
qty_uom = uom_obj._compute_qty(cr, uid, product.uom_id.id, qty, uom_id)
val_dict = {
'picking_id': picking.id,
'product_qty': qty_uom,
'product_id': key[0],
'package_id': key[1],
'lot_id': key[2],
'owner_id': key[3],
'location_id': key[4],
'location_dest_id': key[5],
'product_uom_id': uom_id,
}
if key[0] in prevals:
prevals[key[0]].append(val_dict)
else:
prevals[key[0]] = [val_dict]
# prevals var holds the operations in order to create them in the same order than the picking stock moves if possible
processed_products = set()
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if move.product_id.id not in processed_products:
vals += prevals.get(move.product_id.id, [])
processed_products.add(move.product_id.id)
return vals
@api.cr_uid_ids_context
def open_barcode_interface(self, cr, uid, picking_ids, context=None):
final_url="/barcode/web/#action=stock.ui&picking_id="+str(picking_ids[0])
return {'type': 'ir.actions.act_url', 'url':final_url, 'target': 'self',}
    @api.cr_uid_ids_context
    def do_partial_open_barcode(self, cr, uid, picking_ids, context=None):
        """(Re)create the pack operations of the pickings, then open the barcode UI on them."""
        self.do_prepare_partial(cr, uid, picking_ids, context=context)
        return self.open_barcode_interface(cr, uid, picking_ids, context=context)
    @api.cr_uid_ids_context
    def do_prepare_partial(self, cr, uid, picking_ids, context=None):
        """(Re)create the pack operations of the given pickings.

        Any existing pack operation of the pickings is deleted first.  New
        operations are then created from the reserved quants of the moves plus
        the quantities not covered by reservations ("forced" quantities, e.g.
        after force_assign() or for incoming shipments), as grouped by
        _prepare_pack_ops.  Remaining quantities are recomputed once at the
        end, not per created operation.
        """
        context = context or {}
        pack_operation_obj = self.pool.get('stock.pack.operation')
        #used to avoid recomputing the remaining quantities at each new pack operation created
        ctx = context.copy()
        ctx['no_recompute'] = True
        #get list of existing operations and delete them
        existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context)
        if existing_package_ids:
            pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
        for picking in self.browse(cr, uid, picking_ids, context=context):
            forced_qties = {}  # Quantity remaining after calculating reserved quants
            picking_quants = []
            #Calculate packages, reserved quants, qtys of this picking's moves
            for move in picking.move_lines:
                if move.state not in ('assigned', 'confirmed', 'waiting'):
                    continue
                move_quants = move.reserved_quant_ids
                picking_quants += move_quants
                # quantity of an assigned move not covered by its reserved quants
                forced_qty = (move.state == 'assigned') and move.product_qty - sum([x.qty for x in move_quants]) or 0
                #if we used force_assign() on the move, or if the move is incoming, forced_qty > 0
                if float_compare(forced_qty, 0, precision_rounding=move.product_id.uom_id.rounding) > 0:
                    if forced_qties.get(move.product_id):
                        forced_qties[move.product_id] += forced_qty
                    else:
                        forced_qties[move.product_id] = forced_qty
            for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context):
                # use ctx (no_recompute) so each create() does not trigger a recompute
                pack_operation_obj.create(cr, uid, vals, context=ctx)
        #recompute the remaining quantities all at once
        self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context)
        self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context)
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, picking_ids, context=None):
"""
Will remove all quants for picking in picking_ids
"""
moves_to_unreserve = []
pack_line_to_unreserve = []
for picking in self.browse(cr, uid, picking_ids, context=context):
moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')]
pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids]
if moves_to_unreserve:
if pack_line_to_unreserve:
self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context)
self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context)
    def recompute_remaining_qty(self, cr, uid, picking, context=None):
        """Rebuild the links between the picking's pack operations and its moves.

        Existing stock.move.operation.link records of the picking's operations
        are deleted, then recreated by matching each operation with the
        reserved quants of the moves: whole packages are handled first, then
        operations matched on product / package / lot / owner, and finally the
        quantities that could not be matched unambiguously.

        :return: tuple (need_rereserve, all_op_processed); need_rereserve is
            True when quants should be reserved again, all_op_processed is
            True when every operation quantity could be linked to a move.
        """
        # Link the move at prod2move_ids[product_id][index] to the operation,
        # for at most `qty_to_assign`; pops the move entry once fully consumed.
        def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False):
            move_dict = prod2move_ids[product_id][index]
            qty_on_link = min(move_dict['remaining_qty'], qty_to_assign)
            self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context)
            if move_dict['remaining_qty'] == qty_on_link:
                prod2move_ids[product_id].pop(index)
            else:
                move_dict['remaining_qty'] -= qty_on_link
            return qty_on_link
        def _create_link_for_quant(operation_id, quant, qty):
            """create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity"""
            if not quant.reservation_id.id:
                # unreserved quant: fall back to matching by product only
                return _create_link_for_product(operation_id, quant.product_id.id, qty)
            qty_on_link = 0
            for i in range(0, len(prod2move_ids[quant.product_id.id])):
                if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id:
                    continue
                qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id)
                break
            return qty_on_link
        def _create_link_for_product(operation_id, product_id, qty):
            '''method that creates the link between a given operation and move(s) of given product, for the given quantity.
            Returns True if it was possible to create links for the requested quantity (False if there was not enough quantity on stock moves)'''
            qty_to_assign = qty
            prod_obj = self.pool.get("product.product")
            product = prod_obj.browse(cr, uid, product_id)
            rounding = product.uom_id.rounding
            qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
            if prod2move_ids.get(product_id):
                while prod2move_ids[product_id] and qtyassign_cmp > 0:
                    qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False)
                    qty_to_assign -= qty_on_link
                    qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
            return qtyassign_cmp == 0
        uom_obj = self.pool.get('product.uom')
        package_obj = self.pool.get('stock.quant.package')
        quant_obj = self.pool.get('stock.quant')
        link_obj = self.pool.get('stock.move.operation.link')
        quants_in_package_done = set()
        prod2move_ids = {}
        still_to_do = []
        #make a dictionary giving for each product, the moves and related quantity that can be used in operation links
        for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
            if not prod2move_ids.get(move.product_id.id):
                prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}]
            else:
                prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty})
        need_rereserve = False
        #sort the operations in order to give higher priority to those with a package, then a serial number
        operations = picking.pack_operation_ids
        operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
        #delete existing operations to start again from scratch
        links = link_obj.search(cr, uid, [('operation_id', 'in', [x.id for x in operations])], context=context)
        if links:
            link_obj.unlink(cr, uid, links, context=context)
        #1) first, try to create links when quants can be identified without any doubt
        for ops in operations:
            #for each operation, create the links with the stock move by seeking on the matching reserved quants,
            #and defer the operation if there is some ambiguity on the move to select
            if ops.package_id and not ops.product_id:
                #entire package
                quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context)
                for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
                    remaining_qty_on_quant = quant.qty
                    if quant.reservation_id:
                        #avoid quants being counted twice
                        quants_in_package_done.add(quant.id)
                        qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty)
                        remaining_qty_on_quant -= qty_on_link
                    if remaining_qty_on_quant:
                        still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant))
                        need_rereserve = True
            elif ops.product_id.id:
                #Check moves with same product
                qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
                for move_dict in prod2move_ids.get(ops.product_id.id, []):
                    move = move_dict['move']
                    for quant in move.reserved_quant_ids:
                        if not qty_to_assign > 0:
                            break
                        if quant.id in quants_in_package_done:
                            continue
                        #check if the quant is matching the operation details
                        if ops.package_id:
                            flag = quant.package_id and bool(package_obj.search(cr, uid, [('id', 'child_of', [ops.package_id.id])], context=context)) or False
                        else:
                            flag = not quant.package_id.id
                        flag = flag and ((ops.lot_id and ops.lot_id.id == quant.lot_id.id) or not ops.lot_id)
                        flag = flag and (ops.owner_id.id == quant.owner_id.id)
                        if flag:
                            max_qty_on_link = min(quant.qty, qty_to_assign)
                            qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
                            qty_to_assign -= qty_on_link
                qty_assign_cmp = float_compare(qty_to_assign, 0, precision_rounding=ops.product_id.uom_id.rounding)
                if qty_assign_cmp > 0:
                    #qty reserved is less than qty put in operations. We need to create a link but it's deferred after we processed
                    #all the quants (because they leave no choice on their related move and needs to be processed with higher priority)
                    still_to_do += [(ops, ops.product_id.id, qty_to_assign)]
                    need_rereserve = True
        #2) then, process the remaining part
        all_op_processed = True
        for ops, product_id, remaining_qty in still_to_do:
            all_op_processed = _create_link_for_product(ops.id, product_id, remaining_qty) and all_op_processed
        return (need_rereserve, all_op_processed)
def picking_recompute_remaining_quantities(self, cr, uid, picking, context=None):
need_rereserve = False
all_op_processed = True
if picking.pack_operation_ids:
need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, context=context)
return need_rereserve, all_op_processed
@api.cr_uid_ids_context
def do_recompute_remaining_quantities(self, cr, uid, picking_ids, context=None):
for picking in self.browse(cr, uid, picking_ids, context=context):
if picking.pack_operation_ids:
self.recompute_remaining_qty(cr, uid, picking, context=context)
def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None):
"""
Creates an extra move when there is no corresponding original move to be copied
"""
uom_obj = self.pool.get("product.uom")
uom_id = product.uom_id.id
qty = remaining_qty
if op.product_id and op.product_uom_id and op.product_uom_id.id != product.uom_id.id:
if op.product_uom_id.factor > product.uom_id.factor: #If the pack operation's is a smaller unit
uom_id = op.product_uom_id.id
#HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM
qty = uom_obj._compute_qty_obj(cr, uid, product.uom_id, remaining_qty, op.product_uom_id, rounding_method='HALF-UP')
picking = op.picking_id
ref = product.default_code
name = '[' + ref + ']' + ' ' + product.name if ref else product.name
res = {
'picking_id': picking.id,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'product_id': product.id,
'product_uom': uom_id,
'product_uom_qty': qty,
'name': _('Extra Move: ') + name,
'state': 'draft',
'restrict_partner_id': op.owner_id,
}
return res
def _create_extra_moves(self, cr, uid, picking, context=None):
'''This function creates move lines on a picking, at the time of do_transfer, based on
unexpected product transfers (or exceeding quantities) found in the pack operations.
'''
move_obj = self.pool.get('stock.move')
operation_obj = self.pool.get('stock.pack.operation')
moves = []
for op in picking.pack_operation_ids:
for product_id, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items():
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if float_compare(remaining_qty, 0, precision_rounding=product.uom_id.rounding) > 0:
vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context)
moves.append(move_obj.create(cr, uid, vals, context=context))
if moves:
move_obj.action_confirm(cr, uid, moves, context=context)
return moves
def rereserve_pick(self, cr, uid, ids, context=None):
"""
This can be used to provide a button that rereserves taking into account the existing pack operations
"""
for pick in self.browse(cr, uid, ids, context=context):
self.rereserve_quants(cr, uid, pick, move_ids = [x.id for x in pick.move_lines], context=context)
def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None):
""" Unreserve quants then try to reassign quants."""
stock_move_obj = self.pool.get('stock.move')
if not move_ids:
self.do_unreserve(cr, uid, [picking.id], context=context)
self.action_assign(cr, uid, [picking.id], context=context)
else:
stock_move_obj.do_unreserve(cr, uid, move_ids, context=context)
stock_move_obj.action_assign(cr, uid, move_ids, context=context)
@api.cr_uid_ids_context
def do_enter_transfer_details(self, cr, uid, picking, context=None):
if not context:
context = {}
context.update({
'active_model': self._name,
'active_ids': picking,
'active_id': len(picking) and picking[0] or False
})
created_id = self.pool['stock.transfer_details'].create(cr, uid, {'picking_id': len(picking) and picking[0] or False}, context)
return self.pool['stock.transfer_details'].wizard_view(cr, uid, created_id, context)
    @api.cr_uid_ids_context
    def do_transfer(self, cr, uid, picking_ids, context=None):
        """Process the transfer of the given pickings.

        If a picking has no pack operation, simply action_done() it.
        Otherwise, reconcile the pack operations with the moves (creating
        extra moves for unexpected quantities), split partially-processed
        moves, process the done part and create a backorder for the rest.
        With context key 'do_only_split', the moves are split and backordered
        but not set to done.
        """
        if not context:
            context = {}
        stock_move_obj = self.pool.get('stock.move')
        for picking in self.browse(cr, uid, picking_ids, context=context):
            if not picking.pack_operation_ids:
                self.action_done(cr, uid, [picking.id], context=context)
                continue
            else:
                need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context)
                #create extra moves in the picking (unexpected product moves coming from pack operations)
                todo_move_ids = []
                if not all_op_processed:
                    todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context)
                #split move lines if needed
                toassign_move_ids = []
                for move in picking.move_lines:
                    remaining_qty = move.remaining_qty
                    if move.state in ('done', 'cancel'):
                        #ignore stock moves cancelled or already done
                        continue
                    elif move.state == 'draft':
                        toassign_move_ids.append(move.id)
                    if float_compare(remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) == 0:
                        # fully processed move: mark it to be done
                        if move.state in ('draft', 'assigned', 'confirmed'):
                            todo_move_ids.append(move.id)
                    elif float_compare(remaining_qty,0, precision_rounding = move.product_id.uom_id.rounding) > 0 and \
                                float_compare(remaining_qty, move.product_qty, precision_rounding = move.product_id.uom_id.rounding) < 0:
                        # partially processed move: split it, do the processed
                        # part and keep the remainder for reassignment
                        new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=context)
                        todo_move_ids.append(move.id)
                        #Assign move as it was assigned before
                        toassign_move_ids.append(new_move)
                if need_rereserve or not all_op_processed:
                    # rereserving makes no sense when coming from these source locations
                    if not picking.location_id.usage in ("supplier", "production", "inventory"):
                        self.rereserve_quants(cr, uid, picking, move_ids=todo_move_ids, context=context)
                    self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context)
                if todo_move_ids and not context.get('do_only_split'):
                    self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=context)
                elif context.get('do_only_split'):
                    context = dict(context, split=todo_move_ids)
                self._create_backorder(cr, uid, picking, context=context)
                if toassign_move_ids:
                    stock_move_obj.action_assign(cr, uid, toassign_move_ids, context=context)
        return True
@api.cr_uid_ids_context
def do_split(self, cr, uid, picking_ids, context=None):
""" just split the picking (create a backorder) without making it 'done' """
if context is None:
context = {}
ctx = context.copy()
ctx['do_only_split'] = True
return self.do_transfer(cr, uid, picking_ids, context=ctx)
def get_next_picking_for_ui(self, cr, uid, context=None):
""" returns the next pickings to process. Used in the barcode scanner UI"""
if context is None:
context = {}
domain = [('state', 'in', ('assigned', 'partially_available'))]
if context.get('default_picking_type_id'):
domain.append(('picking_type_id', '=', context['default_picking_type_id']))
return self.search(cr, uid, domain, context=context)
def action_done_from_ui(self, cr, uid, picking_id, context=None):
""" called when button 'done' is pushed in the barcode scanner UI """
#write qty_done into field product_qty for every package_operation before doing the transfer
pack_op_obj = self.pool.get('stock.pack.operation')
for operation in self.browse(cr, uid, picking_id, context=context).pack_operation_ids:
pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=context)
self.do_transfer(cr, uid, [picking_id], context=context)
#return id of next picking to work on
return self.get_next_picking_for_ui(cr, uid, context=context)
@api.cr_uid_ids_context
def action_pack(self, cr, uid, picking_ids, operation_filter_ids=None, context=None):
""" Create a package with the current pack_operation_ids of the picking that aren't yet in a pack.
Used in the barcode scanner UI and the normal interface as well.
operation_filter_ids is used by barcode scanner interface to specify a subset of operation to pack"""
if operation_filter_ids == None:
operation_filter_ids = []
stock_operation_obj = self.pool.get('stock.pack.operation')
package_obj = self.pool.get('stock.quant.package')
stock_move_obj = self.pool.get('stock.move')
package_id = False
for picking_id in picking_ids:
operation_search_domain = [('picking_id', '=', picking_id), ('result_package_id', '=', False)]
if operation_filter_ids != []:
operation_search_domain.append(('id', 'in', operation_filter_ids))
operation_ids = stock_operation_obj.search(cr, uid, operation_search_domain, context=context)
pack_operation_ids = []
if operation_ids:
for operation in stock_operation_obj.browse(cr, uid, operation_ids, context=context):
#If we haven't done all qty in operation, we have to split into 2 operation
op = operation
if (operation.qty_done < operation.product_qty):
new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context)
stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0, 'lot_id': False}, context=context)
op = stock_operation_obj.browse(cr, uid, new_operation, context=context)
pack_operation_ids.append(op.id)
if op.product_id and op.location_id and op.location_dest_id:
stock_move_obj.check_tracking_product(cr, uid, op.product_id, op.lot_id.id, op.location_id, op.location_dest_id, context=context)
package_id = package_obj.create(cr, uid, {}, context=context)
stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context)
return package_id
def process_product_id_from_ui(self, cr, uid, picking_id, product_id, op_id, increment=True, context=None):
return self.pool.get('stock.pack.operation')._search_and_increment(cr, uid, picking_id, [('product_id', '=', product_id),('id', '=', op_id)], increment=increment, context=context)
    def process_barcode_from_ui(self, cr, uid, picking_id, barcode_str, visible_op_ids, context=None):
        '''Called each time the barcode scanner reads an input.

        Interprets barcode_str, in order, as a location, a product, a lot and
        finally a package, and increments the matching pack operation.

        :return: dict with keys 'filter_loc'/'filter_loc_id' filled when the
            barcode matched a location, and 'operation_id' filled when it
            matched a product, lot or package.
        '''
        lot_obj = self.pool.get('stock.production.lot')
        package_obj = self.pool.get('stock.quant.package')
        product_obj = self.pool.get('product.product')
        stock_operation_obj = self.pool.get('stock.pack.operation')
        stock_location_obj = self.pool.get('stock.location')
        answer = {'filter_loc': False, 'operation_id': False}
        #check if the barcode corresponds to a location
        matching_location_ids = stock_location_obj.search(cr, uid, [('loc_barcode', '=', barcode_str)], context=context)
        if matching_location_ids:
            #if we have a location, return immediately with the location name
            # NOTE(review): context=None is passed here while every other call
            # in this method forwards `context` — confirm this is intended
            location = stock_location_obj.browse(cr, uid, matching_location_ids[0], context=None)
            answer['filter_loc'] = stock_location_obj._name_get(cr, uid, location, context=None)
            answer['filter_loc_id'] = matching_location_ids[0]
            return answer
        #check if the barcode corresponds to a product
        matching_product_ids = product_obj.search(cr, uid, ['|', ('ean13', '=', barcode_str), ('default_code', '=', barcode_str)], context=context)
        if matching_product_ids:
            op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', matching_product_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
            answer['operation_id'] = op_id
            return answer
        #check if the barcode corresponds to a lot
        matching_lot_ids = lot_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
        if matching_lot_ids:
            lot = lot_obj.browse(cr, uid, matching_lot_ids[0], context=context)
            op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', lot.product_id.id), ('lot_id', '=', lot.id)], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
            answer['operation_id'] = op_id
            return answer
        #check if the barcode corresponds to a package
        matching_package_ids = package_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
        if matching_package_ids:
            op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('package_id', '=', matching_package_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
            answer['operation_id'] = op_id
            return answer
        return answer
class stock_production_lot(osv.osv):
    """Production lot / serial number, tracked through its stock.quant records."""
    _name = 'stock.production.lot'
    _inherit = ['mail.thread']
    _description = 'Lot/Serial'
    _columns = {
        # unique serial/lot number (uniqueness enforced per product+ref below)
        'name': fields.char('Serial Number', required=True, help="Unique Serial Number"),
        'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"),
        'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
        'quant_ids': fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True),
        'create_date': fields.datetime('Creation Date'),
    }
    _defaults = {
        # default serial number taken from the dedicated ir.sequence
        'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
        # default product taken from the context (e.g. when created from a product form)
        'product_id': lambda x, y, z, c: c.get('product_id', False),
    }
    _sql_constraints = [
        ('name_ref_uniq', 'unique (name, ref, product_id)', 'The combination of serial number, internal reference and product must be unique !'),
    ]
    def action_traceability(self, cr, uid, ids, context=None):
        """Open a stock.move list showing the full history of the given lots.

        Collects the moves referenced by the history of every quant carrying
        one of the lots.

        :return: act_window dictionary on stock.move, or False when the lots
            have no move history
        """
        quant_obj = self.pool.get("stock.quant")
        quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context)
        moves = set()
        for quant in quant_obj.browse(cr, uid, quants, context=context):
            moves |= {move.id for move in quant.history_ids}
        if moves:
            return {
                'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]",
                'name': _('Traceability'),
                'view_mode': 'tree,form',
                'view_type': 'form',
                'context': {'tree_view_ref': 'stock.view_move_tree'},
                'res_model': 'stock.move',
                'type': 'ir.actions.act_window',
            }
        return False
# ----------------------------------------------------
# Move
# ----------------------------------------------------
class stock_move(osv.osv):
_name = "stock.move"
_description = "Stock Move"
_order = 'date_expected desc, id'
_log_create = False
def get_price_unit(self, cr, uid, move, context=None):
""" Returns the unit price to store on the quant """
return move.price_unit or move.product_id.standard_price
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name + ' > ' + line.location_dest_id.name
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _quantity_normalize(self, cr, uid, ids, name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, context=context)
return res
def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for move in self.browse(cr, uid, ids, context=context):
qty = move.product_qty
for record in move.linked_move_operation_ids:
qty -= record.qty
# Keeping in product default UoM
res[move.id] = float_round(qty, precision_rounding=move.product_id.uom_id.rounding)
return res
def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id]
else:
res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id]
return res
def _get_product_availability(self, cr, uid, ids, field_name, args, context=None):
quant_obj = self.pool.get('stock.quant')
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = move.product_qty
else:
sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context)
quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context)
availability = 0
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
availability += quant.qty
res[move.id] = min(move.product_qty, availability)
return res
def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None):
settings_obj = self.pool.get('stock.config.settings')
uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, '')
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal':
res[move.id] = '' # 'not applicable' or 'n/a' could work too
continue
total_available = min(move.product_qty, move.reserved_availability + move.availability)
total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, context=context)
info = str(total_available)
#look in the settings if we need to display the UoM name or not
config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
if config_ids:
stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
if stock_settings.group_uom:
info += ' ' + move.product_uom.name
if move.reserved_availability:
if move.reserved_availability != total_available:
#some of the available quantity is assigned and some are available but not reserved
reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, context=context)
info += _(' (%s reserved)') % str(reserved_available)
else:
#all available quantity is assigned
info += _(' (reserved)')
res[move.id] = info
return res
def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, 0)
for move in self.browse(cr, uid, ids, context=context):
res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids])
return res
def _get_move(self, cr, uid, ids, context=None):
res = set()
for quant in self.browse(cr, uid, ids, context=context):
if quant.reservation_id:
res.add(quant.reservation_id.id)
return list(res)
def _get_move_ids(self, cr, uid, ids, context=None):
res = []
for picking in self.browse(cr, uid, ids, context=context):
res += [x.id for x in picking.move_lines]
return res
def _get_moves_from_prod(self, cr, uid, ids, context=None):
if ids:
return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context)
return []
def _set_product_qty(self, cr, uid, id, field, value, arg, context=None):
""" The meaning of product_qty field changed lately and is now a functional field computing the quantity
in the default product UoM. This code has been added to raise an error if a write is made given a value
for `product_qty`, where the same write should set the `product_uom_qty` field instead, in order to
detect errors.
"""
raise osv.except_osv(_('Programming Error!'), _('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.'))
_columns = {
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', '<>', 'service')], states={'done': [('readonly', True)]}),
'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, type='float', digits=0, store={
_name: (lambda self, cr, uid, ids, c={}: ids, ['product_id', 'product_uom', 'product_uom_qty'], 10),
}, string='Quantity',
help='Quantity in the default UoM of the product'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}),
'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
'product_packaging': fields.many2one('product.packaging', 'Prefered Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, auto_join=True,
states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True,
auto_join=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False),
'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True),
'picking_id': fields.many2one('stock.picking', 'Reference', select=True, states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True, copy=False,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False),
'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."), # as it's a technical field, we intentionally don't provide the digits attribute
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful in case of debug", copy=False),
'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True),
'origin': fields.char("Source"),
'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True,
help="""By default, the system will take from the stock in the source location and passively wait for availability. The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True),
'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants', copy=False),
'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity', digits=0,
states={'done': [('readonly', True)]}, help="Remaining Quantity in default UoM according to operations matched with this move"),
'procurement_id': fields.many2one('procurement.order', 'Procurement'),
'group_id': fields.many2one('procurement.group', 'Procurement Group'),
'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The pull rule that created this stock move'),
'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
'inventory_id': fields.many2one('stock.inventory', 'Inventory'),
'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'),
'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False),
'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move'),
'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'),
'availability': fields.function(_get_product_availability, type='float', string='Quantity Available', readonly=True, help='Quantity in stock that can still be reserved for this move'),
'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"),
'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."),
}
def _default_location_destination(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_type_id', False):
pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
return pick_type.default_location_dest_id and pick_type.default_location_dest_id.id or False
return False
def _default_location_source(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_type_id', False):
pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
return pick_type.default_location_src_id and pick_type.default_location_src_id.id or False
return False
def _default_destination_address(self, cr, uid, context=None):
return False
def _default_group_id(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_id', False):
picking = self.pool.get('stock.picking').browse(cr, uid, context['default_picking_id'], context=context)
return picking.group_id.id
return False
_defaults = {
'location_id': _default_location_source,
'location_dest_id': _default_location_destination,
'partner_id': _default_destination_address,
'state': 'draft',
'priority': '1',
'product_uom_qty': 1.0,
'scrapped': False,
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': fields.datetime.now,
'procure_method': 'make_to_stock',
'propagate': True,
'partially_available': False,
'group_id': _default_group_id,
}
def _check_uom(self, cr, uid, ids, context=None):
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id:
return False
return True
_constraints = [
(_check_uom,
'You try to move a product using a UoM that is not compatible with the UoM of the product moved. Please use an UoM in the same UoM category.',
['product_uom']),
]
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_move_product_location_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX stock_move_product_location_index ON stock_move (product_id, location_id, location_dest_id, company_id, state)')
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, move_ids, context=None):
quant_obj = self.pool.get("stock.quant")
for move in self.browse(cr, uid, move_ids, context=context):
if move.state in ('done', 'cancel'):
raise osv.except_osv(_('Operation Forbidden!'), _('Cannot unreserve a done move'))
quant_obj.quants_unreserve(cr, uid, move, context=context)
if self.find_move_ancestors(cr, uid, move, context=context):
self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context)
else:
self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context)
def _prepare_procurement_from_move(self, cr, uid, move, context=None):
origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or move.picking_id.name or "/")
group_id = move.group_id and move.group_id.id or False
if move.rule_id:
if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id:
group_id = move.rule_id.group_id.id
elif move.rule_id.group_propagation_option == 'none':
group_id = False
return {
'name': move.rule_id and move.rule_id.name or "/",
'origin': origin,
'company_id': move.company_id and move.company_id.id or False,
'date_planned': move.date,
'product_id': move.product_id.id,
'product_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'product_uos_qty': (move.product_uos and move.product_uos_qty) or move.product_uom_qty,
'product_uos': (move.product_uos and move.product_uos.id) or move.product_uom.id,
'location_id': move.location_id.id,
'move_dest_id': move.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in move.route_ids],
'warehouse_id': move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False),
'priority': move.priority,
}
def _push_apply(self, cr, uid, moves, context=None):
push_obj = self.pool.get("stock.location.path")
for move in moves:
#1) if the move is already chained, there is no need to check push rules
#2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way
# to receive goods without triggering the push rules again (which would duplicate chained operations)
if not move.move_dest_id and not move.origin_returned_move_id:
domain = [('location_from_id', '=', move.location_dest_id.id)]
#priority goes to the route defined on the product and product category
route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids]
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#then we search on the warehouse if a rule can apply
wh_route_ids = []
if move.warehouse_id:
wh_route_ids = [x.id for x in move.warehouse_id.route_ids]
elif move.picking_type_id and move.picking_type_id.warehouse_id:
wh_route_ids = [x.id for x in move.picking_type_id.warehouse_id.route_ids]
if wh_route_ids:
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#if no specialized push rule has been found yet, we try to find a general one (without route)
rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
if rules:
rule = push_obj.browse(cr, uid, rules[0], context=context)
push_obj._apply(cr, uid, rule, move, context=context)
return True
def _create_procurement(self, cr, uid, move, context=None):
""" This will create a procurement order """
return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context), context=context)
def _create_procurements(self, cr, uid, moves, context=None):
res = []
for move in moves:
res.append(self._create_procurement(cr, uid, move, context=context))
return res
    def write(self, cr, uid, ids, vals, context=None):
        """Override adding two safeguards:

        - forbid modifying quantity/UoM/product/location fields of a move
          already in state 'done';
        - propagate quantity and (expected-)date changes to the chained
          destination move (move_dest_id) when `propagate` is set, unless
          context['do_not_propagate'] is given. Date deltas smaller than the
          company's `propagation_minimum_delta` (in days) are not propagated.
        """
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        # Check that we do not modify a stock.move which is done
        frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'done':
                if frozen_fields.intersection(vals):
                    raise osv.except_osv(_('Operation Forbidden!'),
                        _('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
        propagated_changes_dict = {}
        #propagation of quantity change
        if vals.get('product_uom_qty'):
            propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty']
        if vals.get('product_uom_id'):
            propagated_changes_dict['product_uom_id'] = vals['product_uom_id']
        #propagation of expected date:
        propagated_date_field = False
        if vals.get('date_expected'):
            #propagate any manual change of the expected date
            propagated_date_field = 'date_expected'
        elif (vals.get('state', '') == 'done' and vals.get('date')):
            #propagate also any delta observed when setting the move as done
            propagated_date_field = 'date'
        if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict):
            #any propagation is (maybe) needed
            for move in self.browse(cr, uid, ids, context=context):
                if move.move_dest_id and move.propagate:
                    # a 'date_expected' computed for a previous move in this loop
                    # must not leak to the next one
                    if 'date_expected' in propagated_changes_dict:
                        propagated_changes_dict.pop('date_expected')
                    if propagated_date_field:
                        current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
                        new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT)
                        delta = new_date - current_date
                        if abs(delta.days) >= move.company_id.propagation_minimum_delta:
                            old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
                            new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                            propagated_changes_dict['date_expected'] = new_move_date
                    #For pushed moves as well as for pulled moves, propagate by recursive call of write().
                    #Note that, for pulled moves we intentionally don't propagate on the procurement.
                    if propagated_changes_dict:
                        self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context)
        return super(stock_move, self).write(cr, uid, ids, vals, context=context)
def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom, product_uos):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uos_qty': 0.00
}
warning = {}
if (not product_id) or (product_qty <= 0.0):
result['product_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# Warn if the quantity was decreased
if ids:
for move in self.read(cr, uid, ids, ['product_qty']):
if product_qty < move['product_qty']:
warning.update({
'title': _('Information'),
'message': _("By changing this quantity here, you accept the "
"new quantity as complete: Odoo will not "
"automatically generate a back order.")})
break
if product_uos and product_uom and (product_uom != product_uos):
result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
else:
result['product_uos_qty'] = product_qty
return {'value': result, 'warning': warning}
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
product_uos, product_uom):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_uos_qty: Changed UoS Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uom_qty': 0.00
}
if (not product_id) or (product_uos_qty <= 0.0):
result['product_uos_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# No warning if the quantity was decreased to avoid double warnings:
# The clients should call onchange_quantity too anyway
if product_uos and product_uom and (product_uom != product_uos):
result['product_uom_qty'] = product_uos_qty / uos_coeff['uos_coeff']
else:
result['product_uom_qty'] = product_uos_qty
return {'value': result}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False):
""" On change of product id, if finds UoM, UoS, quantity and UoS quantity.
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param partner_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {}
user = self.pool.get('res.users').browse(cr, uid, uid)
lang = user and user.lang or False
if partner_id:
addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
if addr_rec:
lang = addr_rec and addr_rec.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
uos_id = product.uos_id and product.uos_id.id or False
result = {
'name': product.partner_ref,
'product_uom': product.uom_id.id,
'product_uos': uos_id,
'product_uom_qty': 1.00,
'product_uos_qty': self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'],
}
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
return {'value': result}
def _prepare_picking_assign(self, cr, uid, move, context=None):
""" Prepares a new picking for this move as it could not be assigned to
another picking. This method is designed to be inherited.
"""
values = {
'origin': move.origin,
'company_id': move.company_id and move.company_id.id or False,
'move_type': move.group_id and move.group_id.move_type or 'direct',
'partner_id': move.partner_id.id or False,
'picking_type_id': move.picking_type_id and move.picking_type_id.id or False,
}
return values
    @api.cr_uid_ids_context
    def _picking_assign(self, cr, uid, move_ids, procurement_group, location_from, location_to, context=None):
        """Assign a picking on the given move_ids, which is a list of move supposed to share the same procurement_group, location_from and location_to
        (and company). Those attributes are also given as parameters.

        A reusable open picking (same group and locations) is looked up with
        raw SQL; when none exists a new one is created from
        _prepare_picking_assign().
        """
        pick_obj = self.pool.get("stock.picking")
        # Use a SQL query as doing with the ORM will split it in different queries with id IN (,,)
        # In the next version, the locations on the picking should be stored again.
        query = """
            SELECT stock_picking.id FROM stock_picking, stock_move
            WHERE
                stock_picking.state in ('draft', 'confirmed', 'waiting') AND
                stock_move.picking_id = stock_picking.id AND
                stock_move.location_id = %s AND
                stock_move.location_dest_id = %s AND
        """
        params = (location_from, location_to)
        if not procurement_group:
            query += "stock_picking.group_id IS NULL LIMIT 1"
        else:
            query += "stock_picking.group_id = %s LIMIT 1"
            params += (procurement_group,)
        cr.execute(query, params)
        # fetchone() yields None when no candidate picking matched
        [pick] = cr.fetchone() or [None]
        if not pick:
            move = self.browse(cr, uid, move_ids, context=context)[0]
            values = self._prepare_picking_assign(cr, uid, move, context=context)
            pick = pick_obj.create(cr, uid, values, context=context)
        return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context)
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': {'date': date_expected}}
def attribute_price(self, cr, uid, move, context=None):
"""
Attribute price to move, important in inter-company moves or receipts with only one partner
"""
if not move.price_unit:
price = move.product_id.standard_price
self.write(cr, uid, [move.id], {'price_unit': price})
    def action_confirm(self, cr, uid, ids, context=None):
        """ Confirms stock move or put it in waiting if it's linked to another move.

        A move preceded by another one (directly, or through its split_from
        chain) becomes 'waiting', otherwise 'confirmed'. Confirmed
        make_to_order moves get a procurement created and then wait on it.
        Pickings are assigned in batch per (group, source, destination) and
        push rules are applied at the end.
        @return: List of ids.
        """
        if not context:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        states = {
            'confirmed': [],
            'waiting': []
        }
        to_assign = {}
        for move in self.browse(cr, uid, ids, context=context):
            self.attribute_price(cr, uid, move, context=context)
            state = 'confirmed'
            #if the move is preceeded, then it's waiting (if preceeding move is done, then action_assign has been called already and its state is already available)
            if move.move_orig_ids:
                state = 'waiting'
            #if the move is split and some of the ancestor was preceeded, then it's waiting as well
            elif move.split_from:
                move2 = move.split_from
                while move2 and state != 'waiting':
                    if move2.move_orig_ids:
                        state = 'waiting'
                    move2 = move2.split_from
            states[state].append(move.id)
            # moves without a picking yet are grouped for batch picking assignment
            if not move.picking_id and move.picking_type_id:
                key = (move.group_id.id, move.location_id.id, move.location_dest_id.id)
                if key not in to_assign:
                    to_assign[key] = []
                to_assign[key].append(move.id)
        # confirmed MTO moves get a procurement and move to 'waiting'
        moves = [move for move in self.browse(cr, uid, states['confirmed'], context=context) if move.procure_method == 'make_to_order']
        self._create_procurements(cr, uid, moves, context=context)
        for move in moves:
            states['waiting'].append(move.id)
            states['confirmed'].remove(move.id)
        for state, write_ids in states.items():
            if len(write_ids):
                self.write(cr, uid, write_ids, {'state': state})
        #assign picking in batch for all confirmed move that share the same details
        for key, move_ids in to_assign.items():
            procurement_group, location_from, location_to = key
            self._picking_assign(cr, uid, move_ids, procurement_group, location_from, location_to, context=context)
        moves = self.browse(cr, uid, ids, context=context)
        self._push_apply(cr, uid, moves, context=context)
        return ids
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'assigned'}, context=context)
def check_tracking_product(self, cr, uid, product, lot_id, location, location_dest, context=None):
check = False
if product.track_all and not location_dest.usage == 'inventory':
check = True
elif product.track_incoming and location.usage in ('supplier', 'transit', 'inventory') and location_dest.usage == 'internal':
check = True
elif product.track_outgoing and location_dest.usage in ('customer', 'transit') and location.usage == 'internal':
check = True
if check and not lot_id:
raise osv.except_osv(_('Warning!'), _('You must assign a serial number for the product %s') % (product.name))
def check_tracking(self, cr, uid, move, lot_id, context=None):
""" Checks if serial number is assigned to stock move or not and raise an error if it had to.
"""
self.check_tracking_product(cr, uid, move.product_id, lot_id, move.location_id, move.location_dest_id, context=context)
    def action_assign(self, cr, uid, ids, context=None):
        """ Checks the product type and accordingly writes the state.

        Reserves quants for confirmed/waiting moves: first against the
        specific domains of linked pack operations (sorted packages-first,
        then lots), then without a specific domain for whatever remains.
        Moves sourced from supplier/inventory/production locations and
        consumable products are force-assigned instead.
        """
        context = context or {}
        quant_obj = self.pool.get("stock.quant")
        to_assign_moves = []
        main_domain = {}
        todo_moves = []
        operations = set()
        for move in self.browse(cr, uid, ids, context=context):
            if move.state not in ('confirmed', 'waiting', 'assigned'):
                continue
            if move.location_id.usage in ('supplier', 'inventory', 'production'):
                to_assign_moves.append(move.id)
                #in case the move is returned, we want to try to find quants before forcing the assignment
                if not move.origin_returned_move_id:
                    continue
            if move.product_id.type == 'consu':
                to_assign_moves.append(move.id)
                continue
            else:
                todo_moves.append(move)
                #we always keep the quants already assigned and try to find the remaining quantity on quants not assigned only
                main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]
                #if the move is preceeded, restrict the choice of quants in the ones moved previously in original move
                ancestors = self.find_move_ancestors(cr, uid, move, context=context)
                if move.state == 'waiting' and not ancestors:
                    #if the waiting move hasn't yet any ancestor (PO/MO not confirmed yet), don't find any quant available in stock
                    main_domain[move.id] += [('id', '=', False)]
                elif ancestors:
                    main_domain[move.id] += [('history_ids', 'in', ancestors)]
                #if the move is returned from another, restrict the choice of quants to the ones that follow the returned move
                if move.origin_returned_move_id:
                    main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]
                for link in move.linked_move_operation_ids:
                    operations.add(link.operation_id)
        # Check all ops and sort them: we want to process first the packages, then operations with lot then the rest
        operations = list(operations)
        operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
        for ops in operations:
            #first try to find quants based on specific domains given by linked operations
            for record in ops.linked_move_operation_ids:
                move = record.move_id
                if move.id in main_domain:
                    domain = main_domain[move.id] + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
                    qty = record.qty
                    if qty:
                        quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, qty, domain=domain, prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
                        quant_obj.quants_reserve(cr, uid, quants, move, record, context=context)
        for move in todo_moves:
            if move.linked_move_operation_ids:
                continue
            #then if the move isn't totally assigned, try to find quants without any specific domain
            if move.state != 'assigned':
                qty_already_assigned = move.reserved_availability
                qty = move.product_qty - qty_already_assigned
                quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain[move.id], prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
                quant_obj.quants_reserve(cr, uid, quants, move, context=context)
        #force assignation of consumable products and incoming from supplier/inventory/production
        if to_assign_moves:
            self.force_assign(cr, uid, to_assign_moves, context=context)
    def action_cancel(self, cr, uid, ids, context=None):
        """ Cancels the moves and if all moves are cancelled it cancels the picking.

        Releases reserved quants; when not triggered by a procurement
        cancellation (context['cancel_procurement']), propagates the
        cancellation to the destination move or downgrades it to 'confirmed'
        to break the chain, and re-checks the related procurements.
        @return: True
        """
        procurement_obj = self.pool.get('procurement.order')
        context = context or {}
        procs_to_check = []
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'done':
                raise osv.except_osv(_('Operation Forbidden!'),
                        _('You cannot cancel a stock move that has been set to \'Done\'.'))
            if move.reserved_quant_ids:
                self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context)
            if context.get('cancel_procurement'):
                if move.propagate:
                    # also cancel the procurements this move was feeding
                    procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context)
                    procurement_obj.cancel(cr, uid, procurement_ids, context=context)
            else:
                if move.move_dest_id:
                    if move.propagate:
                        self.action_cancel(cr, uid, [move.move_dest_id.id], context=context)
                    elif move.move_dest_id.state == 'waiting':
                        #If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)
                        self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
                if move.procurement_id:
                    # Does the same as procurement check, only eliminating a refresh
                    procs_to_check.append(move.procurement_id.id)
        res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
        if procs_to_check:
            procurement_obj.check(cr, uid, procs_to_check, context=context)
        return res
def _check_package_from_moves(self, cr, uid, ids, context=None):
pack_obj = self.pool.get("stock.quant.package")
packs = set()
for move in self.browse(cr, uid, ids, context=context):
packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0])
return pack_obj._check_location_constraint(cr, uid, list(packs), context=context)
def find_move_ancestors(self, cr, uid, move, context=None):
'''Find the first level ancestors of given move '''
ancestors = []
move2 = move
while move2:
ancestors += [x.id for x in move2.move_orig_ids]
#loop on the split_from to find the ancestor of split moves only if the move has not direct ancestor (priority goes to them)
move2 = not move2.move_orig_ids and move2.split_from or False
return ancestors
@api.cr_uid_ids_context
def recalculate_move_state(self, cr, uid, move_ids, context=None):
'''Recompute the state of moves given because their reserved quants were used to fulfill another operation'''
for move in self.browse(cr, uid, move_ids, context=context):
vals = {}
reserved_quant_ids = move.reserved_quant_ids
if len(reserved_quant_ids) > 0 and not move.partially_available:
vals['partially_available'] = True
if len(reserved_quant_ids) == 0 and move.partially_available:
vals['partially_available'] = False
if move.state == 'assigned':
if self.find_move_ancestors(cr, uid, move, context=context):
vals['state'] = 'waiting'
else:
vals['state'] = 'confirmed'
if vals:
self.write(cr, uid, [move.id], vals, context=context)
    def action_done(self, cr, uid, ids, context=None):
        """ Process completely the moves given as ids and if all moves are done, it will finish the picking.

        When pack operations are linked to the moves, quants are transferred
        per operation (entire packages first); any quantity not covered by an
        operation is then transferred directly from the move's reservation.
        @return: True
        """
        context = context or {}
        picking_obj = self.pool.get("stock.picking")
        quant_obj = self.pool.get("stock.quant")
        # draft moves must be confirmed before they can be processed
        todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"]
        if todo:
            ids = self.action_confirm(cr, uid, todo, context=context)
        pickings = set()
        procurement_ids = set()
        #Search operations that are linked to the moves
        operations = set()
        move_qty = {}
        for move in self.browse(cr, uid, ids, context=context):
            move_qty[move.id] = move.product_qty
            for link in move.linked_move_operation_ids:
                operations.add(link.operation_id)
        #Sort operations according to entire packages first, then package + lot, package only, lot only
        operations = list(operations)
        operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
        for ops in operations:
            if ops.picking_id:
                pickings.add(ops.picking_id.id)
            main_domain = [('qty', '>', 0)]
            for record in ops.linked_move_operation_ids:
                move = record.move_id
                self.check_tracking(cr, uid, move, not ops.product_id and ops.package_id.id or ops.lot_id.id, context=context)
                # take quants reserved for this move first, then unreserved quants,
                # then quants reserved for other moves as a last resort
                prefered_domain = [('reservation_id', '=', move.id)]
                fallback_domain = [('reservation_id', '=', False)]
                fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
                prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
                dom = main_domain + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
                quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, record.qty, domain=dom, prefered_domain_list=prefered_domain_list,
                                                          restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
                if ops.product_id:
                    #If a product is given, the result is always put immediately in the result package (if it is False, they are without package)
                    quant_dest_package_id = ops.result_package_id.id
                    ctx = context
                else:
                    # When a pack is moved entirely, the quants should not be written anything for the destination package
                    quant_dest_package_id = False
                    ctx = context.copy()
                    ctx['entire_pack'] = True
                quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id, lot_id=ops.lot_id.id, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id, dest_package_id=quant_dest_package_id, context=ctx)
                # Handle pack in pack
                if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id:
                    self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context)
                # move_qty reaching 0 (or missing) before operations are exhausted
                # means the operation quantities don't sum up to the move quantity
                if not move_qty.get(move.id):
                    raise osv.except_osv(_("Error"), _("The roundings of your Unit of Measures %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. ") % (move.product_uom.name, move.product_id.uom_id.name))
                move_qty[move.id] -= record.qty
        #Check for remaining qtys and unreserve/check move_dest_id in
        move_dest_ids = set()
        for move in self.browse(cr, uid, ids, context=context):
            move_qty_cmp = float_compare(move_qty[move.id], 0, precision_rounding=move.product_id.uom_id.rounding)
            if move_qty_cmp > 0:  # (=In case no pack operations in picking)
                main_domain = [('qty', '>', 0)]
                prefered_domain = [('reservation_id', '=', move.id)]
                fallback_domain = [('reservation_id', '=', False)]
                fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
                prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
                self.check_tracking(cr, uid, move, move.restrict_lot_id.id, context=context)
                qty = move_qty[move.id]
                quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain, prefered_domain_list=prefered_domain_list, restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
                quant_obj.quants_move(cr, uid, quants, move, move.location_dest_id, lot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id, context=context)
            # If the move has a destination, add it to the list to reserve
            if move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'):
                move_dest_ids.add(move.move_dest_id.id)
            if move.procurement_id:
                procurement_ids.add(move.procurement_id.id)
            #unreserve the quants and make them available for other operations/moves
            quant_obj.quants_unreserve(cr, uid, move, context=context)
        # Check the packages have been placed in the correct locations
        self._check_package_from_moves(cr, uid, ids, context=context)
        #set the move as done
        self.write(cr, uid, ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
        self.pool.get('procurement.order').check(cr, uid, list(procurement_ids), context=context)
        #assign destination moves
        if move_dest_ids:
            self.action_assign(cr, uid, list(move_dest_ids), context=context)
        #check picking state to set the date_done is needed
        done_picking = []
        for picking in picking_obj.browse(cr, uid, list(pickings), context=context):
            if picking.state == 'done' and not picking.date_done:
                done_picking.append(picking.id)
        if done_picking:
            picking_obj.write(cr, uid, done_picking, {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
        return True
def unlink(self, cr, uid, ids, context=None):
context = context or {}
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('draft', 'cancel'):
raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
return super(stock_move, self).unlink(cr, uid, ids, context=context)
    def action_scrap(self, cr, uid, ids, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False, context=None):
        """ Move the scrap/damaged product into scrap location
        @param cr: the database cursor
        @param uid: the user id
        @param ids: ids of stock move object to be scrapped
        @param quantity : specify scrap qty (expressed in the move's UoM)
        @param location_id : specify scrap location
        @param restrict_lot_id: optional lot restricting the quants that may be taken
        @param restrict_partner_id: optional owner restricting the quants that may be taken
        @param context: context arguments
        @return: Scraped lines
        """
        quant_obj = self.pool.get("stock.quant")
        #quantity should be given in MOVE UOM
        if quantity <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
        res = []
        for move in self.browse(cr, uid, ids, context=context):
            source_location = move.location_id
            if move.state == 'done':
                # the products already reached their destination: scrap from there
                source_location = move.location_dest_id
            #Previously used to prevent scraping from virtual location but not necessary anymore
            #if source_location.usage != 'internal':
                #restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
                #raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
            move_qty = move.product_qty
            # keep the UoS quantity proportional to the scrapped UoM quantity
            uos_qty = quantity / move_qty * move.product_uos_qty
            default_val = {
                'location_id': source_location.id,
                'product_uom_qty': quantity,
                'product_uos_qty': uos_qty,
                'state': move.state,
                'scrapped': True,
                'location_dest_id': location_id,
                'restrict_lot_id': restrict_lot_id,
                'restrict_partner_id': restrict_partner_id,
            }
            new_move = self.copy(cr, uid, move.id, default_val)
            res += [new_move]
            product_obj = self.pool.get('product.product')
            for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
                if move.picking_id:
                    # log the scrap on the picking's chatter
                    uom = product.uom_id.name if product.uom_id else ''
                    message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
                    move.picking_id.message_post(body=message)
            # We "flag" the quant from which we want to scrap the products. To do so:
            #    - we select the quants related to the move we scrap from
            #    - we reserve the quants with the scrapped move
            # See self.action_done, et particularly how is defined the "prefered_domain" for clarification
            scrap_move = self.browse(cr, uid, new_move, context=context)
            if move.state == 'done' and scrap_move.location_id.usage not in ('supplier', 'inventory', 'production'):
                domain = [('qty', '>', 0), ('history_ids', 'in', [move.id])]
                # We use scrap_move data since a reservation makes sense for a move not already done
                quants = quant_obj.quants_get_prefered_domain(cr, uid, scrap_move.location_id,
                        scrap_move.product_id, quantity, domain=domain, prefered_domain_list=[],
                        restrict_lot_id=scrap_move.restrict_lot_id.id, restrict_partner_id=scrap_move.restrict_partner_id.id, context=context)
                quant_obj.quants_reserve(cr, uid, quants, scrap_move, context=context)
        self.action_done(cr, uid, res, context=context)
        return res
    def split(self, cr, uid, move, qty, restrict_lot_id=False, restrict_partner_id=False, context=None):
        """ Splits qty from move move into a new move
        :param move: browse record
        :param qty: float. quantity to split (given in product UoM)
        :param restrict_lot_id: optional production lot that can be given in order to force the new move to restrict its choice of quants to this lot.
        :param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner.
        :param context: dictionay. can contains the special key 'source_location_id' in order to force the source location when copying the move
        :raises: osv.except_osv when the move is done/cancelled (cannot be split) or still draft (might be replaced by a phantom BoM explosion)
        returns the ID of the backorder move created
        """
        if move.state in ('done', 'cancel'):
            raise osv.except_osv(_('Error'), _('You cannot split a move done'))
        if move.state == 'draft':
            #we restrict the split of a draft move because if not confirmed yet, it may be replaced by several other moves in
            #case of phantom bom (with mrp module). And we don't want to deal with this complexity by copying the product that will explode.
            raise osv.except_osv(_('Error'), _('You cannot split a draft move. It needs to be confirmed first.'))
        # nothing to split: the requested quantity is zero or covers the whole move
        if move.product_qty <= qty or qty == 0:
            return move.id
        uom_obj = self.pool.get('product.uom')
        context = context or {}
        #HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM
        uom_qty = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom, rounding_method='HALF-UP', context=context)
        uos_qty = uom_qty * move.product_uos_qty / move.product_uom_qty
        defaults = {
            'product_uom_qty': uom_qty,
            'product_uos_qty': uos_qty,
            'procure_method': 'make_to_stock',
            'restrict_lot_id': restrict_lot_id,
            'restrict_partner_id': restrict_partner_id,
            'split_from': move.id,
            'procurement_id': move.procurement_id.id,
            'move_dest_id': move.move_dest_id.id,
            'origin_returned_move_id': move.origin_returned_move_id.id,
        }
        if context.get('source_location_id'):
            defaults['location_id'] = context['source_location_id']
        new_move = self.copy(cr, uid, move.id, defaults, context=context)
        ctx = context.copy()
        ctx['do_not_propagate'] = True
        # remove the split quantity from the original move without propagating
        # the quantity change to chained moves
        self.write(cr, uid, [move.id], {
            'product_uom_qty': move.product_uom_qty - uom_qty,
            'product_uos_qty': move.product_uos_qty - uos_qty,
        }, context=ctx)
        if move.move_dest_id and move.propagate and move.move_dest_id.state not in ('done', 'cancel'):
            # split the chained destination move the same way and rechain
            # the two new moves together
            new_move_prop = self.split(cr, uid, move.move_dest_id, qty, context=context)
            self.write(cr, uid, [new_move], {'move_dest_id': new_move_prop}, context=context)
        #returning the first element of list returned by action_confirm is ok because we checked it wouldn't be exploded (and
        #thus the result of action_confirm should always be a list of 1 element length)
        return self.action_confirm(cr, uid, [new_move], context=context)[0]
def get_code_from_locs(self, cr, uid, move, location_id=False, location_dest_id=False, context=None):
"""
Returns the code the picking type should have. This can easily be used
to check if a move is internal or not
move, location_id and location_dest_id are browse records
"""
code = 'internal'
src_loc = location_id or move.location_id
dest_loc = location_dest_id or move.location_dest_id
if src_loc.usage == 'internal' and dest_loc.usage != 'internal':
code = 'outgoing'
if src_loc.usage != 'internal' and dest_loc.usage == 'internal':
code = 'incoming'
return code
def _get_taxes(self, cr, uid, move, context=None):
return []
class stock_inventory(osv.osv):
    """ Physical inventory: a counted snapshot of the stock of a location
    (optionally restricted to one product/lot/owner/pack) that generates the
    corrective stock moves needed to align the theoretical quantities with the
    counted ones. """
    _name = "stock.inventory"
    _description = "Inventory"

    def _get_move_ids_exist(self, cr, uid, ids, field_name, arg, context=None):
        """ Functional getter: True when the inventory already generated stock
        moves (used as a technical field for attrs in the view). """
        res = {}
        for inv in self.browse(cr, uid, ids, context=context):
            res[inv.id] = bool(inv.move_ids)
        return res

    def _get_available_filters(self, cr, uid, context=None):
        """
        This function will return the list of filter allowed according to the options checked
        in 'Settings\Warehouse'.
        :rtype: list of tuple
        """
        #default available choices
        res_filter = [('none', _('All products')), ('partial', _('Manual Selection of Products')), ('product', _('One product only'))]
        settings_obj = self.pool.get('stock.config.settings')
        config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
        #If we don't have updated config until now, all fields are by default false and so should not be displayed
        if not config_ids:
            return res_filter
        stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
        if stock_settings.group_stock_tracking_owner:
            res_filter.append(('owner', _('One owner only')))
            res_filter.append(('product_owner', _('One product for a specific owner')))
        if stock_settings.group_stock_tracking_lot:
            res_filter.append(('lot', _('One Lot/Serial Number')))
        if stock_settings.group_stock_packaging:
            res_filter.append(('pack', _('A Pack')))
        return res_filter

    def _get_total_qty(self, cr, uid, ids, field_name, args, context=None):
        """ Functional getter: sum of the checked quantities of all lines. """
        res = {}
        for inv in self.browse(cr, uid, ids, context=context):
            res[inv.id] = sum(line.product_qty for line in inv.line_ids)
        return res

    INVENTORY_STATE_SELECTION = [
        ('draft', 'Draft'),
        ('cancel', 'Cancelled'),
        ('confirm', 'In Progress'),
        ('done', 'Validated'),
    ]

    _columns = {
        'name': fields.char('Inventory Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Inventory Name."),
        'date': fields.datetime('Inventory Date', required=True, readonly=True, help="The date that will be used for the stock level check of the products and the validation of the stock move related to this inventory."),
        'line_ids': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=False, states={'done': [('readonly', True)]}, help="Inventory Lines.", copy=True),
        'move_ids': fields.one2many('stock.move', 'inventory_id', 'Created Moves', help="Inventory Moves.", states={'done': [('readonly', True)]}),
        'state': fields.selection(INVENTORY_STATE_SELECTION, 'Status', readonly=True, select=True, copy=False),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}),
        'location_id': fields.many2one('stock.location', 'Inventoried Location', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'product_id': fields.many2one('product.product', 'Inventoried Product', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Product to focus your inventory on a particular Product."),
        'package_id': fields.many2one('stock.quant.package', 'Inventoried Pack', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Pack to focus your inventory on a particular Pack."),
        'partner_id': fields.many2one('res.partner', 'Inventoried Owner', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Owner to focus your inventory on a particular Owner."),
        'lot_id': fields.many2one('stock.production.lot', 'Inventoried Lot/Serial Number', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Lot/Serial Number to focus your inventory on a particular Lot/Serial Number.", copy=False),
        'move_ids_exist': fields.function(_get_move_ids_exist, type='boolean', string=' Stock Move Exists?', help='technical field for attrs in view'),
        'filter': fields.selection(_get_available_filters, 'Inventory of', required=True,
                                   help="If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock.  If you only do some products  "\
                                      "(e.g. Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything.  You can also let the "\
                                      "system propose for a single product / lot /... "),
        'total_qty': fields.function(_get_total_qty, type="float"),
    }

    def _default_stock_location(self, cr, uid, context=None):
        """ Default location: the stock location of the main warehouse, if it exists. """
        try:
            warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
            return warehouse.lot_stock_id.id
        except Exception:
            # the reference warehouse may have been deleted; no sensible default then
            return False

    _defaults = {
        'date': fields.datetime.now,
        'state': 'draft',
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
        'location_id': _default_stock_location,
        'filter': 'none',
    }

    def reset_real_qty(self, cr, uid, ids, context=None):
        """ Reset the checked quantity of every line of the inventory to 0.
        NOTE(review): only the first id of `ids` is processed — presumably
        always called on a single record from the form view; confirm. """
        inventory = self.browse(cr, uid, ids[0], context=context)
        line_ids = [line.id for line in inventory.line_ids]
        self.pool.get('stock.inventory.line').write(cr, uid, line_ids, {'product_qty': 0})
        return True

    def action_done(self, cr, uid, ids, context=None):
        """ Finish the inventory: refuse negative counted quantities, generate
        the corrective moves and process them.
        @return: True
        """
        for inv in self.browse(cr, uid, ids, context=context):
            for inventory_line in inv.line_ids:
                if inventory_line.product_qty < 0 and inventory_line.product_qty != inventory_line.theoretical_qty:
                    # the format arguments must be applied OUTSIDE _() so that the
                    # untranslated format string is looked up in the translations
                    raise osv.except_osv(_('Warning'), _('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s') % (inventory_line.product_id.name, inventory_line.product_qty))
            self.action_check(cr, uid, [inv.id], context=context)
            self.write(cr, uid, [inv.id], {'state': 'done'}, context=context)
            self.post_inventory(cr, uid, inv, context=context)
        return True

    def post_inventory(self, cr, uid, inv, context=None):
        """ Process all the pending corrective moves of the given inventory browse record. """
        #The inventory is posted as a single step which means quants cannot be moved from an internal location to another using an inventory
        #as they will be moved to inventory loss, and other quants will be created to the encoded quant location. This is a normal behavior
        #as quants cannot be reuse from inventory location (users can still manually move the products before/after the inventory if they want).
        move_obj = self.pool.get('stock.move')
        move_obj.action_done(cr, uid, [x.id for x in inv.move_ids if x.state != 'done'], context=context)

    def action_check(self, cr, uid, ids, context=None):
        """ Checks the inventory and computes the stock move to do
        @return: True
        """
        inventory_line_obj = self.pool.get('stock.inventory.line')
        stock_move_obj = self.pool.get('stock.move')
        for inventory in self.browse(cr, uid, ids, context=context):
            #first remove the existing stock moves linked to this inventory
            move_ids = [move.id for move in inventory.move_ids]
            stock_move_obj.unlink(cr, uid, move_ids, context=context)
            for line in inventory.line_ids:
                #compare the checked quantities on inventory lines to the theorical one
                inventory_line_obj._resolve_inventory_line(cr, uid, line, context=context)

    def action_cancel_draft(self, cr, uid, ids, context=None):
        """ Cancels the stock move and change inventory state to draft.
        @return: True
        """
        for inv in self.browse(cr, uid, ids, context=context):
            # (5,) in a one2many command list removes all the lines
            self.write(cr, uid, [inv.id], {'line_ids': [(5,)]}, context=context)
            self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
            self.write(cr, uid, [inv.id], {'state': 'draft'}, context=context)
        return True

    def action_cancel_inventory(self, cr, uid, ids, context=None):
        """ Deprecated alias of action_cancel_draft, kept for backward compatibility. """
        self.action_cancel_draft(cr, uid, ids, context=context)

    def prepare_inventory(self, cr, uid, ids, context=None):
        """ Start the inventory: prefill the lines with the current stock
        (unless lines already exist or the 'partial' filter is used) and move
        the inventory to the 'confirm' state. """
        inventory_line_obj = self.pool.get('stock.inventory.line')
        for inventory in self.browse(cr, uid, ids, context=context):
            # If there are inventory lines already (e.g. from import), respect those and set their theoretical qty
            line_ids = [line.id for line in inventory.line_ids]
            if not line_ids and inventory.filter != 'partial':
                #compute the inventory lines and create them
                vals = self._get_inventory_lines(cr, uid, inventory, context=context)
                for product_line in vals:
                    inventory_line_obj.create(cr, uid, product_line, context=context)
        return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})

    def _get_inventory_lines(self, cr, uid, inventory, context=None):
        """ Build the line values from the quants found in the inventoried
        location (and its children), restricted by the optional owner, lot,
        product and pack filters of the inventory.
        :return: list of dicts ready for stock.inventory.line.create() """
        location_obj = self.pool.get('stock.location')
        product_obj = self.pool.get('product.product')
        location_ids = location_obj.search(cr, uid, [('id', 'child_of', [inventory.location_id.id])], context=context)
        # the WHERE clause is assembled from fixed literals only; every value
        # is passed as a query parameter (no SQL injection risk)
        domain = ' location_id in %s'
        args = (tuple(location_ids),)
        if inventory.partner_id:
            domain += ' and owner_id = %s'
            args += (inventory.partner_id.id,)
        if inventory.lot_id:
            domain += ' and lot_id = %s'
            args += (inventory.lot_id.id,)
        if inventory.product_id:
            domain += ' and product_id = %s'
            args += (inventory.product_id.id,)
        if inventory.package_id:
            domain += ' and package_id = %s'
            args += (inventory.package_id.id,)
        cr.execute('''
           SELECT product_id, sum(qty) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id
           FROM stock_quant WHERE''' + domain + '''
           GROUP BY product_id, location_id, lot_id, package_id, partner_id
        ''', args)
        vals = []
        for product_line in cr.dictfetchall():
            #replace the None the dictionary by False, because falsy values are tested later on
            for key, value in product_line.items():
                if not value:
                    product_line[key] = False
            product_line['inventory_id'] = inventory.id
            product_line['theoretical_qty'] = product_line['product_qty']
            if product_line['product_id']:
                product = product_obj.browse(cr, uid, product_line['product_id'], context=context)
                product_line['product_uom_id'] = product.uom_id.id
            vals.append(product_line)
        return vals

    def _check_filter_product(self, cr, uid, ids, context=None):
        """ Constraint: the fields set on the inventory must be coherent with
        the chosen filter (e.g. a product may only be set for the 'product' or
        'product_owner' filters). """
        for inventory in self.browse(cr, uid, ids, context=context):
            # NOTE(review): this early True for filter 'none' with product,
            # location and lot all set bypasses the checks below — looks
            # intentional (fully-specified inventories are allowed); confirm
            if inventory.filter == 'none' and inventory.product_id and inventory.location_id and inventory.lot_id:
                return True
            if inventory.filter not in ('product', 'product_owner') and inventory.product_id:
                return False
            if inventory.filter != 'lot' and inventory.lot_id:
                return False
            if inventory.filter not in ('owner', 'product_owner') and inventory.partner_id:
                return False
            if inventory.filter != 'pack' and inventory.package_id:
                return False
        return True

    def onchange_filter(self, cr, uid, ids, filter, context=None):
        """ Clear the restriction fields that are not relevant for the newly
        selected filter. """
        to_clean = { 'value': {} }
        if filter not in ('product', 'product_owner'):
            to_clean['value']['product_id'] = False
        if filter != 'lot':
            to_clean['value']['lot_id'] = False
        if filter not in ('owner', 'product_owner'):
            to_clean['value']['partner_id'] = False
        if filter != 'pack':
            to_clean['value']['package_id'] = False
        return to_clean

    _constraints = [
        (_check_filter_product, 'The selected inventory options are not coherent.',
            ['filter', 'product_id', 'lot_id', 'partner_id', 'package_id']),
    ]
class stock_inventory_line(osv.osv):
_name = "stock.inventory.line"
_description = "Inventory Line"
_order = "inventory_id, location_name, product_code, product_name, prodlot_name"
def _get_product_name_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context)
def _get_location_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context)
def _get_prodlot_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context)
def _get_theoretical_qty(self, cr, uid, ids, name, args, context=None):
res = {}
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
for line in self.browse(cr, uid, ids, context=context):
quant_ids = self._get_quants(cr, uid, line, context=context)
quants = quant_obj.browse(cr, uid, quant_ids, context=context)
tot_qty = sum([x.qty for x in quants])
if line.product_uom_id and line.product_id.uom_id.id != line.product_uom_id.id:
tot_qty = uom_obj._compute_qty_obj(cr, uid, line.product_id.uom_id, tot_qty, line.product_uom_id, context=context)
res[line.id] = tot_qty
return res
_columns = {
'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, select=True),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'package_id': fields.many2one('stock.quant.package', 'Pack', select=True),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_qty': fields.float('Checked Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'company_id': fields.related('inventory_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, select=True, readonly=True),
'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
'state': fields.related('inventory_id', 'state', type='char', string='Status', readonly=True),
'theoretical_qty': fields.function(_get_theoretical_qty, type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
store={'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id', 'product_id', 'package_id', 'product_uom_id', 'company_id', 'prod_lot_id', 'partner_id'], 20),},
readonly=True, string="Theoretical Quantity"),
'partner_id': fields.many2one('res.partner', 'Owner'),
'product_name': fields.related('product_id', 'name', type='char', string='Product Name', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'product_code': fields.related('product_id', 'default_code', type='char', string='Product Code', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'location_name': fields.related('location_id', 'complete_name', type='char', string='Location Name', store={
'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}),
'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number Name', store={
'stock.production.lot': (_get_prodlot_change, ['name'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}),
}
_defaults = {
'product_qty': 0,
'product_uom_id': lambda self, cr, uid, ctx=None: self.pool['ir.model.data'].get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
}
def _get_quants(self, cr, uid, line, context=None):
quant_obj = self.pool["stock.quant"]
dom = [('company_id', '=', line.company_id.id), ('location_id', '=', line.location_id.id), ('lot_id', '=', line.prod_lot_id.id),
('product_id','=', line.product_id.id), ('owner_id', '=', line.partner_id.id), ('package_id', '=', line.package_id.id)]
quants = quant_obj.search(cr, uid, dom, context=context)
return quants
def onchange_createline(self, cr, uid, ids, location_id=False, product_id=False, uom_id=False, package_id=False, prod_lot_id=False, partner_id=False, company_id=False, context=None):
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
res = {'value': {}}
# If no UoM already put the default UoM of the product
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool['product.uom'].browse(cr, uid, uom_id, context=context)
if product.uom_id.category_id.id != uom.category_id.id:
res['value']['product_uom_id'] = product.uom_id.id
res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
uom_id = product.uom_id.id
# Calculate theoretical quantity by searching the quants as in quants_get
if product_id and location_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if not company_id:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
dom = [('company_id', '=', company_id), ('location_id', '=', location_id), ('lot_id', '=', prod_lot_id),
('product_id','=', product_id), ('owner_id', '=', partner_id), ('package_id', '=', package_id)]
quants = quant_obj.search(cr, uid, dom, context=context)
th_qty = sum([x.qty for x in quant_obj.browse(cr, uid, quants, context=context)])
if product_id and uom_id and product.uom_id.id != uom_id:
th_qty = uom_obj._compute_qty(cr, uid, product.uom_id.id, th_qty, uom_id)
res['value']['theoretical_qty'] = th_qty
res['value']['product_qty'] = th_qty
return res
def _resolve_inventory_line(self, cr, uid, inventory_line, context=None):
stock_move_obj = self.pool.get('stock.move')
quant_obj = self.pool.get('stock.quant')
diff = inventory_line.theoretical_qty - inventory_line.product_qty
if not diff:
return
#each theorical_lines where difference between theoretical and checked quantities is not 0 is a line for which we need to create a stock move
vals = {
'name': _('INV:') + (inventory_line.inventory_id.name or ''),
'product_id': inventory_line.product_id.id,
'product_uom': inventory_line.product_uom_id.id,
'date': inventory_line.inventory_id.date,
'company_id': inventory_line.inventory_id.company_id.id,
'inventory_id': inventory_line.inventory_id.id,
'state': 'confirmed',
'restrict_lot_id': inventory_line.prod_lot_id.id,
'restrict_partner_id': inventory_line.partner_id.id,
}
inventory_location_id = inventory_line.product_id.property_stock_inventory.id
if diff < 0:
#found more than expected
vals['location_id'] = inventory_location_id
vals['location_dest_id'] = inventory_line.location_id.id
vals['product_uom_qty'] = -diff
else:
#found less than expected
vals['location_id'] = inventory_line.location_id.id
vals['location_dest_id'] = inventory_location_id
vals['product_uom_qty'] = diff
move_id = stock_move_obj.create(cr, uid, vals, context=context)
move = stock_move_obj.browse(cr, uid, move_id, context=context)
if diff > 0:
domain = [('qty', '>', 0.0), ('package_id', '=', inventory_line.package_id.id), ('lot_id', '=', inventory_line.prod_lot_id.id), ('location_id', '=', inventory_line.location_id.id)]
preferred_domain_list = [[('reservation_id', '=', False)], [('reservation_id.inventory_id', '!=', inventory_line.inventory_id.id)]]
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, move.product_qty, domain=domain, prefered_domain_list=preferred_domain_list, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
elif inventory_line.package_id:
stock_move_obj.action_done(cr, uid, move_id, context=context)
quants = [x.id for x in move.quant_ids]
quant_obj.write(cr, uid, quants, {'package_id': inventory_line.package_id.id}, context=context)
res = quant_obj.search(cr, uid, [('qty', '<', 0.0), ('product_id', '=', move.product_id.id),
('location_id', '=', move.location_dest_id.id), ('package_id', '!=', False)], limit=1, context=context)
if res:
for quant in move.quant_ids:
if quant.location_id.id == move.location_dest_id.id: #To avoid we take a quant that was reconcile already
quant_obj._quant_reconcile_negative(cr, uid, quant, move, context=context)
return move_id
# Should be left out in next version
    def restrict_change(self, cr, uid, ids, theoretical_qty, context=None):
        """Deprecated onchange stub kept only for view compatibility; returns no changes."""
        return {}
# Should be left out in next version
def on_change_product_id(self, cr, uid, ids, product, uom, theoretical_qty, context=None):
""" Changes UoM
@param location_id: Location id
@param product: Changed product_id
@param uom: UoM product
@return: Dictionary of changed values
"""
if not product:
return {'value': {'product_uom_id': False}}
obj_product = self.pool.get('product.product').browse(cr, uid, product, context=context)
return {'value': {'product_uom_id': uom or obj_product.uom_id.id}}
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
    _name = "stock.warehouse"
    _description = "Warehouse"
    _columns = {
        # identification
        'name': fields.char('Warehouse Name', required=True, select=True),
        'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, select=True),
        'partner_id': fields.many2one('res.partner', 'Address'),
        # main locations of the warehouse; all sub-locations hang below view_location_id
        'view_location_id': fields.many2one('stock.location', 'View Location', required=True, domain=[('usage', '=', 'view')]),
        'lot_stock_id': fields.many2one('stock.location', 'Location Stock', domain=[('usage', '=', 'internal')], required=True),
        # short code is used as prefix for picking sequences (e.g. WH/IN/00001)
        'code': fields.char('Short Name', size=5, required=True, help="Short name used to identify your warehouse"),
        'route_ids': fields.many2many('stock.location.route', 'stock_route_warehouse', 'warehouse_id', 'route_id', 'Routes', domain="[('warehouse_selectable', '=', True)]", help='Defaults routes through the warehouse'),
        # number of steps used for incoming / outgoing flows; drives which
        # sub-locations and picking types are active
        'reception_steps': fields.selection([
            ('one_step', 'Receive goods directly in stock (1 step)'),
            ('two_steps', 'Unload in input location then go to stock (2 steps)'),
            ('three_steps', 'Unload in input location, go through a quality control before being admitted in stock (3 steps)')], 'Incoming Shipments', 
                                            help="Default incoming route to follow", required=True),
        'delivery_steps': fields.selection([
            ('ship_only', 'Ship directly from stock (Ship only)'),
            ('pick_ship', 'Bring goods to output location before shipping (Pick + Ship)'),
            ('pick_pack_ship', 'Make packages into a dedicated location, then bring them to the output location for shipping (Pick + Pack + Ship)')], 'Outgoing Shippings', 
                                           help="Default outgoing route to follow", required=True),
        # intermediate locations, only active depending on reception/delivery steps
        'wh_input_stock_loc_id': fields.many2one('stock.location', 'Input Location'),
        'wh_qc_stock_loc_id': fields.many2one('stock.location', 'Quality Control Location'),
        'wh_output_stock_loc_id': fields.many2one('stock.location', 'Output Location'),
        'wh_pack_stock_loc_id': fields.many2one('stock.location', 'Packing Location'),
        'mto_pull_id': fields.many2one('procurement.rule', 'MTO rule'),
        # picking types created per warehouse in create_sequences_and_picking_types()
        'pick_type_id': fields.many2one('stock.picking.type', 'Pick Type'),
        'pack_type_id': fields.many2one('stock.picking.type', 'Pack Type'),
        'out_type_id': fields.many2one('stock.picking.type', 'Out Type'),
        'in_type_id': fields.many2one('stock.picking.type', 'In Type'),
        'int_type_id': fields.many2one('stock.picking.type', 'Internal Type'),
        # routes created per warehouse in create_routes()
        'crossdock_route_id': fields.many2one('stock.location.route', 'Crossdock Route'),
        'reception_route_id': fields.many2one('stock.location.route', 'Receipt Route'),
        'delivery_route_id': fields.many2one('stock.location.route', 'Delivery Route'),
        # inter-warehouse resupply configuration
        'resupply_from_wh': fields.boolean('Resupply From Other Warehouses'),
        'resupply_wh_ids': fields.many2many('stock.warehouse', 'stock_wh_resupply_table', 'supplied_wh_id', 'supplier_wh_id', 'Resupply Warehouses'),
        'resupply_route_ids': fields.one2many('stock.location.route', 'supplied_wh_id', 'Resupply Routes', 
                                              help="Routes will be created for these resupply warehouses and you can select them on products and product categories"),
        'default_resupply_wh_id': fields.many2one('stock.warehouse', 'Default Resupply Warehouse', help="Goods will always be resupplied from this warehouse"),
    }
def onchange_filter_default_resupply_wh_id(self, cr, uid, ids, default_resupply_wh_id, resupply_wh_ids, context=None):
resupply_wh_ids = set([x['id'] for x in (self.resolve_2many_commands(cr, uid, 'resupply_wh_ids', resupply_wh_ids, ['id']))])
if default_resupply_wh_id: #If we are removing the default resupply, we don't have default_resupply_wh_id
resupply_wh_ids.add(default_resupply_wh_id)
resupply_wh_ids = list(resupply_wh_ids)
return {'value': {'resupply_wh_ids': resupply_wh_ids}}
def _get_external_transit_location(self, cr, uid, warehouse, context=None):
''' returns browse record of inter company transit location, if found'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
inter_wh_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_inter_wh')[1]
except:
return False
return location_obj.browse(cr, uid, inter_wh_loc, context=context)
def _get_inter_wh_route(self, cr, uid, warehouse, wh, context=None):
return {
'name': _('%s: Supply Product from %s') % (warehouse.name, wh.name),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'supplied_wh_id': warehouse.id,
'supplier_wh_id': wh.id,
}
    def _create_resupply_routes(self, cr, uid, warehouse, supplier_warehouses, default_resupply_wh, context=None):
        """ Create, for each supplier warehouse, a product-selectable route that
        resupplies ``warehouse`` through a transit location, together with its
        pull rules (supplier output -> transit -> warehouse input/stock).

        :param supplier_warehouses: browse records of warehouses allowed to resupply
        :param default_resupply_wh: browse record (or False) of the default resupply
            warehouse; its route is attached directly to ``warehouse.route_ids``
        """
        route_obj = self.pool.get('stock.location.route')
        pull_obj = self.pool.get('procurement.rule')
        #create route selectable on the product to resupply the warehouse from another one
        external_transit_location = self._get_external_transit_location(cr, uid, warehouse, context=context)
        internal_transit_location = warehouse.company_id.internal_transit_location_id
        input_loc = warehouse.wh_input_stock_loc_id
        if warehouse.reception_steps == 'one_step':
            input_loc = warehouse.lot_stock_id
        for wh in supplier_warehouses:
            # same-company resupply goes through the internal transit location,
            # cross-company resupply through the inter-company one (may be absent)
            transit_location = wh.company_id.id == warehouse.company_id.id and internal_transit_location or external_transit_location
            if transit_location:
                output_loc = wh.wh_output_stock_loc_id
                if wh.delivery_steps == 'ship_only':
                    output_loc = wh.lot_stock_id
                    # Create extra MTO rule (only for 'ship only' because in the other cases MTO rules already exists)
                    mto_pull_vals = self._get_mto_pull_rule(cr, uid, wh, [(output_loc, transit_location, wh.out_type_id.id)], context=context)[0]
                    pull_obj.create(cr, uid, mto_pull_vals, context=context)
                inter_wh_route_vals = self._get_inter_wh_route(cr, uid, warehouse, wh, context=context)
                inter_wh_route_id = route_obj.create(cr, uid, vals=inter_wh_route_vals, context=context)
                values = [(output_loc, transit_location, wh.out_type_id.id, wh), (transit_location, input_loc, warehouse.in_type_id.id, warehouse)]
                pull_rules_list = self._get_supply_pull_rules(cr, uid, wh.id, values, inter_wh_route_id, context=context)
                for pull_rule in pull_rules_list:
                    pull_obj.create(cr, uid, vals=pull_rule, context=context)
                #if the warehouse is also set as default resupply method, assign this route automatically to the warehouse
                if default_resupply_wh and default_resupply_wh.id == wh.id:
                    self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]}, context=context)
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'reception_steps': 'one_step',
'delivery_steps': 'ship_only',
}
_sql_constraints = [
('warehouse_name_uniq', 'unique(name, company_id)', 'The name of the warehouse must be unique per company!'),
('warehouse_code_uniq', 'unique(code, company_id)', 'The code of the warehouse must be unique per company!'),
]
def _get_partner_locations(self, cr, uid, ids, context=None):
''' returns a tuple made of the browse record of customer location and the browse record of supplier location'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
customer_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_customers')[1]
supplier_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')[1]
except:
customer_loc = location_obj.search(cr, uid, [('usage', '=', 'customer')], context=context)
customer_loc = customer_loc and customer_loc[0] or False
supplier_loc = location_obj.search(cr, uid, [('usage', '=', 'supplier')], context=context)
supplier_loc = supplier_loc and supplier_loc[0] or False
if not (customer_loc and supplier_loc):
raise osv.except_osv(_('Error!'), _('Can\'t find any customer or supplier location.'))
return location_obj.browse(cr, uid, [customer_loc, supplier_loc], context=context)
def _location_used(self, cr, uid, location_id, warehouse, context=None):
pull_obj = self.pool['procurement.rule']
push_obj = self.pool['stock.location.path']
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_src_id', '=', location_id), ('location_id', '=', location_id)], context=context)
pushs = push_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_from_id', '=', location_id), ('location_dest_id', '=', location_id)], context=context)
if pulls or pushs:
return True
return False
    def switch_location(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
        """ (De)activate the warehouse's intermediate locations (input, quality
        control, output, packing) when the number of reception or delivery
        steps changes. A location is only deactivated when no rule outside the
        warehouse's own routes still references it.
        """
        location_obj = self.pool.get('stock.location')
        # keep the current configuration for the steps not being changed
        new_reception_step = new_reception_step or warehouse.reception_steps
        new_delivery_step = new_delivery_step or warehouse.delivery_steps
        if warehouse.reception_steps != new_reception_step:
            if not self._location_used(cr, uid, warehouse.wh_input_stock_loc_id.id, warehouse, context=context):
                # deactivate both, then selectively re-activate below
                location_obj.write(cr, uid, [warehouse.wh_input_stock_loc_id.id, warehouse.wh_qc_stock_loc_id.id], {'active': False}, context=context)
            if new_reception_step != 'one_step':
                location_obj.write(cr, uid, warehouse.wh_input_stock_loc_id.id, {'active': True}, context=context)
            if new_reception_step == 'three_steps':
                location_obj.write(cr, uid, warehouse.wh_qc_stock_loc_id.id, {'active': True}, context=context)
        if warehouse.delivery_steps != new_delivery_step:
            if not self._location_used(cr, uid, warehouse.wh_output_stock_loc_id.id, warehouse, context=context):
                location_obj.write(cr, uid, [warehouse.wh_output_stock_loc_id.id], {'active': False}, context=context)
            if not self._location_used(cr, uid, warehouse.wh_pack_stock_loc_id.id, warehouse, context=context):
                location_obj.write(cr, uid, [warehouse.wh_pack_stock_loc_id.id], {'active': False}, context=context)
            if new_delivery_step != 'ship_only':
                location_obj.write(cr, uid, warehouse.wh_output_stock_loc_id.id, {'active': True}, context=context)
            if new_delivery_step == 'pick_pack_ship':
                location_obj.write(cr, uid, warehouse.wh_pack_stock_loc_id.id, {'active': True}, context=context)
        return True
def _get_reception_delivery_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'product_categ_selectable': True,
'product_selectable': False,
'sequence': 10,
}
def _get_supply_pull_rules(self, cr, uid, supply_warehouse, values, new_route_id, context=None):
pull_rules_list = []
for from_loc, dest_loc, pick_type_id, warehouse in values:
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': warehouse.lot_stock_id.id != from_loc.id and 'make_to_order' or 'make_to_stock', # first part of the resuply route is MTS
'warehouse_id': warehouse.id,
'propagate_warehouse_id': supply_warehouse,
})
return pull_rules_list
def _get_push_pull_rules(self, cr, uid, warehouse, active, values, new_route_id, context=None):
first_rule = True
push_rules_list = []
pull_rules_list = []
for from_loc, dest_loc, pick_type_id in values:
push_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_from_id': from_loc.id,
'location_dest_id': dest_loc.id,
'route_id': new_route_id,
'auto': 'manual',
'picking_type_id': pick_type_id,
'active': active,
'warehouse_id': warehouse.id,
})
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': first_rule is True and 'make_to_stock' or 'make_to_order',
'active': active,
'warehouse_id': warehouse.id,
})
first_rule = False
return push_rules_list, pull_rules_list
def _get_mto_route(self, cr, uid, context=None):
route_obj = self.pool.get('stock.location.route')
data_obj = self.pool.get('ir.model.data')
try:
mto_route_id = data_obj.get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1]
except:
mto_route_id = route_obj.search(cr, uid, [('name', 'like', _('Make To Order'))], context=context)
mto_route_id = mto_route_id and mto_route_id[0] or False
if not mto_route_id:
raise osv.except_osv(_('Error!'), _('Can\'t find any generic Make To Order route.'))
return mto_route_id
    def _check_remove_mto_resupply_rules(self, cr, uid, warehouse, context=None):
        """ Remove the pull rules that pull from this warehouse's stock into a
        transit location (the outgoing legs of inter-warehouse resupply).

        NOTE(review): ``mto_route_id`` is fetched but never used in the search
        domain below; its only observable effect is to raise early when no
        generic MTO route exists — confirm whether the domain was meant to
        filter on it before touching this.
        """
        pull_obj = self.pool.get('procurement.rule')
        mto_route_id = self._get_mto_route(cr, uid, context=context)
        rules = pull_obj.search(cr, uid, ['&', ('location_src_id', '=', warehouse.lot_stock_id.id), ('location_id.usage', '=', 'transit')], context=context)
        pull_obj.unlink(cr, uid, rules, context=context)
def _get_mto_pull_rule(self, cr, uid, warehouse, values, context=None):
mto_route_id = self._get_mto_route(cr, uid, context=context)
res = []
for value in values:
from_loc, dest_loc, pick_type_id = value
res += [{
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context) + _(' MTO'),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': mto_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': 'make_to_order',
'active': True,
'warehouse_id': warehouse.id,
}]
return res
def _get_crossdock_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'active': warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step',
'sequence': 20,
}
    def create_routes(self, cr, uid, ids, warehouse, context=None):
        """ Create all routes and push/pull rules for a new warehouse:
        reception route, delivery route, generic-MTO pull rule, cross-dock
        route, and the inter-warehouse resupply routes.

        :return: dict of the created ids, suitable to write() on the warehouse
            (route_ids commands, mto_pull_id, reception/delivery/crossdock
            route ids)
        """
        wh_route_ids = []
        route_obj = self.pool.get('stock.location.route')
        pull_obj = self.pool.get('procurement.rule')
        push_obj = self.pool.get('stock.location.path')
        routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
        #create reception route and rules
        route_name, values = routes_dict[warehouse.reception_steps]
        route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
        reception_route_id = route_obj.create(cr, uid, route_vals, context=context)
        wh_route_ids.append((4, reception_route_id))
        push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, reception_route_id, context=context)
        #create the push/pull rules
        for push_rule in push_rules_list:
            push_obj.create(cr, uid, vals=push_rule, context=context)
        for pull_rule in pull_rules_list:
            #all pull rules in reception route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
            pull_rule['procure_method'] = 'make_to_order'
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #create MTS route and pull rules for delivery and a specific route MTO to be set on the product
        route_name, values = routes_dict[warehouse.delivery_steps]
        route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
        #create the route and its pull rules
        delivery_route_id = route_obj.create(cr, uid, route_vals, context=context)
        wh_route_ids.append((4, delivery_route_id))
        dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, delivery_route_id, context=context)
        for pull_rule in pull_rules_list:
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #create MTO pull rule and link it to the generic MTO route
        mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
        mto_pull_id = pull_obj.create(cr, uid, mto_pull_vals, context=context)
        #create a route for cross dock operations, that can be set on products and product categories
        route_name, values = routes_dict['crossdock']
        crossdock_route_vals = self._get_crossdock_route(cr, uid, warehouse, route_name, context=context)
        crossdock_route_id = route_obj.create(cr, uid, vals=crossdock_route_vals, context=context)
        wh_route_ids.append((4, crossdock_route_id))
        dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', values, crossdock_route_id, context=context)
        for pull_rule in pull_rules_list:
            # Fixed cross-dock is logically mto
            pull_rule['procure_method'] = 'make_to_order'
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #create route selectable on the product to resupply the warehouse from another one
        self._create_resupply_routes(cr, uid, warehouse, warehouse.resupply_wh_ids, warehouse.default_resupply_wh_id, context=context)
        #return routes and mto pull rule to store on the warehouse
        return {
            'route_ids': wh_route_ids,
            'mto_pull_id': mto_pull_id,
            'reception_route_id': reception_route_id,
            'delivery_route_id': delivery_route_id,
            'crossdock_route_id': crossdock_route_id,
        }
    def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
        """ Rebuild the warehouse's routes and rules after a change of reception
        and/or delivery steps: retargets the picking types' default locations,
        recreates the delivery and reception rules, toggles the cross-dock
        route, and rewrites the MTO pull rule.
        """
        picking_type_obj = self.pool.get('stock.picking.type')
        pull_obj = self.pool.get('procurement.rule')
        push_obj = self.pool.get('stock.location.path')
        route_obj = self.pool.get('stock.location.route')
        # keep the current configuration for the steps not being changed
        new_reception_step = new_reception_step or warehouse.reception_steps
        new_delivery_step = new_delivery_step or warehouse.delivery_steps
        #change the default source and destination location and (de)activate picking types
        input_loc = warehouse.wh_input_stock_loc_id
        if new_reception_step == 'one_step':
            input_loc = warehouse.lot_stock_id
        output_loc = warehouse.wh_output_stock_loc_id
        if new_delivery_step == 'ship_only':
            output_loc = warehouse.lot_stock_id
        picking_type_obj.write(cr, uid, warehouse.in_type_id.id, {'default_location_dest_id': input_loc.id}, context=context)
        picking_type_obj.write(cr, uid, warehouse.out_type_id.id, {'default_location_src_id': output_loc.id}, context=context)
        picking_type_obj.write(cr, uid, warehouse.pick_type_id.id, {
            'active': new_delivery_step != 'ship_only',
            'default_location_dest_id': output_loc.id if new_delivery_step == 'pick_ship' else warehouse.wh_pack_stock_loc_id.id,
        }, context=context)
        picking_type_obj.write(cr, uid, warehouse.pack_type_id.id, {'active': new_delivery_step == 'pick_pack_ship'}, context=context)
        routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
        #update delivery route and rules: unlink the existing rules of the warehouse delivery route and recreate it
        pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.delivery_route_id.pull_ids], context=context)
        route_name, values = routes_dict[new_delivery_step]
        route_obj.write(cr, uid, warehouse.delivery_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
        dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.delivery_route_id.id, context=context)
        #create the pull rules
        for pull_rule in pull_rules_list:
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #update receipt route and rules: unlink the existing rules of the warehouse receipt route and recreate it
        pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.pull_ids], context=context)
        push_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.push_ids], context=context)
        route_name, values = routes_dict[new_reception_step]
        route_obj.write(cr, uid, warehouse.reception_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
        push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.reception_route_id.id, context=context)
        #create the push/pull rules
        for push_rule in push_rules_list:
            push_obj.create(cr, uid, vals=push_rule, context=context)
        for pull_rule in pull_rules_list:
            #all pull rules in receipt route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
            pull_rule['procure_method'] = 'make_to_order'
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        # cross-dock only makes sense when both flows are multi-step
        route_obj.write(cr, uid, warehouse.crossdock_route_id.id, {'active': new_reception_step != 'one_step' and new_delivery_step != 'ship_only'}, context=context)
        #change MTO rule
        dummy, values = routes_dict[new_delivery_step]
        mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
        pull_obj.write(cr, uid, warehouse.mto_pull_id.id, mto_pull_vals, context=context)
        return True
    def create_sequences_and_picking_types(self, cr, uid, warehouse, context=None):
        """ Create the per-warehouse picking sequences (IN/OUT/PACK/PICK/INT)
        and the five picking types (receipts, delivery orders, internal,
        pack, pick), then store their ids on the warehouse record.

        Pack and pick types are only active when the delivery configuration
        needs them.
        """
        seq_obj = self.pool.get('ir.sequence')
        picking_type_obj = self.pool.get('stock.picking.type')
        #create new sequences
        # sequences are created as SUPERUSER_ID so a regular stock manager can create a warehouse
        in_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence in'), 'prefix': warehouse.code + '/IN/', 'padding': 5}, context=context)
        out_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence out'), 'prefix': warehouse.code + '/OUT/', 'padding': 5}, context=context)
        pack_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence packing'), 'prefix': warehouse.code + '/PACK/', 'padding': 5}, context=context)
        pick_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence picking'), 'prefix': warehouse.code + '/PICK/', 'padding': 5}, context=context)
        int_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence internal'), 'prefix': warehouse.code + '/INT/', 'padding': 5}, context=context)
        wh_stock_loc = warehouse.lot_stock_id
        wh_input_stock_loc = warehouse.wh_input_stock_loc_id
        wh_output_stock_loc = warehouse.wh_output_stock_loc_id
        wh_pack_stock_loc = warehouse.wh_pack_stock_loc_id
        #fetch customer and supplier locations, for references
        customer_loc, supplier_loc = self._get_partner_locations(cr, uid, warehouse.id, context=context)
        #create in, out, internal picking types for warehouse
        # in one-step reception / ship-only delivery, goods go straight to/from stock
        input_loc = wh_input_stock_loc
        if warehouse.reception_steps == 'one_step':
            input_loc = wh_stock_loc
        output_loc = wh_output_stock_loc
        if warehouse.delivery_steps == 'ship_only':
            output_loc = wh_stock_loc
        #choose the next available color for the picking types of this warehouse
        color = 0
        available_colors = [c%9 for c in range(3, 12)]  # put flashy colors first
        all_used_colors = self.pool.get('stock.picking.type').search_read(cr, uid, [('warehouse_id', '!=', False), ('color', '!=', False)], ['color'], order='color')
        #don't use sets to preserve the list order
        for x in all_used_colors:
            if x['color'] in available_colors:
                available_colors.remove(x['color'])
        if available_colors:
            color = available_colors[0]
        #order the picking types with a sequence allowing to have the following suit for each warehouse: reception, internal, pick, pack, ship.
        max_sequence = self.pool.get('stock.picking.type').search_read(cr, uid, [], ['sequence'], order='sequence desc')
        max_sequence = max_sequence and max_sequence[0]['sequence'] or 0
        in_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Receipts'),
            'warehouse_id': warehouse.id,
            'code': 'incoming',
            'sequence_id': in_seq_id,
            'default_location_src_id': supplier_loc.id,
            'default_location_dest_id': input_loc.id,
            'sequence': max_sequence + 1,
            'color': color}, context=context)
        out_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Delivery Orders'),
            'warehouse_id': warehouse.id,
            'code': 'outgoing',
            'sequence_id': out_seq_id,
            'return_picking_type_id': in_type_id,
            'default_location_src_id': output_loc.id,
            'default_location_dest_id': customer_loc.id,
            'sequence': max_sequence + 4,
            'color': color}, context=context)
        # the in type could not reference the out type at creation time: link it now
        picking_type_obj.write(cr, uid, [in_type_id], {'return_picking_type_id': out_type_id}, context=context)
        int_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Internal Transfers'),
            'warehouse_id': warehouse.id,
            'code': 'internal',
            'sequence_id': int_seq_id,
            'default_location_src_id': wh_stock_loc.id,
            'default_location_dest_id': wh_stock_loc.id,
            'active': True,
            'sequence': max_sequence + 2,
            'color': color}, context=context)
        pack_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Pack'),
            'warehouse_id': warehouse.id,
            'code': 'internal',
            'sequence_id': pack_seq_id,
            'default_location_src_id': wh_pack_stock_loc.id,
            'default_location_dest_id': output_loc.id,
            'active': warehouse.delivery_steps == 'pick_pack_ship',
            'sequence': max_sequence + 3,
            'color': color}, context=context)
        pick_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Pick'),
            'warehouse_id': warehouse.id,
            'code': 'internal',
            'sequence_id': pick_seq_id,
            'default_location_src_id': wh_stock_loc.id,
            'default_location_dest_id': output_loc.id if warehouse.delivery_steps == 'pick_ship' else wh_pack_stock_loc.id,
            'active': warehouse.delivery_steps != 'ship_only',
            'sequence': max_sequence + 2,
            'color': color}, context=context)
        #write picking types on WH
        vals = {
            'in_type_id': in_type_id,
            'out_type_id': out_type_id,
            'pack_type_id': pack_type_id,
            'pick_type_id': pick_type_id,
            'int_type_id': int_type_id,
        }
        # bypass this model's own write() to avoid re-triggering route updates
        super(stock_warehouse, self).write(cr, uid, warehouse.id, vals=vals, context=context)
    def create(self, cr, uid, vals, context=None):
        """ Create a warehouse together with its whole infrastructure: view
        location, sub-locations (stock/input/QC/output/packing, activated
        according to the reception/delivery steps), sequences, picking types,
        routes and push/pull rules.
        """
        if context is None:
            context = {}
        if vals is None:
            vals = {}
        data_obj = self.pool.get('ir.model.data')
        seq_obj = self.pool.get('ir.sequence')
        picking_type_obj = self.pool.get('stock.picking.type')
        location_obj = self.pool.get('stock.location')

        #create view location for warehouse
        loc_vals = {
                # NOTE(review): _() is called on a runtime value (the warehouse
                # code), not a literal — it only translates if that exact code
                # happens to exist as a translation term; confirm intent
                'name': _(vals.get('code')),
                'usage': 'view',
                'location_id': data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1],
        }
        if vals.get('company_id'):
            loc_vals['company_id'] = vals.get('company_id')
        wh_loc_id = location_obj.create(cr, uid, loc_vals, context=context)
        vals['view_location_id'] = wh_loc_id
        #create all location
        def_values = self.default_get(cr, uid, {'reception_steps', 'delivery_steps'})
        reception_steps = vals.get('reception_steps',  def_values['reception_steps'])
        delivery_steps = vals.get('delivery_steps', def_values['delivery_steps'])
        # disable active_test so inactive sub-locations can be created/browsed
        context_with_inactive = context.copy()
        context_with_inactive['active_test'] = False
        sub_locations = [
            {'name': _('Stock'), 'active': True, 'field': 'lot_stock_id'},
            {'name': _('Input'), 'active': reception_steps != 'one_step', 'field': 'wh_input_stock_loc_id'},
            {'name': _('Quality Control'), 'active': reception_steps == 'three_steps', 'field': 'wh_qc_stock_loc_id'},
            {'name': _('Output'), 'active': delivery_steps != 'ship_only', 'field': 'wh_output_stock_loc_id'},
            {'name': _('Packing Zone'), 'active': delivery_steps == 'pick_pack_ship', 'field': 'wh_pack_stock_loc_id'},
        ]
        for values in sub_locations:
            loc_vals = {
                'name': values['name'],
                'usage': 'internal',
                'location_id': wh_loc_id,
                'active': values['active'],
            }
            if vals.get('company_id'):
                loc_vals['company_id'] = vals.get('company_id')
            location_id = location_obj.create(cr, uid, loc_vals, context=context_with_inactive)
            vals[values['field']] = location_id

        #create WH
        new_id = super(stock_warehouse, self).create(cr, uid, vals=vals, context=context)

        warehouse = self.browse(cr, uid, new_id, context=context)
        self.create_sequences_and_picking_types(cr, uid, warehouse, context=context)

        #create routes and push/pull rules
        new_objects_dict = self.create_routes(cr, uid, new_id, warehouse, context=context)
        self.write(cr, uid, warehouse.id, new_objects_dict, context=context)
        return new_id
def _format_rulename(self, cr, uid, obj, from_loc, dest_loc, context=None):
return obj.code + ': ' + from_loc.name + ' -> ' + dest_loc.name
def _format_routename(self, cr, uid, obj, name, context=None):
return obj.name + ': ' + name
    def get_routes_dict(self, cr, uid, ids, warehouse, context=None):
        """ Return the template of every supported route configuration as a dict
        mapping a configuration key (reception steps / delivery steps /
        'crossdock') to a tuple (route label, list of
        (from_loc, dest_loc, picking_type_id) legs).
        """
        #fetch customer and supplier locations, for references
        customer_loc, supplier_loc = self._get_partner_locations(cr, uid, ids, context=context)
        return {
            'one_step': (_('Receipt in 1 step'), []),
            'two_steps': (_('Receipt in 2 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
            'three_steps': (_('Receipt in 3 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_qc_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_qc_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
            'crossdock': (_('Cross-Dock'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
            'ship_only': (_('Ship Only'), [(warehouse.lot_stock_id, customer_loc, warehouse.out_type_id.id)]),
            'pick_ship': (_('Pick + Ship'), [(warehouse.lot_stock_id, warehouse.wh_output_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
            'pick_pack_ship': (_('Pick + Pack + Ship'), [(warehouse.lot_stock_id, warehouse.wh_pack_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_pack_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.pack_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
        }
def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
location_obj = self.pool.get('stock.location')
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
#rename location
location_id = warehouse.lot_stock_id.location_id.id
location_obj.write(cr, uid, location_id, {'name': code}, context=context)
#rename route and push-pull rules
for route in warehouse.route_ids:
route_obj.write(cr, uid, route.id, {'name': route.name.replace(warehouse.name, name, 1)}, context=context)
for pull in route.pull_ids:
pull_obj.write(cr, uid, pull.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context)
for push in route.push_ids:
push_obj.write(cr, uid, push.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context)
#change the mto pull rule name
if warehouse.mto_pull_id.id:
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, {'name': warehouse.mto_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
    def _check_delivery_resupply(self, cr, uid, warehouse, new_location, change_to_multiple, context=None):
        """ Will check if the resupply routes from this warehouse follow the changes of number of delivery steps

        :param new_location: id of the new source location for the resupply legs
        :param change_to_multiple: True when switching from single- to
            multi-step delivery (MTO resupply rules are then removed),
            False when switching back to single-step (MTO rules recreated)
        """
        #Check routes that are being delivered by this warehouse and change the rule going to transit location
        route_obj = self.pool.get("stock.location.route")
        pull_obj = self.pool.get("procurement.rule")
        routes = route_obj.search(cr, uid, [('supplier_wh_id','=', warehouse.id)], context=context)
        pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_id.usage', '=', 'transit')], context=context)
        if pulls:
            pull_obj.write(cr, uid, pulls, {'location_src_id': new_location, 'procure_method': change_to_multiple and "make_to_order" or "make_to_stock"}, context=context)
        # Create or clean MTO rules
        mto_route_id = self._get_mto_route(cr, uid, context=context)
        if not change_to_multiple:
            # If single delivery we should create the necessary MTO rules for the resupply 
            # pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
            pull_recs = pull_obj.browse(cr, uid, pulls, context=context)
            # one MTO rule per distinct transit destination
            transfer_locs = list(set([x.location_id for x in pull_recs]))
            vals = [(warehouse.lot_stock_id , x, warehouse.out_type_id.id) for x in transfer_locs]
            mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, vals, context=context)
            for mto_pull_val in mto_pull_vals:
                pull_obj.create(cr, uid, mto_pull_val, context=context)
        else:
            # We need to delete all the MTO pull rules, otherwise they risk to be used in the system
            pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
            if pulls:
                pull_obj.unlink(cr, uid, pulls, context=context)
def _check_reception_resupply(self, cr, uid, warehouse, new_location, context=None):
    """
    Will check if the resupply routes to this warehouse follow the changes of number of receipt steps

    Every pull rule sourced from a transit location on a route supplied to
    this warehouse is re-pointed at ``new_location`` (the stock location for
    one-step reception, the input location for multi-step reception).

    :param warehouse: browse record of the warehouse being reconfigured
    :param new_location: id of the new destination location for the rules
    """
    #Check routes that are being delivered by this warehouse and change the rule coming from transit location
    route_obj = self.pool.get("stock.location.route")
    pull_obj = self.pool.get("procurement.rule")
    routes = route_obj.search(cr, uid, [('supplied_wh_id','=', warehouse.id)], context=context)
    # Fix: propagate the context to the rule search as well — the original
    # call omitted it, unlike the symmetric _check_delivery_resupply(),
    # so context keys such as 'active_test' were silently dropped here.
    pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_src_id.usage', '=', 'transit')], context=context)
    if pulls:
        pull_obj.write(cr, uid, pulls, {'location_id': new_location}, context=context)
def _check_resupply(self, cr, uid, warehouse, reception_new, delivery_new, context=None):
    """Propagate a change of reception/delivery step configuration to the
    inter-warehouse resupply routes.

    Only transitions between the single-step and a multi-step configuration
    matter; multi-step to multi-step changes need no route adaptation.

    :param reception_new: new value of reception_steps (falsy if unchanged)
    :param delivery_new: new value of delivery_steps (falsy if unchanged)
    """
    if reception_new:
        went_single = warehouse.reception_steps != 'one_step' and reception_new == 'one_step'
        went_multi = warehouse.reception_steps == 'one_step' and reception_new != 'one_step'
        if went_single or went_multi:
            # Single-step reception receives directly in stock; otherwise
            # goods first land in the warehouse input location.
            target = warehouse.lot_stock_id.id if went_single else warehouse.wh_input_stock_loc_id.id
            self._check_reception_resupply(cr, uid, warehouse, target, context=context)
    if delivery_new:
        went_single = warehouse.delivery_steps != 'ship_only' and delivery_new == 'ship_only'
        went_multi = warehouse.delivery_steps == 'ship_only' and delivery_new != 'ship_only'
        if went_single or went_multi:
            # Ship-only delivers straight from stock; otherwise deliveries
            # are staged through the warehouse output location.
            target = warehouse.lot_stock_id.id if went_single else warehouse.wh_output_stock_loc_id.id
            self._check_delivery_resupply(cr, uid, warehouse, target, went_multi, context=context)
def write(self, cr, uid, ids, vals, context=None):
    """Override of write() that keeps the warehouse's technical records
    (locations, routes, pull rules, sequences, resupply routes) in sync
    with configuration changes.

    Handled cases:
    - change of reception_steps / delivery_steps: (de)activate locations,
      switch routes and adapt inter-warehouse resupply rules;
    - rename / recode: rename sequences, picking types and locations;
    - change of resupply_wh_ids: create/delete the matching resupply routes;
    - change of default_resupply_wh_id: swap the route applied by default.
    """
    if context is None:
        context = {}
    if isinstance(ids, (int, long)):
        ids = [ids]
    seq_obj = self.pool.get('ir.sequence')
    route_obj = self.pool.get('stock.location.route')
    # Browse with active_test disabled so archived locations/routes of the
    # warehouse are still visible while reconfiguring.
    context_with_inactive = context.copy()
    context_with_inactive['active_test'] = False
    for warehouse in self.browse(cr, uid, ids, context=context_with_inactive):
        #first of all, check if we need to delete and recreate route
        if vals.get('reception_steps') or vals.get('delivery_steps'):
            #activate and deactivate location according to reception and delivery option
            self.switch_location(cr, uid, warehouse.id, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context)
            # switch between route
            self.change_route(cr, uid, ids, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context_with_inactive)
            # Check if we need to change something to resupply warehouses and associated MTO rules
            self._check_resupply(cr, uid, warehouse, vals.get('reception_steps'), vals.get('delivery_steps'), context=context)
        if vals.get('code') or vals.get('name'):
            name = warehouse.name
            #rename sequence
            if vals.get('name'):
                name = vals.get('name', warehouse.name)
            self._handle_renaming(cr, uid, warehouse, name, vals.get('code', warehouse.code), context=context_with_inactive)
            if warehouse.in_type_id:
                # Keep the per-picking-type sequences aligned with the new
                # warehouse name/code (prefix like "WH\IN\").
                seq_obj.write(cr, uid, warehouse.in_type_id.sequence_id.id, {'name': name + _(' Sequence in'), 'prefix': vals.get('code', warehouse.code) + '\IN\\'}, context=context)
                seq_obj.write(cr, uid, warehouse.out_type_id.sequence_id.id, {'name': name + _(' Sequence out'), 'prefix': vals.get('code', warehouse.code) + '\OUT\\'}, context=context)
                seq_obj.write(cr, uid, warehouse.pack_type_id.sequence_id.id, {'name': name + _(' Sequence packing'), 'prefix': vals.get('code', warehouse.code) + '\PACK\\'}, context=context)
                seq_obj.write(cr, uid, warehouse.pick_type_id.sequence_id.id, {'name': name + _(' Sequence picking'), 'prefix': vals.get('code', warehouse.code) + '\PICK\\'}, context=context)
                seq_obj.write(cr, uid, warehouse.int_type_id.sequence_id.id, {'name': name + _(' Sequence internal'), 'prefix': vals.get('code', warehouse.code) + '\INT\\'}, context=context)
        if vals.get('resupply_wh_ids') and not vals.get('resupply_route_ids'):
            for cmd in vals.get('resupply_wh_ids'):
                if cmd[0] == 6:
                    # (6, 0, ids) command: diff against the current supplier
                    # warehouses and create/drop routes accordingly.
                    new_ids = set(cmd[2])
                    old_ids = set([wh.id for wh in warehouse.resupply_wh_ids])
                    to_add_wh_ids = new_ids - old_ids
                    if to_add_wh_ids:
                        supplier_warehouses = self.browse(cr, uid, list(to_add_wh_ids), context=context)
                        self._create_resupply_routes(cr, uid, warehouse, supplier_warehouses, warehouse.default_resupply_wh_id, context=context)
                    to_remove_wh_ids = old_ids - new_ids
                    if to_remove_wh_ids:
                        to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', 'in', list(to_remove_wh_ids))], context=context)
                        if to_remove_route_ids:
                            route_obj.unlink(cr, uid, to_remove_route_ids, context=context)
                else:
                    #not implemented
                    # Only the (6, 0, ids) replace-all command is supported.
                    pass
        if 'default_resupply_wh_id' in vals:
            if vals.get('default_resupply_wh_id') == warehouse.id:
                raise osv.except_osv(_('Warning'),_('The default resupply warehouse should be different than the warehouse itself!'))
            if warehouse.default_resupply_wh_id:
                #remove the existing resupplying route on the warehouse
                to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', warehouse.default_resupply_wh_id.id)], context=context)
                for inter_wh_route_id in to_remove_route_ids:
                    self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]})
            if vals.get('default_resupply_wh_id'):
                #assign the new resupplying route on all products
                to_assign_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', vals.get('default_resupply_wh_id'))], context=context)
                for inter_wh_route_id in to_assign_route_ids:
                    self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]})
    return super(stock_warehouse, self).write(cr, uid, ids, vals=vals, context=context)
def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
    """Return the list of every route id relevant to ``warehouse``: its own
    routes, the resupply routes it is supplied through, and its MTO route."""
    route_obj = self.pool.get("stock.location.route")
    route_ids = [route.id for route in warehouse.route_ids]
    supplied_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id)], context=context)
    return route_ids + supplied_route_ids + [warehouse.mto_pull_id.route_id.id]
def view_all_routes_for_wh(self, cr, uid, ids, context=None):
    """Action method: open a tree/form view on every route related to the
    selected warehouses (used by the 'Routes' button on the warehouse form)."""
    route_ids = []
    for warehouse in self.browse(cr, uid, ids, context=context):
        route_ids.extend(self.get_all_routes_for_wh(cr, uid, warehouse, context=context))
    return {
        'name': _('Warehouse\'s Routes'),
        'domain': [('id', 'in', route_ids)],
        'res_model': 'stock.location.route',
        'type': 'ir.actions.act_window',
        'view_id': False,
        'view_mode': 'tree,form',
        'view_type': 'form',
        'limit': 20,
    }
class stock_location_path(osv.osv):
    """Push rule: when a move reaches location_from_id, automatically chain
    a follow-up move (or redirect the move) towards location_dest_id."""
    _name = "stock.location.path"
    _description = "Pushed Flows"
    _order = "name"

    def _get_rules(self, cr, uid, ids, context=None):
        """Store-invalidation helper: from route ids, return the ids of the
        push rules whose related 'route_sequence' must be recomputed."""
        res = []
        for route in self.browse(cr, uid, ids, context=context):
            res += [x.id for x in route.push_ids]
        return res

    _columns = {
        'name': fields.char('Operation Name', required=True),
        'company_id': fields.many2one('res.company', 'Company'),
        'route_id': fields.many2one('stock.location.route', 'Route'),
        'location_from_id': fields.many2one('stock.location', 'Source Location', ondelete='cascade', select=1, required=True),
        'location_dest_id': fields.many2one('stock.location', 'Destination Location', ondelete='cascade', select=1, required=True),
        'delay': fields.integer('Delay (days)', help="Number of days to do this transition"),
        'picking_type_id': fields.many2one('stock.picking.type', 'Type of the new Operation', required=True, help="This is the picking type associated with the different pickings"),
        'auto': fields.selection(
            [('auto','Automatic Move'), ('manual','Manual Operation'),('transparent','Automatic No Step Added')],
            'Automatic Move',
            required=True, select=1,
            help="This is used to define paths the product has to follow within the location tree.\n" \
                "The 'Automatic Move' value will create a stock move after the current one that will be "\
                "validated automatically. With 'Manual Operation', the stock move has to be validated "\
                "by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
            ),
        'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move is cancelled or split, the move generated by this move will too'),
        'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
        # Denormalized copy of the route's sequence, kept up to date through
        # the store triggers below so rules can be ordered by route priority.
        'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
            store={
                'stock.location.route': (_get_rules, ['sequence'], 10),
                'stock.location.path': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
        }),
        'sequence': fields.integer('Sequence'),
    }
    _defaults = {
        'auto': 'auto',
        'delay': 0,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c),
        'propagate': True,
        'active': True,
    }

    def _prepare_push_apply(self, cr, uid, rule, move, context=None):
        """Build the values used to copy ``move`` into the chained move
        created by the push rule (dates shifted by the rule's delay)."""
        newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        return {
                'origin': move.origin or move.picking_id.name or "/",
                'location_id': move.location_dest_id.id,
                'location_dest_id': rule.location_dest_id.id,
                'date': newdate,
                'company_id': rule.company_id and rule.company_id.id or False,
                'date_expected': newdate,
                'picking_id': False,
                'picking_type_id': rule.picking_type_id and rule.picking_type_id.id or False,
                'propagate': rule.propagate,
                'push_rule_id': rule.id,
                'warehouse_id': rule.warehouse_id and rule.warehouse_id.id or False,
            }

    def _apply(self, cr, uid, rule, move, context=None):
        """Apply the push rule to ``move``: either redirect it in place
        ('transparent') or create and confirm a chained follow-up move."""
        move_obj = self.pool.get('stock.move')
        newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        if rule.auto == 'transparent':
            old_dest_location = move.location_dest_id.id
            move_obj.write(cr, uid, [move.id], {
                'date': newdate,
                'date_expected': newdate,
                'location_dest_id': rule.location_dest_id.id
            })
            #avoid looping if a push rule is not well configured
            if rule.location_dest_id.id != old_dest_location:
                #call again push_apply to see if a next step is defined
                move_obj._push_apply(cr, uid, [move], context=context)
        else:
            vals = self._prepare_push_apply(cr, uid, rule, move, context=context)
            move_id = move_obj.copy(cr, uid, move.id, vals, context=context)
            move_obj.write(cr, uid, [move.id], {
                'move_dest_id': move_id,
            })
            # NOTE(review): context is deliberately not forwarded here in the
            # original code — confirm before changing.
            move_obj.action_confirm(cr, uid, [move_id], context=None)
# -------------------------
# Packaging related stuff
# -------------------------
from openerp.report import report_sxw
class stock_package(osv.osv):
    """
    These are the packages, containing quants and/or other packages.

    Packages form a tree (parent_id / children_ids) stored with the
    parent_left/parent_right nested-set pattern for fast 'child_of' searches.
    """
    _name = "stock.quant.package"
    _description = "Physical Packages"
    _parent_name = "parent_id"
    _parent_store = True
    _parent_order = 'name'
    _order = 'parent_left'

    def name_get(self, cr, uid, ids, context=None):
        """Display name is the full path from the root package (see
        _complete_name)."""
        res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
        return res.items()

    def _complete_name(self, cr, uid, ids, name, args, context=None):
        """ Forms complete name of location from parent location to child location.
        @return: Dictionary of values
        """
        res = {}
        for m in self.browse(cr, uid, ids, context=context):
            res[m.id] = m.name
            parent = m.parent_id
            # Walk up to the root, prefixing each ancestor's name.
            while parent:
                res[m.id] = parent.name + ' / ' + res[m.id]
                parent = parent.parent_id
        return res

    def _get_packages(self, cr, uid, ids, context=None):
        """Returns packages from quants for store

        Store-invalidation helper: from the given quant ids, return every
        package id (including ancestors) whose stored info may have changed.
        """
        res = set()
        for quant in self.browse(cr, uid, ids, context=context):
            pack = quant.package_id
            while pack:
                res.add(pack.id)
                pack = pack.parent_id
        return list(res)

    def _get_package_info(self, cr, uid, ids, name, args, context=None):
        """Compute location/owner/company of a package from the first quant
        found inside it (or its sub-packages); empty packages fall back to
        False values."""
        quant_obj = self.pool.get("stock.quant")
        default_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        res = dict((res_id, {'location_id': False, 'company_id': default_company_id, 'owner_id': False}) for res_id in ids)
        for pack in self.browse(cr, uid, ids, context=context):
            quants = quant_obj.search(cr, uid, [('package_id', 'child_of', pack.id)], context=context)
            if quants:
                # All quants of a package are expected to share the same
                # location/owner/company (see _check_location_constraint),
                # so reading the first one is enough.
                quant = quant_obj.browse(cr, uid, quants[0], context=context)
                res[pack.id]['location_id'] = quant.location_id.id
                res[pack.id]['owner_id'] = quant.owner_id.id
                res[pack.id]['company_id'] = quant.company_id.id
            else:
                res[pack.id]['location_id'] = False
                res[pack.id]['owner_id'] = False
                res[pack.id]['company_id'] = False
        return res

    def _get_packages_to_relocate(self, cr, uid, ids, context=None):
        """Store-invalidation helper: a package change affects itself and its
        direct parent."""
        res = set()
        for pack in self.browse(cr, uid, ids, context=context):
            res.add(pack.id)
            if pack.parent_id:
                res.add(pack.parent_id.id)
        return list(res)

    _columns = {
        'name': fields.char('Package Reference', select=True, copy=False),
        'complete_name': fields.function(_complete_name, type='char', string="Package Name",),
        'parent_left': fields.integer('Left Parent', select=1),
        'parent_right': fields.integer('Right Parent', select=1),
        'packaging_id': fields.many2one('product.packaging', 'Packaging', help="This field should be completed only if everything inside the package share the same product, otherwise it doesn't really makes sense.", select=True),
        'ul_id': fields.many2one('product.ul', 'Logistic Unit'),
        'location_id': fields.function(_get_package_info, type='many2one', relation='stock.location', string='Location', multi="package",
                                    store={
                                       'stock.quant': (_get_packages, ['location_id'], 10),
                                       'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
                                    }, readonly=True, select=True),
        'quant_ids': fields.one2many('stock.quant', 'package_id', 'Bulk Content', readonly=True),
        'parent_id': fields.many2one('stock.quant.package', 'Parent Package', help="The package containing this item", ondelete='restrict', readonly=True),
        'children_ids': fields.one2many('stock.quant.package', 'parent_id', 'Contained Packages', readonly=True),
        'company_id': fields.function(_get_package_info, type="many2one", relation='res.company', string='Company', multi="package",
                                    store={
                                       'stock.quant': (_get_packages, ['company_id'], 10),
                                       'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
                                    }, readonly=True, select=True),
        'owner_id': fields.function(_get_package_info, type='many2one', relation='res.partner', string='Owner', multi="package",
                                store={
                                       'stock.quant': (_get_packages, ['owner_id'], 10),
                                       'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
                                    }, readonly=True, select=True),
    }
    _defaults = {
        'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.quant.package') or _('Unknown Pack')
    }

    def _check_location_constraint(self, cr, uid, packs, context=None):
        '''checks that all quants in a package are stored in the same location. This function cannot be used
           as a constraint because it needs to be checked on pack operations (they may not call write on the
           package)
        '''
        quant_obj = self.pool.get('stock.quant')
        for pack in packs:
            parent = pack
            while parent.parent_id:
                parent = parent.parent_id
            quant_ids = self.get_content(cr, uid, [parent.id], context=context)
            quants = [x for x in quant_obj.browse(cr, uid, quant_ids, context=context) if x.qty > 0]
            location_id = quants and quants[0].location_id.id or False
            # Bug fix: the original tested ``not [quant.location_id.id ==
            # location_id for quant in quants]`` — a list comprehension is
            # truthy whenever quants is non-empty, regardless of the boolean
            # values inside, so the constraint could never be raised.
            # all() performs the intended check.
            if not all(quant.location_id.id == location_id for quant in quants):
                raise osv.except_osv(_('Error'), _('Everything inside a package should be in the same location'))
        return True

    def action_print(self, cr, uid, ids, context=None):
        """Print the small barcode label report for the selected packages."""
        context = dict(context or {}, active_ids=ids)
        return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_package_barcode_small', context=context)

    def unpack(self, cr, uid, ids, context=None):
        """Move the content (quants and sub-packages) one level up, then
        delete the now-empty packages and reopen the package list view."""
        quant_obj = self.pool.get('stock.quant')
        for package in self.browse(cr, uid, ids, context=context):
            quant_ids = [quant.id for quant in package.quant_ids]
            quant_obj.write(cr, uid, quant_ids, {'package_id': package.parent_id.id or False}, context=context)
            children_package_ids = [child_package.id for child_package in package.children_ids]
            self.write(cr, uid, children_package_ids, {'parent_id': package.parent_id.id or False}, context=context)
        #delete current package since it contains nothing anymore
        self.unlink(cr, uid, ids, context=context)
        return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'action_package_view', context=context)

    def get_content(self, cr, uid, ids, context=None):
        """Return the ids of every quant contained in the given packages,
        recursively including sub-packages."""
        child_package_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
        return self.pool.get('stock.quant').search(cr, uid, [('package_id', 'in', child_package_ids)], context=context)

    def get_content_package(self, cr, uid, ids, context=None):
        """Action method: open the quant list filtered on the content of the
        given packages."""
        quants_ids = self.get_content(cr, uid, ids, context=context)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'quantsact', context=context)
        res['domain'] = [('id', 'in', quants_ids)]
        return res

    def _get_product_total_qty(self, cr, uid, package_record, product_id, context=None):
        ''' find the total of given product 'product_id' inside the given package 'package_id'''
        quant_obj = self.pool.get('stock.quant')
        all_quant_ids = self.get_content(cr, uid, [package_record.id], context=context)
        total = 0
        for quant in quant_obj.browse(cr, uid, all_quant_ids, context=context):
            if quant.product_id.id == product_id:
                total += quant.qty
        return total

    def _get_all_products_quantities(self, cr, uid, package_id, context=None):
        '''This function computes the different product quantities for the given package

        :return: dict {product_id: total quantity inside the package}
        '''
        quant_obj = self.pool.get('stock.quant')
        res = {}
        for quant in quant_obj.browse(cr, uid, self.get_content(cr, uid, package_id, context=context)):
            if quant.product_id.id not in res:
                res[quant.product_id.id] = 0
            res[quant.product_id.id] += quant.qty
        return res

    def copy_pack(self, cr, uid, id, default_pack_values=None, default=None, context=None):
        """Duplicate a package and re-create, for the new package, a copy of
        every pack operation whose result was the original package.

        :param default_pack_values: copy defaults for the package itself
        :param default: copy defaults for the duplicated pack operations
            (result_package_id is forced to the new package)
        """
        stock_pack_operation_obj = self.pool.get('stock.pack.operation')
        if default is None:
            default = {}
        new_package_id = self.copy(cr, uid, id, default_pack_values, context=context)
        default['result_package_id'] = new_package_id
        op_ids = stock_pack_operation_obj.search(cr, uid, [('result_package_id', '=', id)], context=context)
        for op_id in op_ids:
            stock_pack_operation_obj.copy(cr, uid, op_id, default, context=context)
class stock_pack_operation(osv.osv):
    """A physical handling line of a picking: move a quantity of a product
    (or a whole package) from a source to a destination location, optionally
    into a result package."""
    _name = "stock.pack.operation"
    _description = "Packing Operation"

    def _get_remaining_prod_quantities(self, cr, uid, operation, context=None):
        '''Get the remaining quantities per product on an operation with a package. This function returns a dictionary'''
        #if the operation doesn't concern a package, it's not relevant to call this function
        if not operation.package_id or operation.product_id:
            return {operation.product_id.id: operation.remaining_qty}
        #get the total of products the package contains
        res = self.pool.get('stock.quant.package')._get_all_products_quantities(cr, uid, operation.package_id.id, context=context)
        #reduce by the quantities linked to a move
        for record in operation.linked_move_operation_ids:
            if record.move_id.product_id.id not in res:
                res[record.move_id.product_id.id] = 0
            res[record.move_id.product_id.id] -= record.qty
        return res

    def _get_remaining_qty(self, cr, uid, ids, name, args, context=None):
        """Function field: quantity (in the product's default UoM) not yet
        matched with stock moves via linked_move_operation_ids."""
        uom_obj = self.pool.get('product.uom')
        res = {}
        for ops in self.browse(cr, uid, ids, context=context):
            res[ops.id] = 0
            if ops.package_id and not ops.product_id:
                #dont try to compute the remaining quantity for packages because it's not relevant (a package could include different products).
                #should use _get_remaining_prod_quantities instead
                continue
            else:
                qty = ops.product_qty
                if ops.product_uom_id:
                    # Convert the operation quantity to the product's default UoM.
                    qty = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
                for record in ops.linked_move_operation_ids:
                    qty -= record.qty
                res[ops.id] = float_round(qty, precision_rounding=ops.product_id.uom_id.rounding)
        return res

    def product_id_change(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
        """Onchange for product_id: run the UoM sanity checks and default the
        UoM to the product's own when none is set."""
        res = self.on_change_tests(cr, uid, ids, product_id, product_uom_id, product_qty, context=context)
        if product_id and not product_uom_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            res['value']['product_uom_id'] = product.uom_id.id
        return res

    def on_change_tests(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
        """Onchange helper: warn when the chosen UoM has a different category
        than the product's UoM, or when the quantity does not match the UoM
        rounding."""
        res = {'value': {}}
        uom_obj = self.pool.get('product.uom')
        if product_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            product_uom_id = product_uom_id or product.uom_id.id
            selected_uom = uom_obj.browse(cr, uid, product_uom_id, context=context)
            if selected_uom.category_id.id != product.uom_id.category_id.id:
                res['warning'] = {
                    'title': _('Warning: wrong UoM!'),
                    'message': _('The selected UoM for product %s is not compatible with the UoM set on the product form. \nPlease choose an UoM within the same UoM category.') % (product.name)
                }
            if product_qty and 'warning' not in res:
                # Round-trip through the same UoM just to apply its rounding.
                rounded_qty = uom_obj._compute_qty(cr, uid, product_uom_id, product_qty, product_uom_id, round=True)
                if rounded_qty != product_qty:
                    res['warning'] = {
                        'title': _('Warning: wrong quantity!'),
                        'message': _('The chosen quantity for product %s is not compatible with the UoM rounding. It will be automatically converted at confirmation') % (product.name)
                    }
        return res

    _columns = {
        'picking_id': fields.many2one('stock.picking', 'Stock Picking', help='The stock operation where the packing has been made', required=True),
        'product_id': fields.many2one('product.product', 'Product', ondelete="CASCADE"),  # either a product is set...
        'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'qty_done': fields.float('Quantity Processed', digits_compute=dp.get_precision('Product Unit of Measure')),
        'package_id': fields.many2one('stock.quant.package', 'Source Package'),  # ...or a whole source package
        'lot_id': fields.many2one('stock.production.lot', 'Lot/Serial Number'),
        'result_package_id': fields.many2one('stock.quant.package', 'Destination Package', help="If set, the operations are packed into this package", required=False, ondelete='cascade'),
        'date': fields.datetime('Date', required=True),
        'owner_id': fields.many2one('res.partner', 'Owner', help="Owner of the quants"),
        #'update_cost': fields.boolean('Need cost update'),
        'cost': fields.float("Cost", help="Unit Cost for this product line"),
        'currency': fields.many2one('res.currency', string="Currency", help="Currency in which Unit cost is expressed", ondelete='CASCADE'),
        'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'operation_id', string='Linked Moves', readonly=True, help='Moves impacted by this operation for the computation of the remaining quantities'),
        'remaining_qty': fields.function(_get_remaining_qty, type='float', digits = 0, string="Remaining Qty", help="Remaining quantity in default UoM according to moves matched with this operation. "),
        'location_id': fields.many2one('stock.location', 'Source Location', required=True),
        'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True),
        'processed': fields.selection([('true','Yes'), ('false','No')],'Has been processed?', required=True),
    }

    _defaults = {
        'date': fields.date.context_today,
        'qty_done': 0,
        'processed': lambda *a: 'false',
    }

    def write(self, cr, uid, ids, vals, context=None):
        """Override: after a write, recompute the remaining quantities of the
        impacted pickings unless the caller opted out via context
        key 'no_recompute'."""
        context = context or {}
        res = super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context)
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not context.get("no_recompute"):
            pickings = vals.get('picking_id') and [vals['picking_id']] or list(set([x.picking_id.id for x in self.browse(cr, uid, ids, context=context)]))
            self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, pickings, context=context)
        return res

    def create(self, cr, uid, vals, context=None):
        """Override: after creation, recompute the remaining quantities of the
        picking unless context key 'no_recompute' is set."""
        context = context or {}
        res_id = super(stock_pack_operation, self).create(cr, uid, vals, context=context)
        if vals.get("picking_id") and not context.get("no_recompute"):
            self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, [vals['picking_id']], context=context)
        return res_id

    def action_drop_down(self, cr, uid, ids, context=None):
        ''' Used by barcode interface to say that pack_operation has been moved from src location
            to destination location, if qty_done is less than product_qty than we have to split the
            operation in two to process the one with the qty moved
        '''
        processed_ids = []
        move_obj = self.pool.get("stock.move")
        for pack_op in self.browse(cr, uid, ids, context=None):
            if pack_op.product_id and pack_op.location_id and pack_op.location_dest_id:
                move_obj.check_tracking_product(cr, uid, pack_op.product_id, pack_op.lot_id.id, pack_op.location_id, pack_op.location_dest_id, context=context)
            op = pack_op.id
            if pack_op.qty_done < pack_op.product_qty:
                # we split the operation in two
                op = self.copy(cr, uid, pack_op.id, {'product_qty': pack_op.qty_done, 'qty_done': pack_op.qty_done}, context=context)
                self.write(cr, uid, [pack_op.id], {'product_qty': pack_op.product_qty - pack_op.qty_done, 'qty_done': 0, 'lot_id': False}, context=context)
            processed_ids.append(op)
        self.write(cr, uid, processed_ids, {'processed': 'true'}, context=context)

    def create_and_assign_lot(self, cr, uid, id, name, context=None):
        ''' Used by barcode interface to create a new lot and assign it to the operation

        Reuses an existing lot with the same name for the same product when
        one exists; otherwise creates a new one.
        '''
        obj = self.browse(cr,uid,id,context)
        product_id = obj.product_id.id
        val = {'product_id': product_id}
        new_lot_id = False
        if name:
            lots = self.pool.get('stock.production.lot').search(cr, uid, ['&', ('name', '=', name), ('product_id', '=', product_id)], context=context)
            if lots:
                new_lot_id = lots[0]
            val.update({'name': name})
        if not new_lot_id:
            new_lot_id = self.pool.get('stock.production.lot').create(cr, uid, val, context=context)
        self.write(cr, uid, id, {'lot_id': new_lot_id}, context=context)

    def _search_and_increment(self, cr, uid, picking_id, domain, filter_visible=False, visible_op_ids=False, increment=True, context=None):
        '''Search for an operation with given 'domain' in a picking, if it exists increment the qty (+1) otherwise create it

        :param domain: list of tuple directly reusable as a domain
        context can receive a key 'current_package_id' with the package to consider for this operation
        returns True
        '''
        if context is None:
            context = {}

        #if current_package_id is given in the context, we increase the number of items in this package
        package_clause = [('result_package_id', '=', context.get('current_package_id', False))]
        existing_operation_ids = self.search(cr, uid, [('picking_id', '=', picking_id)] + domain + package_clause, context=context)
        todo_operation_ids = []
        if existing_operation_ids:
            if filter_visible:
                # Only consider operations currently displayed in the barcode UI.
                todo_operation_ids = [val for val in existing_operation_ids if val in visible_op_ids]
            else:
                todo_operation_ids = existing_operation_ids
        if todo_operation_ids:
            #existing operation found for the given domain and picking => increment its quantity
            operation_id = todo_operation_ids[0]
            op_obj = self.browse(cr, uid, operation_id, context=context)
            qty = op_obj.qty_done
            if increment:
                qty += 1
            else:
                qty -= 1 if qty >= 1 else 0
                if qty == 0 and op_obj.product_qty == 0:
                    #we have a line with 0 qty set, so delete it
                    self.unlink(cr, uid, [operation_id], context=context)
                    return False
            self.write(cr, uid, [operation_id], {'qty_done': qty}, context=context)
        else:
            #no existing operation found for the given domain and picking => create a new one
            picking_obj = self.pool.get("stock.picking")
            picking = picking_obj.browse(cr, uid, picking_id, context=context)
            values = {
                'picking_id': picking_id,
                'product_qty': 0,
                'location_id': picking.location_id.id,
                'location_dest_id': picking.location_dest_id.id,
                'qty_done': 1,
            }
            for key in domain:
                var_name, dummy, value = key
                uom_id = False
                if var_name == 'product_id':
                    uom_id = self.pool.get('product.product').browse(cr, uid, value, context=context).uom_id.id
                update_dict = {var_name: value}
                if uom_id:
                    update_dict['product_uom_id'] = uom_id
                values.update(update_dict)
            operation_id = self.create(cr, uid, values, context=context)
        return operation_id
class stock_move_operation_link(osv.osv):
    """
    Table making the link between stock.moves and stock.pack.operations to compute the remaining quantities on each of these objects
    """
    _name = "stock.move.operation.link"
    _description = "Link between stock moves and pack operations"

    _columns = {
        'qty': fields.float('Quantity', help="Quantity of products to consider when talking about the contribution of this pack operation towards the remaining quantity of the move (and inverse). Given in the product main uom."),
        'operation_id': fields.many2one('stock.pack.operation', 'Operation', required=True, ondelete="cascade"),
        'move_id': fields.many2one('stock.move', 'Move', required=True, ondelete="cascade"),
        'reserved_quant_id': fields.many2one('stock.quant', 'Reserved Quant', help="Technical field containing the quant that created this link between an operation and a stock move. Used at the stock_move_obj.action_done() time to avoid seeking a matching quant again"),
    }

    def get_specific_domain(self, cr, uid, record, context=None):
        '''Returns the specific domain to consider for quant selection in action_assign() or action_done() of stock.move,
        having the record given as parameter making the link between the stock move and a pack operation

        :param record: browse record of this link model
        :return: list of domain tuples restricting the candidate quants
        '''
        op = record.operation_id
        domain = []
        if op.package_id and op.product_id:
            #if removing a product from a box, we restrict the choice of quants to this box
            domain.append(('package_id', '=', op.package_id.id))
        elif op.package_id:
            #if moving a box, we allow to take everything from inside boxes as well
            domain.append(('package_id', 'child_of', [op.package_id.id]))
        else:
            #if not given any information about package, we don't open boxes
            domain.append(('package_id', '=', False))
        #if lot info is given, we restrict choice to this lot otherwise we can take any
        if op.lot_id:
            domain.append(('lot_id', '=', op.lot_id.id))
        #if owner info is given, we restrict to this owner otherwise we restrict to no owner
        if op.owner_id:
            domain.append(('owner_id', '=', op.owner_id.id))
        else:
            domain.append(('owner_id', '=', False))
        return domain
class stock_warehouse_orderpoint(osv.osv):
"""
Defines Minimum stock rules.
"""
_name = "stock.warehouse.orderpoint"
_description = "Minimum Inventory Rule"
def subtract_procurements(self, cr, uid, orderpoint, context=None):
    '''This function returns quantity of product that needs to be deducted from the orderpoint computed quantity because there's already a procurement created with aim to fulfill it.

    :param orderpoint: browse record of the orderpoint
    :return: quantity (in the product's default UoM) already covered by
        running procurements of this orderpoint
    '''
    qty = 0
    uom_obj = self.pool.get("product.uom")
    for procurement in orderpoint.procurement_ids:
        if procurement.state in ('cancel', 'done'):
            continue
        procurement_qty = uom_obj._compute_qty_obj(cr, uid, procurement.product_uom, procurement.product_qty, procurement.product_id.uom_id, context=context)
        for move in procurement.move_ids:
            #need to add the moves in draft as they aren't in the virtual quantity + moves that have not been created yet
            # Fix: the original condition was ``move.state not in ('draft')``;
            # ('draft') is just the string 'draft', so this performed a
            # *substring* test instead of a membership test and only worked
            # by accident. Use a direct comparison.
            if move.state != 'draft':
                #if move is already confirmed, assigned or done, the virtual stock is already taking this into account so it shouldn't be deducted
                procurement_qty -= move.product_qty
        qty += procurement_qty
    return qty
def _check_product_uom(self, cr, uid, ids, context=None):
    '''
    Check if the UoM has the same category as the product standard UoM

    :return: False as soon as one orderpoint's UoM category differs from its
        product's reference UoM category, True otherwise
    '''
    context = context or {}
    for orderpoint in self.browse(cr, uid, ids, context=context):
        product_category = orderpoint.product_id.uom_id.category_id.id
        if product_category != orderpoint.product_uom.category_id.id:
            return False
    return True
def action_view_proc_to_process(self, cr, uid, ids, context=None):
act_obj = self.pool.get('ir.actions.act_window')
mod_obj = self.pool.get('ir.model.data')
proc_ids = self.pool.get('procurement.order').search(cr, uid, [('orderpoint_id', 'in', ids), ('state', 'not in', ('done', 'cancel'))], context=context)
result = mod_obj.get_object_reference(cr, uid, 'procurement', 'do_view_procurements')
if not result:
return False
result = act_obj.read(cr, uid, [result[1]], context=context)[0]
result['domain'] = "[('id', 'in', [" + ','.join(map(str, proc_ids)) + "])]"
return result
_columns = {
'name': fields.char('Name', required=True, copy=False),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."),
'logic': fields.selection([('max', 'Order to Max'), ('price', 'Best price (not yet active!)')], 'Reordering Mode', required=True),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type', '=', 'product')]),
'product_uom': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Product Unit of Measure', readonly=True, required=True),
'product_min_qty': fields.float('Minimum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity specified for this field, Odoo generates "\
"a procurement to bring the forecasted quantity to the Max Quantity."),
'product_max_qty': fields.float('Maximum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity, Odoo generates "\
"a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."),
'qty_multiple': fields.float('Qty Multiple', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used. "),
'procurement_ids': fields.one2many('procurement.order', 'orderpoint_id', 'Created Procurements'),
'group_id': fields.many2one('procurement.group', 'Procurement Group', help="Moves created through this orderpoint will be put in this procurement group. If none is given, the moves generated by procurement rules will be grouped into one big picking.", copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': lambda *a: 1,
'logic': lambda *a: 'max',
'qty_multiple': lambda *a: 1,
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.orderpoint') or '',
'product_uom': lambda self, cr, uid, context: context.get('product_uom', False),
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=context)
}
_sql_constraints = [
('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'),
]
_constraints = [
(_check_product_uom, 'You have to select a product unit of measure in the same category than the default unit of measure of the product', ['product_id', 'product_uom']),
]
def default_get(self, cr, uid, fields, context=None):
warehouse_obj = self.pool.get('stock.warehouse')
res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context)
# default 'warehouse_id' and 'location_id'
if 'warehouse_id' not in res:
warehouse_ids = res.get('company_id') and warehouse_obj.search(cr, uid, [('company_id', '=', res['company_id'])], limit=1, context=context) or []
res['warehouse_id'] = warehouse_ids and warehouse_ids[0] or False
if 'location_id' not in res:
res['location_id'] = res.get('warehouse_id') and warehouse_obj.browse(cr, uid, res['warehouse_id'], context).lot_stock_id.id or False
return res
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
""" Finds location id for changed warehouse.
@param warehouse_id: Changed id of warehouse.
@return: Dictionary of values.
"""
if warehouse_id:
w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
v = {'location_id': w.lot_stock_id.id}
return {'value': v}
return {}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Finds UoM for changed product.
@param product_id: Changed id of product.
@return: Dictionary of values.
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]}
v = {'product_uom': prod.uom_id.id}
return {'value': v, 'domain': d}
return {'domain': {'product_uom': []}}
class stock_picking_type(osv.osv):
    _name = "stock.picking.type"
    _description = "The picking type determines the picking view"
    _order = 'sequence'

    def open_barcode_interface(self, cr, uid, ids, context=None):
        """Open the barcode-scanning client action for the first picking
        type in ``ids``."""
        # BUGFIX: the conditional expression previously bound to the whole
        # concatenation ("prefix + x if cond else '0'"), so an empty ids
        # produced the bare URL "0". Parenthesize so only the id part is
        # conditional.
        final_url = "/barcode/web/#action=stock.ui&picking_type_id=" + (str(ids[0]) if ids else '0')
        return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'}

    def _get_tristate_values(self, cr, uid, ids, field_name, arg, context=None):
        """Build, per picking type, a JSON list describing its last 10 done
        pickings for the kanban sparkline: value -1 = late, 0 = done but a
        backorder exists, 1 = on time."""
        picking_obj = self.pool.get('stock.picking')
        res = {}
        for picking_type_id in ids:
            #get last 10 pickings of this type
            picking_ids = picking_obj.search(cr, uid, [('picking_type_id', '=', picking_type_id), ('state', '=', 'done')], order='date_done desc', limit=10, context=context)
            tristates = []
            # BUGFIX (3x below): '+' binds tighter than 'or', so the original
            # "picking.name or '' + ': ' + _('Late')" evaluated to just
            # picking.name whenever the picking had a name, silently dropping
            # the status suffix. The fallback is now parenthesized.
            for picking in picking_obj.browse(cr, uid, picking_ids, context=context):
                if picking.date_done > picking.date:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Late'), 'value': -1})
                elif picking.backorder_id:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Backorder exists'), 'value': 0})
                else:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('OK'), 'value': 1})
            res[picking_type_id] = json.dumps(tristates)
        return res

    def _get_picking_count(self, cr, uid, ids, field_names, arg, context=None):
        """Compute the kanban counters (draft/waiting/ready/late/backorders)
        and the derived late/backorder percentage rates per picking type."""
        obj = self.pool.get('stock.picking')
        domains = {
            'count_picking_draft': [('state', '=', 'draft')],
            'count_picking_waiting': [('state', '=', 'confirmed')],
            'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))],
            'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
            'count_picking_late': [('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
            'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))],
        }
        result = {}
        for field in domains:
            data = obj.read_group(cr, uid, domains[field] +
                                  [('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', ids)],
                                  ['picking_type_id'], ['picking_type_id'], context=context)
            count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data))
            for tid in ids:
                result.setdefault(tid, {})[field] = count.get(tid, 0)
        for tid in ids:
            if result[tid]['count_picking']:
                # Integer percentages (the fields are declared as integers).
                result[tid]['rate_picking_late'] = result[tid]['count_picking_late'] * 100 / result[tid]['count_picking']
                result[tid]['rate_picking_backorders'] = result[tid]['count_picking_backorders'] * 100 / result[tid]['count_picking']
            else:
                result[tid]['rate_picking_late'] = 0
                result[tid]['rate_picking_backorders'] = 0
        return result

    def onchange_picking_code(self, cr, uid, ids, picking_code=False):
        """Default the source/destination locations from the operation type
        (incoming: supplier -> stock; outgoing: stock -> customer)."""
        if not picking_code:
            return False
        obj_data = self.pool.get('ir.model.data')
        stock_loc = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_stock')
        result = {
            'default_location_src_id': stock_loc,
            'default_location_dest_id': stock_loc,
        }
        if picking_code == 'incoming':
            result['default_location_src_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_suppliers')
        elif picking_code == 'outgoing':
            result['default_location_dest_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_customers')
        return {'value': result}

    def _get_name(self, cr, uid, ids, field_names, arg, context=None):
        return dict(self.name_get(cr, uid, ids, context=context))

    def name_get(self, cr, uid, ids, context=None):
        """Overides orm name_get method to display 'Warehouse_name: PickingType_name' """
        if context is None:
            context = {}
        if not isinstance(ids, list):
            ids = [ids]
        res = []
        if not ids:
            return res
        for record in self.browse(cr, uid, ids, context=context):
            name = record.name
            if record.warehouse_id:
                name = record.warehouse_id.name + ': ' + name
            if context.get('special_shortened_wh_name'):
                if record.warehouse_id:
                    name = record.warehouse_id.name
                else:
                    name = _('Customer') + ' (' + record.name + ')'
            res.append((record.id, name))
        return res

    def _default_warehouse(self, cr, uid, context=None):
        """Default to the first warehouse of the current user's company."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
        return res and res[0] or False

    _columns = {
        'name': fields.char('Picking Type Name', translate=True, required=True),
        'complete_name': fields.function(_get_name, type='char', string='Name'),
        'color': fields.integer('Color'),
        'sequence': fields.integer('Sequence', help="Used to order the 'All Operations' kanban view"),
        'sequence_id': fields.many2one('ir.sequence', 'Reference Sequence', required=True),
        'default_location_src_id': fields.many2one('stock.location', 'Default Source Location'),
        'default_location_dest_id': fields.many2one('stock.location', 'Default Destination Location'),
        'code': fields.selection([('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True),
        'return_picking_type_id': fields.many2one('stock.picking.type', 'Picking Type for Returns'),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', ondelete='cascade'),
        'active': fields.boolean('Active'),
        # Statistics for the kanban view
        'last_done_picking': fields.function(_get_tristate_values,
                                             type='char',
                                             string='Last 10 Done Pickings'),
        'count_picking_draft': fields.function(_get_picking_count,
                                               type='integer', multi='_get_picking_count'),
        'count_picking_ready': fields.function(_get_picking_count,
                                               type='integer', multi='_get_picking_count'),
        'count_picking': fields.function(_get_picking_count,
                                         type='integer', multi='_get_picking_count'),
        'count_picking_waiting': fields.function(_get_picking_count,
                                                 type='integer', multi='_get_picking_count'),
        'count_picking_late': fields.function(_get_picking_count,
                                              type='integer', multi='_get_picking_count'),
        'count_picking_backorders': fields.function(_get_picking_count,
                                                    type='integer', multi='_get_picking_count'),
        'rate_picking_late': fields.function(_get_picking_count,
                                             type='integer', multi='_get_picking_count'),
        'rate_picking_backorders': fields.function(_get_picking_count,
                                                   type='integer', multi='_get_picking_count'),
    }
    _defaults = {
        'warehouse_id': _default_warehouse,
        'active': True,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rhertzog/django | refs/heads/master | tests/get_object_or_404/tests.py | 44 | from __future__ import unicode_literals
from django.http import Http404
from django.shortcuts import get_list_or_404, get_object_or_404
from django.test import TestCase
from .models import Article, Author
class GetObjectOr404Tests(TestCase):
    """Exercise ``get_object_or_404`` / ``get_list_or_404`` against Models,
    Managers (default, related and custom) and QuerySets.

    The assertions are order-dependent: they build up database state
    (authors, then an article) as the test proceeds.
    """

    def test_get_object_or_404(self):
        """Happy paths and Http404 paths for both shortcut functions."""
        a1 = Author.objects.create(name="Brave Sir Robin")
        a2 = Author.objects.create(name="Patsy")
        # No Articles yet, so we should get a Http404 error.
        with self.assertRaises(Http404):
            get_object_or_404(Article, title="Foo")
        article = Article.objects.create(title="Run away!")
        article.authors.set([a1, a2])
        # get_object_or_404 can be passed a Model to query.
        self.assertEqual(
            get_object_or_404(Article, title__contains="Run"),
            article
        )
        # We can also use the Article manager through an Author object.
        self.assertEqual(
            get_object_or_404(a1.article_set, title__contains="Run"),
            article
        )
        # No articles containing "Camelot". This should raise a Http404 error.
        with self.assertRaises(Http404):
            get_object_or_404(a1.article_set, title__contains="Camelot")
        # Custom managers can be used too.
        self.assertEqual(
            get_object_or_404(Article.by_a_sir, title="Run away!"),
            article
        )
        # QuerySets can be used too.
        self.assertEqual(
            get_object_or_404(Article.objects.all(), title__contains="Run"),
            article
        )
        # Just as when using a get() lookup, you will get an error if more than
        # one object is returned.
        with self.assertRaises(Author.MultipleObjectsReturned):
            get_object_or_404(Author.objects.all())
        # Using an empty QuerySet raises a Http404 error.
        with self.assertRaises(Http404):
            get_object_or_404(Article.objects.none(), title__contains="Run")
        # get_list_or_404 can be used to get lists of objects
        self.assertEqual(
            get_list_or_404(a1.article_set, title__icontains="Run"),
            [article]
        )
        # Http404 is returned if the list is empty.
        with self.assertRaises(Http404):
            get_list_or_404(a1.article_set, title__icontains="Shrubbery")
        # Custom managers can be used too.
        self.assertEqual(
            get_list_or_404(Article.by_a_sir, title__icontains="Run"),
            [article]
        )
        # QuerySets can be used too.
        self.assertEqual(
            get_list_or_404(Article.objects.all(), title__icontains="Run"),
            [article]
        )

    def test_bad_class(self):
        """Passing something that is not a Model/Manager/QuerySet raises a
        ValueError naming the offending type."""
        # Given an argument klass that is not a Model, Manager, or Queryset
        # raises a helpful ValueError message
        msg = "First argument to get_object_or_404() must be a Model, Manager, or QuerySet, not 'str'."
        with self.assertRaisesMessage(ValueError, msg):
            get_object_or_404(str("Article"), title__icontains="Run")

        class CustomClass(object):
            pass
        msg = "First argument to get_object_or_404() must be a Model, Manager, or QuerySet, not 'CustomClass'."
        with self.assertRaisesMessage(ValueError, msg):
            get_object_or_404(CustomClass, title__icontains="Run")
        # Works for lists too
        msg = "First argument to get_list_or_404() must be a Model, Manager, or QuerySet, not 'list'."
        with self.assertRaisesMessage(ValueError, msg):
            get_list_or_404([Article], title__icontains="Run")
|
dkubiak789/OpenUpgrade | refs/heads/8.0 | addons/mail/wizard/__init__.py | 438 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import invite
import mail_compose_message
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
CharlesMcKinnis/ecommStackStatus | refs/heads/master | stack-recon/mysql/connector/dbapi.py | 35 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module implements some constructors and singletons as required by the
DB API v2.0 (PEP-249).
"""
# Python Db API v2
apilevel = '2.0'
threadsafety = 1
paramstyle = 'pyformat'
import time
import datetime
from . import constants
class _DBAPITypeObject(object):
def __init__(self, *values):
self.values = values
def __eq__(self, other):
if other in self.values:
return True
else:
return False
def __ne__(self, other):
if other in self.values:
return False
else:
return True
# PEP 249 constructor aliases.
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime


def DateFromTicks(ticks):
    """Return a Date for the given POSIX timestamp, in local time."""
    year, month, day = time.localtime(ticks)[:3]
    return Date(year, month, day)


def TimeFromTicks(ticks):
    """Return a Time for the given POSIX timestamp, in local time."""
    hour, minute, second = time.localtime(ticks)[3:6]
    return Time(hour, minute, second)


def TimestampFromTicks(ticks):
    """Return a Timestamp for the given POSIX timestamp, in local time."""
    parts = time.localtime(ticks)
    return Timestamp(parts.tm_year, parts.tm_mon, parts.tm_mday,
                     parts.tm_hour, parts.tm_min, parts.tm_sec)
# PEP 249 binary constructor.
Binary = bytes

# PEP 249 singleton type objects; each matches the set of MySQL field-type
# codes reported by the constants module. ROWID matches nothing because
# MySQL has no ROWID field type.
STRING = _DBAPITypeObject(*constants.FieldType.get_string_types())
BINARY = _DBAPITypeObject(*constants.FieldType.get_binary_types())
NUMBER = _DBAPITypeObject(*constants.FieldType.get_number_types())
DATETIME = _DBAPITypeObject(*constants.FieldType.get_timestamp_types())
ROWID = _DBAPITypeObject()
|
proxysh/Safejumper-for-Mac | refs/heads/master | buildmac/Resources/env/lib/python2.7/site-packages/zope/interface/ro.py | 33 | ##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Compute a resolution order for an object and its bases
"""
__docformat__ = 'restructuredtext'
def _mergeOrderings(orderings):
"""Merge multiple orderings so that within-ordering order is preserved
Orderings are constrained in such a way that if an object appears
in two or more orderings, then the suffix that begins with the
object must be in both orderings.
For example:
>>> _mergeOrderings([
... ['x', 'y', 'z'],
... ['q', 'z'],
... [1, 3, 5],
... ['z']
... ])
['x', 'y', 'q', 1, 3, 5, 'z']
"""
seen = {}
result = []
for ordering in reversed(orderings):
for o in reversed(ordering):
if o not in seen:
seen[o] = 1
result.insert(0, o)
return result
def _flatten(ob):
result = [ob]
i = 0
for ob in iter(result):
i += 1
# The recursive calls can be avoided by inserting the base classes
# into the dynamically growing list directly after the currently
# considered object; the iterator makes sure this will keep working
# in the future, since it cannot rely on the length of the list
# by definition.
result[i:i] = ob.__bases__
return result
def ro(object):
    """Compute a "resolution order" for an object

    The order is the object followed by its bases in pre-order, with
    repeated occurrences collapsed so only the last one survives
    (see _flatten and _mergeOrderings above).

    NOTE(review): the parameter shadows the ``object`` builtin; kept
    unchanged for backward compatibility with keyword callers.
    """
    return _mergeOrderings([_flatten(object)])
|
simone-campagna/invoice | refs/heads/master | invoice/database/upgrade/upgrader_v2_1_x__v2_2_0.py | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
__all__ = [
'Upgrader_v2_1_x__v2_2_0',
]
import collections
from ..db_types import Bool, Str, StrTuple
from ..db_table import DbTable
from .upgrader import MajorMinorUpgrader
from ...version import Version
class Upgrader_v2_1_x__v2_2_0(MajorMinorUpgrader):
    """Upgrade the 'configuration' table from schema v2.1.x to v2.2.0.

    v2.2.0 adds the boolean 'show_scan_report' column (set to False when
    upgrading); the downgrade path drops that column again.
    """
    VERSION_FROM_MAJOR_MINOR = Version(2, 1, None)
    VERSION_TO_MAJOR_MINOR = Version(2, 2, 0)

    # Row layout of the v2.1.x configuration table.
    Configuration_v2_1_x = collections.namedtuple(
        'Configuration',
        ('warning_mode', 'error_mode',
         'partial_update', 'remove_orphaned',
         'header', 'total',
         'stats_group', 'list_field_names'))
    CONFIGURATION_TABLE_v2_1_x = DbTable(
        fields=(
            ('warning_mode', Str()),
            ('error_mode', Str()),
            ('remove_orphaned', Bool()),
            ('partial_update', Bool()),
            ('header', Bool()),
            ('total', Bool()),
            ('stats_group', Str()),
            ('list_field_names', StrTuple()),
        ),
        dict_type=Configuration_v2_1_x,
    )

    # Row layout of the v2.2.0 configuration table (adds show_scan_report).
    Configuration_v2_2_0 = collections.namedtuple(
        'Configuration',
        ('warning_mode', 'error_mode',
         'partial_update', 'remove_orphaned',
         'header', 'total',
         'stats_group', 'list_field_names',
         'show_scan_report'))
    CONFIGURATION_TABLE_v2_2_0 = DbTable(
        fields=(
            ('warning_mode', Str()),
            ('error_mode', Str()),
            ('remove_orphaned', Bool()),
            ('partial_update', Bool()),
            ('header', Bool()),
            ('total', Bool()),
            ('stats_group', Str()),
            ('list_field_names', StrTuple()),
            ('show_scan_report', Bool()),
        ),
        # BUGFIX: this previously pointed at Configuration_v2_1_x, whose
        # 8 fields do not match the 9 columns declared above.
        dict_type=Configuration_v2_2_0,
    )

    def impl_downgrade(self, db, version_from, version_to, connection=None):
        """Recreate the v2.1.x table, copying every row minus the trailing
        show_scan_report column."""
        with db.connect(connection) as connection:
            cursor = connection.cursor()
            sql = """SELECT * FROM configuration;"""
            v_list = list(db.execute(cursor, sql))
            db.drop('configuration', connection=connection)
            db.create_table('configuration', self.CONFIGURATION_TABLE_v2_1_x.fields, connection=connection)
            field_names = self.Configuration_v2_1_x._fields
            sql = """INSERT INTO configuration ({field_names}) VALUES ({placeholders});""".format(
                field_names=', '.join(field_names),
                placeholders=', '.join('?' for field in field_names),
            )
            for v in v_list:
                # Drop the last column (show_scan_report) from each row.
                db.execute(cursor, sql, v[:-1])

    def impl_upgrade(self, db, version_from, version_to, connection=None):
        """Recreate the table with the v2.2.0 layout, keeping only the last
        configuration row and defaulting show_scan_report to False."""
        with db.connect(connection) as connection:
            cursor = connection.cursor()
            sql = """SELECT * FROM configuration;"""
            values = list(db.execute(cursor, sql))[-1]
            db.drop('configuration', connection=connection)
            db.create_table('configuration', self.CONFIGURATION_TABLE_v2_2_0.fields, connection=connection)
            values += (False, )
            field_names = self.Configuration_v2_2_0._fields
            sql = """INSERT INTO configuration ({field_names}) VALUES ({placeholders});""".format(
                field_names=', '.join(field_names),
                placeholders=', '.join('?' for field in field_names),
            )
            db.execute(cursor, sql, values)
|
tomasreimers/tensorflow-emscripten | refs/heads/master | tensorflow/g3doc/how_tos/adding_an_op/cuda_op_test.py | 56 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for version 1 of the zero_out op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.g3doc.how_tos.adding_an_op import cuda_op
class AddOneTest(tf.test.TestCase):
    """Exercise the custom CUDA ``add_one`` op."""

    def test(self):
        # Guard clause: skip silently on CPU-only builds, exactly like the
        # original's enclosing ``if``.
        if not tf.test.is_built_with_cuda():
            return
        with self.test_session():
            result = cuda_op.add_one([5, 4, 3, 2, 1])
            self.assertAllEqual(result.eval(), [6, 5, 4, 3, 2])
if __name__ == '__main__':
tf.test.main()
|
reinout/django | refs/heads/master | tests/utils_tests/test_module/bad_module.py | 581 | import a_package_name_that_does_not_exist # NOQA
content = 'Bad Module'
|
mosaic-cloud/mosaic-distribution-dependencies | refs/heads/development | dependencies/nodejs/0.8.22/deps/npm/node_modules/node-gyp/gyp/test/additional-targets/src/dir1/emit.py | 337 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Test helper: write a fixed greeting line to the file named by argv[1]."""

import sys

# Context manager guarantees the file is closed, matching the explicit
# open/write/close sequence of the original.
with open(sys.argv[1], 'wb') as out:
    out.write('Hello from emit.py\n')
|
stuart-knock/bokeh | refs/heads/master | examples/charts/file/blaze_input.py | 37 | from os.path import dirname, join
from blaze import Data
from bokeh.sampledata import iris
from bokeh.charts import Line, show, output_file
# Load the bundled iris CSV through blaze and keep the four numeric columns.
bbvalues = Data(join(dirname(iris.__file__), 'iris.csv'))
columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
result = bbvalues[columns]

# Render the selected columns as a Bokeh line chart written to an HTML file.
output_file("blaze_input.html")
line = Line(
    result, title="Line Chart",
    ylabel='Petals', notebook=True, legend="top_left"
)
show(line) |
ladybug-analysis-tools/butterfly | refs/heads/master | butterfly/p.py | 2 | # coding=utf-8
"""p class."""
from foamfile import FoamFileZeroFolder, foam_file_from_file
from collections import OrderedDict
class P(FoamFileZeroFolder):
    """P (pressure) class."""
    # Default OpenFOAM entries for the 0/p field file.
    # Kinematic pressure dimensions: m^2/s^2 ([0 2 -2 0 0 0 0]).
    __default_values = OrderedDict()
    __default_values['dimensions'] = '[0 2 -2 0 0 0 0]'
    __default_values['#include'] = None
    __default_values['internalField'] = 'uniform 0'
    __default_values['boundaryField'] = {}
    def __init__(self, values=None):
        """Init class."""
        # Explicit base-class call (not super) keeps Python 2 old/new-style
        # class behavior identical to the rest of the package.
        FoamFileZeroFolder.__init__(self, name='p', cls='volScalarField',
                                    location='0',
                                    default_values=self.__default_values,
                                    values=values)
    @classmethod
    def from_file(cls, filepath):
        """Create a FoamFile from a file.
        Args:
            filepath: Full file path to dictionary.
        """
        return cls(values=foam_file_from_file(filepath, cls.__name__))
|
openstates/openstates | refs/heads/master | openstates/pr/bills.py | 1 | # -*- coding: utf-8 -*-
import re
import lxml.html
import datetime
import math
import requests
import pytz
from pupa.scrape import Scraper, Bill, VoteEvent as Vote
class NoSuchBill(Exception):
    """Raised when a requested bill cannot be found in SUTRA."""
# (regex pattern, actor, openstates classification) triples, matched in
# order against action text by PRBillScraper.classify_action().
_classifiers = (
    ("Radicado", "", "introduction"),
    (u"Aprobado por Cámara en Votación Final", "lower", "passage"),
    (u"Aprobado por el Senado en Votación", "upper", "passage"),
    ("Aparece en Primera Lectura del", "upper", "reading-1"),
    ("Aparece en Primera Lectura de la", "lower", "reading-1"),
    ("Enviado al Gobernador", "executive", "executive-receipt"),
    ("Veto", "executive", "executive-veto"),
    ("Veto de Bolsillo", "executive", "executive-veto"),
    # Committees issue a report but sometimes make no amendments and leave
    # the bill as-is. We do not distinguish that case here; when there are
    # no amendments the report text ends with 'sin enmiendas', which could
    # be checked if it ever matters.
    ("1er Informe", "", "amendment-amendment"),
    ("2do Informe", "", "amendment-amendment"),
    ("Aprobado con enmiendas", "", "amendment-passage"),
    (u"Remitido a Comisión", "", "referral-committee"),
    (u"Referido a Comisión", "", "referral-committee"),
    ("Retirada por su Autor", "", "withdrawal"),
    (
        "Comisión : * no recomienda aprobación de la medida",
        "",
        "committee-passage-unfavorable",
    ),
    ("Ley N", "executive", "executive-signature"),
)
# Reports we're not currently using that might come in handy:
# all bill ranges https://sutra.oslpr.org/osl/esutra/VerSQLReportingPRM.aspx?rpt=SUTRA-015
# updated since https://sutra.oslpr.org/osl/esutra/VerSQLReportingPRM.aspx?rpt=SUTRA-016
class PRBillScraper(Scraper):
_TZ = pytz.timezone("America/Puerto_Rico")
s = requests.Session()
# keep a reference to the last search results page
# so we can scrape the right event validation code
# for paginating
last_page = None
bill_types = {
"P": "bill",
"R": "resolution",
"RK": "concurrent resolution",
"RC": "joint resolution",
"NM": "appointment",
# 'PR': 'plan de reorganizacion',
}
    def asp_post(self, url, params, page=None):
        """POST an ASP.NET WebForms form on SUTRA and return the body text.

        Fetches ``url`` (or reuses the already-parsed ``page``), harvests the
        hidden state fields (__VIEWSTATE, __VIEWSTATEGENERATOR,
        __EVENTVALIDATION and the ToolkitScriptManager hidden-field token),
        merges them with ``params`` and posts the combined form on the shared
        session so pagination state carries over between calls.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/79.0.3945.117 Safari/537.36",
            "referer": url,
            "origin": "https://sutra.oslpr.org",
            "authority": "sutra.oslpr.org",
        }
        if page is None:
            page = self.s.get(url, headers=headers)
            page = lxml.html.fromstring(page.content)
        (viewstate,) = page.xpath('//input[@id="__VIEWSTATE"]/@value')
        (viewstategenerator,) = page.xpath('//input[@id="__VIEWSTATEGENERATOR"]/@value')
        (eventvalidation,) = page.xpath('//input[@id="__EVENTVALIDATION"]/@value')
        # The script-manager token lives in a separate JS file; extract the
        # value appended to the ctl00_tsm_HiddenField input.
        hiddenfield_js_url = page.xpath(
            '//script[contains(@src,"?_TSM_HiddenField")]/@src'
        )[0]
        hiddenfield_js_url = "{}{}".format(
            "https://sutra.oslpr.org/", hiddenfield_js_url
        )
        hiddenfield_js = self.s.get(hiddenfield_js_url).text
        before = re.escape('get("ctl00_tsm_HiddenField").value += \'')
        after = re.escape("';Sys.Application.remove_load(fn);")
        token_re = "{}(.*){}".format(before, after)
        result = re.search(token_re, hiddenfield_js)
        hiddenfield = result.group(1)
        form = {
            "__VIEWSTATE": viewstate,
            "__VIEWSTATEGENERATOR": viewstategenerator,
            "__EVENTVALIDATION": eventvalidation,
            "__LASTFOCUS": "",
            "ctl00_tsm_HiddenField": hiddenfield,
            "__SCROLLPOSITIONX": "0",
            "__SCROLLPOSITIONY": "453",
        }
        # Caller-supplied params override the harvested defaults.
        form = {**form, **params}
        # The splash-screen cookie must be present or the site redirects.
        cookie_obj = requests.cookies.create_cookie(
            domain="sutra.oslpr.org", name="SUTRASplash", value="NoSplash"
        )
        self.s.cookies.set_cookie(cookie_obj)
        xml = self.s.post(url, data=form, headers=headers).text
        return xml
def clean_name(self, name):
for ch in ["Sr,", "Sr.", "Sra.", "Rep.", "Sen."]:
if ch in name:
name = name.replace(ch, "")
return name
# Additional options:
# window_start / window_end - Show bills updated between start and end. Format Y-m-d
# window_end is optional, defaults to today if window_start is set
def scrape(self, session=None, chamber=None, window_start=None, window_end=None):
self.seen_votes = set()
self.seen_bills = set()
if not session:
session = self.latest_session()
self.info("no session specified using %s", session)
chambers = [chamber] if chamber is not None else ["upper", "lower"]
for chamber in chambers:
yield from self.scrape_chamber(chamber, session, window_start, window_end)
def scrape_chamber(self, chamber, session, window_start=None, window_end=None):
page_number = 1
start_year = session[0:4]
chamber_letter = {"lower": "C", "upper": "S"}[chamber]
# If a window_start is provided, parse it
# If a window_end is provided, parse it, if not default to today
if window_start is None:
start = ""
end = ""
else:
window_start = datetime.datetime.strptime(window_start, "%Y-%m-%d")
start = window_start.strftime("%m/%d/%Y")
if window_end is None:
end = datetime.datetime.now().strftime("%m/%d/%Y")
else:
window_end = datetime.datetime.strptime(window_end, "%Y-%m-%d")
end = window_start.strftime("%m/%d/%Y")
params = {
"ctl00$CPHBody$lovCuatrienio": start_year,
"ctl00$CPHBody$lovTipoMedida": "-1",
"ctl00$CPHBody$lovCuerpoId": chamber_letter,
"ctl00$CPHBody$txt_Medida": "",
"ctl00$CPHBody$txt_FechaDesde": start,
"ctl00$CPHBody$ME_txt_FechaDesde_ClientState": "",
"ctl00$CPHBody$txt_FechaHasta": end,
"ctl00$CPHBody$ME_txt_FechaHasta_ClientState": "",
"ctl00$CPHBody$txt_Titulo": "",
"ctl00$CPHBody$lovLegisladorId": "-1",
"ctl00$CPHBody$lovEvento": "-1",
"ctl00$CPHBody$lovComision": "-1",
"__EVENTTARGET": "",
"__EVENTARGUMENT": "",
}
# required for page 1, we need a copy of the dict to set Buscar for just this page
first_scrape_params = params.copy()
first_scrape_params["ctl00$CPHBody$btnFilter"] = "Buscar"
yield from self.scrape_search_results(chamber, session, first_scrape_params)
page = self.last_page
result_count = int(page.xpath('//span[@id="ctl00_CPHBody_lblCount"]/text()')[0])
max_page = math.ceil(result_count / 50)
for page_number in range(2, max_page):
page_str = str(page_number - 1).rjust(2, "0")
page_field = "ctl00$CPHBody$dgResults$ctl54$ctl{}".format(page_str)
params["__EVENTTARGET"] = page_field
params["ctl00$CPHBody$ddlPageSize"] = "50"
self.info(
"Chamber: {}, scraping page {} of {}".format(
chamber, page_number, max_page
)
)
yield from self.scrape_search_results(
chamber, session, params, self.last_page
)
def scrape_search_results(self, chamber, session, params, page=None):
resp = self.asp_post(
"https://sutra.oslpr.org/osl/esutra/MedidaBus.aspx", params, page
)
page = lxml.html.fromstring(resp)
self.last_page = page
# note there's a typo in a css class, one set is DataGridItemSyle (syle)
# and the other is DataGridAltItemStyle (style)
# if we're ever suddenly missing half the bills, check this
for row in page.xpath(
'//tr[contains(@class,"DataGridItemSyle") or contains(@class,"DataGridAltItemStyle")]/@onclick'
):
bill_rid = self.extract_bill_rid(row)
# Good test bills: 127866 132106 122472
# bill_rid = '122472'
bill_url = "https://sutra.oslpr.org/osl/esutra/MedidaReg.aspx?rid={}".format(
bill_rid
)
if bill_url not in self.seen_bills:
yield from self.scrape_bill(chamber, session, bill_url)
self.seen_bills.add(bill_url)
def extract_bill_rid(self, onclick):
# bill links look like onclick="javascript:location.replace('MedidaReg.aspx?rid=125217');"
before = re.escape("javascript:location.replace('MedidaReg.aspx?rid=")
after = re.escape("');")
token_re = "{}(.*){}".format(before, after)
result = re.search(token_re, onclick)
return result.group(1)
def extract_version_url(self, onclick):
before = re.escape("javascript:OpenDoc('")
after = re.escape("');")
token_re = "{}(.*){}".format(before, after)
result = re.search(token_re, onclick)
return result.group(1)
def classify_action(self, action_text):
for pattern, action_actor, atype in _classifiers:
if re.match(pattern, action_text):
return [action_actor, atype]
return ["", None]
def classify_bill_type(self, bill_id):
for abbr, value in self.bill_types.items():
if bill_id.startswith(abbr):
return value
return None
def classify_media_type(self, url):
url = url.lower()
if url.endswith((".doc", "dot")):
media_type = "application/msword"
elif url.endswith(".rtf"):
media_type = "application/rtf"
elif url.endswith(".pdf"):
media_type = "application/pdf"
elif url.endswith(("docx", "dotx")):
media_type = (
"application/vnd.openxmlformats-officedocument"
+ ".wordprocessingml.document"
)
elif url.endswith("docm"):
self.warning("Erroneous filename found: {}".format(url))
return None
else:
raise Exception("unknown version type: %s" % url)
return media_type
def clean_broken_html(self, html):
return html.strip().replace(" ", "")
def parse_vote_chamber(self, bill_chamber, vote_name):
if u"Confirmado por Senado" in vote_name:
vote_chamber = "upper"
elif u"Votación Final" in vote_name:
(vote_chamber, vote_name) = re.search(
r"(?u)^\w+ por (.*?) en (.*)$", vote_name
).groups()
if "Senado" in vote_chamber:
vote_chamber = "upper"
else:
vote_chamber = "lower"
elif "Cuerpo de Origen" in vote_name:
vote_name = re.search(r"(?u)^Cuerpo de Origen (.*)$", vote_name).group(1)
vote_chamber = bill_chamber
elif u"informe de Comisión de Conferencia" in vote_name:
# (vote_chamber, vote_name) = re.search(
# r"(?u)^(\w+) (\w+ informe de Comisi\wn de Conferencia)$",
# vote_name,
# ).groups()
if "Senado" in vote_name:
vote_chamber = "upper"
elif u"Cámara" in vote_name:
vote_chamber = "lower"
else:
raise AssertionError(
u"Unable to identify vote chamber: {}".format(vote_name)
)
# TODO replace bill['votes']
elif u"Se reconsideró" in vote_name:
vote_chamber = bill_chamber
elif "por Senado" in vote_name:
vote_chamber = "upper"
elif "Cámara aprueba" in vote_name:
vote_chamber = "lower"
elif u"Senado aprueba" in vote_name:
vote_chamber = "upper"
else:
raise AssertionError(u"Unknown vote text found: {}".format(vote_name))
return vote_chamber
def parse_vote(self, chamber, bill, row, action_text, action_date, url):
yes = int(
row.xpath(
'.//div[label[contains(text(), "A Favor")]]/span[contains(@class,"smalltxt")]/text()'
)[0]
)
no = int(
row.xpath(
'.//div[label[contains(text(), "En Contra")]]/span[contains(@class,"smalltxt")]/text()'
)[0]
)
abstain = int(
row.xpath(
'.//div[label[contains(text(), "Abstenido")]]/span[contains(@class,"smalltxt")]/text()'
)[0]
)
absent = int(
row.xpath(
'.//div[label[contains(text(), "Ausente")]]/span[contains(@class,"smalltxt")]/text()'
)[0]
)
vote_chamber = self.parse_vote_chamber(chamber, action_text)
classification = "passage" if u"Votación Final" in action_text else "other"
vote = Vote(
chamber=vote_chamber,
start_date=action_date,
motion_text=action_text,
result="pass" if (yes > no) else "fail",
bill=bill,
classification=classification,
)
vote.add_source(url)
vote.set_count("yes", yes)
vote.set_count("no", no)
vote.set_count("absent", absent)
vote.set_count("abstain", abstain)
# we don't want to add the attached vote PDF as a version,
# so add it as a document
# TODO: maybe this should be set as the source?
self.parse_version(bill, row, is_document=True)
yield vote
def parse_version(self, bill, row, is_document=False):
# they have empty links in every action, and icon links preceeding the actual link
# so only select links with an href set, and skip the icon links
for version_row in row.xpath(
'.//a[contains(@class,"gridlinktxt") and contains(@id, "FileLink") and boolean(@href)]'
):
version_url = version_row.xpath("@href")[0]
# version url is in an onclick handler built into the href
version_url = self.extract_version_url(version_url)
if version_url.startswith("../SUTRA"):
version_url = version_url.replace("../SUTRA/", "")
version_url = "https://sutra.oslpr.org/osl/SUTRA/{}".format(version_url)
elif not version_url.lower().startwith("http"):
self.error("Unknown version url in onclick: {}".format(version_url))
version_title = self.clean_broken_html(version_row.xpath("text()")[0])
if is_document:
bill.add_document_link(
note=version_title,
url=version_url,
media_type=self.classify_media_type(version_url),
on_duplicate="ignore",
)
else:
bill.add_version_link(
note=version_title,
url=version_url,
media_type=self.classify_media_type(version_url),
on_duplicate="ignore",
)
def scrape_author_table(self, year, bill, bill_id):
report_url = "https://sutra.oslpr.org/osl/esutra/VerSQLReportingPRM.aspx?rpt=SUTRA-011&Q={}&Medida={}".format(
"2017", bill_id
)
html = self.get(report_url).text
page = lxml.html.fromstring(html)
for row in page.xpath('//tr[td/div/div[contains(text(),"Autor")]]')[1:]:
name = row.xpath("td[2]/div/div/text()")[0].strip()
# currently not saving sponsor party, but here's the xpath
# party = row.xpath('td[3]/div/div/text()')[0].strip()
# sometimes there's an extra dummy row beyond the first
if name == "Legislador":
continue
bill.add_sponsorship(
name, entity_type="person", classification="primary", primary=True,
)
def scrape_action_table(self, chamber, bill, page, url):
# NOTE: in theory this paginates, but it defaults to 50 actions per page
# and I couldn't find examples of bills with > 50
page.make_links_absolute("https://sutra.oslpr.org/osl/SUTRA/")
# note there's a typo in a class, one set is
# DataGridItemSyle (syle) and the other is DataGridAltItemStyle (style)
# if we're ever suddenly missing half the actions, check this
for row in page.xpath(
'//table[@id="ctl00_CPHBody_TabEventos_dgResults"]/'
'tr[contains(@class,"DataGridItemSyle") or contains(@class,"DataGridAltItemStyle")]'
):
action_text = row.xpath(
'.//label[contains(@class,"DetailFormLbl")]/text()'
)[0]
action_text = self.clean_broken_html(action_text)
# div with a label containing Fecha, following span.smalltxt
# need to be this specific because votes have the same markup
raw_date = row.xpath(
'.//div[label[contains(text(), "Fecha")]]/span[contains(@class,"smalltxt")]/text()'
)[0]
raw_date = self.clean_broken_html(raw_date)
action_date = self._TZ.localize(
datetime.datetime.strptime(raw_date, "%m/%d/%Y")
)
parsed_action = self.classify_action(action_text)
# manual fix for data error on 2017-2020 P S0623
if action_date == datetime.datetime(1826, 8, 1):
action_date = action_date.replace(year=2018)
bill.add_action(
description=action_text,
date=action_date,
chamber=parsed_action[0],
classification=parsed_action[1],
)
# if it's a vote, we don't want to add the document as a bill version
if row.xpath('.//label[contains(text(), "A Favor")]'):
if url not in self.seen_votes:
yield from self.parse_vote(
chamber, bill, row, action_text, action_date, url
)
self.seen_votes.add(url)
else:
self.parse_version(bill, row)
def scrape_bill(self, chamber, session, url):
html = self.get(url).text
page = lxml.html.fromstring(html)
# search for Titulo, accent over i messes up lxml, so use 'tulo'
title = page.xpath('//span[@id="ctl00_CPHBody_txtTitulo"]/text()')[0].strip()
bill_id = page.xpath('//span[@id="ctl00_CPHBody_txt_Medida"]/text()')[0].strip()
bill_type = self.classify_bill_type(bill_id)
bill = Bill(
bill_id,
legislative_session=session,
chamber=chamber,
title=title,
classification=bill_type,
)
start_year = session[0:4]
self.scrape_author_table(start_year, bill, bill_id)
# action table contains votes, hence the yield
yield from self.scrape_action_table(chamber, bill, page, url)
bill.add_source(url)
yield bill
|
sroze/kubernetes | refs/heads/master | cluster/saltbase/salt/_states/container_bridge.py | 32 | #!/usr/bin/env python
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import salt.exceptions
import salt.utils.ipaddr as ipaddr
def ensure(name, cidr, mtu=1460):
    '''
    Ensure that a bridge (named <name>) is configured for containers.

    Under the covers we will make sure that
    - The bridge exists
    - The MTU is set
    - The correct network is added to the bridge
    - iptables is set up for MASQUERADE for egress

    cidr:
        The cidr range in the form of 10.244.x.0/24
    mtu:
        The MTU to set on the interface

    Returns the standard salt state dict; ``result`` is True when the
    bridge is (now) correct, None in ``test=true`` mode.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    # This is a little hacky. I should probably import a real library for this
    # but this'll work for now.
    try:
        cidr_network = ipaddr.IPNetwork(cidr, strict=True)
    except Exception:
        raise salt.exceptions.SaltInvocationError(
            'Invalid CIDR \'{0}\''.format(cidr))
    # Masquerade egress for IPv4 only; no NAT rule is managed for IPv6.
    if cidr_network.version == 4:
        iptables_rule = {
            'table': 'nat',
            'chain': 'POSTROUTING',
            'rule': '-o eth0 -j MASQUERADE \! -d 10.0.0.0/8'
        }
    else:
        iptables_rule = None
    def bridge_exists(name):
        'Determine if a bridge exists already.'
        out = __salt__['cmd.run_stdout']('brctl show {0}'.format(name))
        for line in out.splitlines():
            # get rid of first line
            if line.startswith('bridge name'):
                continue
            # get rid of ^\n's
            vals = line.split()
            if not vals:
                continue
            if len(vals) > 1:
                return True
        return False
    def get_ip_addr_details(name):
        'For the given interface, get address details.'
        out = __salt__['cmd.run']('ip addr show dev {0}'.format(name))
        ret = { 'networks': [] }
        for line in out.splitlines():
            # header lines look like "2: cbr0@eth0: <BROADCAST,UP> mtu 1460 ..."
            match = re.match(
                r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>.*mtu (\d+)',
                line)
            if match:
                iface, parent, attrs, mtu = match.groups()
                if 'UP' in attrs.split(','):
                    ret['up'] = True
                else:
                    ret['up'] = False
                if parent:
                    ret['parent'] = parent
                ret['mtu'] = int(mtu)
                continue
            # address lines look like "inet 10.244.1.1/24 scope ..."
            cols = line.split()
            if len(cols) > 2 and cols[0] == 'inet':
                ret['networks'].append(cols[1])
        return ret
    def get_current_state():
        'Helper that returns a dict of current bridge state.'
        ret = {}
        ret['name'] = name
        ret['exists'] = bridge_exists(name)
        if ret['exists']:
            ret['details'] = get_ip_addr_details(name)
        else:
            ret['details'] = {}
        # This module function is strange and returns True if the rule exists.
        # If not, it returns a string with the error from the call to iptables.
        if iptables_rule:
            ret['iptables_rule_exists'] = \
                __salt__['iptables.check'](**iptables_rule) == True
        else:
            ret['iptables_rule_exists'] = True
        return ret
    # The bridge gets the first usable address of the CIDR,
    # e.g. 10.244.1.1/24 for 10.244.1.0/24.
    desired_network = '{0}/{1}'.format(
        str(ipaddr.IPAddress(cidr_network._ip + 1)),
        str(cidr_network.prefixlen))
    current_state = get_current_state()
    if (current_state['exists']
        and current_state['details']['mtu'] == mtu
        and desired_network in current_state['details']['networks']
        and current_state['details']['up']
        and current_state['iptables_rule_exists']):
        ret['result'] = True
        ret['comment'] = 'System already in the correct state'
        return ret
    # The state of the system does need to be changed. Check if we're running
    # in ``test=true`` mode.
    if __opts__['test'] == True:
        ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
        ret['changes'] = {
            'old': current_state,
            'new': 'Create and configure bridge'
        }
        # Return ``None`` when running with ``test=true``.
        ret['result'] = None
        return ret
    # Finally, make the actual change and return the result. Each step
    # re-reads the system state so later checks observe earlier fixes.
    if not current_state['exists']:
        __salt__['cmd.run']('brctl addbr {0}'.format(name))
    # BUG FIX: new_state was only assigned inside the branch above, so a
    # bridge that already existed but was misconfigured (wrong MTU,
    # missing address, down, or missing iptables rule) hit a NameError
    # at the first check below. Always take a fresh reading here.
    new_state = get_current_state()
    if new_state['details']['mtu'] != mtu:
        __salt__['cmd.run'](
            'ip link set dev {0} mtu {1}'.format(name, str(mtu)))
        new_state = get_current_state()
    if desired_network not in new_state['details']['networks']:
        __salt__['cmd.run'](
            'ip addr add {0} dev {1}'.format(desired_network, name))
        new_state = get_current_state()
    if not new_state['details']['up']:
        __salt__['cmd.run'](
            'ip link set dev {0} up'.format(name))
        new_state = get_current_state()
    if iptables_rule and not new_state['iptables_rule_exists']:
        __salt__['iptables.append'](**iptables_rule)
        new_state = get_current_state()
    ret['comment'] = 'The state of "{0}" was changed!'.format(name)
    ret['changes'] = {
        'old': current_state,
        'new': new_state,
    }
    ret['result'] = True
    return ret
|
dvliman/jaikuengine | refs/heads/master | .google_appengine/lib/jinja2-2.6/jinja2/testsuite/ext.py | 90 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.ext
~~~~~~~~~~~~~~~~~~~~
Tests for the extensions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, DictLoader, contextfunction, nodes
from jinja2.exceptions import TemplateAssertionError
from jinja2.ext import Extension
from jinja2.lexer import Token, count_newlines
from jinja2.utils import next
# 2.x / 3.x
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
# Module-level name imported by TestExtension via nodes.ImportedName.
importable_object = 23
# Matches _("...") markers; used by StreamFilterExtension to interpolate
# gettext calls into the token stream.
_gettext_re = re.compile(r'_\((.*?)\)(?s)')
# Templates exercising the classic (old-style) i18n extension.
i18n_templates = {
    'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
                   '{% block body %}{% endblock %}',
    'child.html': '{% extends "master.html" %}{% block body %}'
                  '{% trans %}watch out{% endtrans %}{% endblock %}',
    'plural.html': '{% trans user_count %}One user online{% pluralize %}'
                   '{{ user_count }} users online{% endtrans %}',
    'stringformat.html': '{{ _("User: %(num)s")|format(num=user_count) }}'
}
# Templates exercising the newstyle gettext callables (keyword-based
# interpolation, ngettext support, trans-block variables).
newstyle_i18n_templates = {
    'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
                   '{% block body %}{% endblock %}',
    'child.html': '{% extends "master.html" %}{% block body %}'
                  '{% trans %}watch out{% endtrans %}{% endblock %}',
    'plural.html': '{% trans user_count %}One user online{% pluralize %}'
                   '{{ user_count }} users online{% endtrans %}',
    'stringformat.html': '{{ _("User: %(num)s", num=user_count) }}',
    'ngettext.html': '{{ ngettext("%(num)s apple", "%(num)s apples", apples) }}',
    'ngettext_long.html': '{% trans num=apples %}{{ num }} apple{% pluralize %}'
                          '{{ num }} apples{% endtrans %}',
    'transvars1.html': '{% trans %}User: {{ num }}{% endtrans %}',
    'transvars2.html': '{% trans num=count %}User: {{ num }}{% endtrans %}',
    'transvars3.html': '{% trans count=num %}User: {{ count }}{% endtrans %}',
    'novars.html': '{% trans %}%(hello)s{% endtrans %}',
    'vars.html': '{% trans %}{{ foo }}%(foo)s{% endtrans %}',
    'explicitvars.html': '{% trans foo="42" %}%(foo)s{% endtrans %}'
}
# Tiny in-memory translation catalog keyed by language code; the gettext
# helpers below fall back to the untranslated string when a key is missing.
languages = {
    'de': {
        'missing': u'fehlend',
        'watch out': u'pass auf',
        'One user online': u'Ein Benutzer online',
        '%(user_count)s users online': u'%(user_count)s Benutzer online',
        'User: %(num)s': u'Benutzer: %(num)s',
        'User: %(count)s': u'Benutzer: %(count)s',
        '%(num)s apple': u'%(num)s Apfel',
        '%(num)s apples': u'%(num)s Äpfel'
    }
}
@contextfunction
def gettext(context, string):
    """Translate *string* using the LANGUAGE found in the template context."""
    catalog = languages.get(context.get('LANGUAGE', 'en'), {})
    return catalog.get(string, string)
@contextfunction
def ngettext(context, s, p, n):
    """Pluralizing translator: choose singular *s* or plural *p* by *n*."""
    catalog = languages.get(context.get('LANGUAGE', 'en'), {})
    if n == 1:
        return catalog.get(s, s)
    return catalog.get(p, p)
# Environment wired with the old-style i18n extension: translation
# callables are plain template globals.
i18n_env = Environment(
    loader=DictLoader(i18n_templates),
    extensions=['jinja2.ext.i18n']
)
i18n_env.globals.update({
    '_': gettext,
    'gettext': gettext,
    'ngettext': ngettext
})
# Environment using install_gettext_callables(newstyle=True), which lets
# the compiler inline and optimize the translation calls.
newstyle_i18n_env = Environment(
    loader=DictLoader(newstyle_i18n_templates),
    extensions=['jinja2.ext.i18n']
)
newstyle_i18n_env.install_gettext_callables(gettext, ngettext, newstyle=True)
class TestExtension(Extension):
    """Extension providing a ``{% test %}`` tag that dumps internals.

    Used to verify that extension nodes (environment attributes,
    extension attributes, imported names, context references) evaluate
    correctly inside templates.
    """
    tags = set(['test'])
    ext_attr = 42
    def parse(self, parser):
        # Emit a single Output node that calls _dump with the four
        # supported node types as arguments.
        return nodes.Output([self.call_method('_dump', [
            nodes.EnvironmentAttribute('sandboxed'),
            self.attr('ext_attr'),
            nodes.ImportedName(__name__ + '.importable_object'),
            nodes.ContextReference()
        ])]).set_lineno(next(parser.stream).lineno)
    def _dump(self, sandboxed, ext_attr, imported_object, context):
        # Render all four values pipe-separated so a test can assert on
        # the whole string at once.
        return '%s|%s|%s|%s' % (
            sandboxed,
            ext_attr,
            imported_object,
            context.blocks
        )
class PreprocessorExtension(Extension):
    """Extension that rewrites the template source before lexing."""

    def preprocess(self, source, name, filename=None):
        # swap the [[TEST]] placeholder for a real variable expression
        marker, replacement = '[[TEST]]', '({{ foo }})'
        return source.replace(marker, replacement)
class StreamFilterExtension(Extension):
    """Extension that rewrites ``_("...")`` markers found in raw template
    data into real ``{{ gettext("...") }}`` calls at the token level."""
    def filter_stream(self, stream):
        # Only data tokens can contain the markers; everything else
        # passes through untouched.
        for token in stream:
            if token.type == 'data':
                for t in self.interpolate(token):
                    yield t
            else:
                yield token
    def interpolate(self, token):
        # Walk the data token, emitting plain data for the text between
        # markers and a synthesized gettext call for each marker.
        pos = 0
        end = len(token.value)
        lineno = token.lineno
        while 1:
            match = _gettext_re.search(token.value, pos)
            if match is None:
                break
            value = token.value[pos:match.start()]
            if value:
                yield Token(lineno, 'data', value)
            lineno += count_newlines(token.value)
            # gettext("<captured string>") as individual lexer tokens
            yield Token(lineno, 'variable_begin', None)
            yield Token(lineno, 'name', 'gettext')
            yield Token(lineno, 'lparen', None)
            yield Token(lineno, 'string', match.group(1))
            yield Token(lineno, 'rparen', None)
            yield Token(lineno, 'variable_end', None)
            pos = match.end()
        # trailing data after the last marker
        if pos < end:
            yield Token(lineno, 'data', token.value[pos:])
class ExtensionsTestCase(JinjaTestCase):
    """Tests for the extension machinery itself: registration, ordering,
    the bundled loopcontrols/do/with extensions, and the custom test
    extensions defined above."""
    def test_extend_late(self):
        # extensions can be added after the environment is created
        env = Environment()
        env.add_extension('jinja2.ext.autoescape')
        t = env.from_string('{% autoescape true %}{{ "<test>" }}{% endautoescape %}')
        assert t.render() == '&lt;test&gt;'
    def test_loop_controls(self):
        env = Environment(extensions=['jinja2.ext.loopcontrols'])
        tmpl = env.from_string('''
            {%- for item in [1, 2, 3, 4] %}
                {%- if item % 2 == 0 %}{% continue %}{% endif -%}
                {{ item }}
            {%- endfor %}''')
        assert tmpl.render() == '13'
        tmpl = env.from_string('''
            {%- for item in [1, 2, 3, 4] %}
                {%- if item > 2 %}{% break %}{% endif -%}
                {{ item }}
            {%- endfor %}''')
        assert tmpl.render() == '12'
    def test_do(self):
        env = Environment(extensions=['jinja2.ext.do'])
        tmpl = env.from_string('''
            {%- set items = [] %}
            {%- for char in "foo" %}
                {%- do items.append(loop.index0 ~ char) %}
            {%- endfor %}{{ items|join(', ') }}''')
        assert tmpl.render() == '0f, 1o, 2o'
    def test_with(self):
        env = Environment(extensions=['jinja2.ext.with_'])
        tmpl = env.from_string('''\
        {% with a=42, b=23 -%}
            {{ a }} = {{ b }}
        {% endwith -%}
            {{ a }} = {{ b }}\
        ''')
        assert [x.strip() for x in tmpl.render(a=1, b=2).splitlines()] \
            == ['42 = 23', '1 = 2']
    def test_extension_nodes(self):
        env = Environment(extensions=[TestExtension])
        tmpl = env.from_string('{% test %}')
        assert tmpl.render() == 'False|42|23|{}'
    def test_identifier(self):
        assert TestExtension.identifier == __name__ + '.TestExtension'
    def test_rebinding(self):
        # overlays must rebind extensions to the new environment
        original = Environment(extensions=[TestExtension])
        overlay = original.overlay()
        for env in original, overlay:
            for ext in env.extensions.itervalues():
                assert ext.environment is env
    def test_preprocessor_extension(self):
        env = Environment(extensions=[PreprocessorExtension])
        tmpl = env.from_string('{[[TEST]]}')
        assert tmpl.render(foo=42) == '{(42)}'
    def test_streamfilter_extension(self):
        env = Environment(extensions=[StreamFilterExtension])
        env.globals['gettext'] = lambda x: x.upper()
        tmpl = env.from_string('Foo _(bar) Baz')
        out = tmpl.render()
        assert out == 'Foo BAR Baz'
    def test_extension_ordering(self):
        # lower priority value loads first
        class T1(Extension):
            priority = 1
        class T2(Extension):
            priority = 2
        env = Environment(extensions=[T1, T2])
        ext = list(env.iter_extensions())
        assert ext[0].__class__ is T1
        assert ext[1].__class__ is T2
class InternationalizationTestCase(JinjaTestCase):
    """Tests for the classic (globals-based) i18n extension and the
    babel message extraction helper."""
    def test_trans(self):
        tmpl = i18n_env.get_template('child.html')
        assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'
    def test_trans_plural(self):
        tmpl = i18n_env.get_template('plural.html')
        assert tmpl.render(LANGUAGE='de', user_count=1) == 'Ein Benutzer online'
        assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'
    def test_complex_plural(self):
        tmpl = i18n_env.from_string('{% trans foo=42, count=2 %}{{ count }} item{% '
                                    'pluralize count %}{{ count }} items{% endtrans %}')
        assert tmpl.render() == '2 items'
        # pluralize must reference a trans-block variable
        self.assert_raises(TemplateAssertionError, i18n_env.from_string,
                           '{% trans foo %}...{% pluralize bar %}...{% endtrans %}')
    def test_trans_stringformatting(self):
        tmpl = i18n_env.get_template('stringformat.html')
        assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'
    def test_extract(self):
        from jinja2.ext import babel_extract
        source = BytesIO('''
        {{ gettext('Hello World') }}
        {% trans %}Hello World{% endtrans %}
        {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
        '''.encode('ascii')) # make python 3 happy
        assert list(babel_extract(source, ('gettext', 'ngettext', '_'), [], {})) == [
            (2, 'gettext', u'Hello World', []),
            (3, 'gettext', u'Hello World', []),
            (4, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
        ]
    def test_comment_extract(self):
        from jinja2.ext import babel_extract
        source = BytesIO('''
        {# trans first #}
        {{ gettext('Hello World') }}
        {% trans %}Hello World{% endtrans %}{# trans second #}
        {#: third #}
        {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
        '''.encode('utf-8')) # make python 3 happy
        assert list(babel_extract(source, ('gettext', 'ngettext', '_'), ['trans', ':'], {})) == [
            (3, 'gettext', u'Hello World', ['first']),
            (4, 'gettext', u'Hello World', ['second']),
            (6, 'ngettext', (u'%(users)s user', u'%(users)s users', None), ['third'])
        ]
class NewstyleInternationalizationTestCase(JinjaTestCase):
    """Tests for the newstyle gettext callables installed via
    install_gettext_callables(newstyle=True)."""
    def test_trans(self):
        tmpl = newstyle_i18n_env.get_template('child.html')
        assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'
    def test_trans_plural(self):
        tmpl = newstyle_i18n_env.get_template('plural.html')
        assert tmpl.render(LANGUAGE='de', user_count=1) == 'Ein Benutzer online'
        assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'
    def test_complex_plural(self):
        tmpl = newstyle_i18n_env.from_string('{% trans foo=42, count=2 %}{{ count }} item{% '
                                             'pluralize count %}{{ count }} items{% endtrans %}')
        assert tmpl.render() == '2 items'
        self.assert_raises(TemplateAssertionError, i18n_env.from_string,
                           '{% trans foo %}...{% pluralize bar %}...{% endtrans %}')
    def test_trans_stringformatting(self):
        tmpl = newstyle_i18n_env.get_template('stringformat.html')
        assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'
    def test_newstyle_plural(self):
        tmpl = newstyle_i18n_env.get_template('ngettext.html')
        assert tmpl.render(LANGUAGE='de', apples=1) == '1 Apfel'
        assert tmpl.render(LANGUAGE='de', apples=5) == u'5 Äpfel'
    def test_autoescape_support(self):
        # gettext output must be treated as safe markup when autoescaping
        env = Environment(extensions=['jinja2.ext.autoescape',
                                      'jinja2.ext.i18n'])
        env.install_gettext_callables(lambda x: u'<strong>Wert: %(name)s</strong>',
                                      lambda s, p, n: s, newstyle=True)
        t = env.from_string('{% autoescape ae %}{{ gettext("foo", name='
                            '"<test>") }}{% endautoescape %}')
        assert t.render(ae=True) == '<strong>Wert: &lt;test&gt;</strong>'
        assert t.render(ae=False) == '<strong>Wert: <test></strong>'
    def test_num_used_twice(self):
        tmpl = newstyle_i18n_env.get_template('ngettext_long.html')
        assert tmpl.render(apples=5, LANGUAGE='de') == u'5 Äpfel'
    def test_num_called_num(self):
        source = newstyle_i18n_env.compile('''
            {% trans num=3 %}{{ num }} apple{% pluralize
            %}{{ num }} apples{% endtrans %}
        ''', raw=True)
        # quite hacky, but the only way to properly test that. The idea is
        # that the generated code does not pass num twice (although that
        # would work) for better performance. This only works on the
        # newstyle gettext of course
        assert re.search(r"l_ngettext, u?'\%\(num\)s apple', u?'\%\(num\)s "
                         r"apples', 3", source) is not None
    def test_trans_vars(self):
        t1 = newstyle_i18n_env.get_template('transvars1.html')
        t2 = newstyle_i18n_env.get_template('transvars2.html')
        t3 = newstyle_i18n_env.get_template('transvars3.html')
        assert t1.render(num=1, LANGUAGE='de') == 'Benutzer: 1'
        assert t2.render(count=23, LANGUAGE='de') == 'Benutzer: 23'
        assert t3.render(num=42, LANGUAGE='de') == 'Benutzer: 42'
    def test_novars_vars_escaping(self):
        # %-placeholders survive untouched when no variables are involved
        t = newstyle_i18n_env.get_template('novars.html')
        assert t.render() == '%(hello)s'
        t = newstyle_i18n_env.get_template('vars.html')
        assert t.render(foo='42') == '42%(foo)s'
        t = newstyle_i18n_env.get_template('explicitvars.html')
        assert t.render() == '%(foo)s'
class AutoEscapeTestCase(JinjaTestCase):
    """Tests for the autoescape extension: scoped overrides, volatile
    (runtime-decided) escaping, and generated-code behavior."""
    def test_scoped_setting(self):
        # {% autoescape %} blocks override the environment default both ways
        env = Environment(extensions=['jinja2.ext.autoescape'],
                          autoescape=True)
        tmpl = env.from_string('''
            {{ "<HelloWorld>" }}
            {% autoescape false %}
                {{ "<HelloWorld>" }}
            {% endautoescape %}
            {{ "<HelloWorld>" }}
        ''')
        assert tmpl.render().split() == \
            [u'&lt;HelloWorld&gt;', u'<HelloWorld>', u'&lt;HelloWorld&gt;']
        env = Environment(extensions=['jinja2.ext.autoescape'],
                          autoescape=False)
        tmpl = env.from_string('''
            {{ "<HelloWorld>" }}
            {% autoescape true %}
                {{ "<HelloWorld>" }}
            {% endautoescape %}
            {{ "<HelloWorld>" }}
        ''')
        assert tmpl.render().split() == \
            [u'<HelloWorld>', u'&lt;HelloWorld&gt;', u'<HelloWorld>']
    def test_nonvolatile(self):
        # with a compile-time-known setting the escaping is constant-folded
        env = Environment(extensions=['jinja2.ext.autoescape'],
                          autoescape=True)
        tmpl = env.from_string('{{ {"foo": "<test>"}|xmlattr|escape }}')
        assert tmpl.render() == ' foo="&lt;test&gt;"'
        tmpl = env.from_string('{% autoescape false %}{{ {"foo": "<test>"}'
                               '|xmlattr|escape }}{% endautoescape %}')
        assert tmpl.render() == ' foo=&#34;&amp;lt;test&amp;gt;&#34;'
    def test_volatile(self):
        # a runtime expression makes escaping "volatile" (decided per render)
        env = Environment(extensions=['jinja2.ext.autoescape'],
                          autoescape=True)
        tmpl = env.from_string('{% autoescape foo %}{{ {"foo": "<test>"}'
                               '|xmlattr|escape }}{% endautoescape %}')
        assert tmpl.render(foo=False) == ' foo=&#34;&amp;lt;test&amp;gt;&#34;'
        assert tmpl.render(foo=True) == ' foo="&lt;test&gt;"'
    def test_scoping(self):
        env = Environment(extensions=['jinja2.ext.autoescape'])
        tmpl = env.from_string('{% autoescape true %}{% set x = "<x>" %}{{ x }}'
                               '{% endautoescape %}{{ x }}{{ "<y>" }}')
        assert tmpl.render(x=1) == '&lt;x&gt;1<y>'
    def test_volatile_scoping(self):
        env = Environment(extensions=['jinja2.ext.autoescape'])
        tmplsource = '''
        {% autoescape val %}
            {% macro foo(x) %}
                [{{ x }}]
            {% endmacro %}
            {{ foo().__class__.__name__ }}
        {% endautoescape %}
        {{ '<testing>' }}
        '''
        tmpl = env.from_string(tmplsource)
        assert tmpl.render(val=True).split()[0] == 'Markup'
        assert tmpl.render(val=False).split()[0] == unicode.__name__
        # looking at the source we should see <testing> there in raw
        # (and then escaped as well)
        env = Environment(extensions=['jinja2.ext.autoescape'])
        pysource = env.compile(tmplsource, raw=True)
        assert '<testing>\\n' in pysource
        env = Environment(extensions=['jinja2.ext.autoescape'],
                          autoescape=True)
        pysource = env.compile(tmplsource, raw=True)
        assert '&lt;testing&gt;\\n' in pysource
def suite():
    """Collect every extension-related test case into one TestSuite."""
    cases = [
        ExtensionsTestCase,
        InternationalizationTestCase,
        NewstyleInternationalizationTestCase,
        AutoEscapeTestCase,
    ]
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
|
gisce/OCB | refs/heads/7.0 | addons/pad_project/project_task.py | 433 | # -*- coding: utf-8 -*-
from openerp.tools.translate import _
from openerp.osv import fields, osv
class task(osv.osv):
    """Extend project.task with an etherpad-backed description field."""
    _name = "project.task"
    _inherit = ["project.task",'pad.common']
    _columns = {
        # pad.common mirrors the pad contents into the plain 'description'
        # field via pad_content_field
        'description_pad': fields.char('Description PAD', pad_content_field='description')
    }
|
QianBIG/odoo | refs/heads/8.0 | addons/website_forum/__init__.py | 363 | # -*- coding: utf-8 -*-
import controllers
import models
import tests
|
mixturemodel-flow/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/ops/logistic.py | 46 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Logistic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
class Logistic(distribution.Distribution):
  """The Logistic distribution with location `loc` and `scale` parameters.

  #### Mathematical details

  The cumulative density function of this distribution is:

  ```none
  cdf(x; mu, sigma) = 1 / (1 + exp(-(x - mu) / sigma))
  ```

  where `loc = mu` and `scale = sigma`.

  The Logistic distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,

  ```none
  X ~ Logistic(loc=0, scale=1)
  Y = loc + scale * X
  ```

  #### Examples

  Examples of initialization of one or a batch of distributions.

  ```python
  # Define a single scalar Logistic distribution.
  dist = tf.contrib.distributions.Logistic(loc=0., scale=3.)

  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1.)

  # Define a batch of two scalar valued Logistics.
  # The first has mean 1 and scale 11, the second 2 and 22.
  dist = tf.contrib.distributions.Logistic(loc=[1, 2.], scale=[11, 22.])

  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  dist.prob([0, 1.5])

  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample([3])
  ```

  Arguments are broadcast when possible.

  ```python
  # Define a batch of two scalar valued Logistics.
  # Both have mean 1, but different scales.
  dist = tf.contrib.distributions.Logistic(loc=1., scale=[11, 22.])

  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.prob(3.0)
  ```
  """

  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Logistic"):
    """Construct Logistic distributions with mean and scale `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).

    Args:
      loc: Floating point tensor, the means of the distribution(s).
      scale: Floating point tensor, the scales of the distribution(s). Must
        contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if loc and scale are different dtypes.
    """
    parameters = locals()
    with ops.name_scope(name, values=[loc, scale]):
      # The positivity assertion only runs when validate_args is set.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        check_ops.assert_same_float_dtype([self._loc, self._scale])
    super(Logistic, self).__init__(
        dtype=self._scale.dtype,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._loc, self._scale],
        name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both `loc` and `scale` take the same (sample) shape.
    return dict(
        zip(("loc", "scale"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def loc(self):
    """Distribution parameter for the location."""
    return self._loc

  @property
  def scale(self):
    """Distribution parameter for scale."""
    return self._scale

  def _batch_shape_tensor(self):
    # Dynamic (graph-time) broadcast of the two parameter shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.loc), array_ops.shape(self.scale))

  def _batch_shape(self):
    # Static (build-time) broadcast of the two parameter shapes.
    return array_ops.broadcast_static_shape(
        self.loc.get_shape(), self.scale.get_shape())

  def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    # Scalar events: each draw is a single real number.
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    uniform = random_ops.random_uniform(
        shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    # Inverse-CDF transform: log(u) - log1p(-u) is logit(u), a standard
    # logistic sample, then shifted/scaled (location-scale family).
    sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
    return sampled * self.scale + self.loc

  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    # log(sigmoid(z)) written in the numerically stable softplus form.
    return -nn_ops.softplus(-self._z(x))

  def _cdf(self, x):
    return math_ops.sigmoid(self._z(x))

  def _log_survival_function(self, x):
    # log(1 - cdf(x)) = log(sigmoid(-z)) = -softplus(z).
    return -nn_ops.softplus(self._z(x))

  def _survival_function(self, x):
    return math_ops.sigmoid(-self._z(x))

  def _log_unnormalized_prob(self, x):
    # Log pdf without the -log(scale) normalizer: -z - 2 * softplus(-z).
    z = self._z(x)
    return - z - 2. * nn_ops.softplus(-z)

  def _log_normalization(self):
    return math_ops.log(self.scale)

  def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast sigma.
    scale = self.scale * array_ops.ones_like(self.loc)
    # Differential entropy of the logistic: 2 + log(scale).
    return 2 + math_ops.log(scale)

  def _mean(self):
    # Broadcast loc against scale so the result has the full batch shape.
    return self.loc * array_ops.ones_like(self.scale)

  def _stddev(self):
    # Standard deviation of the logistic: scale * pi / sqrt(3).
    return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(3)

  def _mode(self):
    # The density peaks at the location parameter (same as the mean).
    return self._mean()

  def _z(self, x):
    """Standardize input `x` to a unit logistic."""
    with ops.name_scope("standardize", values=[x]):
      return (x - self.loc) / self.scale
|
boundarydevices/android_external_chromium_org | refs/heads/cm-12.0 | chrome/common/extensions/docs/server2/github_file_system.py | 94 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
from StringIO import StringIO
import posixpath
from appengine_blobstore import AppEngineBlobstore, BLOBSTORE_GITHUB
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import urlfetch, blobstore
from docs_server_utils import StringIdentity
from file_system import FileSystem, StatInfo
from future import Future
from path_util import IsDirectory
import url_constants
from zipfile import ZipFile, BadZipfile
ZIP_KEY = 'zipball'
USERNAME = None
PASSWORD = None
def _MakeBlobstoreKey(version):
return ZIP_KEY + '.' + str(version)
def _GetAsyncFetchCallback(fetcher,
                           username,
                           password,
                           blobstore,
                           key_to_set,
                           key_to_delete=None):
  """Returns a no-argument callable that resolves the async github zip fetch.

  The callable returns a ZipFile of the fetched archive (after caching the
  raw blob in |blobstore| under |key_to_set|), or None on any download or
  zip-parse failure. When |key_to_delete| is given, the cached blob for that
  stale version is deleted first.
  """
  fetch = fetcher.FetchAsync(ZIP_KEY, username=username, password=password)
  def resolve():
    try:
      result = fetch.Get()
      # Check if Github authentication failed.
      if result.status_code == 401:
        logging.error('Github authentication failed for %s, falling back to '
                      'unauthenticated.' % USERNAME)
        blob = fetcher.Fetch(ZIP_KEY).content
      else:
        blob = result.content
    except urlfetch.DownloadError as e:
      # Bug fix: this branch is a download failure, not a corrupt zip, so
      # don't log it as 'Bad github zip file'.
      logging.error('Github fetch failed: %s' % e)
      return None
    if key_to_delete is not None:
      # Bug fix: _MakeBlobstoreKey() takes a single version argument; the
      # BLOBSTORE_GITHUB namespace is a separate parameter of Delete() —
      # previously both were passed to _MakeBlobstoreKey (a TypeError).
      # This mirrors the Delete call in GithubFileSystem._GetZip.
      blobstore.Delete(_MakeBlobstoreKey(key_to_delete), BLOBSTORE_GITHUB)
    try:
      return_zip = ZipFile(StringIO(blob))
    except BadZipfile as e:
      logging.error('Bad github zip file: %s' % e)
      return None
    blobstore.Set(_MakeBlobstoreKey(key_to_set), blob, BLOBSTORE_GITHUB)
    return return_zip
  return resolve
class GithubFileSystem(FileSystem):
  """FileSystem served from a blobstore-cached zipball of a github repo.

  The whole repository archive is fetched from github (asynchronously when
  possible), cached in the AppEngine blobstore keyed by commit version, and
  reads/listings are answered from the resulting ZipFile.
  """

  @staticmethod
  def CreateChromeAppsSamples(object_store_creator):
    # Convenience factory for the GoogleChrome/chrome-app-samples repo.
    return GithubFileSystem(
        '%s/GoogleChrome/chrome-app-samples' % url_constants.GITHUB_REPOS,
        AppEngineBlobstore(),
        object_store_creator)

  def __init__(self, url, blobstore, object_store_creator):
    # If we key the password store on the app version then the whole advantage
    # of having it in the first place is greatly lessened (likewise it should
    # always start populated).
    password_store = object_store_creator.Create(
        GithubFileSystem,
        app_version=None,
        category='password',
        start_empty=False)
    # Module-level USERNAME/PASSWORD, when set, seed the password store;
    # otherwise previously stored credentials are read back.
    if USERNAME is None:
      password_data = password_store.GetMulti(('username', 'password')).Get()
      self._username, self._password = (password_data.get('username'),
                                        password_data.get('password'))
    else:
      password_store.SetMulti({'username': USERNAME, 'password': PASSWORD})
      self._username, self._password = (USERNAME, PASSWORD)
    self._url = url
    self._fetcher = AppEngineUrlFetcher(url)
    self._blobstore = blobstore
    self._stat_object_store = object_store_creator.Create(GithubFileSystem)
    self._version = None
    # Eagerly start loading the zip for the current HEAD version.
    self._GetZip(self.Stat(ZIP_KEY).version)

  def _GetZip(self, version):
    # Populates self._zip_file (a Future of ZipFile-or-None) for |version|,
    # preferring the blobstore cache and falling back to an async fetch that
    # also deletes the blob cached for the previous version.
    try:
      blob = self._blobstore.Get(_MakeBlobstoreKey(version), BLOBSTORE_GITHUB)
    except blobstore.BlobNotFoundError:
      self._zip_file = Future(value=None)
      return
    if blob is not None:
      try:
        self._zip_file = Future(value=ZipFile(StringIO(blob)))
      except BadZipfile as e:
        # Cached blob is corrupt: evict it and serve None.
        self._blobstore.Delete(_MakeBlobstoreKey(version), BLOBSTORE_GITHUB)
        logging.error('Bad github zip file: %s' % e)
        self._zip_file = Future(value=None)
    else:
      self._zip_file = Future(
          callback=_GetAsyncFetchCallback(self._fetcher,
                                          self._username,
                                          self._password,
                                          self._blobstore,
                                          version,
                                          key_to_delete=self._version))
    self._version = version

  def _ReadFile(self, path):
    # Returns the file's bytes, or '' on any fetch/zip failure.
    try:
      zip_file = self._zip_file.Get()
    except Exception as e:
      logging.error('Github ReadFile error: %s' % e)
      return ''
    if zip_file is None:
      logging.error('Bad github zip file.')
      return ''
    # The archive's single top-level directory (repo-name-sha/) prefixes
    # every name in the zip.
    prefix = zip_file.namelist()[0]
    return zip_file.read(prefix + path)

  def _ListDir(self, path):
    # Returns the names directly under |path|, or [] on any failure.
    try:
      zip_file = self._zip_file.Get()
    except Exception as e:
      logging.error('Github ListDir error: %s' % e)
      return []
    if zip_file is None:
      logging.error('Bad github zip file.')
      return []
    filenames = zip_file.namelist()
    # Take out parent directory name (GoogleChrome-chrome-app-samples-c78a30f)
    filenames = [f[len(filenames[0]):] for f in filenames]
    # Remove the path of the directory we're listing from the filenames.
    filenames = [f[len(path):] for f in filenames
                 if f != path and f.startswith(path)]
    # Remove all files not directly in this directory.
    return [f for f in filenames if f[:-1].count('/') == 0]

  def Read(self, paths, skip_not_found=False):
    # NOTE(review): |skip_not_found| is accepted for FileSystem interface
    # parity but is currently unused by this implementation.
    version = self.Stat(ZIP_KEY).version
    if version != self._version:
      # HEAD moved since the last fetch; reload the zip.
      self._GetZip(version)
    result = {}
    for path in paths:
      if IsDirectory(path):
        result[path] = self._ListDir(path)
      else:
        result[path] = self._ReadFile(path)
    return Future(value=result)

  def _DefaultStat(self, path):
    version = 0
    # TODO(kalman): we should replace all of this by wrapping the
    # GithubFileSystem in a CachingFileSystem. A lot of work has been put into
    # CFS to be robust, and GFS is missing out.
    # For example: the following line is wrong, but it could be moot.
    self._stat_object_store.Set(path, version)
    return StatInfo(version)

  def Stat(self, path):
    # Returns a StatInfo whose version is the repo's HEAD commit sha (cached
    # in the stat object store), or a default version-0 stat on failure.
    version = self._stat_object_store.Get(path).Get()
    if version is not None:
      return StatInfo(version)
    # NOTE(review): these fetches use the module-level USERNAME/PASSWORD
    # rather than the instance credentials resolved in __init__ — looks
    # inconsistent; confirm which is intended.
    try:
      result = self._fetcher.Fetch('commits/HEAD',
                                   username=USERNAME,
                                   password=PASSWORD)
    except urlfetch.DownloadError as e:
      logging.warning('GithubFileSystem Stat: %s' % e)
      return self._DefaultStat(path)
    # Check if Github authentication failed.
    if result.status_code == 401:
      logging.warning('Github authentication failed for %s, falling back to '
                      'unauthenticated.' % USERNAME)
      try:
        result = self._fetcher.Fetch('commits/HEAD')
      except urlfetch.DownloadError as e:
        logging.warning('GithubFileSystem Stat: %s' % e)
        return self._DefaultStat(path)
    # Parse response JSON - but sometimes github gives us invalid JSON.
    try:
      version = json.loads(result.content)['sha']
      self._stat_object_store.Set(path, version)
      return StatInfo(version)
    except StandardError as e:
      logging.warning(
          ('%s: got invalid or unexpected JSON from github. Response status ' +
           'was %s, content %s') % (e, result.status_code, result.content))
      return self._DefaultStat(path)

  def GetIdentity(self):
    # Identity is stable per repo URL, so equal URLs share caches.
    return '%s@%s' % (self.__class__.__name__, StringIdentity(self._url))
|
837468220/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/idlelib/configHandler.py | 48 | """Provides access to stored IDLE configuration information.
Refer to the comments at the beginning of config-main.def for a description of
the available configuration files and the design implemented to update user
configuration information. In particular, user configuration choices which
duplicate the defaults will be removed from the user's configuration files,
and if a file becomes empty, it will be deleted.
The contents of the user files may be altered using the Options/Configure IDLE
menu to access the configuration GUI (configDialog.py), or manually.
Throughout this module there is an emphasis on returning useable defaults
when a problem occurs in returning a requested configuration value back to
idle. This is to allow IDLE to continue to function in spite of errors in
the retrieval of config information. When a default is returned instead of
a requested config value, a message is printed to stderr to aid in
configuration problem notification and resolution.
"""
import os
import sys
from idlelib import macosxSupport
from configparser import ConfigParser, NoOptionError, NoSectionError
# Error types raised by IdleConf methods when callers pass invalid argument
# values (config type, config set, fg/bg selector, or theme type).
class InvalidConfigType(Exception): pass
class InvalidConfigSet(Exception): pass
class InvalidFgBg(Exception): pass
class InvalidTheme(Exception): pass
class IdleConfParser(ConfigParser):
    """A ConfigParser specialised for idle configuration file handling.

    Remembers its backing file name so the configuration can be (re)loaded
    on demand, and offers convenience accessors with defaults and simple
    type coercion.
    """

    def __init__(self, cfgFile, cfgDefaults=None):
        """cfgFile - string, fully specified configuration file name."""
        self.file = cfgFile
        ConfigParser.__init__(self, defaults=cfgDefaults)

    def Get(self, section, option, type=None, default=None, raw=False):
        """Return the value of section/option, or default if absent.

        If type is 'bool' or 'int' the value is coerced accordingly;
        otherwise the raw string value is returned.
        """
        if not self.has_option(section, option):
            return default
        if type == 'bool':
            return self.getboolean(section, option)
        if type == 'int':
            return self.getint(section, option)
        return self.get(section, option, raw=raw)

    def GetOptionList(self, section):
        """Return the list of option names in section ([] if missing)."""
        return self.options(section) if self.has_section(section) else []

    def Load(self):
        """Load the configuration file from disk into this parser."""
        self.read(self.file)
class IdleUserConfParser(IdleConfParser):
    """
    IdleConfParser specialised for user configuration handling.

    Tracks a writable per-user config file: options can be added, removed
    and saved back to disk, and the file is deleted once it becomes empty.
    """

    def AddSection(self,section):
        """
        if section doesn't exist, add it
        """
        if not self.has_section(section):
            self.add_section(section)

    def RemoveEmptySections(self):
        """
        remove any sections that have no options
        """
        for section in self.sections():
            if not self.GetOptionList(section):
                self.remove_section(section)

    def IsEmpty(self):
        """
        Remove empty sections and then return 1 if parser has no sections
        left, else return 0.
        """
        self.RemoveEmptySections()
        if self.sections():
            return 0
        else:
            return 1

    def RemoveOption(self,section,option):
        """
        If section/option exists, remove it.
        Returns 1 if option was removed, 0 otherwise.
        """
        if self.has_section(section):
            return self.remove_option(section,option)
        # Bug fix: previously this method fell off the end and returned None
        # when the section was missing, contradicting the documented 0
        # return. None and 0 are both falsy, so callers are unaffected.
        return 0

    def SetOption(self,section,option,value):
        """
        Sets option to value, adding section if required.
        Returns 1 if option was added or changed, otherwise 0.
        """
        if self.has_option(section,option):
            if self.get(section,option)==value:
                return 0
            else:
                self.set(section,option,value)
                return 1
        else:
            if not self.has_section(section):
                self.add_section(section)
            self.set(section,option,value)
            return 1

    def RemoveFile(self):
        """
        Removes the user config file from disk if it exists.
        """
        if os.path.exists(self.file):
            os.remove(self.file)

    def Save(self):
        """Update user configuration file.

        Remove empty sections. If resulting config isn't empty, write the file
        to disk. If config is empty, remove the file from disk if it exists.
        """
        if not self.IsEmpty():
            fname = self.file
            try:
                cfgFile = open(fname, 'w')
            except IOError:
                # The existing file may be unwritable/stale: remove and retry.
                os.unlink(fname)
                cfgFile = open(fname, 'w')
            self.write(cfgFile)
        else:
            self.RemoveFile()
class IdleConf:
    """
    holds config parsers for all idle config files:
    default config files
        (idle install dir)/config-main.def
        (idle install dir)/config-extensions.def
        (idle install dir)/config-highlight.def
        (idle install dir)/config-keys.def
    user config files
        (user home dir)/.idlerc/config-main.cfg
        (user home dir)/.idlerc/config-extensions.cfg
        (user home dir)/.idlerc/config-highlight.cfg
        (user home dir)/.idlerc/config-keys.cfg
    """
    def __init__(self):
        self.defaultCfg={}
        self.userCfg={}
        self.cfg={}
        self.CreateConfigHandlers()
        self.LoadCfgFiles()
        #self.LoadCfg()

    def CreateConfigHandlers(self):
        """
        set up a dictionary of config parsers for default and user
        configurations respectively
        """
        #build idle install path
        if __name__ != '__main__': # we were imported
            idleDir=os.path.dirname(__file__)
        else: # we were exec'ed (for testing only)
            idleDir=os.path.abspath(sys.path[0])
        userDir=self.GetUserCfgDir()
        configTypes=('main','extensions','highlight','keys')
        defCfgFiles={}
        usrCfgFiles={}
        for cfgType in configTypes: #build config file names
            defCfgFiles[cfgType]=os.path.join(idleDir,'config-'+cfgType+'.def')
            usrCfgFiles[cfgType]=os.path.join(userDir,'config-'+cfgType+'.cfg')
        for cfgType in configTypes: #create config parsers
            self.defaultCfg[cfgType]=IdleConfParser(defCfgFiles[cfgType])
            self.userCfg[cfgType]=IdleUserConfParser(usrCfgFiles[cfgType])

    def GetUserCfgDir(self):
        """
        Creates (if required) and returns a filesystem directory for storing
        user config files.
        """
        cfgDir = '.idlerc'
        userDir = os.path.expanduser('~')
        if userDir != '~': # expanduser() found user home dir
            if not os.path.exists(userDir):
                warn = ('\n Warning: os.path.expanduser("~") points to\n '+
                        userDir+',\n but the path does not exist.\n')
                try:
                    sys.stderr.write(warn)
                except IOError:
                    pass
                userDir = '~'
        if userDir == "~": # still no path to home!
            # traditionally IDLE has defaulted to os.getcwd(), is this adequate?
            userDir = os.getcwd()
        userDir = os.path.join(userDir, cfgDir)
        if not os.path.exists(userDir):
            try:
                os.mkdir(userDir)
            except (OSError, IOError):
                warn = ('\n Warning: unable to create user config directory\n'+
                        userDir+'\n Check path and permissions.\n Exiting!\n\n')
                sys.stderr.write(warn)
                raise SystemExit
        return userDir

    def GetOption(self, configType, section, option, default=None, type=None,
                  warn_on_default=True, raw=False):
        """
        Get an option value for given config type and given general
        configuration section/option or return a default. If type is specified,
        return as type. Firstly the user configuration is checked, with a
        fallback to the default configuration, and a final 'catch all'
        fallback to a useable passed-in default if the option isn't present in
        either the user or the default configuration.
        configType must be one of ('main','extensions','highlight','keys')
        If a default is returned, and warn_on_default is True, a warning is
        printed to stderr.
        """
        if self.userCfg[configType].has_option(section,option):
            return self.userCfg[configType].Get(section, option,
                                                type=type, raw=raw)
        elif self.defaultCfg[configType].has_option(section,option):
            return self.defaultCfg[configType].Get(section, option,
                                                   type=type, raw=raw)
        else: #returning default, print warning
            if warn_on_default:
                warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
                           ' problem retrieving configuration option %r\n'
                           ' from section %r.\n'
                           ' returning default value: %r\n' %
                           (option, section, default))
                try:
                    sys.stderr.write(warning)
                except IOError:
                    pass
            return default

    def SetOption(self, configType, section, option, value):
        """In user's config file, set section's option to value.
        """
        self.userCfg[configType].SetOption(section, option, value)

    def GetSectionList(self, configSet, configType):
        """
        Get a list of sections from either the user or default config for
        the given config type.
        configSet must be either 'user' or 'default'
        configType must be one of ('main','extensions','highlight','keys')
        """
        if not (configType in ('main','extensions','highlight','keys')):
            raise InvalidConfigType('Invalid configType specified')
        if configSet == 'user':
            cfgParser=self.userCfg[configType]
        elif configSet == 'default':
            cfgParser=self.defaultCfg[configType]
        else:
            raise InvalidConfigSet('Invalid configSet specified')
        return cfgParser.sections()

    def GetHighlight(self, theme, element, fgBg=None):
        """
        return individual highlighting theme elements.
        fgBg - string ('fg'or'bg') or None, if None return a dictionary
        containing fg and bg colours (appropriate for passing to Tkinter in,
        e.g., a tag_config call), otherwise fg or bg colour only as specified.
        """
        # A theme present in the default config is treated as a default
        # (built-in) theme; otherwise it is looked up as a user theme.
        if self.defaultCfg['highlight'].has_section(theme):
            themeDict=self.GetThemeDict('default',theme)
        else:
            themeDict=self.GetThemeDict('user',theme)
        fore=themeDict[element+'-foreground']
        if element=='cursor': #there is no config value for cursor bg
            back=themeDict['normal-background']
        else:
            back=themeDict[element+'-background']
        highlight={"foreground": fore,"background": back}
        if not fgBg: #return dict of both colours
            return highlight
        else: #return specified colour only
            if fgBg == 'fg':
                return highlight["foreground"]
            if fgBg == 'bg':
                return highlight["background"]
            else:
                raise InvalidFgBg('Invalid fgBg specified')

    def GetThemeDict(self,type,themeName):
        """
        type - string, 'default' or 'user' theme type
        themeName - string, theme name
        Returns a dictionary which holds {option:value} for each element
        in the specified theme. Values are loaded over a set of ultimate last
        fallback defaults to guarantee that all theme elements are present in
        a newly created theme.
        """
        if type == 'user':
            cfgParser=self.userCfg['highlight']
        elif type == 'default':
            cfgParser=self.defaultCfg['highlight']
        else:
            raise InvalidTheme('Invalid theme type specified')
        #foreground and background values are provided for each theme element
        #(apart from cursor) even though all these values are not yet used
        #by idle, to allow for their use in the future. Default values are
        #generally black and white.
        theme={ 'normal-foreground':'#000000',
                'normal-background':'#ffffff',
                'keyword-foreground':'#000000',
                'keyword-background':'#ffffff',
                'builtin-foreground':'#000000',
                'builtin-background':'#ffffff',
                'comment-foreground':'#000000',
                'comment-background':'#ffffff',
                'string-foreground':'#000000',
                'string-background':'#ffffff',
                'definition-foreground':'#000000',
                'definition-background':'#ffffff',
                'hilite-foreground':'#000000',
                'hilite-background':'gray',
                'break-foreground':'#ffffff',
                'break-background':'#000000',
                'hit-foreground':'#ffffff',
                'hit-background':'#000000',
                'error-foreground':'#ffffff',
                'error-background':'#000000',
                #cursor (only foreground can be set)
                'cursor-foreground':'#000000',
                #shell window
                'stdout-foreground':'#000000',
                'stdout-background':'#ffffff',
                'stderr-foreground':'#000000',
                'stderr-background':'#ffffff',
                'console-foreground':'#000000',
                'console-background':'#ffffff' }
        for element in theme:
            if not cfgParser.has_option(themeName,element):
                #we are going to return a default, print warning
                warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict'
                         ' -\n problem retrieving theme element %r'
                         '\n from theme %r.\n'
                         ' returning default value: %r\n' %
                         (element, themeName, theme[element]))
                try:
                    sys.stderr.write(warning)
                except IOError:
                    pass
            colour=cfgParser.Get(themeName,element,default=theme[element])
            theme[element]=colour
        return theme

    def CurrentTheme(self):
        """
        Returns the name of the currently active theme
        """
        return self.GetOption('main','Theme','name',default='')

    def CurrentKeys(self):
        """
        Returns the name of the currently active key set
        """
        return self.GetOption('main','Keys','name',default='')

    def GetExtensions(self, active_only=True, editor_only=False, shell_only=False):
        """
        Gets a list of all idle extensions declared in the config files.
        active_only - boolean, if true only return active (enabled) extensions
        """
        extns=self.RemoveKeyBindNames(
                self.GetSectionList('default','extensions'))
        userExtns=self.RemoveKeyBindNames(
                self.GetSectionList('user','extensions'))
        for extn in userExtns:
            if extn not in extns: #user has added own extension
                extns.append(extn)
        if active_only:
            activeExtns=[]
            for extn in extns:
                if self.GetOption('extensions', extn, 'enable', default=True,
                                  type='bool'):
                    #the extension is enabled
                    if editor_only or shell_only:
                        if editor_only:
                            option = "enable_editor"
                        else:
                            option = "enable_shell"
                        if self.GetOption('extensions', extn,option,
                                          default=True, type='bool',
                                          warn_on_default=False):
                            activeExtns.append(extn)
                    else:
                        activeExtns.append(extn)
            return activeExtns
        else:
            return extns

    def RemoveKeyBindNames(self,extnNameList):
        #get rid of keybinding section names
        #(section names are unique, so index() is safe here)
        names=extnNameList
        kbNameIndicies=[]
        for name in names:
            if name.endswith(('_bindings', '_cfgBindings')):
                kbNameIndicies.append(names.index(name))
        kbNameIndicies.sort()
        kbNameIndicies.reverse()
        for index in kbNameIndicies: #delete each keybinding section name
            del(names[index])
        return names

    def GetExtnNameForEvent(self,virtualEvent):
        """
        Returns the name of the extension that virtualEvent is bound in, or
        None if not bound in any extension.
        virtualEvent - string, name of the virtual event to test for, without
        the enclosing '<< >>'
        """
        extName=None
        vEvent='<<'+virtualEvent+'>>'
        for extn in self.GetExtensions(active_only=0):
            for event in self.GetExtensionKeys(extn):
                if event == vEvent:
                    extName=extn
        return extName

    def GetExtensionKeys(self,extensionName):
        """
        returns a dictionary of the configurable keybindings for a particular
        extension,as they exist in the dictionary returned by GetCurrentKeySet;
        that is, where previously used bindings are disabled.
        """
        keysName=extensionName+'_cfgBindings'
        activeKeys=self.GetCurrentKeySet()
        extKeys={}
        if self.defaultCfg['extensions'].has_section(keysName):
            eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
            for eventName in eventNames:
                event='<<'+eventName+'>>'
                binding=activeKeys[event]
                extKeys[event]=binding
        return extKeys

    def __GetRawExtensionKeys(self,extensionName):
        """
        returns a dictionary of the configurable keybindings for a particular
        extension, as defined in the configuration files, or an empty dictionary
        if no bindings are found
        """
        keysName=extensionName+'_cfgBindings'
        extKeys={}
        if self.defaultCfg['extensions'].has_section(keysName):
            eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
            for eventName in eventNames:
                binding=self.GetOption('extensions',keysName,
                                       eventName,default='').split()
                event='<<'+eventName+'>>'
                extKeys[event]=binding
        return extKeys

    def GetExtensionBindings(self,extensionName):
        """
        Returns a dictionary of all the event bindings for a particular
        extension. The configurable keybindings are returned as they exist in
        the dictionary returned by GetCurrentKeySet; that is, where re-used
        keybindings are disabled.
        """
        bindsName=extensionName+'_bindings'
        extBinds=self.GetExtensionKeys(extensionName)
        #add the non-configurable bindings
        if self.defaultCfg['extensions'].has_section(bindsName):
            eventNames=self.defaultCfg['extensions'].GetOptionList(bindsName)
            for eventName in eventNames:
                binding=self.GetOption('extensions',bindsName,
                                       eventName,default='').split()
                event='<<'+eventName+'>>'
                extBinds[event]=binding
        return extBinds

    def GetKeyBinding(self, keySetName, eventStr):
        """
        returns the keybinding for a specific event.
        keySetName - string, name of key binding set
        eventStr - string, the virtual event we want the binding for,
        represented as a string, eg. '<<event>>'
        """
        eventName=eventStr[2:-2] #trim off the angle brackets
        binding=self.GetOption('keys',keySetName,eventName,default='').split()
        return binding

    def GetCurrentKeySet(self):
        """Return the active key set, adjusted for the platform if needed."""
        result = self.GetKeySet(self.CurrentKeys())

        if macosxSupport.runningAsOSXApp():
            # We're using AquaTk, replace all keybindings that use the
            # Alt key by ones that use the Option key because the former
            # don't work reliably.
            for k, v in result.items():
                v2 = [ x.replace('<Alt-', '<Option-') for x in v ]
                if v != v2:
                    result[k] = v2

        return result

    def GetKeySet(self,keySetName):
        """
        Returns a dictionary of: all requested core keybindings, plus the
        keybindings for all currently active extensions. If a binding defined
        in an extension is already in use, that binding is disabled.
        """
        keySet=self.GetCoreKeys(keySetName)
        activeExtns=self.GetExtensions(active_only=1)
        for extn in activeExtns:
            extKeys=self.__GetRawExtensionKeys(extn)
            if extKeys: #the extension defines keybindings
                for event in extKeys:
                    if extKeys[event] in keySet.values():
                        #the binding is already in use
                        extKeys[event]='' #disable this binding
                    keySet[event]=extKeys[event] #add binding
        return keySet

    def IsCoreBinding(self,virtualEvent):
        """
        returns true if the virtual event is bound in the core idle keybindings.
        virtualEvent - string, name of the virtual event to test for, without
        the enclosing '<< >>'
        """
        return ('<<'+virtualEvent+'>>') in self.GetCoreKeys()

    def GetCoreKeys(self, keySetName=None):
        """
        returns the requested set of core keybindings, with fallbacks if
        required.
        Keybindings loaded from the config file(s) are loaded _over_ these
        defaults, so if there is a problem getting any core binding there will
        be an 'ultimate last resort fallback' to the CUA-ish bindings
        defined here.
        """
        keyBindings={
            '<<copy>>': ['<Control-c>', '<Control-C>'],
            '<<cut>>': ['<Control-x>', '<Control-X>'],
            '<<paste>>': ['<Control-v>', '<Control-V>'],
            '<<beginning-of-line>>': ['<Control-a>', '<Home>'],
            '<<center-insert>>': ['<Control-l>'],
            '<<close-all-windows>>': ['<Control-q>'],
            '<<close-window>>': ['<Alt-F4>'],
            '<<do-nothing>>': ['<Control-x>'],
            '<<end-of-file>>': ['<Control-d>'],
            '<<python-docs>>': ['<F1>'],
            '<<python-context-help>>': ['<Shift-F1>'],
            '<<history-next>>': ['<Alt-n>'],
            '<<history-previous>>': ['<Alt-p>'],
            '<<interrupt-execution>>': ['<Control-c>'],
            '<<view-restart>>': ['<F6>'],
            '<<restart-shell>>': ['<Control-F6>'],
            '<<open-class-browser>>': ['<Alt-c>'],
            '<<open-module>>': ['<Alt-m>'],
            '<<open-new-window>>': ['<Control-n>'],
            '<<open-window-from-file>>': ['<Control-o>'],
            '<<plain-newline-and-indent>>': ['<Control-j>'],
            '<<print-window>>': ['<Control-p>'],
            '<<redo>>': ['<Control-y>'],
            '<<remove-selection>>': ['<Escape>'],
            '<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'],
            '<<save-window-as-file>>': ['<Alt-s>'],
            '<<save-window>>': ['<Control-s>'],
            '<<select-all>>': ['<Alt-a>'],
            '<<toggle-auto-coloring>>': ['<Control-slash>'],
            '<<undo>>': ['<Control-z>'],
            '<<find-again>>': ['<Control-g>', '<F3>'],
            '<<find-in-files>>': ['<Alt-F3>'],
            '<<find-selection>>': ['<Control-F3>'],
            '<<find>>': ['<Control-f>'],
            '<<replace>>': ['<Control-h>'],
            '<<goto-line>>': ['<Alt-g>'],
            '<<smart-backspace>>': ['<Key-BackSpace>'],
            '<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'],
            '<<smart-indent>>': ['<Key-Tab>'],
            '<<indent-region>>': ['<Control-Key-bracketright>'],
            '<<dedent-region>>': ['<Control-Key-bracketleft>'],
            '<<comment-region>>': ['<Alt-Key-3>'],
            '<<uncomment-region>>': ['<Alt-Key-4>'],
            '<<tabify-region>>': ['<Alt-Key-5>'],
            '<<untabify-region>>': ['<Alt-Key-6>'],
            '<<toggle-tabs>>': ['<Alt-Key-t>'],
            '<<change-indentwidth>>': ['<Alt-Key-u>'],
            '<<del-word-left>>': ['<Control-Key-BackSpace>'],
            '<<del-word-right>>': ['<Control-Key-Delete>']
            }
        if keySetName:
            for event in keyBindings:
                binding=self.GetKeyBinding(keySetName,event)
                if binding:
                    keyBindings[event]=binding
                else: #we are going to return a default, print warning
                    warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys'
                             ' -\n problem retrieving key binding for event %r'
                             '\n from key set %r.\n'
                             ' returning default value: %r\n' %
                             (event, keySetName, keyBindings[event]))
                    try:
                        sys.stderr.write(warning)
                    except IOError:
                        pass
        return keyBindings

    def GetExtraHelpSourceList(self,configSet):
        """Fetch list of extra help sources from a given configSet.

        Valid configSets are 'user' or 'default'. Return a list of tuples of
        the form (menu_item , path_to_help_file , option), or return the empty
        list. 'option' is the sequence number of the help resource. 'option'
        values determine the position of the menu items on the Help menu,
        therefore the returned list must be sorted by 'option'.

        """
        helpSources=[]
        if configSet=='user':
            cfgParser=self.userCfg['main']
        elif configSet=='default':
            cfgParser=self.defaultCfg['main']
        else:
            raise InvalidConfigSet('Invalid configSet specified')
        options=cfgParser.GetOptionList('HelpFiles')
        for option in options:
            value=cfgParser.Get('HelpFiles',option,default=';')
            if value.find(';')==-1: #malformed config entry with no ';'
                menuItem='' #make these empty
                helpPath='' #so value won't be added to list
            else: #config entry contains ';' as expected
                value=value.split(';')
                menuItem=value[0].strip()
                helpPath=value[1].strip()
            if menuItem and helpPath: #neither are empty strings
                helpSources.append( (menuItem,helpPath,option) )
        helpSources.sort(key=lambda x: x[2])
        return helpSources

    def GetAllExtraHelpSourcesList(self):
        """
        Returns a list of tuples containing the details of all additional help
        sources configured, or an empty list if there are none. Tuples are of
        the format returned by GetExtraHelpSourceList.
        """
        allHelpSources=( self.GetExtraHelpSourceList('default')+
                self.GetExtraHelpSourceList('user') )
        return allHelpSources

    def LoadCfgFiles(self):
        """
        load all configuration files.
        """
        for key in self.defaultCfg:
            self.defaultCfg[key].Load()
            self.userCfg[key].Load() #same keys

    def SaveUserCfgFiles(self):
        """
        write all loaded user configuration files back to disk
        """
        for key in self.userCfg:
            self.userCfg[key].Save()
# Module-level singleton: the rest of idlelib imports and shares this one
# IdleConf instance.
idleConf=IdleConf()

### module test
if __name__ == '__main__':
    def dumpCfg(cfg):
        """Print every section, option and value held by each parser in cfg."""
        print('\n',cfg,'\n')
        for key in cfg:
            sections=cfg[key].sections()
            print(key)
            print(sections)
            for section in sections:
                options=cfg[key].options(section)
                print(section)
                print(options)
                for option in options:
                    print(option, '=', cfg[key].Get(section,option))
    dumpCfg(idleConf.defaultCfg)
    dumpCfg(idleConf.userCfg)
    print(idleConf.userCfg['main'].Get('Theme','name'))
    #print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
|
fullfanta/mxnet | refs/heads/master | example/module/python_loss.py | 26 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import numpy as np
import mxnet as mx
import numba
import logging
# Vectorized NumPy implementation of the loss gradient.  The original wrapped
# a per-row Python loop in @numba.jit, but the body is dominated by
# .asnumpy() conversions and fancy indexing, which numba cannot compile in
# nopython mode -- it silently fell back to object mode with no speedup.
def mc_hinge_grad(scores, labels):
    """Gradient of the multiclass hinge loss.

    Parameters
    ----------
    scores : NDArray-like, shape (n, k)
        Raw class scores; anything exposing ``.asnumpy()``.
    labels : NDArray-like, shape (n,)
        Ground-truth class indices; anything exposing ``.asnumpy()``.

    Returns
    -------
    numpy.ndarray, shape (n, k)
        +1 at the highest-margin violating class, -1 at the true class for
        each row; all zeros for rows whose true class already wins.
    """
    scores = scores.asnumpy()
    labels = labels.asnumpy().astype(int)
    n = scores.shape[0]
    rows = np.arange(n)
    # margin[i, j] = 1 + s_ij - s_{i, y_i}; zero out the true class so a
    # correctly separated row picks its own label as argmax (net-zero grad).
    margins = 1.0 + scores - scores[rows, labels][:, None]
    margins[rows, labels] = 0.0
    ind_pred = margins.argmax(axis=1)
    grad = np.zeros_like(scores)
    # (row, label) and (row, ind_pred) index pairs are unique per row, so
    # plain fancy-index updates are safe; when ind_pred == label the -1/+1
    # cancel, matching the original loop.
    grad[rows, labels] -= 1
    grad[rows, ind_pred] += 1
    return grad
if __name__ == '__main__':
    # Training hyper-parameters; num_gpu < 1 falls back to a single CPU context.
    n_epoch = 10
    batch_size = 100
    num_gpu = 2
    contexts = mx.context.cpu() if num_gpu < 1 else [mx.context.gpu(i) for i in range(num_gpu)]
    # build a MLP module: 784 -> 128 -> 64 -> 10 with ReLU activations.
    data = mx.symbol.Variable('data')
    fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)
    act1 = mx.symbol.Activation(fc1, name='relu1', act_type="relu")
    fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64)
    act2 = mx.symbol.Activation(fc2, name='relu2', act_type="relu")
    fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)
    mlp = mx.mod.Module(fc3, context=contexts)
    # The loss is a separate module chained after the MLP; mc_hinge_grad
    # supplies the gradient of the multiclass hinge loss in Python.
    loss = mx.mod.PythonLossModule(grad_func=mc_hinge_grad)
    mod = mx.mod.SequentialModule() \
            .add(mlp) \
            .add(loss, take_labels=True, auto_wiring=True)
    # MNIST iterators; paths are relative to the working directory and the
    # data files must be downloaded beforehand.
    train_dataiter = mx.io.MNISTIter(
        image="data/train-images-idx3-ubyte",
        label="data/train-labels-idx1-ubyte",
        data_shape=(784,),
        batch_size=batch_size, shuffle=True, flat=True, silent=False, seed=10)
    val_dataiter = mx.io.MNISTIter(
        image="data/t10k-images-idx3-ubyte",
        label="data/t10k-labels-idx1-ubyte",
        data_shape=(784,),
        batch_size=batch_size, shuffle=True, flat=True, silent=False)
    logging.basicConfig(level=logging.DEBUG)
    mod.fit(train_dataiter, eval_data=val_dataiter,
            optimizer_params={'learning_rate':0.01, 'momentum': 0.9},
            num_epoch=n_epoch)
|
likit/medtech-ieqa | refs/heads/master | scripts/insert_customer_data.py | 1 | # -*- coding: utf-8 -*-
import sys
import json
from sqlalchemy import create_engine, MetaData, Table
# Module-level engine/connection: the CLI passes the SQLite DB path as argv[1].
db = create_engine('sqlite:///'+sys.argv[1])
connect = db.connect()
meta = MetaData()
def add_data(table, attrs):
    """Insert a row built from ``attrs`` into ``table``.

    Returns the primary key of the newly inserted row.  Uses the
    module-level ``connect`` connection.
    """
    stmt = table.insert().values(**attrs)
    outcome = connect.execute(stmt)
    return outcome.inserted_primary_key[0]
def insert_provinces(province_file=None, amphur_file=None):
    """Populate the provinces, amphurs and districts tables from JSON files.

    ``province_file`` maps province name -> list of amphur names;
    ``amphur_file`` maps amphur name -> list of district names.

    Raises:
        IOError: if either file path is missing.
    """
    if not province_file or not amphur_file:
        # Fix: was the Python 2-only ``raise IOError, "..."`` statement,
        # which is a SyntaxError under Python 3 even though the rest of
        # this script already uses print() functions.
        raise IOError("Province and amphur files are required.")
    # Reflect the three tables from the live database.
    province_table = Table('provinces', meta, autoload=True, autoload_with=db)
    amphur_table = Table('amphurs', meta, autoload=True, autoload_with=db)
    district_table = Table('districts', meta, autoload=True, autoload_with=db)
    # Fix: close the JSON files (the originals were opened and leaked).
    with open(province_file) as pf:
        provinces = json.load(pf)
    with open(amphur_file) as af:
        amphurs = json.load(af)
    for province in provinces:
        print(province)
        province_id = add_data(province_table, {'name': province})
        for amphur in provinces[province]:
            print('\t'+amphur)
            attrs = {'name': amphur,
                     'province_id': province_id,
                     'zip_code': '00000',  # placeholder; real codes not in the source data
                     }
            amphur_id = add_data(amphur_table, attrs)
            for district in amphurs[amphur]:
                print('\t\t'+district)
                _ = add_data(district_table, {'name': district, 'amphur_id': amphur_id})
if __name__=='__main__':
    # argv: <sqlite-db-path> <province-json> <amphur-json>
    insert_provinces(sys.argv[2], sys.argv[3])
|
manashmndl/scikit-learn | refs/heads/master | examples/neighbors/plot_classification.py | 287 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh
# Create color maps: light for decision regions, bold for training points.
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Fit and plot one figure per weighting scheme.
for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))
plt.show()
|
supersven/intellij-community | refs/heads/master | python/testData/completion/alias.after.py | 83 | import datetime as timedate
timedate |
SlimRoms/android_external_chromium_org | refs/heads/lp5.0 | content/test/gpu/page_sets/page_set_unittest.py | 51 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.unittest import page_set_smoke_test
class PageSetUnitTest(page_set_smoke_test.PageSetSmokeTest):
  """Smoke-checks every page set stored alongside this test module."""

  def testSmoke(self):
    # The page sets live in this file's directory; its parent is the
    # top-level dir the smoke test needs for module resolution.
    here = os.path.dirname(os.path.realpath(__file__))
    self.RunSmokeTest(here, os.path.dirname(here))
|
zozo123/buildbot | refs/heads/master | master/buildbot/test/unit/test_data_buildrequests.py | 1 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import datetime
import mock
from buildbot.data import buildrequests
from buildbot.data import resultspec
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import endpoint
from buildbot.test.util import interfaces
from buildbot.util import UTC
from twisted.internet import defer
from twisted.internet import reactor
from twisted.trial import unittest
class TestBuildRequestEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for GET of a single buildrequest resource."""

    endpointClass = buildrequests.BuildRequestEndpoint
    resourceTypeClass = buildrequests.BuildRequest
    # Fixed aware timestamps and their precomputed epoch equivalents: the
    # fake db stores epochs, the data API must return datetimes.
    CLAIMED_AT = datetime.datetime(1978, 6, 15, 12, 31, 15, tzinfo=UTC)
    CLAIMED_AT_EPOCH = 266761875
    SUBMITTED_AT = datetime.datetime(1979, 6, 15, 12, 31, 15, tzinfo=UTC)
    SUBMITTED_AT_EPOCH = 298297875
    COMPLETE_AT = datetime.datetime(1980, 6, 15, 12, 31, 15, tzinfo=UTC)
    COMPLETE_AT_EPOCH = 329920275

    def setUp(self):
        # One builder, one master, one slave, one buildset and a single
        # buildrequest (id=44) to exercise the endpoint against.
        self.setUpEndpoint()
        self.db.insertTestData([
            fakedb.Builder(id=77, name='bbb'),
            fakedb.Master(id=fakedb.FakeBuildRequestsComponent.MASTER_ID),
            fakedb.Buildslave(id=13, name='sl'),
            fakedb.Buildset(id=8822),
            fakedb.BuildRequest(id=44, buildsetid=8822, buildername='bbb',
                                priority=7, submitted_at=self.SUBMITTED_AT_EPOCH,
                                waited_for=1),
        ])

    def tearDown(self):
        self.tearDownEndpoint()

    @defer.inlineCallbacks
    def testGetExisting(self):
        """A claimed + completed request comes back fully formatted."""
        self.db.buildrequests.claimBuildRequests([44], claimed_at=self.CLAIMED_AT)
        self.db.buildrequests.completeBuildRequests([44], 75, complete_at=self.COMPLETE_AT)
        buildrequest = yield self.callGet(('buildrequests', 44))
        self.validateData(buildrequest)
        # check data formatting:
        self.assertEqual(buildrequest['buildrequestid'], 44)
        self.assertEqual(buildrequest['complete'], True)
        self.assertEqual(buildrequest['builderid'], 77)
        self.assertEqual(buildrequest['buildername'], 'bbb')
        self.assertEqual(buildrequest['waited_for'], True)
        self.assertEqual(buildrequest['claimed_at'], self.CLAIMED_AT)
        self.assertEqual(buildrequest['results'], 75)
        self.assertEqual(buildrequest['claimed_by_masterid'],
                         fakedb.FakeBuildRequestsComponent.MASTER_ID)
        self.assertEqual(buildrequest['claimed'], True)
        self.assertEqual(buildrequest['submitted_at'], self.SUBMITTED_AT)
        self.assertEqual(buildrequest['complete_at'], self.COMPLETE_AT)
        self.assertEqual(buildrequest['buildsetid'], 8822)
        self.assertEqual(buildrequest['priority'], 7)

    @defer.inlineCallbacks
    def testGetMissing(self):
        """An unknown buildrequest id yields None, not an error."""
        buildrequest = yield self.callGet(('buildrequests', 9999))
        self.assertEqual(buildrequest, None)
class TestBuildRequestsEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for GET of buildrequest collections (with path and filters)."""

    endpointClass = buildrequests.BuildRequestsEndpoint
    resourceTypeClass = buildrequests.BuildRequest
    # Fixed aware timestamps and their precomputed epoch equivalents.
    CLAIMED_AT = datetime.datetime(1978, 6, 15, 12, 31, 15, tzinfo=UTC)
    CLAIMED_AT_EPOCH = 266761875
    SUBMITTED_AT = datetime.datetime(1979, 6, 15, 12, 31, 15, tzinfo=UTC)
    SUBMITTED_AT_EPOCH = 298297875
    COMPLETE_AT = datetime.datetime(1980, 6, 15, 12, 31, 15, tzinfo=UTC)
    COMPLETE_AT_EPOCH = 329920275

    def setUp(self):
        # Two builders: 44/45 belong to builder 'bbb' (id 77), 46 to 'ccc'
        # (id 78), so per-builder path filtering can be verified.
        self.setUpEndpoint()
        self.db.insertTestData([
            fakedb.Builder(id=77, name='bbb'),
            fakedb.Builder(id=78, name='ccc'),
            fakedb.Master(id=fakedb.FakeBuildRequestsComponent.MASTER_ID),
            fakedb.Buildslave(id=13, name='sl'),
            fakedb.Buildset(id=8822),
            fakedb.BuildRequest(id=44, buildsetid=8822, buildername='bbb',
                                priority=7, submitted_at=self.SUBMITTED_AT_EPOCH,
                                waited_for=1),
            fakedb.BuildRequest(id=45, buildsetid=8822, buildername='bbb'),
            fakedb.BuildRequest(id=46, buildsetid=8822, buildername='ccc'),
        ])

    def tearDown(self):
        self.tearDownEndpoint()

    @defer.inlineCallbacks
    def testGetAll(self):
        # /buildrequests returns every request regardless of builder.
        buildrequests = yield self.callGet(('buildrequests',))
        [self.validateData(br) for br in buildrequests]
        self.assertEqual(sorted([br['buildrequestid'] for br in buildrequests]),
                         [44, 45, 46])

    @defer.inlineCallbacks
    def testGetBuildername(self):
        # Path filtering by builder *name*.
        buildrequests = yield self.callGet(('builders', 'ccc', 'buildrequests'))
        [self.validateData(br) for br in buildrequests]
        self.assertEqual(sorted([br['buildrequestid'] for br in buildrequests]), [46])

    @defer.inlineCallbacks
    def testGetNoBuildRequest(self):
        # Unknown builder name -> empty list, not an error.
        buildrequests = yield self.callGet(('builders', 'ddd', 'buildrequests'))
        self.assertEqual(buildrequests, [])

    @defer.inlineCallbacks
    def testGetBuilderid(self):
        # Path filtering by numeric builder id.
        buildrequests = yield self.callGet(('builders', 78, 'buildrequests'))
        [self.validateData(br) for br in buildrequests]
        self.assertEqual(sorted([br['buildrequestid'] for br in buildrequests]), [46])

    @defer.inlineCallbacks
    def testGetUnknownBuilderid(self):
        buildrequests = yield self.callGet(('builders', 79, 'buildrequests'))
        self.assertEqual(buildrequests, [])

    @defer.inlineCallbacks
    def testGetNoFilters(self):
        # With no result-spec filters, the db API is called with all-None.
        getBuildRequestsMock = mock.Mock(return_value={})
        self.patch(self.master.db.buildrequests, 'getBuildRequests', getBuildRequestsMock)
        yield self.callGet(('buildrequests',))
        getBuildRequestsMock.assert_called_with(
            buildername=None,
            bsid=None,
            complete=None,
            claimed=None)

    @defer.inlineCallbacks
    def testGetFilters(self):
        # Only complete/claimed/buildsetid are pushed down to the db layer;
        # branch and repository filters are applied post-hoc, not passed on.
        getBuildRequestsMock = mock.Mock(return_value={})
        self.patch(self.master.db.buildrequests, 'getBuildRequests', getBuildRequestsMock)
        f1 = resultspec.Filter('complete', 'eq', [False])
        f2 = resultspec.Filter('claimed', 'eq', [True])
        f3 = resultspec.Filter('buildsetid', 'eq', [55])
        f4 = resultspec.Filter('branch', 'eq', ['mybranch'])
        f5 = resultspec.Filter('repository', 'eq', ['myrepo'])
        yield self.callGet(
            ('buildrequests',),
            resultSpec=resultspec.ResultSpec(filters=[f1, f2, f3, f4, f5]))
        getBuildRequestsMock.assert_called_with(
            buildername=None,
            bsid=55,
            complete=False,
            claimed=True)

    @defer.inlineCallbacks
    def testGetClaimedByMasterIdFilters(self):
        # claimed=True combined with claimed_by_masterid collapses into a
        # single claimed=<masterid> argument at the db layer.
        getBuildRequestsMock = mock.Mock(return_value={})
        self.patch(self.master.db.buildrequests, 'getBuildRequests', getBuildRequestsMock)
        f1 = resultspec.Filter('claimed', 'eq', [True])
        f2 = resultspec.Filter('claimed_by_masterid', 'eq',
                               [fakedb.FakeBuildRequestsComponent.MASTER_ID])
        yield self.callGet(
            ('buildrequests',),
            resultSpec=resultspec.ResultSpec(filters=[f1, f2]))
        getBuildRequestsMock.assert_called_with(
            buildername=None,
            bsid=None,
            complete=None,
            claimed=fakedb.FakeBuildRequestsComponent.MASTER_ID)
class TestBuildRequest(interfaces.InterfaceTests, unittest.TestCase):
    """Tests for the BuildRequest resource-type update methods.

    Each update method (claim/reclaim/unclaim/complete/unclaimExpired) is
    checked three ways: its signature matches the fake data API, the fake
    data API itself behaves, and the real method passes through to the db
    layer (via doTestCallthrough) with correct result/exception mapping.
    """

    CLAIMED_AT = datetime.datetime(1978, 6, 15, 12, 31, 15, tzinfo=UTC)
    COMPLETE_AT = datetime.datetime(1980, 6, 15, 12, 31, 15, tzinfo=UTC)

    # Stand-in for an unexpected error escaping the db layer.
    class dBLayerException(Exception):
        pass

    def setUp(self):
        self.master = fakemaster.make_master(testcase=self,
                                             wantMq=True, wantDb=True, wantData=True)
        self.rtype = buildrequests.BuildRequest(self.master)

    @defer.inlineCallbacks
    def doTestCallthrough(self, dbMethodName, dbMockedMethod, method,
                          methodargs=None, methodkwargs=None,
                          expectedRes=None, expectedException=None,
                          expectedDbApiCalled=True):
        """Patch the named db method with a mock, call `method`, and check
        either the returned value (expectedRes) or that expectedException
        is raised.  NOTE: expectedRes is ignored whenever expectedException
        is set.  expectedDbApiCalled=False asserts the empty-brids
        short-circuit (db layer never reached... the assert below would
        fail on a call with different args).
        """
        self.patch(self.master.db.buildrequests, dbMethodName, dbMockedMethod)
        if expectedException is not None:
            try:
                yield method(*methodargs, **methodkwargs)
            except expectedException:
                pass
            except Exception as e:
                self.fail('%s exception should be raised, but got %r' % (expectedException, e))
            else:
                self.fail('%s exception should be raised' % (expectedException,))
        else:
            res = yield method(*methodargs, **methodkwargs)
            self.assertEqual(res, expectedRes)
        if expectedDbApiCalled:
            dbMockedMethod.assert_called_with(*methodargs, **methodkwargs)

    def testSignatureClaimBuildRequests(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.claimBuildRequests,  # fake
            self.rtype.claimBuildRequests)  # real
        def claimBuildRequests(self, brids, claimed_at=None, _reactor=reactor):
            pass

    @defer.inlineCallbacks
    def testFakeDataClaimBuildRequests(self):
        self.master.db.insertTestData([
            fakedb.BuildRequest(id=44, buildsetid=8822),
            fakedb.BuildRequest(id=55, buildsetid=8822),
        ])
        res = yield self.master.data.updates.claimBuildRequests(
            [44, 55],
            claimed_at=self.CLAIMED_AT,
            _reactor=reactor)
        self.assertTrue(res)

    @defer.inlineCallbacks
    def testFakeDataClaimBuildRequestsNoneArgs(self):
        # Claiming an empty list trivially succeeds.
        res = yield self.master.data.updates.claimBuildRequests([])
        self.assertTrue(res)

    @defer.inlineCallbacks
    def testClaimBuildRequests(self):
        claimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('claimBuildRequests', claimBuildRequestsMock,
                                     self.rtype.claimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs=dict(claimed_at=self.CLAIMED_AT,
                                                       _reactor=reactor),
                                     expectedRes=True,
                                     expectedException=None)

    @defer.inlineCallbacks
    def testClaimBuildRequestsNoBrids(self):
        # Empty brids short-circuits to True without touching the db.
        claimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('claimBuildRequests', claimBuildRequestsMock,
                                     self.rtype.claimBuildRequests,
                                     methodargs=[[]],
                                     methodkwargs=dict(),
                                     expectedRes=True,
                                     expectedException=None,
                                     expectedDbApiCalled=False)

    @defer.inlineCallbacks
    def testClaimBuildRequestsAlreadyClaimed(self):
        # AlreadyClaimedError is swallowed and reported as result False.
        claimBuildRequestsMock = mock.Mock(
            side_effect=buildrequests.AlreadyClaimedError('oups ! buildrequest already claimed'))
        yield self.doTestCallthrough('claimBuildRequests', claimBuildRequestsMock,
                                     self.rtype.claimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs=dict(claimed_at=self.CLAIMED_AT,
                                                       _reactor=reactor),
                                     expectedRes=False,
                                     expectedException=None)

    @defer.inlineCallbacks
    def testClaimBuildRequestsUnknownException(self):
        # Anything other than AlreadyClaimedError must propagate.
        claimBuildRequestsMock = mock.Mock(
            side_effect=self.dBLayerException('oups ! unknown error'))
        yield self.doTestCallthrough('claimBuildRequests', claimBuildRequestsMock,
                                     self.rtype.claimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs=dict(claimed_at=self.CLAIMED_AT,
                                                       _reactor=reactor),
                                     expectedRes=None,
                                     expectedException=self.dBLayerException)

    def testSignatureReclaimBuildRequests(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.reclaimBuildRequests,  # fake
            self.rtype.reclaimBuildRequests)  # real
        def reclaimBuildRequests(self, brids, _reactor=reactor):
            pass

    @defer.inlineCallbacks
    def testFakeDataReclaimBuildRequests(self):
        res = yield self.master.data.updates.reclaimBuildRequests(
            [44, 55],
            _reactor=reactor)
        self.assertTrue(res)

    @defer.inlineCallbacks
    def testFakeDataReclaimBuildRequestsNoneArgs(self):
        res = yield self.master.data.updates.reclaimBuildRequests([])
        self.assertTrue(res)

    @defer.inlineCallbacks
    def testReclaimBuildRequests(self):
        reclaimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('reclaimBuildRequests',
                                     reclaimBuildRequestsMock,
                                     self.rtype.reclaimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs=dict(_reactor=reactor),
                                     expectedRes=True,
                                     expectedException=None)

    @defer.inlineCallbacks
    def testReclaimBuildRequestsNoBrids(self):
        reclaimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('reclaimBuildRequests',
                                     reclaimBuildRequestsMock,
                                     self.rtype.reclaimBuildRequests,
                                     methodargs=[[]],
                                     methodkwargs=dict(),
                                     expectedRes=True,
                                     expectedException=None,
                                     expectedDbApiCalled=False)

    @defer.inlineCallbacks
    def testReclaimBuildRequestsAlreadyClaimed(self):
        reclaimBuildRequestsMock = mock.Mock(
            side_effect=buildrequests.AlreadyClaimedError('oups ! buildrequest already claimed'))
        yield self.doTestCallthrough('reclaimBuildRequests',
                                     reclaimBuildRequestsMock,
                                     self.rtype.reclaimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs=dict(_reactor=reactor),
                                     expectedRes=False,
                                     expectedException=None)

    @defer.inlineCallbacks
    def testReclaimBuildRequestsUnknownException(self):
        reclaimBuildRequestsMock = mock.Mock(
            side_effect=self.dBLayerException('oups ! unknown error'))
        # NOTE(review): expectedRes is ignored by doTestCallthrough whenever
        # expectedException is set; the claim-side twin of this test passes
        # None here -- True looks like a copy/paste slip, not a behavior bug.
        yield self.doTestCallthrough('reclaimBuildRequests',
                                     reclaimBuildRequestsMock,
                                     self.rtype.reclaimBuildRequests,
                                     methodargs=[[44]],
                                     methodkwargs=dict(_reactor=reactor),
                                     expectedRes=True,
                                     expectedException=self.dBLayerException)

    def testSignatureUnclaimBuildRequests(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.unclaimBuildRequests,  # fake
            self.rtype.unclaimBuildRequests)  # real
        def unclaimBuildRequests(self, brids):
            pass

    @defer.inlineCallbacks
    def testFakeDataUnclaimBuildRequests(self):
        res = yield self.master.data.updates.unclaimBuildRequests([44, 55])
        self.assertEqual(res, None)

    @defer.inlineCallbacks
    def testFakeDataUnclaimBuildRequestsNoneArgs(self):
        res = yield self.master.data.updates.unclaimBuildRequests([])
        self.assertEqual(res, None)

    @defer.inlineCallbacks
    def testUnclaimBuildRequests(self):
        unclaimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('unclaimBuildRequests',
                                     unclaimBuildRequestsMock,
                                     self.rtype.unclaimBuildRequests,
                                     methodargs=[[46]],
                                     methodkwargs=dict(),
                                     expectedRes=None,
                                     expectedException=None)

    @defer.inlineCallbacks
    def testUnclaimBuildRequestsNoBrids(self):
        unclaimBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('unclaimBuildRequests',
                                     unclaimBuildRequestsMock,
                                     self.rtype.unclaimBuildRequests,
                                     methodargs=[[]],
                                     methodkwargs=dict(),
                                     expectedRes=None,
                                     expectedException=None,
                                     expectedDbApiCalled=False)

    def testSignatureCompleteBuildRequests(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.completeBuildRequests,  # fake
            self.rtype.completeBuildRequests)  # real
        def completeBuildRequests(self, brids, results, complete_at=None,
                                  _reactor=reactor):
            pass

    @defer.inlineCallbacks
    def testFakeDataCompleteBuildRequests(self):
        res = yield self.master.data.updates.completeBuildRequests(
            [44, 55],
            12,
            complete_at=self.COMPLETE_AT,
            _reactor=reactor)
        self.assertTrue(res)

    @defer.inlineCallbacks
    def testFakeDataCompleteBuildRequestsNoneArgs(self):
        res = yield self.master.data.updates.completeBuildRequests([], 0)
        self.assertTrue(res)

    @defer.inlineCallbacks
    def testCompleteBuildRequests(self):
        completeBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('completeBuildRequests',
                                     completeBuildRequestsMock,
                                     self.rtype.completeBuildRequests,
                                     methodargs=[[46], 12],
                                     methodkwargs=dict(complete_at=self.COMPLETE_AT,
                                                       _reactor=reactor),
                                     expectedRes=True,
                                     expectedException=None)

    @defer.inlineCallbacks
    def testCompleteBuildRequestsNoBrids(self):
        completeBuildRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('completeBuildRequests',
                                     completeBuildRequestsMock,
                                     self.rtype.completeBuildRequests,
                                     methodargs=[[], 0],
                                     methodkwargs=dict(),
                                     expectedRes=True,
                                     expectedException=None,
                                     expectedDbApiCalled=False)

    @defer.inlineCallbacks
    def testCompleteBuildRequestsNotClaimed(self):
        # NotClaimedError is swallowed and reported as result False.
        completeBuildRequestsMock = mock.Mock(
            side_effect=buildrequests.NotClaimedError('oups ! buildrequest not claimed'))
        yield self.doTestCallthrough('completeBuildRequests',
                                     completeBuildRequestsMock,
                                     self.rtype.completeBuildRequests,
                                     methodargs=[[46], 12],
                                     methodkwargs=dict(complete_at=self.COMPLETE_AT,
                                                       _reactor=reactor),
                                     expectedRes=False,
                                     expectedException=None)

    @defer.inlineCallbacks
    def testCompleteBuildRequestsUnknownException(self):
        completeBuildRequestsMock = mock.Mock(
            side_effect=self.dBLayerException('oups ! unknown error'))
        yield self.doTestCallthrough('completeBuildRequests',
                                     completeBuildRequestsMock,
                                     self.rtype.completeBuildRequests,
                                     methodargs=[[46], 12],
                                     methodkwargs=dict(complete_at=self.COMPLETE_AT,
                                                       _reactor=reactor),
                                     expectedRes=None,
                                     expectedException=self.dBLayerException)

    # NOTE(review): "Expiredd" typo in this test name (harmless; still
    # discovered by trial).
    def testSignatureUnclaimExpireddRequests(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.unclaimExpiredRequests,  # fake
            self.rtype.unclaimExpiredRequests)  # real
        def unclaimExpiredRequests(self, old, _reactor=reactor):
            pass

    @defer.inlineCallbacks
    def testFakeDataUnclaimExpiredBuildRequests(self):
        res = yield self.master.data.updates.unclaimExpiredRequests(
            600,
            _reactor=reactor)
        self.assertEqual(res, None)

    @defer.inlineCallbacks
    def testFakeDataUnclaimExpiredRequestsNoneArgs(self):
        res = yield self.master.data.updates.unclaimExpiredRequests(0)
        self.assertEqual(res, None)

    @defer.inlineCallbacks
    def testUnclaimExpiredRequests(self):
        unclaimExpiredRequestsMock = mock.Mock(return_value=defer.succeed(None))
        yield self.doTestCallthrough('unclaimExpiredRequests',
                                     unclaimExpiredRequestsMock,
                                     self.rtype.unclaimExpiredRequests,
                                     methodargs=[600],
                                     methodkwargs=dict(_reactor=reactor),
                                     expectedRes=None,
                                     expectedException=None)
|
ardydedase/amadeus-python | refs/heads/master | runtests.py | 2 | import unittest
# Collect the project's test suites and run them with the text runner.
testmodules = [
    'tests.test_amadeus'
]
suite = unittest.TestSuite()
for t in testmodules:
    try:
        # If the module defines a suite() function, call it to get the suite.
        mod = __import__(t, globals(), locals(), ['suite'])
        suitefn = getattr(mod, 'suite')
        suite.addTest(suitefn())
    except (ImportError, AttributeError):
        # else, just load all the test cases from the module.
        suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))
unittest.TextTestRunner().run(suite)
evandrix/zxing | refs/heads/master | cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/filesystem.py | 34 | """SCons.Tool.filesystem
Tool-specific initialization for the filesystem tools.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/filesystem.py 5023 2010/06/14 22:05:46 scons"
import SCons
from SCons.Tool.install import copyFunc
copyToBuilder, copyAsBuilder = None, None
def copyto_emitter(target, source, env):
    """Expand each target directory into per-source File nodes.

    Every entry in ``target`` is assumed to be a directory node; it is
    replaced by one File node per source, located under that directory.
    The source list is returned unchanged.
    """
    expanded = []
    for directory in target:
        expanded.extend(directory.File(str(src)) for src in source)
    return (expanded, source)
def copy_action_func(target, source, env):
    """Copy each source node onto its paired target node.

    Returns 0 on success; stops and returns 1 as soon as copyFunc
    reports a failure (SCons action convention: non-zero == error).
    """
    assert len(target) == len(source), "\ntarget: %s\nsource: %s" % (list(map(str, target)), list(map(str, source)))
    for dst, src in zip(target, source):
        if copyFunc(dst.get_path(), src.get_path(), env):
            return 1
    return 0
def copy_action_str(target, source, env):
    """Render the user-visible message for a copy action from env['COPYSTR']."""
    template = env['COPYSTR']
    return env.subst_target_source(template, 0, target, source)
# Shared Action object: copy_action_func does the work, copy_action_str
# renders the progress message.
copy_action = SCons.Action.Action( copy_action_func, copy_action_str )
def generate(env):
    """Add the CopyTo and CopyAs builders and the COPYSTR message to env.

    The Builder objects are created lazily and cached in module globals so
    that every construction environment shares the same instances.
    """
    try:
        # Probe for the builders; KeyError means they are not installed yet.
        env['BUILDERS']['CopyTo']
        env['BUILDERS']['CopyAs']
    except KeyError:
        # Fix: was the Python 2-only ``except KeyError, e:`` form (a
        # SyntaxError on Python 3); the bound name was never used anyway.
        global copyToBuilder
        if copyToBuilder is None:
            copyToBuilder = SCons.Builder.Builder(
                action = copy_action,
                target_factory = env.fs.Dir,
                source_factory = env.fs.Entry,
                multi = 1,
                emitter = [ copyto_emitter, ] )
        global copyAsBuilder
        if copyAsBuilder is None:
            copyAsBuilder = SCons.Builder.Builder(
                action = copy_action,
                target_factory = env.fs.Entry,
                source_factory = env.fs.Entry )
        env['BUILDERS']['CopyTo'] = copyToBuilder
        env['BUILDERS']['CopyAs'] = copyAsBuilder
    env['COPYSTR'] = 'Copy file(s): "$SOURCES" to "$TARGETS"'
def exists(env):
    """The filesystem tools rely only on Python itself, so always available."""
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
hoh/pytest-django | refs/heads/master | tests/test_django_settings_module.py | 8 | """Tests which check the various ways you can set DJANGO_SETTINGS_MODULE
If these tests fail you probably forgot to run "python setup.py develop".
"""
import django
import pytest
BARE_SETTINGS = '''
# At least one database must be configured
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
},
}
SECRET_KEY = 'foobar'
'''
def test_ds_env(testdir, monkeypatch):
    """DJANGO_SETTINGS_MODULE set in the environment is used as-is."""
    monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tpkg.settings_env')
    pkg = testdir.mkpydir('tpkg')
    settings = pkg.join('settings_env.py')
    settings.write(BARE_SETTINGS)
    testdir.makepyfile("""
        import os
        def test_settings():
            assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_env'
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(['*1 passed*'])
    assert result.ret == 0
def test_ds_ini(testdir, monkeypatch):
    """DJANGO_SETTINGS_MODULE from the environment overrides the ini value."""
    monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tpkg.settings_ini')
    testdir.makeini("""\
       [pytest]
       DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
    """)
    pkg = testdir.mkpydir('tpkg')
    settings = pkg.join('settings_ini.py')
    settings.write(BARE_SETTINGS)
    testdir.makepyfile("""
        import os
        def test_ds():
            assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_ini'
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(['*1 passed*'])
    assert result.ret == 0
def test_ds_option(testdir, monkeypatch):
    """The --ds command-line option overrides both the env var and the ini."""
    monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'DO_NOT_USE_env')
    testdir.makeini("""
       [pytest]
       DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
    """)
    pkg = testdir.mkpydir('tpkg')
    settings = pkg.join('settings_opt.py')
    settings.write(BARE_SETTINGS)
    testdir.makepyfile("""
        import os
        def test_ds():
            assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_opt'
    """)
    result = testdir.runpytest('--ds=tpkg.settings_opt')
    result.stdout.fnmatch_lines(['*1 passed*'])
    assert result.ret == 0
def test_ds_non_existent(testdir, monkeypatch):
    """
    Make sure we do not fail with INTERNALERROR if an incorrect
    DJANGO_SETTINGS_MODULE is given: the ImportError must surface as a
    normal error message on stderr instead.
    """
    monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'DOES_NOT_EXIST')
    testdir.makepyfile('def test_ds(): pass')
    result = testdir.runpytest()
    result.stderr.fnmatch_lines(["*ImportError:*DOES_NOT_EXIST*"])
    assert result.ret != 0
def test_ds_after_user_conftest(testdir, monkeypatch):
    """
    Test that the settings module can be imported, after pytest has adjusted
    the sys.path (the settings file lives in the test directory itself).
    """
    monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'settings_after_conftest')
    testdir.makepyfile('def test_ds(): pass')
    testdir.makepyfile(settings_after_conftest="SECRET_KEY='secret'")
    # testdir.makeconftest("import sys; print(sys.path)")  # debugging aid
    result = testdir.runpytest('-v')
    result.stdout.fnmatch_lines(['*1 passed*'])
    assert result.ret == 0
def test_ds_in_pytest_configure(testdir, monkeypatch):
    """Setting DJANGO_SETTINGS_MODULE from a conftest pytest_configure works."""
    monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
    pkg = testdir.mkpydir('tpkg')
    settings = pkg.join('settings_ds.py')
    settings.write(BARE_SETTINGS)
    testdir.makeconftest("""
        import os
        from django.conf import settings
        def pytest_configure():
            if not settings.configured:
                os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                                      'tpkg.settings_ds')
    """)
    r = testdir.runpytest()
    assert r.ret == 0
def test_django_settings_configure(testdir, monkeypatch):
    """
    Make sure Django can be configured without setting
    DJANGO_SETTINGS_MODULE altogether, relying on calling
    django.conf.settings.configure() and then invoking pytest.
    """
    monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
    # The configure() call must happen before pytest.main(), so it lives in
    # a small runner script that is executed with runpython below.
    p = testdir.makepyfile(run="""
            from django.conf import settings
            settings.configure(SECRET_KEY='set from settings.configure()',
                               DATABASES={'default': {
                                   'ENGINE': 'django.db.backends.sqlite3',
                                   'NAME': ':memory:'
                               }},
                               INSTALLED_APPS=['django.contrib.auth',
                                               'django.contrib.contenttypes',])
            import pytest
            pytest.main()
    """)
    testdir.makepyfile("""
        import pytest
        from django.conf import settings
        from django.test.client import RequestFactory
        from django.test import TestCase
        from django.contrib.auth.models import User
        def test_access_to_setting():
            assert settings.SECRET_KEY == 'set from settings.configure()'
        # This test requires Django to be properly configured to be run
        def test_rf(rf):
            assert isinstance(rf, RequestFactory)
        # This tests that pytest-django actually configures the database
        # according to the settings above
        class ATestCase(TestCase):
            def test_user_count(self):
                assert User.objects.count() == 0
        @pytest.mark.django_db
        def test_user_count():
            assert User.objects.count() == 0
    """)
    result = testdir.runpython(p)
    result.stdout.fnmatch_lines([
        "*4 passed*",
    ])
def test_settings_in_hook(testdir, monkeypatch):
    """settings.configure() called from conftest pytest_configure works."""
    monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
    testdir.makeconftest("""
        from django.conf import settings
        def pytest_configure():
            settings.configure(SECRET_KEY='set from pytest_configure',
                               DATABASES={'default': {
                                   'ENGINE': 'django.db.backends.sqlite3',
                                   'NAME': ':memory:'}},
                               INSTALLED_APPS=['django.contrib.auth',
                                               'django.contrib.contenttypes',])
    """)
    testdir.makepyfile("""
        import pytest
        from django.conf import settings
        from django.contrib.auth.models import User
        def test_access_to_setting():
            assert settings.SECRET_KEY == 'set from pytest_configure'
        @pytest.mark.django_db
        def test_user_count():
            assert User.objects.count() == 0
    """)
    r = testdir.runpytest()
    assert r.ret == 0
def test_django_not_loaded_without_settings(testdir, monkeypatch):
    """
    Make sure Django is not imported at all if no Django settings is specified.
    """
    monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
    # The inner test checks sys.modules directly: if the plugin imported
    # Django eagerly, 'django' would already be present there.
    testdir.makepyfile("""
        import sys
        def test_settings():
            assert 'django' not in sys.modules
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(['*1 passed*'])
    assert result.ret == 0
def test_debug_false(testdir, monkeypatch):
    """pytest-django must force DEBUG to False during test runs,
    even when the project deliberately configures DEBUG=True."""
    monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
    # Note: DEBUG=True here is intentional — the assertion below checks
    # that the plugin overrides it.
    testdir.makeconftest("""
        from django.conf import settings
        def pytest_configure():
            settings.configure(SECRET_KEY='set from pytest_configure',
                               DEBUG=True,
                               DATABASES={'default': {
                                   'ENGINE': 'django.db.backends.sqlite3',
                                   'NAME': ':memory:'}},
                               INSTALLED_APPS=['django.contrib.auth',
                                               'django.contrib.contenttypes',])
    """)
    testdir.makepyfile("""
        from django.conf import settings
        def test_debug_is_false():
            assert settings.DEBUG is False
    """)
    r = testdir.runpytest()
    assert r.ret == 0
@pytest.mark.skipif(not hasattr(django, 'setup'),
                    reason="This Django version does not support app loading")
@pytest.mark.django_project(extra_settings="""
    INSTALLED_APPS = [
        'tpkg.app.apps.TestApp',
    ]
""")
def test_django_setup_sequence(django_testdir):
    """Check the ordering of Django's app-registry population.

    Each stage prints whether apps._lock is held (populating) and
    whether the registry reports ready, so the fnmatch assertions below
    pin down when models are imported relative to django.setup().
    """
    # AppConfig.ready() should run while the registry is still populating.
    django_testdir.create_app_file("""
        from django.apps import apps, AppConfig
        class TestApp(AppConfig):
            name = 'tpkg.app'
            def ready(self):
                print ('READY(): populating=%r' % apps._lock.locked())
    """, 'apps.py')
    # models.py is imported during population as well.
    django_testdir.create_app_file("""
        from django.apps import apps
        print ('IMPORT: populating=%r,ready=%r' % (
            apps._lock.locked(), apps.ready))
        SOME_THING = 1234
    """, 'models.py')
    django_testdir.create_app_file("", '__init__.py')
    # By the time a test body runs, setup must be finished.
    django_testdir.makepyfile("""
        from django.apps import apps
        from tpkg.app.models import SOME_THING
        def test_anything():
            print ('TEST: populating=%r,ready=%r' % (
                apps._lock.locked(), apps.ready))
    """)
    result = django_testdir.runpytest('-s', '--tb=line')
    result.stdout.fnmatch_lines(['*IMPORT: populating=True,ready=False*'])
    result.stdout.fnmatch_lines(['*READY(): populating=True*'])
    result.stdout.fnmatch_lines(['*TEST: populating=False,ready=True*'])
    assert result.ret == 0
def test_no_ds_but_django_imported(testdir, monkeypatch):
    """pytest-django should not bail out, if "django" has been imported
    somewhere, e.g. via pytest-splinter."""
    monkeypatch.delenv('DJANGO_SETTINGS_MODULE')
    # 'import django' here simulates another plugin having pulled Django
    # in without configuring it; settings must still count as unset.
    testdir.makepyfile("""
        import os
        import django
        from pytest_django.lazy_django import django_settings_is_configured
        def test_django_settings_is_configured():
            assert django_settings_is_configured() is False
        def test_env():
            assert 'DJANGO_SETTINGS_MODULE' not in os.environ
        def test_cfg(pytestconfig):
            assert pytestconfig.option.ds is None
    """)
    r = testdir.runpytest('-s')
    assert r.ret == 0
|
yanheven/nova | refs/heads/master | nova/openstack/common/report/views/xml/__init__.py | 82 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides basic XML views
This module provides several basic views which serialize
models into XML.
"""
|
kishori82/MetaPathways_Python.3.0 | refs/heads/master | libs/python_modules/parsers/interpro.py | 3 | """This module defines classes for working with InterProScan output."""
import datetime
import re
class InterProScanParser(object):
    """Parse raw (tab-separated) InterProScan output into row dicts."""

    # Matches one GO annotation: "<namespace>: <term name> (GO:NNNNNNN)".
    GO_TERM_REGEXP = re.compile(r'^(.*?): (.*?) \((GO:\d{7})\)$')

    def __init__(self, contents):
        # Full text of an InterProScan result file.
        self.contents = contents

    def __get_rows(self):
        """Yield one dict per non-empty input line.

        Field conversions: coordinates/unknown_1 -> int, e-value -> float,
        true-positive flag -> bool, date -> datetime, NULL -> None for the
        InterPro id and protein name, GO annotations -> set of 3-tuples.
        """
        for raw_line in self.contents.split('\n'):
            if not raw_line:
                continue
            (locus_tag, uid, unknown_1, database, accession_number,
             classification, start_coordinate, end_coordinate, e_value,
             true_positive, date_text, interpro_id, protein_name,
             go_text) = raw_line.split('\t')
            # Collect (namespace, term, GO id) triples; entries that do
            # not look like GO terms are silently skipped.
            parsed_go_terms = set()
            for candidate in go_text.split(', '):
                hit = self.GO_TERM_REGEXP.match(candidate)
                if hit:
                    parsed_go_terms.add(hit.groups())
            yield {
                'locus_tag': locus_tag,
                'uid': uid,
                'unknown_1': int(unknown_1),
                'database': database,
                'accession_number': accession_number,
                'classification': classification,
                'coordinates': (int(start_coordinate), int(end_coordinate)),
                'expect_value': float(e_value),
                'true_positive': true_positive == 'T',
                # e.g. "01-Jan-2020"; upper-cased before parsing.
                'date': datetime.datetime.strptime(date_text.upper(),
                                                   '%d-%b-%Y'),
                'interpro_id': (interpro_id if interpro_id
                                and interpro_id != 'NULL' else None),
                'protein_name': (protein_name if protein_name
                                 and protein_name != 'NULL' else None),
                'go_terms': parsed_go_terms,
            }
    rows = property(__get_rows)
|
fgaudin/aemanager | refs/heads/master | core/decorators.py | 1 | from django.contrib.auth import REDIRECT_FIELD_NAME
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, \
HttpResponseForbidden
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _, ugettext
from django.contrib.auth.decorators import login_required
from django.utils.functional import wraps
from django.utils.decorators import available_attrs
from django.conf import settings
def settings_required(view_func, redirect_field_name=REDIRECT_FIELD_NAME):
    """Redirect to the settings page when mandatory profile values are unset.

    The view is additionally wrapped in ``login_required``, so anonymous
    users are first sent to the login page (using *redirect_field_name*).
    """
    def _guarded(request, *args, **kwargs):
        profile = request.user.get_profile()
        if not profile.settings_defined():
            # Mandatory settings missing: tell the user and redirect.
            messages.info(request, _('You need to fill these informations to continue'))
            return HttpResponseRedirect(reverse('settings_edit'))
        return view_func(request, *args, **kwargs)
    wrapped = wraps(view_func, assigned=available_attrs(view_func))(_guarded)
    return login_required(wrapped, redirect_field_name=redirect_field_name)
def disabled_for_demo(view_func):
    """
    decorator which returns a 403 Forbidden response when the site runs
    in demo mode (settings.DEMO); otherwise the view runs normally.
    """
    # Note: the previous docstring was copy-pasted from settings_required
    # and described the wrong behavior.
    def decorator(request, *args, **kwargs):
        if not settings.DEMO:
            return view_func(request, *args, **kwargs)
        # Demo instance: feature is disabled for all users.
        return HttpResponseForbidden(ugettext("You can't use this feature in demo"))
    return wraps(view_func, assigned=available_attrs(view_func))(decorator)
|
jalexvig/tensorflow | refs/heads/master | tensorflow/contrib/eager/python/examples/l2hmc/neural_nets.py | 30 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural nets utility for L2HMC compatible with TensorFlow's eager execution.
Reference [Generalizing Hamiltonian Monte Carlo with Neural
Networks](https://arxiv.org/pdf/1711.09268.pdf)
Code adapted from the released TensorFlow graph implementation by original
authors https://github.com/brain-research/l2hmc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class GenericNet(tf.keras.Model):
    """Generic neural net with different initialization scale based on input.

    Args:
        x_dim: dimensionality of observed data
        factor: factor of variance scaling initializer
        n_hidden: number of hidden units
    """

    def __init__(self, x_dim, factor, n_hidden=10):
        super(GenericNet, self).__init__()
        # Separate input projections for velocity (v), position (x) and the
        # time encoding (t); their outputs are summed in call().
        self.v_layer = _custom_dense(n_hidden, 1. / 3.)
        self.x_layer = _custom_dense(n_hidden, factor / 3.)
        self.t_layer = _custom_dense(n_hidden, 1. / 3.)
        # Shared hidden layer applied to the combined representation.
        self.h_layer = _custom_dense(n_hidden)
        # Scale
        self.scale_layer = _custom_dense(x_dim, .001)
        # Learned per-dimension log-gain applied to the scale output.
        self.coeff_scale = tf.Variable(
            initial_value=tf.zeros([1, x_dim]), name='coeff_scale', trainable=True)
        # Translation
        self.translation_layer = _custom_dense(x_dim, factor=.001)
        # Transformation
        self.transformation_layer = _custom_dense(x_dim, .001)
        self.coeff_transformation = tf.Variable(
            initial_value=tf.zeros([1, x_dim]),
            name='coeff_transformation',
            trainable=True)

    def call(self, inputs):
        # inputs is a (v, x, t) triple of tensors.
        v, x, t = inputs
        h = self.v_layer(v) + self.x_layer(x) + self.t_layer(t)
        h = tf.nn.relu(h)
        h = self.h_layer(h)
        h = tf.nn.relu(h)
        # tanh bounds the raw outputs; exp(coeff_*) is a learned gain.
        scale = tf.nn.tanh(self.scale_layer(h)) * tf.exp(self.coeff_scale)
        translation = self.translation_layer(h)
        transformation = (
            tf.nn.tanh(self.transformation_layer(h)) * tf.exp(
                self.coeff_transformation))
        return scale, translation, transformation
def _custom_dense(units, factor=1.):
    """Custom dense layer with specified weight initialization."""
    # Variance-scaling (fan-in) kernel init, scaled by the caller's factor;
    # biases start at zero.
    kernel_init = tf.contrib.layers.variance_scaling_initializer(
        factor=factor * 2., mode='FAN_IN', uniform=False)
    bias_init = tf.constant_initializer(0., dtype=tf.float32)
    return tf.keras.layers.Dense(
        units=units,
        use_bias=True,
        kernel_initializer=kernel_init,
        bias_initializer=bias_init)
|
joelarmstrong/treeBuildingEvaluation | refs/heads/master | znfTruth/getBestScoringChains.py | 1 | #!/usr/bin/env python
import sys
# For every query name (column 2), keep the line with the highest score
# (column 10); on ties the first line seen wins, matching the strict '<'
# comparison.  Fixes: the input file is now closed (with-statement), the
# whole file is no longer materialized twice (no intermediate map()), and
# the winning score is tracked as an int instead of being re-parsed from
# the stored line on every comparison.
bestScoring = {}
bestScore = {}
with open(sys.argv[1]) as f:
    for raw in f.read().split("\n"):
        line = raw.split()
        if len(line) < 10:
            # Skip blank/short lines (e.g. headers).
            continue
        query = line[1]
        score = int(line[9])
        if query not in bestScoring or bestScore[query] < score:
            bestScoring[query] = line
            bestScore[query] = score
for line in bestScoring.values():
    # print(...) emits identical output under Python 2 and 3.
    print(" ".join(line))
|
Axure/shadowsocks | refs/heads/master | tests/test.py | 1016 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import signal
import select
import time
import argparse
from subprocess import Popen, PIPE
# Interpreter command used to launch the local/server child processes.
python = ['python']
default_url = 'http://localhost/'

parser = argparse.ArgumentParser(description='test Shadowsocks')
parser.add_argument('-c', '--client-conf', type=str, default=None)
parser.add_argument('-s', '--server-conf', type=str, default=None)
parser.add_argument('-a', '--client-args', type=str, default=None)
parser.add_argument('-b', '--server-args', type=str, default=None)
parser.add_argument('--with-coverage', action='store_true', default=None)
parser.add_argument('--should-fail', action='store_true', default=None)
parser.add_argument('--tcp-only', action='store_true', default=None)
parser.add_argument('--url', type=str, default=default_url)
parser.add_argument('--dns', type=str, default='8.8.8.8')
config = parser.parse_args()

if config.with_coverage:
    # Run the children under coverage so their data files are collected.
    python = ['coverage', 'run', '-p', '-a']

client_args = python + ['shadowsocks/local.py', '-v']
server_args = python + ['shadowsocks/server.py', '-v']

if config.client_conf:
    client_args.extend(['-c', config.client_conf])
    # The server reuses the client config when no server config is given.
    if config.server_conf:
        server_args.extend(['-c', config.server_conf])
    else:
        server_args.extend(['-c', config.client_conf])
if config.client_args:
    client_args.extend(config.client_args.split())
    # Same fallback for extra command-line arguments.
    if config.server_args:
        server_args.extend(config.server_args.split())
    else:
        server_args.extend(config.client_args.split())
if config.url == default_url:
    # Default target is localhost, which the server forbids by default;
    # clear the forbidden-ip list so the request can go through.
    server_args.extend(['--forbidden-ip', ''])

p1 = Popen(server_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p2 = Popen(client_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p3 = None  # curl child (TCP test), started at stage 1
p4 = None  # dig child (UDP test), started at stage 3
p3_fin = False
p4_fin = False

# 1 shadowsocks started
# 2 curl started
# 3 curl finished
# 4 dig started
# 5 dig finished
stage = 1
try:
    local_ready = False
    server_ready = False
    fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr]
    while True:
        # Multiplex over all child pipes; an exceptional fd ends the test.
        r, w, e = select.select(fdset, [], fdset)
        if e:
            break
        for fd in r:
            line = fd.readline()
            if not line:
                # EOF on a stdout pipe: that child has finished.
                if stage == 2 and fd == p3.stdout:
                    stage = 3
                if stage == 4 and fd == p4.stdout:
                    stage = 5
            if bytes != str:
                # Python 3: pipes yield bytes; decode for the matching below.
                line = str(line, 'utf8')
            sys.stderr.write(line)
            if line.find('starting local') >= 0:
                local_ready = True
            if line.find('starting server') >= 0:
                server_ready = True
        if stage == 1:
            # Give local/server a moment to come up, then exercise TCP by
            # fetching config.url through the SOCKS5 proxy with curl.
            time.sleep(2)
            p3 = Popen(['curl', config.url, '-v', '-L',
                        '--socks5-hostname', '127.0.0.1:1081',
                        '-m', '15', '--connect-timeout', '10'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p3 is not None:
                fdset.append(p3.stdout)
                fdset.append(p3.stderr)
                stage = 2
            else:
                sys.exit(1)
        if stage == 3 and p3 is not None:
            fdset.remove(p3.stdout)
            fdset.remove(p3.stderr)
            # NOTE(review): ``r`` reuses the select() read-list name here;
            # harmless, since it is reassigned on the next iteration.
            r = p3.wait()
            # should_fail inverts the expected exit status of curl.
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
            else:
                if r != 0:
                    sys.exit(1)
            if config.tcp_only:
                break
            # Exercise UDP relay by resolving a name through the proxy.
            p4 = Popen(['socksify', 'dig', '@%s' % config.dns,
                        'www.google.com'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p4 is not None:
                fdset.append(p4.stdout)
                fdset.append(p4.stderr)
                stage = 4
            else:
                sys.exit(1)
        if stage == 5:
            r = p4.wait()
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
                print('test passed (expecting failure)')
            else:
                if r != 0:
                    sys.exit(1)
                print('test passed')
            break
finally:
    # Always terminate the shadowsocks children, even on test failure.
    for p in [p1, p2]:
        try:
            os.kill(p.pid, signal.SIGINT)
            os.waitpid(p.pid, 0)
        except OSError:
            pass
|
wfxiang08/thrift | refs/heads/wf/20171009 | lib/py/src/transport/TTransport.py | 9 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from struct import pack, unpack
from thrift.Thrift import TException
from ..compat import BufferIO
class TTransportException(TException):
    """Custom Transport Exception class"""

    # Error codes stored in ``self.type`` so callers can distinguish
    # failure modes programmatically.
    UNKNOWN = 0
    NOT_OPEN = 1
    ALREADY_OPEN = 2
    TIMED_OUT = 3
    END_OF_FILE = 4
    NEGATIVE_SIZE = 5
    SIZE_LIMIT = 6

    def __init__(self, type=UNKNOWN, message=None):
        # ``type`` deliberately shadows the builtin to preserve the
        # established public signature.
        TException.__init__(self, message)
        self.type = type
class TTransportBase(object):
    """Base class for Thrift transport layer.

    Concrete transports override isOpen/open/close/read/write/flush;
    readAll() is implemented here in terms of read().
    """

    def isOpen(self):
        pass

    def open(self):
        pass

    def close(self):
        pass

    def read(self, sz):
        pass

    def readAll(self, sz):
        """Read exactly *sz* bytes by looping over read().

        Raises EOFError if the underlying transport is exhausted before
        *sz* bytes have been received.
        """
        # Accumulate chunks in a list and join once at the end: repeated
        # bytes concatenation is quadratic in the number of chunks.
        chunks = []
        have = 0
        while have < sz:
            chunk = self.read(sz - have)
            if not chunk:
                # Underlying transport ran dry before sz bytes arrived.
                raise EOFError()
            chunks.append(chunk)
            have += len(chunk)
        return b''.join(chunks)

    def write(self, buf):
        pass

    def flush(self):
        pass
# This class should be thought of as an interface.
class CReadableTransport(object):
    """base class for transports that are readable from C"""
    # TODO(dreiss): Think about changing this interface to allow us to use
    # a (Python, not c) StringIO instead, because it allows
    # you to write after reading.
    # NOTE: This is a classic class, so properties will NOT work
    # correctly for setting.

    @property
    def cstringio_buf(self):
        """A cStringIO buffer that contains the current chunk we are reading."""
        pass

    def cstringio_refill(self, partialread, reqlen):
        """Refills cstringio_buf.

        Returns the currently used buffer (which can but need not be the same as
        the old cstringio_buf). partialread is what the C code has read from the
        buffer, and should be inserted into the buffer before any more reads. The
        return value must be a new, not borrowed reference. Something along the
        lines of self._buf should be fine.

        If reqlen bytes can't be read, throw EOFError.
        """
        pass
class TServerTransportBase(object):
    """Base class for Thrift server transports."""

    def listen(self):
        # Begin accepting connections (no-op in the base class).
        pass

    def accept(self):
        # Block until a client connects; return a client transport.
        pass

    def close(self):
        pass
class TTransportFactoryBase(object):
    """Base class for a Transport Factory"""

    def getTransport(self, trans):
        # Identity factory: return the transport unchanged.
        return trans
class TBufferedTransportFactory(object):
    """Factory transport that builds buffered transports"""

    def getTransport(self, trans):
        # Wrap the given transport in a buffering layer.
        return TBufferedTransport(trans)
class TBufferedTransport(TTransportBase, CReadableTransport):
    """Class that wraps another transport and buffers its I/O.

    The implementation uses a (configurable) fixed-size read buffer
    but buffers all writes until a flush is performed.
    """
    DEFAULT_BUFFER = 4096

    def __init__(self, trans, rbuf_size=DEFAULT_BUFFER):
        # trans: the wrapped transport; rbuf_size: read-ahead chunk size.
        self.__trans = trans
        self.__wbuf = BufferIO()
        # Pass string argument to initialize read buffer as cStringIO.InputType
        self.__rbuf = BufferIO(b'')
        self.__rbuf_size = rbuf_size

    def isOpen(self):
        return self.__trans.isOpen()

    def open(self):
        return self.__trans.open()

    def close(self):
        return self.__trans.close()

    def read(self, sz):
        # Serve from the local buffer first; only hit the wrapped
        # transport when the buffer is empty.
        ret = self.__rbuf.read(sz)
        if len(ret) != 0:
            return ret
        # Read at least rbuf_size bytes so many small reads are amortized
        # into one underlying read.
        self.__rbuf = BufferIO(self.__trans.read(max(sz, self.__rbuf_size)))
        return self.__rbuf.read(sz)

    def write(self, buf):
        try:
            self.__wbuf.write(buf)
        except Exception as e:
            # on exception reset wbuf so it doesn't contain a partial function call
            self.__wbuf = BufferIO()
            raise e

    def flush(self):
        out = self.__wbuf.getvalue()
        # reset wbuf before write/flush to preserve state on underlying failure
        self.__wbuf = BufferIO()
        self.__trans.write(out)
        self.__trans.flush()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, partialread, reqlen):
        # partialread holds bytes the C reader already consumed; they must
        # stay at the front of the refilled buffer.
        retstring = partialread
        if reqlen < self.__rbuf_size:
            # try to make a read of as much as we can.
            retstring += self.__trans.read(self.__rbuf_size)
        # but make sure we do read reqlen bytes.
        if len(retstring) < reqlen:
            retstring += self.__trans.readAll(reqlen - len(retstring))
        self.__rbuf = BufferIO(retstring)
        return self.__rbuf
class TMemoryBuffer(TTransportBase, CReadableTransport):
    """In-memory transport backed by a BufferIO object.

    Construct with a bytes value to get a readable transport, or with no
    value to get a writable one.  Unlike the C++ implementation, the two
    modes cannot be mixed: you cannot write and then immediately read.
    TODO(dreiss): Make this work like the C++ version.
    """

    def __init__(self, value=None, offset=0):
        """value -- optional initial bytes to read from.

        If value is set, this is a transport for reading, otherwise it is
        for writing.  A non-zero offset positions the buffer before use.
        """
        if value is None:
            self._buffer = BufferIO()
        else:
            self._buffer = BufferIO(value)
        if offset:
            self._buffer.seek(offset)

    def isOpen(self):
        return not self._buffer.closed

    def open(self):
        pass

    def close(self):
        self._buffer.close()

    def read(self, sz):
        return self._buffer.read(sz)

    def write(self, buf):
        self._buffer.write(buf)

    def flush(self):
        pass

    def getvalue(self):
        return self._buffer.getvalue()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        return self._buffer

    def cstringio_refill(self, partialread, reqlen):
        # A memory buffer has nothing more to read from.
        raise EOFError()
class TFramedTransportFactory(object):
    """Factory transport that builds framed transports"""

    def getTransport(self, trans):
        # Wrap the given transport in a framing layer.
        return TFramedTransport(trans)
class TFramedTransport(TTransportBase, CReadableTransport):
    """Class that wraps another transport and frames its I/O when writing."""

    def __init__(self, trans,):
        self.__trans = trans
        self.__rbuf = BufferIO(b'')  # unread remainder of the current frame
        self.__wbuf = BufferIO()  # bytes accumulated for the next frame

    def isOpen(self):
        return self.__trans.isOpen()

    def open(self):
        return self.__trans.open()

    def close(self):
        return self.__trans.close()

    def read(self, sz):
        # Serve from the current frame; pull the next frame when empty.
        ret = self.__rbuf.read(sz)
        if len(ret) != 0:
            return ret
        self.readFrame()
        return self.__rbuf.read(sz)

    def readFrame(self):
        # A frame is a 4-byte big-endian signed length prefix + payload.
        buff = self.__trans.readAll(4)
        sz, = unpack('!i', buff)
        self.__rbuf = BufferIO(self.__trans.readAll(sz))

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        wout = self.__wbuf.getvalue()
        wsz = len(wout)
        # reset wbuf before write/flush to preserve state on underlying failure
        self.__wbuf = BufferIO()
        # N.B.: Doing this string concatenation is WAY cheaper than making
        # two separate calls to the underlying socket object. Socket writes in
        # Python turn out to be REALLY expensive, but it seems to do a pretty
        # good job of managing string buffer operations without excessive copies
        buf = pack("!i", wsz) + wout
        self.__trans.write(buf)
        self.__trans.flush()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, prefix, reqlen):
        # self.__rbuf will already be empty here because fastbinary doesn't
        # ask for a refill until the previous buffer is empty. Therefore,
        # we can start reading new frames immediately.
        while len(prefix) < reqlen:
            self.readFrame()
            prefix += self.__rbuf.getvalue()
        self.__rbuf = BufferIO(prefix)
        return self.__rbuf
class TFileObjectTransport(TTransportBase):
    """Wraps a file-like object to make it work as a Thrift transport."""

    def __init__(self, fileobj):
        # fileobj: any object supporting read/write/flush/close.
        self.fileobj = fileobj

    def isOpen(self):
        # The wrapped file is assumed open for the transport's lifetime.
        return True

    def close(self):
        self.fileobj.close()

    def read(self, sz):
        return self.fileobj.read(sz)

    def write(self, buf):
        self.fileobj.write(buf)

    def flush(self):
        self.fileobj.flush()
class TSaslClientTransport(TTransportBase, CReadableTransport):
    """
    SASL transport
    """

    # Status codes of the SASL negotiation protocol (one byte on the wire).
    START = 1
    OK = 2
    BAD = 3
    ERROR = 4
    COMPLETE = 5

    def __init__(self, transport, host, service, mechanism='GSSAPI',
                 **sasl_kwargs):
        """
        transport: an underlying transport to use, typically just a TSocket
        host: the name of the server, from a SASL perspective
        service: the name of the server's service, from a SASL perspective
        mechanism: the name of the preferred mechanism to use

        All other kwargs will be passed to the puresasl.client.SASLClient
        constructor.
        """
        # Imported lazily so the module works without puresasl installed.
        from puresasl.client import SASLClient
        self.transport = transport
        self.sasl = SASLClient(host, service, mechanism, **sasl_kwargs)
        self.__wbuf = BufferIO()
        self.__rbuf = BufferIO(b'')

    def open(self):
        # Open the wrapped transport, then drive the SASL handshake:
        # send START with the mechanism name, then exchange OK/challenge
        # rounds until the server reports COMPLETE.
        if not self.transport.isOpen():
            self.transport.open()
        self.send_sasl_msg(self.START, self.sasl.mechanism)
        self.send_sasl_msg(self.OK, self.sasl.process())
        while True:
            status, challenge = self.recv_sasl_msg()
            if status == self.OK:
                self.send_sasl_msg(self.OK, self.sasl.process(challenge))
            elif status == self.COMPLETE:
                # Both sides must agree that negotiation finished.
                if not self.sasl.complete:
                    raise TTransportException(
                        TTransportException.NOT_OPEN,
                        "The server erroneously indicated "
                        "that SASL negotiation was complete")
                else:
                    break
            else:
                raise TTransportException(
                    TTransportException.NOT_OPEN,
                    "Bad SASL negotiation status: %d (%s)"
                    % (status, challenge))

    def send_sasl_msg(self, status, body):
        # Wire format: 1-byte status + 4-byte big-endian body length + body.
        header = pack(">BI", status, len(body))
        self.transport.write(header + body)
        self.transport.flush()

    def recv_sasl_msg(self):
        header = self.transport.readAll(5)
        status, length = unpack(">BI", header)
        if length > 0:
            payload = self.transport.readAll(length)
        else:
            # NOTE(review): this branch returns a str while the branch above
            # returns bytes on Python 3 — confirm whether callers rely on it.
            payload = ""
        return status, payload

    def write(self, data):
        self.__wbuf.write(data)

    def flush(self):
        # Wrap (e.g. sign/encrypt) the buffered bytes through SASL and send
        # them as one length-prefixed frame.
        data = self.__wbuf.getvalue()
        encoded = self.sasl.wrap(data)
        self.transport.write(''.join((pack("!i", len(encoded)), encoded)))
        self.transport.flush()
        self.__wbuf = BufferIO()

    def read(self, sz):
        # Serve from the current unwrapped frame; fetch more when empty.
        ret = self.__rbuf.read(sz)
        if len(ret) != 0:
            return ret
        self._read_frame()
        return self.__rbuf.read(sz)

    def _read_frame(self):
        # Read one length-prefixed frame and unwrap it through SASL.
        header = self.transport.readAll(4)
        length, = unpack('!i', header)
        encoded = self.transport.readAll(length)
        self.__rbuf = BufferIO(self.sasl.unwrap(encoded))

    def close(self):
        self.sasl.dispose()
        self.transport.close()

    # based on TFramedTransport
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, prefix, reqlen):
        # self.__rbuf will already be empty here because fastbinary doesn't
        # ask for a refill until the previous buffer is empty. Therefore,
        # we can start reading new frames immediately.
        while len(prefix) < reqlen:
            self._read_frame()
            prefix += self.__rbuf.getvalue()
        self.__rbuf = BufferIO(prefix)
        return self.__rbuf
|
hanzz/qsimkit | refs/heads/master | 3rdparty/pythonqt21-qt48/examples/PyGettingStarted/GettingStarted.py | 3 | from PythonQt import *
# NOTE: ``box`` is injected into this script's globals by the embedding
# PythonQt/C++ host application (PyGettingStarted example); it is a QGroupBox
# whose child widgets are exposed as attributes.
# set the title of the group box, accessing the title property
box.title = 'PythonQt Example'
# set the html content of the QTextBrowser
box.browser.html = 'Hello <b>Qt</b>!'
# set the title of the button
box.button1.text = 'Append Text'
# set the text of the line edit
box.edit.text = '42'
# define our own python method that appends the text from the line edit
# to the text browser
def appendLine():
    """Append the line edit's current text to the text browser."""
    box.browser.append(box.edit.text)
# connect the button's clicked signal to our python method
box.button1.connect('clicked()', appendLine)
# connect the lineedit's returnPressed signal to our python method
box.edit.connect('returnPressed()', appendLine)
# show the window
box.show()
|
Nattefrost/Biblio-shelf | refs/heads/master | main.py | 2 | __author__ = 'Nattefrost'
import tkinter as tk
import db_access
from tkinter import ttk
from tkinter import simpledialog
from tkinter import messagebox
import find_book_ISBN as isbn
import add_dialog
import plot
import stats
import math
class Biblio(tk.Frame):
def __init__(self, root ):
tk.Frame.__init__(self, root)
root['bg'] = 'lightgray'
root.windowIcon = tk.PhotoImage("photo", file="./book_icon.gif") # setting icon
root.tk.call('wm','iconphoto',root._w, root.windowIcon)
root['bd'] = 10
root['relief'] = tk.FLAT
root.resizable(0,0)
root.geometry("850x685")
self.PLOT_WINDOW = 0
self.style = ttk.Style()
self.style.theme_use('clam')
self.tree_data = db_access.get_books_to_view()
self.treeFrame = tk.Frame(root,relief=tk.FLAT).grid(row=0,column=0)
self.view = ttk.Treeview(self.treeFrame, height=27,
columns=("id","Title","Author","Collection"),
selectmode='browse',
displaycolumns=[0,1,2,3])
self.view['show'] = 'headings' #removes first empty column
self.view.heading('#0', text='id', anchor='w')
self.view.heading('#1', text="Title", anchor="w")
self.view.heading('#2', text="Author", anchor="w")
self.view.heading('#3', text="Publisher", anchor="w")
self.view.heading('#4',text="Read",anchor="w")
self.view.grid(row=1,column=0,sticky=tk.W+tk.S+tk.NE,columnspan=3)
self.ysb = ttk.Scrollbar(self.treeFrame, orient='vertical', command=self.view.yview)
self.view.configure(yscroll=self.ysb.set )
self.ysb.grid(row=1,column=3,sticky=tk.E+tk.N+tk.S)
self.view.tag_configure('oddrow', background='black',foreground="white")
self.view.tag_configure('evenrow',background='#133596', foreground='white')
self.insert_content(self.tree_data)
# Searchbar
self.searchVar = tk.StringVar()
self.search_entry = tk.Entry(root, bg="#9DB8AE", bd=1,fg="#280041", relief=tk.SOLID, font="Consolas 12 bold italic",textvariable=self.searchVar)
self.search_entry.grid(row=2,column=0,sticky=tk.W+tk.N+tk.E,columnspan=3)
# Search buttons
self.search_title_button = ttk.Button(root, text='Search title',underline=7, command=self.onClick_title )
self.search_title_button.grid(row=3,column=0,sticky=tk.W+tk.S+tk.E)
self.search_author_button = ttk.Button(root,text="Search author",underline=7,command=self.onClick_author)
self.search_author_button.grid(row=3,column=1,sticky=tk.W+tk.S+tk.E)
self.search_col_button = ttk.Button(root,text="Search publisher",underline=10,command=self.onClick_collection)
self.search_col_button.grid(row=3,column=2,sticky=tk.W+tk.S+tk.E)
self.load_all = ttk.Button(root,text='Reload (F5)', command=self.load_all_callback)
self.load_all.grid(row=4,column=0,sticky=tk.W+tk.S+tk.E)
self.load_stats = ttk.Button(root, text="Show stats", command=self.onClick_stats)
self.load_stats.grid(row=4,column=2,sticky=tk.W+tk.S+tk.E)
# Right click contextual menu
self.contextual_menu = tk.Menu(root, tearoff=0, activebackground='#690093',activeforeground="white",bg="gray8",
fg="white",font="Verdana 10 bold",relief=tk.FLAT)
self.contextual_menu.add_command(label="Delete selected book.",command=self.delete_selected)
self.contextual_menu.add_separator()
self.contextual_menu.add_command(label="Mark book as read.",command=self.mark_read )
self.contextual_menu.add_separator()
self.contextual_menu.add_command(label="Add full book references. F1",command=self.add_book_window)
self.contextual_menu.add_command(label="Add book by ISBN number. Needs network F2",command=self.ask_isbn)
self.contextual_menu.add_separator()
self.contextual_menu.add_command(label="Cancel")
# Keyboard bindings
root.bind('<Button-3>', self.contextual_menu_display )
root.bind('<Return>', self.onClick_title )
root.bind('<Control-o>', self.load_all_callback )
root.bind('<Control-a>', self.onClick_author )
root.bind('<Control-l>', self.onClick_collection)
root.bind('<F2>', self.ask_isbn)
root.bind('<F1>', self.add_book_window)
root.bind('<F5>', self.load_all_callback)
root.mainloop()
def ask_isbn(self, event=None):
isbn_nb = simpledialog.askstring(title="Search book by ISBN", prompt="ISBN number :")
if isbn_nb:
book_data = isbn.get_isbn_ref(isbn_nb)
if book_data[0] and book_data[1] != "Unknown":
res = messagebox.askquestion("Add this book ?","\nTitle : {} \nAuthor : {} \nPublisher : {} ".format(book_data[0],book_data[1], book_data[2]) )
if res == "yes":
db_access.add_book(book_data[0],book_data[1],book_data[2],0)
self.load_all_callback()
else:
messagebox.showerror("Book not found", """Google books could not find the book. \nPlease enter full book references""")
def mark_read(self):
item = self.view.selection()[0]
to_mark = self.view.item(item,"values")
db_access.mark_read(to_mark[0])
self.load_all_callback()
# buttons onclick
def delete_selected(self):
item = self.view.selection()
to_delete = self.view.item(item,"values")
if to_delete:
res = messagebox.askquestion("Delete book ?","Are you sure you want to delete this book : {} ?".format(to_delete[0]))
if res == "yes":
db_access.remove_book(to_delete)
self.view.delete(self.view.selection())
self.load_all_callback()
def onClick_title(self, event=None):
self.search_start(0)
def onClick_author(self, event=None):
self.search_start(1)
def onClick_collection(self, event=None):
self.search_start(2)
def onClick_stats(self, event=None):
if not self.PLOT_WINDOW:
self.PLOT_WINDOW +=1
plot.TkPlot(stats.generate_authors_top_ten(), self)
def load_all_callback(self,event=None):
self.clean_tree()
self.tree_data = db_access.get_books_to_view()
self.insert_content(self.tree_data)
def search_start(self, criteria):
pattern = self.searchVar.get().lower()
if len(pattern) > 0:
found_books = []
for x in range(len(self.tree_data)):
if self.tree_data[x][criteria].lower().startswith(pattern):
found_books.append(self.tree_data[x])
if len(found_books) == 0:
found_books = (["NO BOOK WAS FOUND"])
self.clean_tree()
self.insert_content(found_books)
def add_book_window(self, event=None):
add_dialog.AddDialog(self)
def clean_tree(self):
self.view.delete(*self.view.get_children())
def insert_content(self, data):
for x in range(len(data)):
if x % 2 == 0:
self.view.insert('', 'end', values=data[x],tags=("oddrow",))
else:
self.view.insert('', 'end', values=data[x],tags=("evenrow",))
    def contextual_menu_display(self, event):
        """
        Display the contextual menu under the cursor when right click
        is pressed over the treeview table.
        :param event: right-click event (supplies screen coordinates)
        :return: None
        """
        try:
            self.contextual_menu.tk_popup(event.x_root,event.y_root,0)
        finally:
            # Always release the grab so the UI is never left unresponsive,
            # even if tk_popup raises.
            self.contextual_menu.grab_release()
if __name__ == "__main__":
    # Application entry point: build the Tk root window and start the app.
    # NOTE(review): no root.mainloop() call is visible here -- confirm the
    # event loop is started elsewhere (e.g. inside Biblio.__init__).
    root = tk.Tk()
    root.title("Biblio")
    app = Biblio(root)
|
MichaelNedzelsky/intellij-community | refs/heads/master | python/helpers/py2only/docutils/parsers/rst/languages/zh_tw.py | 57 | # -*- coding: utf-8 -*-
# $Id: zh_tw.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Traditional Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Maps (would-be) Traditional Chinese directive names to the canonical
# directive names registered in docutils.parsers.rst.directives.
# NOTE(review): almost all keys are still untranslated "(translation
# required)" placeholders; only a few entries (e.g. u'日期') are localized.
directives = {
      # language-dependent: fixed
      'attention (translation required)': 'attention',
      'caution (translation required)': 'caution',
      'danger (translation required)': 'danger',
      'error (translation required)': 'error',
      'hint (translation required)': 'hint',
      'important (translation required)': 'important',
      'note (translation required)': 'note',
      'tip (translation required)': 'tip',
      'warning (translation required)': 'warning',
      'admonition (translation required)': 'admonition',
      'sidebar (translation required)': 'sidebar',
      'topic (translation required)': 'topic',
      'line-block (translation required)': 'line-block',
      'parsed-literal (translation required)': 'parsed-literal',
      'rubric (translation required)': 'rubric',
      'epigraph (translation required)': 'epigraph',
      'highlights (translation required)': 'highlights',
      'pull-quote (translation required)': 'pull-quote',
      'compound (translation required)': 'compound',
      u'container (translation required)': 'container',
      #'questions (translation required)': 'questions',
      'table (translation required)': 'table',
      'csv-table (translation required)': 'csv-table',
      'list-table (translation required)': 'list-table',
      #'qa (translation required)': 'questions',
      #'faq (translation required)': 'questions',
      'meta (translation required)': 'meta',
      #'imagemap (translation required)': 'imagemap',
      'image (translation required)': 'image',
      'figure (translation required)': 'figure',
      'include (translation required)': 'include',
      'raw (translation required)': 'raw',
      'replace (translation required)': 'replace',
      'unicode (translation required)': 'unicode',
      u'日期': 'date',
      'class (translation required)': 'class',
      'role (translation required)': 'role',
      u'default-role (translation required)': 'default-role',
      u'title (translation required)': 'title',
      'contents (translation required)': 'contents',
      'sectnum (translation required)': 'sectnum',
      'section-numbering (translation required)': 'sectnum',
      u'header (translation required)': 'header',
      u'footer (translation required)': 'footer',
      #'footnotes (translation required)': 'footnotes',
      #'citations (translation required)': 'citations',
      'target-notes (translation required)': 'target-notes',
      'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Traditional Chinese name to registered (in directives/__init__.py)
directive name mapping."""
# Maps (would-be) Traditional Chinese interpreted-text role names to the
# canonical role names; same untranslated-placeholder state as `directives`.
roles = {
    # language-dependent: fixed
    'abbreviation (translation required)': 'abbreviation',
    'ab (translation required)': 'abbreviation',
    'acronym (translation required)': 'acronym',
    'ac (translation required)': 'acronym',
    'index (translation required)': 'index',
    'i (translation required)': 'index',
    'subscript (translation required)': 'subscript',
    'sub (translation required)': 'subscript',
    'superscript (translation required)': 'superscript',
    'sup (translation required)': 'superscript',
    'title-reference (translation required)': 'title-reference',
    'title (translation required)': 'title-reference',
    't (translation required)': 'title-reference',
    'pep-reference (translation required)': 'pep-reference',
    'pep (translation required)': 'pep-reference',
    'rfc-reference (translation required)': 'rfc-reference',
    'rfc (translation required)': 'rfc-reference',
    'emphasis (translation required)': 'emphasis',
    'strong (translation required)': 'strong',
    'literal (translation required)': 'literal',
    'named-reference (translation required)': 'named-reference',
    'anonymous-reference (translation required)': 'anonymous-reference',
    'footnote-reference (translation required)': 'footnote-reference',
    'citation-reference (translation required)': 'citation-reference',
    'substitution-reference (translation required)': 'substitution-reference',
    'target (translation required)': 'target',
    'uri-reference (translation required)': 'uri-reference',
    'uri (translation required)': 'uri-reference',
    'url (translation required)': 'uri-reference',
    'raw (translation required)': 'raw',}
"""Mapping of Traditional Chinese role names to canonical role names for
interpreted text."""
|
mosquito/TelePY | refs/heads/master | telephony/models.py | 1 | #-*- coding: utf-8 -*-
from django.db.models import *
from datetime import time, datetime
from timezones import TIME_ZONES
from random import randint, choice
import string
class Cdr(Model):
    """Asterisk Call Detail Record (CDR).

    One row per call: caller id, source/destination, channels involved,
    the last dialplan application executed, call/billing timing and
    billing flags.  `callrecord` optionally points at a recording file.
    """
    # Call outcome values as written by Asterisk into `disposition`.
    DISPOSITIONS=(
        ('ANSWERED','Отвечен'),
        ('FAILED', 'Ошибка'),
        ('NO ANSWER','Нет ответа'),
        ('BUSY','Занято'),
        ('DOCUMENTATION','Документировано'),
        ('BILL','Биллинг'),
        ('IGNORE','Игнорировано'),
    )
    # Numeric AMA (Automated Message Accounting) billing flags.
    AMAFLAGS=(
        (1,'Не обсчитывать'),
        (2,'Обсчитывать'),
        (3,'По умолчанию (Документировать)'),
    )
    clid = CharField(max_length=80, editable=True, default='', null=False, verbose_name=u'имя CallerID', db_index=True)
    src = CharField(max_length=80, editable=True, default='', null=False, verbose_name=u'номер CallerID', db_index=True)
    dst = CharField(max_length=80, editable=True, default='', null=False, verbose_name=u'назначение', db_index=True)
    dcontext = ForeignKey("Contexts", blank=False, null=False, verbose_name='контекст', db_index=True, db_column='dcontext')
    channel = CharField(max_length=80, editable=True, default='', null=False, verbose_name=u'канал источника')
    dstchannel = CharField(max_length=80, editable=True, default='', null=False, verbose_name=u'канал назначения')
    lastapp = CharField(max_length=80, editable=True, default='', null=False, verbose_name=u'последнее приложение')
    lastdata = CharField(max_length=80, editable=True, default='', null=False, verbose_name=u'последнее значение')
    start = DateTimeField(editable=True, blank=False, auto_now_add=True, auto_now=False, null=False, verbose_name=u'начало вызова', db_index=True)
    answer = DateTimeField(editable=True, blank=False, auto_now_add=True, auto_now=False, null=False, verbose_name=u'начало разговора', db_index=True)
    end = DateTimeField(editable=True, blank=False, auto_now_add=True, auto_now=False, null=False, verbose_name=u'конец разговора', db_index=True)
    duration = PositiveIntegerField(editable=True, default=0, null=False, verbose_name=u'длительность разговора', db_index=True)
    billsec = PositiveIntegerField(editable=True, default=0, null=False, verbose_name=u'длительность разговора для учета', db_index=True)
    disposition = CharField(max_length=45, editable=True, default='', null=False, verbose_name=u'статус', choices=DISPOSITIONS, db_index=True)
    amaflags = PositiveIntegerField(editable=True, default=0, null=False, verbose_name=u'флаги для биллинга', choices=AMAFLAGS, db_index=True)
    accountcode = ForeignKey('address.Unit', blank=True, null=True, verbose_name='плательщик', db_index=True, db_column='accountcode')
    uniqueid = CharField(max_length=128, editable=True, default='', null=False, verbose_name=u'идентификатор канала', db_index=True)
    userfield = CharField(max_length=255, editable=True, default='', null=False, verbose_name=u'поле пользователя')
    callrecord = FileField(upload_to='/mnt/records', editable=False, blank=True, null=True, verbose_name='запись разговора')
    def __unicode__(self, *args, **kwargs):
        # e.g. "2014-01-01 12:00 | 100 --> 200 длит.: 42 сек."
        return u'%s | %s --> %s длит.: %s сек.' % (self.start, self.src, self.dst, self.billsec)
    class Meta:
        ordering = ['-start', 'src', 'disposition']
        # uniqueid alone is unique; (src, dst, start) guards against dupes.
        unique_together = (('uniqueid',) , ('src', 'dst', 'start',))
        verbose_name = 'звонок'
        verbose_name_plural = 'звонки'
class Contexts(Model):
    """Asterisk dialplan context (a named group of extensions)."""
    IN_OUT = ((True, 'Входящий'), (False, 'Исходящий'), )
    name = CharField(max_length=15, primary_key=True, blank=False, null=False, verbose_name='контекст', editable=True)
    full_name = CharField(max_length=15, blank=False, null=False, verbose_name='описание', editable=True)
    incoming = BooleanField(default=False, null=False, blank=False, verbose_name='Входящий', choices=IN_OUT, db_index=True)
    def __unicode__(self, *args, **kwargs):
        return u'%s' % (self.full_name)
    class Meta:
        ordering = ['name', ]
        verbose_name = 'контекст'
        verbose_name_plural = 'контексты'
class Extensions(Model):
    """One line of the Asterisk realtime dialplan.

    Maps to the classic ``exten => <exten>,<priority>,<app>(<appdata>)``
    syntax, scoped to a `Contexts` row.
    """
    # NOTE: 1 means "enabled" here (comment flag is inverted).
    COMMENTED=(
        (1,'Нет'),
        (0,'Да')
    )
    # Dialplan applications offered in the admin UI (value, human label).
    APPS=(
        ("Dial", u"(Dial) Набрать номер"),
        ("HangUp", u"(HangUp) Положить трубку"),
        ("Wait", u"(Wait) Подождать x секунд"),
        ("Answer", u"(Answer) Поднять трубку"),
        ("AddQueueMember", u"AddQueueMember(queue,channel) Удалить из группы"),
        ("Queue", u"Queue (queue_name) Перейти в очередь"),
        ("PlayBack", u"(PlayBack) Проиграть звуковой файл"),
        ("Set", u"(Set) Установить переменную"),
        ("Read", u"(Read) Прочитать клав в перемен."),
        ("BackGround", u"(BackGround) Играть звук и ждать exten"),
        ("Bridge", u"(Bridge) Сделать мост 2х каналов"),
        ("Busy", u"(Busy) Вернуть \"занято\""),
        ("ChangeMonitor", u"(ChangeMonitor) Изм. файл канала монитора"),
        ("Congestion", u"(Congestion) Перегружено направление"),
        ("DBdel", u"(DBdel) Удалить ключ из внутр. БД"),
        ("DBdeltree", u"(DBdeltree) Удалить дерево из вн. БД"),
        ("Echo", u"(Echo) Проигрывать слышимое"),
        ("ConfBridge", u"(ConfBridge) Создать мост конференции"),
        ("Exec", u"(Exec) Выполнить прил. диалплана"),
        ("ExecIf", u"(ExecIf) Выполнить если"),
        ("ExecIfTime", u"(ExecIfTime) Выполнить если+время"),
        ("GoSub", u"GoSub([[context|]extension|]priority) Перейти в экстеншн после чего вернуться"),
        ("GoTo", u"GoTo([[context|]extension|]priority) Перейти в экстеншн"),
        ("GoToIf", u"GoToIf(condition?context1,extension1,priority1:context2,extension2,priority2)"),
        ("GotoIfTime", u"(GotoIfTime) Перейти экст. если+время"),
        ("ImportVar", u"(ImportVar) Импорт переменной в новую"),
        ("Incomplete", u"(Incomplete) Возвр. невыполненные"),
        ("Macro", u"(Macro) Выполнить макрос"),
        ("MacroExclusive", u"(MacroExclusive) Выпол. только один макрос"),
        ("MacroIf", u"(MacroIf) Макрос если"),
        ("Monitor", u"(Monitor) Мониторинг канала"),
        ("StopMonitor", u"(StopMonitor) Остановить мониторинг канала"),
        ("MSet", u"(MSet) Уст. переменные канала"),
        ("MusicOnHold", u"(MusicOnHold) Играть музыку ожидания"),
        ("NoCDR", u"(NoCDR) Не записывать CDR"),
        ("NoOp", u"(NoOp) Ничего не делать"),
        ("Park", u"(Park) Парковать"),
        ("MeetMeChannelAdmin", u"(MeetMeChannelAdmin) Администрирование канала"),
        ("ParkedCall", u"(ParkedCall) Ответить на паркованый"),
        ("PauseMonitor", u"(PauseMonitor) Приостановить монитор"),
        ("Proceeding", u"(Proceeding) Вызов совершается"),
        ("Progress", u"(Progress) Вызов в процессе"),
        ("RaiseException", u"(RaiseException) Вызвать исключение"),
        ("ReadExten", u"(ReadExten) Прочитать номер из перем."),
        ("ReadFile", u"(ReadFile) Проч. файл в перм. канала"),
        ("MeetMeAdmin", u"(MeetMeAdmin) Администрирование комнаты"),
        ("Record", u"(Record) Записать файл"),
        ("ResetCDR", u"(ResetCDR) Сбросить CDR"),
        ("RetryDial", u"(RetryDial) Повтор набора при неудаче"),
        ("RemoveQueueMember", u"RemoveQueueMember(queue,channel) Добавить в группу"),
        ("Ringing", u"(Ringing) Звонит телефон"),
        ("SayAlpha", u"(SayAlpha) Сказать Alpha"),
        ("SayDigits", u"(SayDigits) Говорить цифры"),
        ("SayNumber", u"(SayNumber) Говорить номер"),
        ("SayPhonetic", u"(SayPhonetic) Говорить фонетически"),
        ("SendFAX", u"(SendFAX) Передать факс"),
        ("ReceiveFAX", u"(ReceiveFAX) Принять факс"),
        ("SetAMAFlags", u"(SetAMAFlags) Установить AMA флаг"),
        ("SetCallerPres", u"(SetCallerPres) Установить показ callerID"),
        ("SetMusicOnHold", u"(SetMusicOnHold) Установить муз. ожидания"),
        ("SIPAddHeader", u"(SIPAddHeader) Доб. исх. заг. sip пакета"),
        ("SIPDtmfMode", u"(SIPDtmfMode) Изменить DTMF Mode"),
        ("SIPRemoveHeader", u"(SIPRemoveHeader) Уд. исх. заг. sip пакета"),
        ("StartMusicOnHold", u"(StartMusicOnHold) Начать проигрывать MOH"),
        ("MeetMeCount", u"(MeetMeCount) Счетчик"),
        ("Transfer", u"(Transfer) Перевод вызова на номер"),
        ("TryExec", u"(TryExec) Попробовать выполнить"),
        ("TrySystem", u"(TrySystem) Попробовать выполн. UNIX"),
        ("System", u"(System) Выполнить UNIX комманду"),
        ("UnpauseMonitor", u"(UnpauseMonitor) Снять монитор с паузы"),
        ("WaitExten", u"(WaitExten) Ждать добавочного"),
        ("WaitMusicOnHold", u"(WaitMusicOnHold) Ждать добав. играя MOH"),
        ("MeetMe", u"(MeetMe) Приложение конференций"),
        ("SLAStation", u"(SLAStation) Run Shared Line"),
        ("SLATrunk", u"(SLATrunk) Shared Line Appearance"),
    )
    commented = SmallIntegerField(default=0, null=False, blank=False, verbose_name='включен', choices=COMMENTED, db_index=True)
    context = ForeignKey(Contexts, blank=False, null=False, verbose_name='контекст', db_index=True, db_column='context')
    exten = CharField(max_length=80, blank=False, null=False, verbose_name='экстен/шаблон', db_index=True, db_column='exten')
    priority = IntegerField(null=False, blank=False, verbose_name='приоритет',)
    app = CharField(max_length=80, blank=False, null=False, verbose_name='приложение диалплана', db_index=True, db_column='app', choices=APPS)
    appdata = CharField(max_length=200, blank=True, null=True, verbose_name='параметры приложения', editable=True, db_index=True)
    class Meta:
        ordering = ['context__name', 'exten', 'priority']
        # One (context, exten, priority) triple per dialplan line.
        unique_together = (('context', 'exten', 'priority',) ,)
        verbose_name = 'экстеншн'
        verbose_name_plural = 'экстеншны'
    def __unicode__(self, *args, **kwargs):
        # Rendered in classic extensions.conf syntax for readability.
        return u"%s| exten => %s,%s,%s(%s)" % (self.context, self.exten, self.priority, self.app, self.appdata)
class Numbers(Model):
    """A SIP peer/user ("number") for Asterisk realtime sip.conf.

    Fix over the original: `useragent` and `defaultuser` were each defined
    TWICE; in a Python class body the later assignment silently shadows the
    earlier one, so only the later definitions ever reached Django.  The
    dead earlier definitions are removed -- the effective schema is
    unchanged.
    """
    YESNO=(
        ('yes','Разрешить'),
        ('no', 'Запретить'),
    )
    TRUEFALSE=(
        (False,'Нет'),
        (True,'Да')
    )
    # NOTE: 1 means "enabled" (comment flag is inverted).
    COMMENTED=(
        (1,'Нет'),
        (0,'Да')
    )
    TYPES=(
        ('peer','Только исходящие (peer)'),
        ('user','Только входящие (user)'),
        ('friend','Входящие и исходящие (friend)')
    )
    AMAFLAGS=(
        ('omit','Не обсчитывать (1)'),
        ('billing','Обсчитывать (2)'),
        ('default','По умолчанию (3)'),
        ('documentation','Документировать (3)'),
    )
    DTMFS=(
        ('inband','В потоке звука'),
        ('rfc2833','RFC2833'),
        ('info','SIP info DTMF'),
        ('auto','Автоопределение'),
    )
    INVITE=(
        ('port', "Игнорировать номер порта, с которого пришла аутентификация"),
        ('invite', "Не требовать начальное сообщение INVITE для аутентификации"),
        ("port,invite", """Не требовать начальное сообщение INVITE для аутентификации и игнорировать порт, с которого пришел запрос"""),
    )
    commented = SmallIntegerField(default=0, verbose_name='включен', choices=COMMENTED, db_index=True)
    name = CharField(max_length=15, verbose_name='номер', primary_key=True)
    host = CharField(max_length=25, verbose_name='хост', default='dynamic', help_text='привязка к определенному хосту или IP, или \'dynamic\'')
    nat = CharField(max_length=5, default='no', editable=True, verbose_name='NAT', help_text='разрешать ли работу через NAT', choices=YESNO)
    type = CharField(max_length=8, default='friend', editable=True, verbose_name='тип', help_text='тип пользователя', choices=TYPES)
    accountcode = ForeignKey('address.Unit', blank=True, null=True, verbose_name='принадлежит', db_column='accountcode')
    amaflags = CharField(max_length=20, default='billing', blank=False, null=False, editable=True, verbose_name='флаги биллинга', help_text='специальные флаги для управления обсчетом по умолчанию', choices=AMAFLAGS)
    callgroup = CharField(max_length=25, blank=True, null=True, editable=False, help_text='не используется, для совместимости')
    callerid = CharField(max_length=250, blank=True, null=True, editable=True, help_text='Оставить пустым для автоподстановки')
    cancallforward = CharField(max_length=3, default='yes', blank=False, null=False, editable=True, verbose_name='Перевод звонков', help_text='разрешать ли перевод звонков', choices=YESNO)
    directmedia = CharField(max_length=3, default='no', editable=True, verbose_name='Прямой поток', help_text='разрешать ли прямое прохождение трафика', choices=YESNO)
    context = ForeignKey(Contexts, blank=True, null=True, verbose_name='контекст', db_column='context')
    defaultip = CharField(max_length=25, blank=True, null=True, verbose_name='IP клиента', help_text='Если Вы знаете IP адрес телефона, Вы можете указать его здесь. Эти настройки будут использоваться при совершении вызовов на данный телефон, если он еще не зарегистрировался на сервере. После регистрации, телефон сам сообщит Asterisk под каким именем пользователя и IP адресом он доступен.')
    dtmfmode = CharField(max_length=8, default='info', editable=True, verbose_name='тип DTMF сигнализации', help_text='в режиме auto Asterisk будет использовать режим rfc2833 для передачи DTMF, по умолчанию, но будет переключаться в режим inband, для передачи DTMF сигналов, если удаленный клиент не укажет в SDP сообщении, что он поддерживает режим передачи DTMF - rfc2833', choices=DTMFS)
    fromuser = CharField(max_length=80, blank=True, null=True, editable=False, help_text='не используется, для совместимости')
    fromdomain = CharField(max_length=80, blank=True, null=True, editable=False, help_text='не используется, для совместимости')
    insecure = CharField(max_length=20, default='', blank=True, null=True, editable=True, verbose_name='игнорировать', choices=INVITE)
    language = CharField(max_length=2, editable=True, default='ru', verbose_name='язык')
    mailbox = CharField(max_length=15, blank=False, null=True, editable=False, help_text='Оставить пустым для автоподстановки')
    md5secret = CharField(max_length=80, blank=True, null=True, editable=False, verbose_name='MD5 пароль', help_text='не используется, для совместимости')
    deny = CharField(max_length=25, blank=True, null=True, editable=False, verbose_name='запрещенные подсети')
    permit = CharField(max_length=25, blank=True, null=True, editable=False, verbose_name='разрешенные подсети')
    mask = CharField(max_length=25, blank=True, null=True, editable=False, help_text='устарел')
    musiconhold = ForeignKey('asterfiles.SndFile', db_column='musiconhold', blank=True, null=True, editable=True, verbose_name='музыка ожидания', db_index=True, related_name='musiconhold')
    pickupgroup = CharField(max_length=80, blank=True, null=True, editable=False, help_text='не используется, для совместимости')
    qualify = CharField(max_length=5, default='no', blank=False, null=False, editable=True, verbose_name='SIP тест', help_text='если yes тогда Asterisk периодически (раз в 2 секунды) будет отправлять SIP сообщение типа OPTIONS, для проверки, что данное устройство работает и доступно для совершения вызовов. Если данное устройство, не ответит в течении заданного периода, тогда Asterisk рассматривает это устройство как выключенное и недоступное для совершения вызовов.', choices=YESNO)
    regexten = CharField(max_length=80, blank=True, null=True, editable=False, help_text='не используется, для совместимости')
    restrictcid = CharField(max_length=25, blank=True, null=True, editable=False, help_text='устарел')
    rtptimeout = CharField(max_length=3, blank=True, null=True, editable=False, help_text='не используется, для совместимости')
    rtpholdtimeout = CharField(max_length=3, blank=True, null=True, editable=False, help_text='не используется, для совместимости')
    secret = CharField(max_length=15, blank=True, null=False, editable=True, verbose_name='пароль', help_text='Для генерации оставьте пустым')
    setvar = CharField(max_length=25, blank=True, null=True, editable=False, help_text='устарел')
    disallow = CharField(max_length=100, editable=True, default='all', verbose_name='запрещенные кодеки')
    allow = CharField(max_length=100, editable=True, default='alaw', verbose_name='разрешенные кодеки')
    comment = TextField(blank=True, null=True, verbose_name='комментарий')
    trustrpid = CharField(max_length=3, blank=True, null=True, editable=True, default='no', verbose_name='Принимать RPID', choices=YESNO, help_text='Можно ли доверять полученному от SIP клиента Remote-Party-ID')
    sendrpid = CharField(max_length=3, blank=True, null=True, editable=True, default='yes', verbose_name='Передавать RPID', choices=YESNO, help_text='Необходимо передавать SIP клиенту Remote-Party-ID')
    videosupport = CharField(max_length=3, blank=True, null=True, editable=True, default='no', choices=YESNO, verbose_name='Поддержка видео')
    fullcontact = CharField(max_length=80, blank=True, null=True, editable=False, help_text='для совместимости')
    ipaddr = IPAddressField(blank=True, null=True, editable=True, verbose_name='последний IP', help_text='для совместимости')
    port = PositiveIntegerField(blank=True, null=True, editable=True, help_text='порт не dynamic клиентов')
    regseconds = BigIntegerField(blank=True, null=True, editable=True, help_text='для совместимости')
    username = CharField(max_length=100, blank=True, null=True, editable=True, help_text='для совместимости')
    regserver = CharField(max_length=100, blank=True, null=True, editable=True, help_text='для совместимости')
    # The duplicate editable=True definitions of `useragent` and
    # `defaultuser` that used to sit here were shadowed by the definitions
    # below and have been removed (no schema change).
    useragent = CharField(max_length=100, blank=True, null=True, editable=False, help_text='для совместимости')
    lastms = CharField(max_length=100, blank=True, null=True, editable=False, help_text='для совместимости')
    defaultuser = CharField(max_length=15, blank=True, null=True, editable=False, help_text='сервер Asterisk будет посылать сообщения INVITE на username@defaultip')
    def gen_passwd(self):
        """Generate a random 12-char lowercase+digit secret and save."""
        # Py2 string.letters; .lower() maps the uppercase half to lowercase,
        # which keeps the distribution uniform over a-z0-9.
        self.secret = ''.join(choice(string.letters.lower()+string.digits) for i in xrange(12))
        return self.save()
    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.accountcode)
    def __init__(self,*args,**kwargs):
        # NOTE(review): auto-generating a secret here calls save() from
        # __init__, i.e. constructing an unsaved instance with an empty
        # secret hits the database -- confirm this is intentional.
        super(Numbers,self).__init__(*args, **kwargs)
        if not self.secret.__len__():
            self.gen_passwd()
    class Meta:
        unique_together = (('name',),)
        ordering = ['name']
        verbose_name = 'номер'
        verbose_name_plural = 'номера'
class Voicemail(Model):
    """Asterisk realtime voicemail mailbox (voicemail.conf options).

    Column names mirror voicemail.conf option names; hyphenated options
    use explicit db_column (e.g. listen-control-forward-key).
    """
    YESNO=(
        ('yes','Да'),
        ('no', 'Нет'),
    )
    uniqueid = AutoField(primary_key=True, blank=False, null=False, verbose_name='id', editable=False)
    payer = ForeignKey('address.Unit', blank=False, null=False, verbose_name='плательщик', db_index=True, db_column='customer_id', editable=False)
    context = CharField(max_length=10, blank=False, null=False, verbose_name='контекст', db_column='context', default=u'city', editable=False, db_index=True)
    mailbox = ForeignKey(Numbers, blank=False, null=False, verbose_name=u'Номер ящика', db_index=True, db_column='mailbox')
    # 4-digit PIN; see gen_passwd() below.
    password = PositiveSmallIntegerField(editable=False, blank=True, null=True, verbose_name=u'пароль', db_index=True)
    fullname = TextField(blank=True, null=True, verbose_name='полное имя')
    email = EmailField(blank=True, null=True, verbose_name='электронная почта', db_index=True)
    pager = EmailField(blank=True, null=True, verbose_name='E-mail для пейджера', db_index=True)
    tz = CharField(max_length=20, blank=False, null=False, verbose_name=u'часовой пояс', default=u'Europe/Moscow', choices=TIME_ZONES)
    attach = CharField(max_length=3, blank=False, null=False, verbose_name=u'прикреплять файлы', choices=YESNO, default='yes')
    saycid = CharField(max_length=3, blank=False, null=False, verbose_name=u'указывать ли номер', choices=YESNO, default='yes')
    review = CharField(max_length=3, blank=False, null=False, verbose_name=u'пересмотр', help_text='Если "Да", то дает возможность перезаписывать оставленное сообщение перед отправкой', choices=YESNO, default='no')
    operator = CharField(max_length=3, blank=False, editable=False, null=False, verbose_name=u'вызывать экстеншн 0', help_text='''Если "Да", то если вызывающий абонент нажмет 0 (ноль) в процессе проигрыша анонса, то выполнения плана набора продолжиться с екстеншена 'o' (сокр. от "Out"), в текущем контексте для голосовой почты. Эта функция может использоваться для перехода к вызову секретаря.''', choices=YESNO, default='no')
    envelope = CharField(max_length=3, blank=False, null=False, verbose_name=u'Региональные параметры', choices=YESNO, default='no', help_text='''Применять ли к сообщениям региональные параметры''')
    sayduration = CharField(max_length=3, blank=False, null=False, verbose_name=u'воспроизводить длительность', help_text='''Если "Да", то воспроизводит длительность сообщения''', choices=YESNO, default='no')
    saydurationm = PositiveSmallIntegerField(blank=True, null=False, verbose_name=u'начальная длительность воспроизведения', default=1, help_text='''Если "Воспроизводить длительность" "Да", то воспроизводит длительность сообщения если сообщение более %значение% минут''', db_index=True)
    sendvoicemail = CharField(max_length=3, blank=False, null=False, verbose_name=u'может пересылать сообщения', choices=YESNO, default='no')
    delete = CharField(max_length=3, blank=False, null=False, verbose_name=u'удалять сообщения после отправки', choices=YESNO, default='no', db_index=True)
    nextaftercmd = CharField(max_length=3, blank=False, null=False, verbose_name=u'проигрывать сдедующее после комманды', choices=YESNO, default='yes', db_index=True)
    forcename = CharField(max_length=3, blank=False, null=False, verbose_name=u'принудительно требовать ввод номера ящика', choices=YESNO, default='no')
    forcegreetings = CharField(max_length=3, blank=False, null=False, verbose_name=u'принудительно требовать запись приветствия', choices=YESNO, default='no')
    hidefromdir = CharField(max_length=3, blank=False, null=False, verbose_name=u'X3 hidefromdir', choices=YESNO, default='yes')
    stamp = DateTimeField(verbose_name=u'хз что это', blank=True, null=True, editable=False, db_index=True)
    attachfmt = CharField(max_length=80, default='wav49', blank=False, null=False, editable=False, verbose_name=u'Формат вложений')
    searchcontexts = CharField(max_length=3, blank=True, null=True, verbose_name=u'Пока не ясно но что-то там искать во всех контекстах', choices=YESNO, default='no', db_index=True)
    cidinternalcontexts = CharField(max_length=10, default='', blank=True, null=True, verbose_name="Пока не ясно cidinternalcontexts", db_index=True)
    exitcontext = CharField(max_length=10, default='', blank=True, null=True, verbose_name="Пока не ясно exitcontext", db_index=True)
    volgain = CharField(max_length=3, blank=False, null=False, verbose_name=u'сделать громче на (dB)', db_index=True, default='0.0')
    tempgreetwarn = CharField(max_length=3, blank=False, null=False, verbose_name=u'напоминать что установлено временное приветствие', choices=YESNO, default='yes', db_index=True)
    messagewrap = CharField(max_length=3, blank=False, null=False, verbose_name=u'воспроизводить более позние первыми', choices=YESNO, default='no', db_index=True)
    minpassword = PositiveSmallIntegerField(blank=False, null=False, verbose_name=u'минимальная длинна пароля', default=4, db_index=True)
    listen_control_forward_key = CharField(max_length=2, blank=False, null=False, verbose_name=u'клавиша перемотки вперед', default='6', db_column='listen-control-forward-key')
    listen_control_reverse_key = CharField(max_length=2, blank=False, null=False, verbose_name=u'клавиша перемотки назад', default='4', db_column='listen-control-reverse-key')
    listen_control_pause_key = CharField(max_length=2, blank=False, null=False, verbose_name=u'клавиша перемотки вперед', default='5', db_column='listen-control-pause-key')
    listen_control_restart_key = CharField(max_length=2, blank=False, null=False, verbose_name=u'клавиша повтора', default='2', db_column='listen-control-restart-key')
    listen_control_stop_key = CharField(max_length=2, blank=False, null=False, verbose_name=u'клавиша перемотки вперед', default='8', db_column='listen-control-stop-key')
    backupdeleted = CharField(max_length=2, default='25', blank=False, null=False, verbose_name=u'сообщений в корзине не более', db_index=True)
    class Meta:
        verbose_name = 'голосовая почта'
        verbose_name_plural = 'учетные записи голосовой почты'
    def __unicode__(self):
        return u'%s' % (self.mailbox)
    def gen_passwd(self):
        # Random 4-digit PIN; NOTE(review): unlike Numbers.gen_passwd this
        # does not save() and is not called automatically -- confirm callers
        # persist the instance afterwards.
        self.password = randint(1000,9999)
class Queue(Model):
    """Asterisk realtime call queue (queues.conf options).

    The queue_* ForeignKeys point at sound files played to waiting
    callers; ring strategy, announce timing and agent pacing are plain
    queues.conf options stored per-row.
    """
    YESNO=(
        ('yes','Да'),
        ('no', 'Нет'),
    )
    YESNOONCE=(
        ('yes', u'Да'),
        ('no', u'Нет'),
        ('once', u'Один раз')
    )
    TRUEFALSE=(
        (False,'Нет'),
        (True,'Да'),
    )
    # Ring strategies supported by Asterisk app_queue.
    STRATEGY=(
        ('ringall', u'Звонить во все доступные каналы пока не ответят'),
        ('leastrecent', u'Звонить в интерфейс который отвечал последним'),
        ('fewestcalls', u'Звонить тому кто принял меньше всего звонков в этой очереди'),
        ('random', u'Звонить совершенно случайно'),
        ('rrmemory', u'Round-Robin с памятью, Звонить по очереди помня кто последний отвечал'),
        ('wrandom', u'Звонить случайно но использовать веса'),
        ('linear', u'Звонить по порядку перечисленному в самой очереди'),
    )
    MONITOR_FORMAT=(
        ('gsm', u'GSM'),
        ('wav', u'WAV'),
        ('wav49', u'WAV, в котором записан GSM 6.10 кодек в MS формате'),
    )
    name = CharField(db_column='name', max_length=128, blank=False, null=False, verbose_name=u'имя', editable=True, db_index=True, primary_key=True)
    musiconhold = CharField(max_length=128, blank=True, null=True, editable=True, db_index=True)
    announce = CharField(max_length=128, blank=True, null=True, editable=True, db_index=True, verbose_name=u'Анонс', help_text=u'использовать анонс, проиграть этот файл анонса из файла',)
    context = ForeignKey("Contexts", blank=True, null=True, verbose_name='контекст', db_index=True, db_column='context', help_text=u'контекст в который будут перенаправлен ожидающий вызов набравший номер')
    timeout = PositiveIntegerField(blank=True, null=True, editable=True, db_index=True, help_text=u'столько секунд звонок без ответа на трубке прежде чем он перейдет на следующего агента')
    monitor_join = BooleanField(default=False, null=False, blank=False, verbose_name=u'Смешивать запись', choices=TRUEFALSE, db_index=True)
    monitor_format = CharField(max_length=128, blank=True, null=True, editable=True, db_index=True, choices=MONITOR_FORMAT)
    queue_youarenext = ForeignKey('asterfiles.SndFile', db_column='queue_youarenext', blank=True, null=True, editable=True, db_index=True, help_text=u'сообщение, которое будет сыграно: Теперь Вы первый на линии.', related_name='queue_youarenext')
    queue_thereare = ForeignKey('asterfiles.SndFile', db_column='queue_thereare', blank=True, null=True, editable=True, db_index=True, help_text=u'сообщение, которое будет сыграно: Ваша позиция в очереди', related_name='queue_thereare')
    queue_callswaiting = ForeignKey('asterfiles.SndFile', db_column='queue_callswaiting', blank=True, null=True, editable=True, db_index=True, help_text=u'сообщение, которое будет сыграно: ожидайте ответа', related_name='queue_callswaiting')
    queue_holdtime = ForeignKey('asterfiles.SndFile', db_column='queue_holdtime', blank=True, null=True, editable=True, db_index=True, help_text=u'сообщение, которое будет сыграно: приблизительное время ожидания', related_name='queue_holdtime')
    queue_minutes = ForeignKey('asterfiles.SndFile', db_column='queue_minutes', blank=True, null=True, editable=True, db_index=True, help_text=u'сообщение, которое будет сыграно: минут', related_name='queue_minutes')
    queue_seconds = ForeignKey('asterfiles.SndFile', db_column='queue_seconds', blank=True, null=True, editable=True, db_index=True, help_text=u'сообщение, которое будет сыграно: секунд', related_name='queue_seconds')
    queue_lessthan = ForeignKey('asterfiles.SndFile', db_column='queue_lessthan', blank=True, null=True, editable=True, db_index=True, help_text=u'сообщение, которое будет сыграно: менее', related_name='queue_lessthan')
    queue_thankyou = ForeignKey('asterfiles.SndFile', db_column='queue_thankyou', blank=True, null=True, editable=True, db_index=True, help_text=u'сообщение, которое будет сыграно: спасибо за ожидание', related_name='queue_thankyou')
    queue_reporthold = ForeignKey('asterfiles.SndFile', db_column='queue_reporthold', blank=True, null=True, editable=True, db_index=True, help_text=u'сообщение, которое будет сыграно: время ожидания', related_name='queue_reporthold')
    announce_frequency = PositiveIntegerField(blank=True, null=True, editable=True, db_index=True, verbose_name=u'Частота анонса')
    announce_round_seconds = PositiveIntegerField(blank=True, null=True, editable=True, db_index=True, verbose_name=u'Округление', help_text=u'Округлять минуты/секунды до этого значения')
    announce_holdtime = CharField(max_length=5, blank=True, null=True, editable=True, db_index=True, choices=YESNOONCE, verbose_name=u'Анонс предпологаемого время ожидания')
    retry = PositiveIntegerField(blank=True, null=True, editable=True, db_index=True, verbose_name=u'Повтор', help_text=u'Сколько мы можем ждать прежде чем попробывать звонить участникам очереди снова')
    wrapuptime = PositiveIntegerField(blank=True, null=True, editable=True, db_index=True, verbose_name=u'время завершения', help_text='сделать паузу во столько секунд прежде чем снова передавать вызов этому участнику')
    maxlen = PositiveIntegerField(blank=True, null=True, editable=True, db_index=True, verbose_name=u'максимальный размер очереди')
    servicelevel = PositiveIntegerField(blank=True, null=True, editable=True, db_index=True, default=0, verbose_name=u'Уровень обслуживания', help_text=u'опция используется для статистики об уровне обслуживания. Вы устанавливаете период времени, в котором звонки должен быть дан ответ. По умолчанию он установлен в 0 (отключен).')
    strategy = CharField(max_length=128, blank=True, null=True, editable=True, db_index=True, default='ringall', choices=STRATEGY, verbose_name=u"стратегия")
    joinempty = CharField(max_length=128, blank=True, null=True, editable=True, db_index=True)
    leavewhenempty = CharField(max_length=128, blank=True, null=True, editable=True, db_index=True)
    eventmemberstatus = BooleanField(default=False, null=False, blank=False, verbose_name=u'генерировать события статуса', choices=TRUEFALSE, db_index=True)
    eventwhencalled = BooleanField(default=False, null=False, blank=False, verbose_name=u'генерировать события вызовов', choices=TRUEFALSE, db_index=True)
    reportholdtime = BooleanField(default=False, null=False, blank=False, verbose_name=u'сообщать сколько ждал абонент', help_text=u'Эта опция очень полезна. Когда установлено Да, член очереди, который отвечает, услышит, как долго абонент был в ожидании и слушал музыку ожидания.', choices=TRUEFALSE, db_index=True)
    memberdelay = PositiveIntegerField(blank=True, null=True, editable=True, db_index=True, help_text=u'Пауза в секундах, прежде чем агент будет соединен с абонентом из очереди', verbose_name=u'Пауза соединения', default=0)
    weight = PositiveIntegerField(default=1, blank=True, null=True, editable=True, db_index=True, verbose_name=u'вес очереди', help_text=u'Очереди с большим весом имеют больший приоритет в канале')
    timeoutrestart = BooleanField(null=False, blank=False, verbose_name='рестарт по таймауту', choices=TRUEFALSE, db_index=True, default=False, help_text=u'Если этот параметр имеет значение Да, и на входящей линии сигнал ЗАНЯТО или ПЕРЕГРУЗКА, агенты будут сброшенны по таймауту. Это может быть полезно с агентами, которые имеет разрешения для отмены вызова.')
    class Meta:
        unique_together = (('name', ),)
        verbose_name = u'Очередь'
        verbose_name_plural = u'Очереди'
    def __unicode__(self):
        return u'%s' % (self.name)
class QueueMember(Model):
    """Membership record linking an agent/device interface to an Asterisk call queue."""

    # db_column matches the Asterisk realtime schema, which keys on queue_name.
    queue = ForeignKey("Queue", blank=False, null=False, verbose_name='очередь', db_index=True, db_column='queue_name')
    # Channel/device identifier of the member (presumably a dial string such
    # as SIP/NNN) -- TODO confirm against the Asterisk config in use.
    interface = CharField(max_length=128, blank=True, null=True, editable=True, db_index=True)
    # Lower-penalty members are tried first by Asterisk (see help_text).
    penalty = PositiveIntegerField(blank=True, null=True, editable=True, db_index=True, verbose_name=u'пеннальти', help_text=u'Это какой-то приоритет. Идея в том, что система будет пытаться звонить первым агентам с более низким приоритетом, а агентов с высшим пеннальти будет пытаться вызвать после')

    class Meta:
        verbose_name = u'Участник очереди'
        verbose_name_plural = u'Участники очередей'

    def __unicode__(self):
        return u'%s' % (self.interface)
class QueueLog(Model):
    """Read-only mirror of Asterisk's ``queue_log`` table (queue statistics)."""

    # Asterisk writes the event timestamp as a string epoch value.
    time = CharField(max_length=20, blank=False, null=True, editable=False, db_index=True)
    callid = CharField(max_length=32, blank=True, null=False, editable=False, db_index=True)
    queue = CharField(max_length=32, blank=True, null=False, editable=False, db_index=True, db_column='queuename')
    agent = CharField(max_length=32, blank=True, null=False, editable=False, db_index=True)
    event = CharField(max_length=32, blank=True, null=False, editable=False, db_index=True)
    data = TextField(blank=True, null=False, editable=False)

    class Meta:
        verbose_name = u'Статистика очередей'

    def __unicode__(self):
        # Removed a dead local that formatted self.time via
        # datetime.fromtimestamp(): it was never used in the return value and
        # could raise ValueError on malformed rows.
        # NOTE(review): raises Cdr.DoesNotExist when no CDR matches callid --
        # confirm callers tolerate that.
        call = Cdr.objects.get(uniqueid=self.callid)
        return u'%s' % (call)
|
Acehaidrey/incubator-airflow | refs/heads/master | airflow/utils/log/es_task_handler.py | 7 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.elasticsearch.log.es_task_handler`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.elasticsearch.log.es_task_handler import ElasticsearchTaskHandler # noqa
# Emitted at import time; stacklevel=2 attributes the warning to the module
# that performed the deprecated import rather than to this shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.elasticsearch.log.es_task_handler`.",
    DeprecationWarning,
    stacklevel=2,
)
|
kennethgillen/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_flavor_facts.py | 27 | #!/usr/bin/python
# Copyright (c) 2015 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata (metadata_version 1.0 schema).
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation rendered by ansible-doc. Fixed the "compatability"
# typo and corrected the documented defaults for ram/vcpus/ephemeral to None,
# matching the argument_spec in main().
DOCUMENTATION = '''
---
module: os_flavor_facts
short_description: Retrieve facts about one or more flavors
author: "David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
    - Retrieve facts about available OpenStack instance flavors. By default,
      facts about ALL flavors are retrieved. Filters can be applied to get
      facts for only matching flavors. For example, you can filter on the
      amount of RAM available to the flavor, or the number of virtual CPUs
      available to the flavor, or both. When specifying multiple filters,
      *ALL* filters must match on a flavor before that flavor is returned as
      a fact.
notes:
    - This module creates a new top-level C(openstack_flavors) fact, which
      contains a list of unsorted flavors.
requirements:
    - "python >= 2.6"
    - "shade"
options:
   name:
     description:
       - A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral).
     required: false
     default: None
   ram:
     description:
       - "A string used for filtering flavors based on the amount of RAM
         (in MB) desired. This string accepts the following special values:
         'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
         (return flavors with the maximum amount of RAM)."
       - "A specific amount of RAM may also be specified. Any flavors with this
         exact amount of RAM will be returned."
       - "A range of acceptable RAM may be given using a special syntax. Simply
         prefix the amount of RAM with one of these acceptable range values:
         '<', '>', '<=', '>='. These values represent less than, greater than,
         less than or equal to, and greater than or equal to, respectively."
     required: false
     default: None
   vcpus:
     description:
       - A string used for filtering flavors based on the number of virtual
         CPUs desired. Format is the same as the I(ram) parameter.
     required: false
     default: None
   limit:
     description:
       - Limits the number of flavors returned. All matching flavors are
         returned by default.
     required: false
     default: None
   ephemeral:
     description:
       - A string used for filtering flavors based on the amount of ephemeral
         storage. Format is the same as the I(ram) parameter
     required: false
     default: None
     version_added: "2.3"
   availability_zone:
     description:
       - Ignored. Present for backwards compatibility
     required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all available flavors
- os_flavor_facts:
cloud: mycloud
# Gather facts for the flavor named "xlarge-flavor"
- os_flavor_facts:
cloud: mycloud
name: "xlarge-flavor"
# Get all flavors that have exactly 512 MB of RAM.
- os_flavor_facts:
cloud: mycloud
ram: "512"
# Get all flavors that have 1024 MB or more of RAM.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
# option will guarantee only a single flavor is returned.
- os_flavor_facts:
cloud: mycloud
ram: "MIN"
limit: 1
# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and
# less than 30gb of ephemeral storage.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
ephemeral: "<30"
'''
RETURN = '''
openstack_flavors:
description: Dictionary describing the flavors.
returned: On success.
type: dictionary
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
'''
import re
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def main():
    """Entry point: gather OpenStack flavor facts, optionally filtered."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        ram=dict(required=False, default=None),
        vcpus=dict(required=False, default=None),
        limit=dict(required=False, default=None, type='int'),
        ephemeral=dict(required=False, default=None),
    )
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['name', 'ram'],
            ['name', 'vcpus'],
            ['name', 'ephemeral']
        ]
    )
    module = AnsibleModule(argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    params = module.params
    try:
        cloud = shade.openstack_cloud(**params)

        # An explicit name bypasses all other filtering.
        if params['name']:
            flavors = cloud.search_flavors(filters={'name': params['name']})
        else:
            flavors = cloud.list_flavors()

        # Collect only the range-style filters that were actually supplied.
        range_filters = {key: params[key]
                         for key in ('vcpus', 'ram', 'ephemeral')
                         if params[key]}
        if range_filters:
            # Range search added in 1.5.0
            if StrictVersion(shade.__version__) < StrictVersion('1.5.0'):
                module.fail_json(msg="Shade >= 1.5.0 needed for this functionality")
            flavors = cloud.range_search(flavors, range_filters)

        if params['limit'] is not None:
            flavors = flavors[:params['limit']]

        module.exit_json(changed=False,
                         ansible_facts=dict(openstack_flavors=flavors))
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
quinot/ansible | refs/heads/devel | test/units/modules/remote_management/oneview/test_oneview_datacenter_facts.py | 102 | # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import pytest
from oneview_module_loader import OneViewModuleBase
from ansible.modules.remote_management.oneview.oneview_datacenter_facts import DatacenterFactsModule
from hpe_test_utils import FactsParamsTest
# Module parameters shared by the "get by name with visualContent" tests.
PARAMS_GET_CONNECTED = {
    'config': 'config.json',
    'name': 'MyDatacenter',
    'options': ['visualContent'],
}
@pytest.mark.resource('datacenters')
class TestDatacenterFactsModule(FactsParamsTest):
    """Unit tests for DatacenterFactsModule against a mocked OneView client."""

    @pytest.fixture(autouse=True)
    def setUp(self, mock_ansible_module, mock_ov_client):
        # Wire the mocked client and Ansible module into each test.
        self.resource = mock_ov_client.datacenters
        self.mock_ansible_module = mock_ansible_module
        self.mock_ov_client = mock_ov_client

    def test_should_get_all_datacenters(self):
        # No name param: module should list everything via get_all().
        self.resource.get_all.return_value = {"name": "Data Center Name"}
        self.mock_ansible_module.params = dict(config='config.json',)
        DatacenterFactsModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(datacenters=({"name": "Data Center Name"}))
        )

    def test_should_get_datacenter_by_name(self):
        self.resource.get_by.return_value = [{"name": "Data Center Name"}]
        self.mock_ansible_module.params = dict(config='config.json', name="MyDatacenter")
        DatacenterFactsModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(datacenters=([{"name": "Data Center Name"}]))
        )

    def test_should_get_datacenter_visual_content(self):
        # visualContent option triggers the extra get_visual_content() call.
        self.resource.get_by.return_value = [{"name": "Data Center Name", "uri": "/rest/datacenter/id"}]
        self.resource.get_visual_content.return_value = {
            "name": "Visual Content"}
        self.mock_ansible_module.params = PARAMS_GET_CONNECTED
        DatacenterFactsModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts={'datacenter_visual_content': {'name': 'Visual Content'},
                           'datacenters': [{'name': 'Data Center Name', 'uri': '/rest/datacenter/id'}]}
        )

    def test_should_get_none_datacenter_visual_content(self):
        # No matching datacenter: the visual-content fact is still present, as None.
        self.resource.get_by.return_value = []
        self.mock_ansible_module.params = PARAMS_GET_CONNECTED
        DatacenterFactsModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts={'datacenter_visual_content': None,
                           'datacenters': []}
        )
|
mrfuxi/django | refs/heads/master | tests/test_runner_deprecation_app/tests.py | 377 | import warnings
from django.test import TestCase
from django.utils.deprecation import RemovedInNextVersionWarning
warnings.warn("module-level warning from deprecation_app", RemovedInNextVersionWarning)
class DummyTest(TestCase):
    """Minimal test case used to exercise deprecation-warning reporting."""

    def test_warn(self):
        # Emitted at test run time, in contrast to the module-level warning above.
        warnings.warn("warning from test", RemovedInNextVersionWarning)
|
tomasreimers/tensorflow-emscripten | refs/heads/master | tensorflow/contrib/learn/python/learn/utils/gc.py | 66 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# create the directories
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# create a simple parser that pulls the export_version from the directory
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print every_fifth(path_list) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print largest_three(all_paths) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print both(all_paths) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# delete everything not in 'both'
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
Path = collections.namedtuple('Path', 'path export_version')
def largest_export_versions(n):
  """Creates a filter that keeps the largest n export versions.

  Args:
    n: number of versions to keep.

  Returns:
    A filter function that keeps the n largest paths.
  """
  def keep(paths):
    # Pair each versioned path with its position; unversioned paths are ignored.
    versioned = [(path.export_version, idx)
                 for idx, path in enumerate(paths)
                 if path.export_version is not None]
    winners = heapq.nlargest(n, versioned)
    return sorted(paths[idx] for _, idx in winners)
  return keep
def one_of_every_n_export_versions(n):
  """Creates a filter that keeps one of every n export versions.

  Args:
    n: interval size.

  Returns:
    A filter function that keeps exactly one path from each interval
    [0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
    interval the largest is kept.
  """
  def keep(paths):
    """Keeps the largest-versioned path per size-n interval."""
    best_in_interval = {}  # interval index -> best path seen so far
    for path in paths:
      version = path.export_version
      # Skip missing export_versions.
      if version is None:
        continue
      # export_version == 0 maps to interval 0; otherwise version v lands in
      # interval floor((v - 1) / n).
      interval = math.floor((version - 1) / n) if version else 0
      champion = best_in_interval.get(interval)
      if champion is None or champion.export_version < version:
        best_in_interval[interval] = path
    return sorted(best_in_interval.values())
  return keep
def mod_export_version(n):
  """Creates a filter that keeps every export that is a multiple of n.

  Args:
    n: step size.

  Returns:
    A filter function that keeps paths where export_version % n == 0.
    Paths without an export_version are skipped.
  """
  def keep(paths):
    keepers = []
    for p in paths:
      # Skip missing export_versions, mirroring the other filters in this
      # module (previously this raised a TypeError on None).
      if p.export_version is None:
        continue
      if p.export_version % n == 0:
        keepers.append(p)
    return sorted(keepers)
  return keep
def union(lf, rf):
  """Creates a filter that keeps the union of two filters.

  Args:
    lf: first filter
    rf: second filter

  Returns:
    A filter function that keeps paths kept by either lf or rf.
  """
  # (Previous docstring said "keeps the n largest paths" -- copy-paste from
  # largest_export_versions; the code has always returned the set union.)
  def keep(paths):
    kept = set(lf(paths)) | set(rf(paths))
    return sorted(kept)
  return keep
def negation(f):
  """Negate a filter.

  Args:
    f: filter function to invert

  Returns:
    A filter function that returns the negation of f.
  """
  def keep(paths):
    # Everything f would keep is removed; everything else survives.
    rejected = set(paths) - set(f(paths))
    return sorted(rejected)
  return keep
def get_paths(base_dir, parser):
  """Gets a list of Paths in a given directory.

  Args:
    base_dir: directory.
    parser: a function which gets the raw Path and can augment it with
      information such as the export_version, or ignore the path by returning
      None. An example parser may extract the export version from a path
      such as "/tmp/exports/100" an another may extract from a full file
      name such as "/tmp/checkpoint-99.out".

  Returns:
    A list of Paths contained in the base directory with the parsing function
    applied.
    By default the following fields are populated,
      - Path.path
    The parsing function is responsible for populating,
      - Path.export_version
  """
  raw_paths = gfile.ListDirectory(base_dir)
  paths = []
  for r in raw_paths:
    # Parser may return None (or any falsy value) to exclude an entry.
    p = parser(Path(os.path.join(base_dir, r), None))
    if p:
      paths.append(p)
  return sorted(paths)
|
chalkchisel/sorl-url | refs/heads/master | sorl_url/utils.py | 2 | import hashlib
import json
import zlib
from django.core import signing
from django.core.urlresolvers import reverse
from django.db.models import get_model, Model
from django.utils.encoding import smart_str
from django.utils.functional import SimpleLazyObject, empty
from django.utils.importlib import import_module
from sorl.thumbnail.base import EXTENSIONS
from sorl.thumbnail.conf import settings
# Sentinel distinguishing "no default supplied" from an explicit None default.
NO_DEFAULT = object()

# Process-wide cache for get_settings_hash(); computed lazily on first use.
_SETTINGS_HASH = None
def get_settings_hash():
    """Return (and cache) an MD5 digest of the thumbnail-relevant settings.

    The digest feeds generate_hash(), so previously issued signed URLs are
    invalidated whenever any of these settings change.
    """
    global _SETTINGS_HASH
    if _SETTINGS_HASH is None:
        # sort_keys makes the serialized form -- and therefore the hash --
        # deterministic regardless of dict iteration order, which otherwise
        # varies across interpreter versions and hash-randomization settings.
        _SETTINGS_HASH = hashlib.md5(json.dumps({
            'SORL_URL_CONFIG': settings.SORL_URL_CONFIG,
            'THUMBNAIL_FORMAT': settings.THUMBNAIL_FORMAT,
            'THUMBNAIL_COLORSPACE': settings.THUMBNAIL_COLORSPACE,
            'THUMBNAIL_UPSCALE': settings.THUMBNAIL_UPSCALE,
            'THUMBNAIL_QUALITY': settings.THUMBNAIL_QUALITY,
            'THUMBNAIL_PROGRESSIVE': settings.THUMBNAIL_PROGRESSIVE,
            'THUMBNAIL_ORIENTATION': settings.THUMBNAIL_ORIENTATION,
            'THUMBNAIL_DUMMY': settings.THUMBNAIL_DUMMY,
            'THUMBNAIL_DUMMY_SOURCE': settings.THUMBNAIL_DUMMY_SOURCE,
            'THUMBNAIL_DUMMY_RATIO': settings.THUMBNAIL_DUMMY_RATIO
        }, sort_keys=True)).hexdigest()
    return _SETTINGS_HASH
def generate_key():
    """Return a 32-byte signing key derived from SECRET_KEY via SHA-256."""
    m = hashlib.sha256(settings.SECRET_KEY)
    return m.digest()
def generate_salt(model_name, field_name, instance_key):
    """Build the signing salt scoping a payload to one model-field instance."""
    return "sorl-%s(%s).%s" % (model_name, instance_key, field_name)
def generate_hash(model_name, field_name, instance_key, instance=None):
    """Short state hash binding a URL to the current field value and settings.

    Looks the instance up from THUMBNAIL_OPTIONS when not supplied. The hash
    changes whenever the stored file name or any thumbnail setting changes,
    invalidating previously issued URLs.
    """
    if instance is None:
        field_config = THUMBNAIL_OPTIONS[model_name][field_name]
        instance = field_config.get_instance(instance_key)
    field_value = getattr(instance, field_name).name
    # Truncated to 8 hex chars -- presumably cache-busting rather than a
    # security boundary; the signed config carries the real integrity check.
    return hashlib.md5("%s{%s}" % (field_value, get_settings_hash())).hexdigest()[:8]
def serialize_and_sign(payload, salt, compress=False):
    """JSON-serialize *payload*, optionally zlib-compress, then sign it.

    Mirrors django.core.signing.dumps: a leading '.' on the encoded body
    marks compressed data for verify_and_load().
    """
    data = signing.JSONSerializer().dumps(payload)
    prefix = ""
    if compress:
        compressed = zlib.compress(data, 9)
        if len(compressed) < (len(data) - 1):  # Only use the compressed version if it's actually smaller
            data = compressed
            prefix = "."
    encoded = prefix + signing.b64_encode(data)
    return signing.Signer(None, salt=salt).sign(encoded)
def verify_and_load(candidate, salt):
    """Verify the signature on *candidate* and return the decoded payload.

    Inverse of serialize_and_sign(); signature failures propagate from
    Signer.unsign (django.core.signing.BadSignature).
    """
    payload = smart_str(signing.Signer(None, salt=salt).unsign(candidate))
    # A leading '.' marks zlib-compressed data (see serialize_and_sign).
    if payload[0] == '.':
        data = zlib.decompress(signing.b64_decode(payload[1:]))
    else:
        data = signing.b64_decode(payload)
    return signing.JSONSerializer().loads(data)
def encode_for_url(payload, model_name, field_name, instance_key, instance=None, compress=True):
    """Sign *payload* with a salt scoped to the given model-field instance."""
    # 'instance' is accepted for signature symmetry with decode_from_url but unused.
    salt = generate_salt(model_name, field_name, instance_key)
    return serialize_and_sign(payload, salt=salt, compress=compress)
def decode_from_url(encoded, model_name, field_name, instance_key, instance=None):
    """Verify and decode a payload produced by encode_for_url()."""
    # 'instance' is accepted for signature symmetry with encode_for_url but unused.
    salt = generate_salt(model_name, field_name, instance_key)
    return verify_and_load(encoded, salt=salt)
def lookup_field(obj, ref):
    """Resolve a Django-style '__'-separated attribute path on *obj*."""
    target = obj
    for attr_name in ref.split('__'):
        target = getattr(target, attr_name)
    return target
class GettableWithConfig(object):
    """Dict-like base class whose contents are built lazily by build_config()."""

    _config = None        # cached result of build_config()
    _config_data = None   # raw input, parsed on first access

    def __init__(self, config):
        self._config_data = config

    def __getitem__(self, key):
        return self.get(key)

    def __contains__(self, item):
        return item in self.config

    @property
    def config(self):
        # Build once, then serve the cached mapping.
        if self._config is None:
            self._config = self.build_config()
        return self._config

    def keys(self):
        return self.config.keys()

    def values(self):
        return self.config.values()

    def items(self):
        return self.config.items()

    def get(self, key, default=NO_DEFAULT):
        # The NO_DEFAULT sentinel lets callers pass default=None explicitly.
        if default is NO_DEFAULT:
            return self.config.get(key)
        else:
            return self.config.get(key, default)

    def build_config(self):
        # Subclass responsibility: return the mapping to cache.
        raise NotImplementedError()
class ThumbnailOptions(GettableWithConfig):
    """Registry of thumbnail URL configuration, keyed both by config name and by model class."""

    def build_config(self):
        data = self._config_data if self._config_data else \
            getattr(settings, 'SORL_URL_CONFIG', {})
        config = {}
        for config_key, model_config in data.items():
            model_config = ModelConfig(model_config, config_key)
            model = model_config.model
            # Create a name-based mapping
            config[config_key] = model_config
            # Also keep a record of all the configurations associated with each model
            config.setdefault(model, [])
            config[model].append(model_config)
        return config

    def find_config_for_field(self, model, field):
        """Return the FieldConfig for (model, field), or None when unmapped."""
        if isinstance(model, basestring):
            model = get_model(*(model.split('.')))
        configs = self.get(model, [])
        for config in configs:
            if field in config:
                return config[field]

    def build_url(self, instance, field, geometry_string, model=None, **options):
        """Build a signed thumbnail URL for *field* of *instance*.

        *instance* may be a model instance, or a raw key value when *model*
        is given. Extra **options override configured thumbnail options.
        Raises ValueError when no mapping exists for (model, field).
        """
        model = model if model else type(instance)
        # Unwrap lazy objects so we see the real model class.
        if model is SimpleLazyObject:
            if instance._wrapped == empty:
                instance._setup()
            model = type(instance._wrapped)
        field_config = self.find_config_for_field(model, field)
        if field_config is None:
            if isinstance(model, basestring):
                model_name = model
            else:
                model_name = "%s.%s" % (model._meta.app_label, model._meta.object_name)
            raise ValueError("No mapping defined for %s.%s" % (model_name, field))
        model_config = field_config.model_config
        # If the instance isn't a model, we assume it's a value-appropriate key
        # for example, it's allowable to specify ``model`` and an ID for instance
        # rather than an actual instance.
        # NOTE: This doesn't add any real efficiency (and may make thing less
        #       efficient in some cases), as we perform an instance lookup to
        #       be able to use the field's value in the generation of a salt.
        if isinstance(instance, Model):
            # NOTE(review): the first arm of this conditional expression is
            # unreachable here (isinstance(instance, Model) is already True),
            # so this always resolves the key field from the instance.
            key = instance if not isinstance(instance, Model) else \
                lookup_field(instance, model_config.get_key_field())
        else:
            key = instance
            instance = field_config.get_instance(key)
        extension = EXTENSIONS[options.pop('format',
            field_config.options.get('format', settings.THUMBNAIL_FORMAT))]  # We transmit format information in the URL, not in options.
        config = {'geometry': geometry_string}
        if options:
            config['options'] = options
        encoded_config = encode_for_url(config, model_config.name,
                                        field_config.field, key, instance=instance)
        state_hash = generate_hash(model_config.name, field_config.field, key,
                                   instance=instance)
        return "%s?config=%s&h=%s" % (
            reverse('sorl_url', kwargs={
                'model_name': model_config.name,
                'field_name': field_config.field,
                'key': key,
                'extension': extension
            }),
            encoded_config,
            state_hash
        )
class ModelConfig(GettableWithConfig):
    """Per-model configuration: shared options, precache sizes, and field configs."""

    _backend = None  # cached backend class, imported on first use

    def __init__(self, config, name):
        super(ModelConfig, self).__init__(config)
        self.model = get_model(*(config['model'].split('.')))
        self.name = name
        self.options = config.get('options', {})
        self.precache = config.get('precache', {})
        # Allow a list of sizes, or a dictionary mapping sizes to configurations
        if not isinstance(self.precache, dict):
            self.precache = {x: [] for x in self.precache}

    def get_instance(self, key):
        """Fetch the model instance whose key field matches the filtered key."""
        return self.model.objects.get(**{
            self.get_key_field(): self.get_key_filter()(key)
        })

    def get_key_field(self):
        # Defaults to the primary key column.
        return self._config_data.get('key_field', 'id')

    def get_key_filter(self):
        # Keys arrive as URL strings; the default filter coerces them to int.
        # An explicit key_filter of None means "use the raw string".
        key_filter = self._config_data.get('key_filter', lambda x: int(x))
        if key_filter is None:
            key_filter = lambda x: x
        return key_filter

    def get_backend(self):
        """Import and cache the configured thumbnail backend class."""
        if self._backend is None:
            backend = self._config_data.get('backend', 'sorl.thumbnail.default.backend')  # Don't worry, this respects settings.THUMBNAIL_BACKEND
            mod_name, classname = backend.rsplit('.', 1)
            mod = import_module(mod_name)
            self._backend = getattr(mod, classname)
        return self._backend

    def build_config(self):
        # Fields may be plain names or (name, options) pairs.
        config = {}
        for field in self._config_data['fields']:
            if isinstance(field, basestring):
                config[field] = FieldConfig(self, field)
            else:
                field, options = field
                config[field] = FieldConfig(self, field, options)
        return config
class FieldConfig(object):
    """Configuration for a single thumbnailed field of a model."""

    def __init__(self, model_config, field, options=None):
        self.model_config = model_config
        self.field = field
        self._options = options or {}

    @property
    def options(self):
        # Field-level options override the model-level defaults.
        opts = dict(self.model_config.options)
        opts.update(self._options)
        return opts

    def get_instance(self, key):
        return self.model_config.get_instance(key)
THUMBNAIL_OPTIONS = ThumbnailOptions(None)
|
intestinalbrain/noema | refs/heads/master | noema_project/noema_site/action/__init__.py | 9 | __author__ = 'master'
|
pythonvietnam/pbc082015 | refs/heads/master | nguyenduyhai/bai4/timkiem_demo.py | 1 |
# Interactive student-record demo (Python 2): '1' adds a student,
# '2' searches by exact name, 'q' quits.
ds = list()
while 1:
    c = raw_input("Ban muon nhap thong tin hs? ")
    if c == 'q':
        # print ds
        break
    elif c == '1':
        # Create a student record from two prompts.
        print "Tao thong tin hoc sinh"
        hs = dict()
        ht = raw_input("Ho va ten: ")
        ns = raw_input("Ngay sinh: ")
        hs['hoten'] = ht
        hs['ngaysinh'] = ns
        # Append the new student record to the list.
        ds.append(hs)
    elif c == '2':
        # Exact-match search on the full name.
        print "Tim kiem hoc sinh"
        qr = raw_input("Nhap ten hoc sinh muon tim kiem: ")
        kqtk = list()
        for i in ds:
            if i['hoten'] == qr:
                kqtk.append(i)
        print "Da ket thuc tim kiem. Ket qua tim kiem la:"
        print kqtk
|
CamelBackNotation/CarnotKE | refs/heads/master | jyhton/bugtests/test362.py | 13 | """
[ 545235 ] unexpected match with re
"""
import support
import re
rt = re.compile(r'c[^a]*t', re.IGNORECASE)
# "cat" must not match: after 'c', [^a]* cannot consume the 'a',
# and 't' cannot match it either.
if rt.match("cat") is not None:
    raise support.TestError('Should not match #1')

rs = re.compile(r'c[^a]t', re.IGNORECASE)
# Same with the single-character class.
if rs.match('cat') is not None:
    raise support.TestError('Should not match #2')
|
rezoo/chainer | refs/heads/master | examples/sentiment/download.py | 14 | #!/usr/bin/env python
import os
import os.path
from six.moves.urllib import request
import zipfile
# Fetch the Stanford Sentiment Treebank (train/dev/test PTB trees) archive
# and unpack it into the current directory.
request.urlretrieve(
    'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip',
    'trainDevTestTrees_PTB.zip')

# Close the archive deterministically (the original leaked the handle).
with zipfile.ZipFile('trainDevTestTrees_PTB.zip') as zf:
    for name in zf.namelist():
        filename = os.path.basename(name)
        # Skip pure directory entries; extract() recreates parent dirs.
        if filename:
            # NOTE(review): extract() trusts archive-supplied paths; that is
            # unsafe for untrusted zips (zip-slip) but acceptable for this
            # fixed, trusted URL.
            zf.extract(name, '.')
|
xinwu/horizon | refs/heads/master | openstack_dashboard/dashboards/project/firewalls/tabs.py | 48 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls import tables
FirewallsTable = tables.FirewallsTable
PoliciesTable = tables.PoliciesTable
RulesTable = tables.RulesTable
class RulesTab(tabs.TableTab):
    """Tab listing the current tenant's firewall rules."""
    table_classes = (RulesTable,)
    name = _("Firewall Rules")
    slug = "rules"
    template_name = "horizon/common/_detail_table.html"

    def get_rulestable_data(self):
        # Degrade to an empty table (with a user-visible error) on API failure.
        try:
            tenant_id = self.request.user.tenant_id
            request = self.tab_group.request
            rules = api.fwaas.rule_list_for_tenant(request, tenant_id)
        except Exception:
            rules = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve rules list.'))
        return rules
class PoliciesTab(tabs.TableTab):
    """Tab listing the current tenant's firewall policies."""
    table_classes = (PoliciesTable,)
    name = _("Firewall Policies")
    slug = "policies"
    template_name = "horizon/common/_detail_table.html"

    def get_policiestable_data(self):
        # Degrade to an empty table (with a user-visible error) on API failure.
        try:
            tenant_id = self.request.user.tenant_id
            request = self.tab_group.request
            policies = api.fwaas.policy_list_for_tenant(request, tenant_id)
        except Exception:
            policies = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve policies list.'))
        return policies
class FirewallsTab(tabs.TableTab):
    """Tab listing the tenant's firewalls, annotated with routers when supported."""
    table_classes = (FirewallsTable,)
    name = _("Firewalls")
    slug = "firewalls"
    template_name = "horizon/common/_detail_table.html"

    def get_firewallstable_data(self):
        try:
            tenant_id = self.request.user.tenant_id
            request = self.tab_group.request
            firewalls = api.fwaas.firewall_list_for_tenant(request, tenant_id)
            # When the router-insertion extension is available, attach each
            # firewall's router objects so the table can render them.
            if api.neutron.is_extension_supported(request,
                                                  'fwaasrouterinsertion'):
                routers = api.neutron.router_list(request, tenant_id=tenant_id)
                for fw in firewalls:
                    router_list = [r for r in routers
                                   if r['id'] in fw['router_ids']]
                    fw.get_dict()['routers'] = router_list
        except Exception:
            firewalls = []
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve firewall list.'))
        return firewalls
class RuleDetailsTab(tabs.Tab):
    """Detail pane for a single firewall rule."""
    name = _("Firewall Rule Details")
    slug = "ruledetails"
    template_name = "project/firewalls/_rule_details.html"
    failure_url = reverse_lazy('horizon:project:firewalls:index')

    def get_context_data(self, request):
        rid = self.tab_group.kwargs['rule_id']
        try:
            rule = api.fwaas.rule_get(request, rid)
        except Exception:
            # NOTE(review): relies on handle(redirect=...) aborting the view;
            # otherwise 'rule' would be unbound below -- confirm.
            exceptions.handle(request,
                              _('Unable to retrieve rule details.'),
                              redirect=self.failure_url)
        return {'rule': rule}
class PolicyDetailsTab(tabs.Tab):
    """Detail pane for a single firewall policy."""
    name = _("Firewall Policy Details")
    slug = "policydetails"
    template_name = "project/firewalls/_policy_details.html"
    failure_url = reverse_lazy('horizon:project:firewalls:index')

    def get_context_data(self, request):
        pid = self.tab_group.kwargs['policy_id']
        try:
            policy = api.fwaas.policy_get(request, pid)
        except Exception:
            # NOTE(review): relies on handle(redirect=...) aborting the view;
            # otherwise 'policy' would be unbound below -- confirm.
            exceptions.handle(request,
                              _('Unable to retrieve policy details.'),
                              redirect=self.failure_url)
        return {'policy': policy}
class FirewallDetailsTab(tabs.Tab):
    """Detail pane for a single firewall, including routers when supported."""
    name = _("Firewall Details")
    slug = "firewalldetails"
    template_name = "project/firewalls/_firewall_details.html"
    failure_url = reverse_lazy('horizon:project:firewalls:index')

    def get_context_data(self, request):
        fid = self.tab_group.kwargs['firewall_id']
        try:
            firewall = api.fwaas.firewall_get(request, fid)
            body = {'firewall': firewall}
            # Attach the firewall's routers when router insertion is available.
            if api.neutron.is_extension_supported(request,
                                                  'fwaasrouterinsertion'):
                tenant_id = self.request.user.tenant_id
                tenant_routers = api.neutron.router_list(request,
                                                         tenant_id=tenant_id)
                router_ids = firewall.get_dict()['router_ids']
                routers = [r for r in tenant_routers
                           if r['id'] in router_ids]
                body['routers'] = routers
        except Exception:
            # NOTE(review): relies on handle(redirect=...) aborting the view;
            # otherwise 'body' would be unbound below -- confirm.
            exceptions.handle(request,
                              _('Unable to retrieve firewall details.'),
                              redirect=self.failure_url)
        return body
class FirewallTabs(tabs.TabGroup):
    """Top-level tab group for the firewalls panel."""
    slug = "fwtabs"
    tabs = (FirewallsTab, PoliciesTab, RulesTab)
    sticky = True
class RuleDetailsTabs(tabs.TabGroup):
    """Single-tab group wrapping the rule detail pane."""
    slug = "ruletabs"
    tabs = (RuleDetailsTab,)
class PolicyDetailsTabs(tabs.TabGroup):
    """Single-tab group wrapping the policy detail pane."""
    slug = "policytabs"
    tabs = (PolicyDetailsTab,)
class FirewallDetailsTabs(tabs.TabGroup):
    # Single-tab group wrapping the firewall detail tab.
    slug = "firewalltabs"
    tabs = (FirewallDetailsTab,)
|
NielsZeilemaker/incubator-airflow | refs/heads/master | airflow/example_dags/example_latest_only_with_trigger.py | 44 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example LatestOnlyOperator and TriggerRule interactions
"""
import datetime as dt
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.latest_only_operator import LatestOnlyOperator
from airflow.utils.trigger_rule import TriggerRule
# Example DAG showing how LatestOnlyOperator interacts with trigger rules.
dag = DAG(
    dag_id='latest_only_with_trigger',
    schedule_interval=dt.timedelta(hours=4),
    start_date=airflow.utils.dates.days_ago(2),
)

# latest_only skips its downstream tasks on all but the most recent run.
latest_only = LatestOnlyOperator(task_id='latest_only', dag=dag)
task1 = DummyOperator(task_id='task1', dag=dag)
task2 = DummyOperator(task_id='task2', dag=dag)
task3 = DummyOperator(task_id='task3', dag=dag)
# ALL_DONE runs once every upstream has finished, even if some were skipped.
task4 = DummyOperator(task_id='task4', dag=dag,
                      trigger_rule=TriggerRule.ALL_DONE)

# Same dependency graph as before, expressed downstream-first.
latest_only.set_downstream(task1)
task1.set_downstream(task3)
task2.set_downstream(task3)
task1.set_downstream(task4)
task2.set_downstream(task4)
|
vadimtk/chrome4sdp | refs/heads/master | native_client_sdk/src/build_tools/tests/sdktools_test.py | 76 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import subprocess
import sys
import tarfile
import tempfile
import test_server
import unittest
import zipfile
# Directory layout: this file lives in build_tools/tests/.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
TOOLS_DIR = os.path.join(os.path.dirname(BUILD_TOOLS_DIR), 'tools')

# Make build_tools/ and tools/ importable for the helper modules below.
sys.path.extend([BUILD_TOOLS_DIR, TOOLS_DIR])
import getos
import manifest_util
import oshelpers

# Name of the manifest file served to (and cached by) the updater under test.
MANIFEST_BASENAME = 'naclsdk_manifest2.json'

# Attribute '' defined outside __init__
# pylint: disable=W0201
class SdkToolsTestCase(unittest.TestCase):
  """Shared fixture for sdk_tools tests.

  Builds a local updater tree via build_updater.py and serves it over a
  local HTTP server. Subclasses call SetupDefault() (or
  SetupWithBaseDirPrefix()) instead of defining setUp themselves when they
  need a custom base directory.
  """

  def tearDown(self):
    # Shut down the HTTP server (if Setup* ran) and remove the temp tree.
    if self.server:
      self.server.Shutdown()
    oshelpers.Remove(['-rf', self.basedir])

  def SetupDefault(self):
    self.SetupWithBaseDirPrefix('sdktools')

  def SetupWithBaseDirPrefix(self, basedir_prefix, tmpdir=None):
    """Build an updater tree under a fresh temp dir and start the server."""
    self.basedir = tempfile.mkdtemp(prefix=basedir_prefix, dir=tmpdir)
    self.cache_dir = os.path.join(self.basedir, 'nacl_sdk', 'sdk_cache')
    # We have to make sure that we build our updaters with a version that is at
    # least as large as the version in the sdk_tools bundle. If not, update
    # tests may fail because the "current" version (according to the sdk_cache)
    # is greater than the version we are attempting to update to.
    self.current_revision = self._GetSdkToolsBundleRevision()
    self._BuildUpdater(self.basedir, self.current_revision)
    self.manifest = self._ReadCacheManifest()
    self.sdk_tools_bundle = self.manifest.GetBundle('sdk_tools')
    self.server = test_server.LocalHTTPServer(self.basedir)

  def _GetSdkToolsBundleRevision(self):
    """Get the sdk_tools bundle revision.

    We get this from the checked-in path; this is the same file that
    build_updater uses to specify the current revision of sdk_tools."""
    manifest_filename = os.path.join(BUILD_TOOLS_DIR, 'json',
                                     'naclsdk_manifest0.json')
    manifest = manifest_util.SDKManifest()
    manifest.LoadDataFromString(open(manifest_filename, 'r').read())
    return manifest.GetBundle('sdk_tools').revision

  def _WriteConfig(self, config_data):
    # Overwrite the updater's local configuration file.
    config_filename = os.path.join(self.cache_dir, 'naclsdk_config.json')
    with open(config_filename, 'w') as stream:
      stream.write(config_data)

  def _WriteCacheManifest(self, manifest):
    """Write the manifest at nacl_sdk/sdk_cache.

    This is useful for faking having installed a bundle.
    """
    manifest_filename = os.path.join(self.cache_dir, MANIFEST_BASENAME)
    with open(manifest_filename, 'w') as stream:
      stream.write(manifest.GetDataAsString())

  def _ReadCacheManifest(self):
    """Read the manifest at nacl_sdk/sdk_cache."""
    manifest_filename = os.path.join(self.cache_dir, MANIFEST_BASENAME)
    manifest = manifest_util.SDKManifest()
    with open(manifest_filename) as stream:
      manifest.LoadDataFromString(stream.read())
    return manifest

  def _WriteManifest(self):
    # Write the manifest that the local HTTP server will serve to the updater.
    with open(os.path.join(self.basedir, MANIFEST_BASENAME), 'w') as stream:
      stream.write(self.manifest.GetDataAsString())

  def _BuildUpdater(self, out_dir, revision=None):
    """Run build_updater.py to produce an updater tree in out_dir."""
    build_updater_py = os.path.join(BUILD_TOOLS_DIR, 'build_updater.py')
    cmd = [sys.executable, build_updater_py, '-o', out_dir]
    if revision:
      cmd.extend(['-r', str(revision)])
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    _, _ = process.communicate()
    self.assertEqual(process.returncode, 0)

  def _BuildUpdaterArchive(self, rel_path, revision):
    """Build a new sdk_tools bundle.

    Args:
      rel_path: The relative path to build the updater.
      revision: The revision number to give to this bundle.

    Returns:
      A manifest_util.Archive() that points to this new bundle on the local
      server.
    """
    self._BuildUpdater(os.path.join(self.basedir, rel_path), revision)
    new_sdk_tools_tgz = os.path.join(self.basedir, rel_path, 'sdk_tools.tgz')
    with open(new_sdk_tools_tgz, 'rb') as sdk_tools_stream:
      archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
          sdk_tools_stream)
    archive = manifest_util.Archive('all')
    archive.url = self.server.GetURL('%s/sdk_tools.tgz' % (rel_path,))
    archive.checksum = archive_sha1
    archive.size = archive_size
    return archive

  def _Run(self, args, expect_error=False):
    """Run the naclsdk shell script with args, pointing it at our server."""
    naclsdk_shell_script = os.path.join(self.basedir, 'nacl_sdk', 'naclsdk')
    if getos.GetPlatform() == 'win':
      naclsdk_shell_script += '.bat'
    cmd = [naclsdk_shell_script]
    cmd.extend(args)
    cmd.extend(['-U', self.server.GetURL(MANIFEST_BASENAME)])
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    stdout, _ = process.communicate()
    if ((expect_error and process.returncode == 0) or
        (not expect_error and process.returncode != 0)):
      self.fail('Error running nacl_sdk:\n"""\n%s\n"""' % stdout)
    return stdout

  def _RunAndExtractRevision(self):
    """Run 'naclsdk version' and parse the reported revision number."""
    stdout = self._Run(['version'])
    # NOTE(review): pattern should be a raw string (r'version r(\d+)') to
    # avoid invalid-escape-sequence warnings on modern Python.
    match = re.search('version r(\d+)', stdout)
    self.assertTrue(match is not None)
    return int(match.group(1))
class TestSdkTools(SdkToolsTestCase):
  # General smoke tests for the naclsdk wrapper script.

  def testPathHasSpaces(self):
    """Test that running naclsdk from a path with spaces works."""
    self.SetupWithBaseDirPrefix('sdk tools')
    self._WriteManifest()
    self._RunAndExtractRevision()
class TestBuildUpdater(SdkToolsTestCase):
  """Tests for the archives produced by build_updater.py."""

  def setUp(self):
    self.SetupDefault()

  def testUpdaterPathsAreSane(self):
    """Test that the paths to files in nacl_sdk.zip and sdktools.tgz are
    relative to the output directory."""
    nacl_sdk_zip_path = os.path.join(self.basedir, 'nacl_sdk.zip')
    # Context managers replace the try/finally pairs: the archives are still
    # closed even when an assertion fails, with less boilerplate.
    with zipfile.ZipFile(nacl_sdk_zip_path, 'r') as zip_stream:
      self.assertTrue(all(name.startswith('nacl_sdk')
                          for name in zip_stream.namelist()))

    # sdktools.tgz has no built-in directories to look for. Instead, just look
    # for some files that must be there.
    sdktools_tgz_path = os.path.join(self.basedir, 'sdk_tools.tgz')
    with tarfile.open(sdktools_tgz_path, 'r:gz') as tar_stream:
      names = [m.name for m in tar_stream.getmembers()]
      self.assertTrue('LICENSE' in names)
      self.assertTrue('sdk_update.py' in names)
class TestAutoUpdateSdkTools(SdkToolsTestCase):
  """Tests of the sdk_tools bundle's self-update mechanism."""

  def setUp(self):
    self.SetupDefault()

  def testNoUpdate(self):
    """Test that running naclsdk with current revision does nothing."""
    self._WriteManifest()
    revision = self._RunAndExtractRevision()
    self.assertEqual(revision, self.current_revision)

  def testUpdate(self):
    """Test that running naclsdk with a new revision will auto-update."""
    new_revision = self.current_revision + 1
    archive = self._BuildUpdaterArchive('new', new_revision)
    # Point the served manifest's sdk_tools bundle at the newer archive.
    self.sdk_tools_bundle.RemoveAllArchivesForHostOS(archive.host_os)
    self.sdk_tools_bundle.AddArchive(archive)
    self.sdk_tools_bundle.revision = new_revision
    self._WriteManifest()
    revision = self._RunAndExtractRevision()
    self.assertEqual(revision, new_revision)

  def testManualUpdateIsIgnored(self):
    """Test that attempting to manually update sdk_tools is ignored.

    If the sdk_tools bundle was updated normally (i.e. the old way), it would
    leave a sdk_tools_update folder that would then be copied over on a
    subsequent run. This test ensures that there is no folder made.
    """
    new_revision = self.current_revision + 1
    archive = self._BuildUpdaterArchive('new', new_revision)
    self.sdk_tools_bundle.RemoveAllArchivesForHostOS(archive.host_os)
    self.sdk_tools_bundle.AddArchive(archive)
    self.sdk_tools_bundle.revision = new_revision
    self._WriteManifest()
    sdk_tools_update_dir = os.path.join(self.basedir, 'nacl_sdk',
                                        'sdk_tools_update')
    self.assertFalse(os.path.exists(sdk_tools_update_dir))
    stdout = self._Run(['update', 'sdk_tools'])
    self.assertTrue(stdout.find('Ignoring manual update request.') != -1)
    self.assertFalse(os.path.exists(sdk_tools_update_dir))

  def testHelpCommand(self):
    """Running naclsdk with -h should work.

    This is a regression test for a bug where the auto-updater would remove the
    sdk_tools directory when running "naclsdk -h".
    """
    self._WriteManifest()
    self._Run(['-h'])
class TestAutoUpdateSdkToolsDifferentFilesystem(TestAutoUpdateSdkTools):
  # Re-runs every TestAutoUpdateSdkTools test with the SDK tree placed on the
  # current filesystem instead of the default temp location.

  def setUp(self):
    # On Linux (on my machine at least), /tmp is a different filesystem than
    # the current directory. os.rename fails when the source and destination
    # are on different filesystems. Test that case here.
    self.SetupWithBaseDirPrefix('sdktools', tmpdir='.')
if __name__ == '__main__':
  # Propagate the unittest result as the process exit code.
  sys.exit(unittest.main())
|
tareqalayan/ansible | refs/heads/devel | test/units/modules/cloud/google/test_gcp_forwarding_rule.py | 158 | import unittest
from ansible.modules.cloud.google.gcp_forwarding_rule import _build_global_forwarding_rule_dict
class TestGCPFowardingRule(unittest.TestCase):
    """Unit tests for gcp_fowarding_rule module."""

    # Module-style parameters passed to the helper under test.
    # 'region': 'global' selects the global forwarding-rule code path.
    params_dict = {
        'forwarding_rule_name': 'foo_fowarding_rule_name',
        'address': 'foo_external_address',
        'target': 'foo_targetproxy',
        'region': 'global',
        'port_range': 80,
        'protocol': 'TCP',
        'state': 'present',
    }

    def test__build_global_forwarding_rule_dict(self):
        # The bare names in params_dict should be expanded into
        # fully-qualified GCP resource URLs for the given project.
        expected = {
            'name': 'foo_fowarding_rule_name',
            'IPAddress': 'https://www.googleapis.com/compute/v1/projects/my-project/global/addresses/foo_external_address',
            'target': 'https://www.googleapis.com/compute/v1/projects/my-project/global/targetHttpProxies/foo_targetproxy',
            'region': 'global',
            'portRange': 80,
            'IPProtocol': 'TCP',
        }
        actual = _build_global_forwarding_rule_dict(
            self.params_dict, 'my-project')
        self.assertEqual(expected, actual)
|
1013553207/django | refs/heads/master | django/core/serializers/xml_serializer.py | 184 | """
XML serializer.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.encoding import smart_text
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
    """
    Serializes a QuerySet to XML.
    """

    def indent(self, level):
        # Emit '\n' plus (indent * level) spaces, but only when the caller
        # requested indented output.
        if self.options.get('indent') is not None:
            self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level)

    def start_serialization(self):
        """
        Start serialization -- open the XML document and the root element.
        """
        self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
        self.xml.startDocument()
        self.xml.startElement("django-objects", {"version": "1.0"})

    def end_serialization(self):
        """
        End serialization -- end the document.
        """
        self.indent(0)
        self.xml.endElement("django-objects")
        self.xml.endDocument()

    def start_object(self, obj):
        """
        Called as each object is handled.
        """
        if not hasattr(obj, "_meta"):
            raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))

        self.indent(1)
        # Deferred instances report their concrete (proxied-for) model.
        model = obj._meta.proxy_for_model if obj._deferred else obj.__class__
        attrs = OrderedDict([("model", smart_text(model._meta))])
        # The pk attribute is omitted when serializing by natural key only.
        if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
            obj_pk = obj._get_pk_val()
            if obj_pk is not None:
                attrs['pk'] = smart_text(obj_pk)

        self.xml.startElement("object", attrs)

    def end_object(self, obj):
        """
        Called after handling all fields for an object.
        """
        self.indent(1)
        self.xml.endElement("object")

    def handle_field(self, obj, field):
        """
        Called to handle each field on an object (except for ForeignKeys and
        ManyToManyFields)
        """
        self.indent(2)
        self.xml.startElement("field", OrderedDict([
            ("name", field.name),
            ("type", field.get_internal_type()),
        ]))

        # Get a "string version" of the object's data.
        if getattr(obj, field.name) is not None:
            try:
                self.xml.characters(field.value_to_string(obj))
            except UnserializableContentError:
                raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
                    obj.__class__.__name__, field.name, obj._get_pk_val()))
        else:
            # NULL values are serialized as an empty <None/> element.
            self.xml.addQuickElement("None")

        self.xml.endElement("field")

    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey (we need to treat them slightly
        differently from regular fields).
        """
        self._start_relational_field(field)
        related_att = getattr(obj, field.get_attname())
        if related_att is not None:
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                related = getattr(obj, field.name)
                # If related object has a natural key, use it
                related = related.natural_key()
                # Iterable natural keys are rolled out as subelements
                for key_value in related:
                    self.xml.startElement("natural", {})
                    self.xml.characters(smart_text(key_value))
                    self.xml.endElement("natural")
            else:
                self.xml.characters(smart_text(related_att))
        else:
            self.xml.addQuickElement("None")
        self.xml.endElement("field")

    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField. Related objects are only
        serialized as references to the object's PK (i.e. the related *data*
        is not dumped, just the relation).
        """
        # Only auto-created through tables are serialized as plain relations;
        # explicit through models are serialized as objects of their own.
        if field.remote_field.through._meta.auto_created:
            self._start_relational_field(field)
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                # If the objects in the m2m have a natural key, use it
                def handle_m2m(value):
                    natural = value.natural_key()
                    # Iterable natural keys are rolled out as subelements
                    self.xml.startElement("object", {})
                    for key_value in natural:
                        self.xml.startElement("natural", {})
                        self.xml.characters(smart_text(key_value))
                        self.xml.endElement("natural")
                    self.xml.endElement("object")
            else:
                def handle_m2m(value):
                    self.xml.addQuickElement("object", attrs={
                        'pk': smart_text(value._get_pk_val())
                    })
            for relobj in getattr(obj, field.name).iterator():
                handle_m2m(relobj)

            self.xml.endElement("field")

    def _start_relational_field(self, field):
        """
        Helper to output the <field> element for relational fields
        """
        self.indent(2)
        self.xml.startElement("field", OrderedDict([
            ("name", field.name),
            ("rel", field.remote_field.__class__.__name__),
            ("to", smart_text(field.remote_field.model._meta)),
        ]))
class Deserializer(base.Deserializer):
    """
    Deserialize XML.
    """

    def __init__(self, stream_or_string, **options):
        super(Deserializer, self).__init__(stream_or_string, **options)
        # Stream events lazily; each <object> subtree is expanded on demand.
        self.event_stream = pulldom.parse(self.stream, self._make_parser())
        self.db = options.pop('using', DEFAULT_DB_ALIAS)
        self.ignore = options.pop('ignorenonexistent', False)

    def _make_parser(self):
        """Create a hardened XML parser (no custom/external entities)."""
        return DefusedExpatParser()

    def __next__(self):
        # Advance to the next <object> element and deserialize it.
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "object":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration

    def _handle_object(self, node):
        """
        Convert an <object> node to a DeserializedObject.
        """
        # Look up the model using the model loading mechanism. If this fails,
        # bail.
        Model = self._get_model_from_node(node, "model")

        # Start building a data dictionary from the object.
        data = {}
        if node.hasAttribute('pk'):
            data[Model._meta.pk.attname] = Model._meta.pk.to_python(
                node.getAttribute('pk'))

        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}

        field_names = {f.name for f in Model._meta.get_fields()}
        # Deserialize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError("<field> node is missing the 'name' attribute")

            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly unless ignorenonexistent=True is used.
            if self.ignore and field_name not in field_names:
                continue
            field = Model._meta.get_field(field_name)

            # As is usually the case, relation fields get the special treatment.
            if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
            elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(field_node, field)
            else:
                # A <None/> child element marks a NULL value.
                if field_node.getElementsByTagName('None'):
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip())
                data[field.name] = value

        obj = base.build_instance(Model, data, self.db)

        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(obj, m2m_data)

    def _handle_fk_field_node(self, node, field):
        """
        Handle a <field> node for a ForeignKey
        """
        # Check if there is a child node named 'None', returning None if so.
        if node.getElementsByTagName('None'):
            return None
        else:
            model = field.remote_field.model
            if hasattr(model._default_manager, 'get_by_natural_key'):
                keys = node.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
                    obj_pk = getattr(obj, field.remote_field.field_name)
                    # If this is a natural foreign key to an object that
                    # has a FK/O2O as the foreign key, use the FK value
                    if field.remote_field.model._meta.pk.remote_field:
                        obj_pk = obj_pk.pk
                else:
                    # Otherwise, treat like a normal PK
                    field_value = getInnerText(node).strip()
                    obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value)
                return obj_pk
            else:
                field_value = getInnerText(node).strip()
                return model._meta.get_field(field.remote_field.field_name).to_python(field_value)

    def _handle_m2m_field_node(self, node, field):
        """
        Handle a <field> node for a ManyToManyField.
        """
        model = field.remote_field.model
        default_manager = model._default_manager
        if hasattr(default_manager, 'get_by_natural_key'):
            def m2m_convert(n):
                keys = n.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
                else:
                    # Otherwise, treat like a normal PK value.
                    obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
                return obj_pk
        else:
            m2m_convert = lambda n: model._meta.pk.to_python(n.getAttribute('pk'))
        return [m2m_convert(c) for c in node.getElementsByTagName("object")]

    def _get_model_from_node(self, node, attr):
        """
        Helper to look up a model from a <object model=...> or a <field
        rel=... to=...> node.
        """
        model_identifier = node.getAttribute(attr)
        if not model_identifier:
            raise base.DeserializationError(
                "<%s> node is missing the required '%s' attribute"
                % (node.nodeName, attr))
        try:
            return apps.get_model(model_identifier)
        except (LookupError, TypeError):
            raise base.DeserializationError(
                "<%s> node has invalid model identifier: '%s'"
                % (node.nodeName, model_identifier))
def getInnerText(node):
    """
    Get all the inner text of a DOM node (recursively).

    Concatenates the data of all TEXT and CDATA descendants, descending into
    element children; other node types (comments, processing instructions)
    contribute nothing.
    """
    # inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
    inner_text = []
    for child in node.childNodes:
        if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
            inner_text.append(child.data)
        elif child.nodeType == child.ELEMENT_NODE:
            inner_text.extend(getInnerText(child))
        # Other node types carry no text; the former `else: pass` was dead code.
    return "".join(inner_text)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
    """
    An expat parser hardened against XML bomb attacks.

    Forbids DTDs, external entity references
    """

    def __init__(self, *args, **kwargs):
        _ExpatParser.__init__(self, *args, **kwargs)
        # Disable fetching of external general/parameter entities.
        self.setFeature(handler.feature_external_ges, False)
        self.setFeature(handler.feature_external_pes, False)

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        # Any DOCTYPE declaration is rejected outright.
        raise DTDForbidden(name, sysid, pubid)

    def entity_decl(self, name, is_parameter_entity, value, base,
                    sysid, pubid, notation_name):
        raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)

    def external_entity_ref_handler(self, context, base, sysid, pubid):
        raise ExternalReferenceForbidden(context, base, sysid, pubid)

    def reset(self):
        _ExpatParser.reset(self)
        # Re-install the forbidding handlers on the fresh expat parser
        # created by the base class's reset().
        parser = self._parser
        parser.StartDoctypeDeclHandler = self.start_doctype_decl
        parser.EntityDeclHandler = self.entity_decl
        parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
class DefusedXmlException(ValueError):
    """Base exception."""

    def __repr__(self):
        # Mirror str() so logs and debuggers show the detailed message.
        return str(self)
class DTDForbidden(DefusedXmlException):
    """Document type definition is forbidden."""

    def __init__(self, name, sysid, pubid):
        super(DTDForbidden, self).__init__()
        # Keep the offending DOCTYPE's identifying parts for the message.
        self.name, self.sysid, self.pubid = name, sysid, pubid

    def __str__(self):
        return "DTDForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
    """Entity definition is forbidden."""

    def __init__(self, name, value, base, sysid, pubid, notation_name):
        super(EntitiesForbidden, self).__init__()
        # Record everything expat reported about the forbidden entity.
        self.name, self.value, self.base = name, value, base
        self.sysid, self.pubid = sysid, pubid
        self.notation_name = notation_name

    def __str__(self):
        return "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
    """Resolving an external reference is forbidden."""

    def __init__(self, context, base, sysid, pubid):
        super(ExternalReferenceForbidden, self).__init__()
        # Record the reference that the hardened parser refused to resolve.
        self.context, self.base = context, base
        self.sysid, self.pubid = sysid, pubid

    def __str__(self):
        return "ExternalReferenceForbidden(system_id='{}', public_id={})".format(
            self.sysid, self.pubid)
|
jerabekjak/smoderp | refs/heads/master | main_src/processes/surface.py | 1 | import math
def shallowSurfaceKinematic(sur):
    """Kinematic-wave discharge for shallow surface flow: a * h**b.

    `sur` is any object exposing the power-law coefficient `a`, the
    exponent `b`, and the water level `h`.
    """
    coefficient = sur.a
    exponent = sur.b
    water_level = sur.h
    return coefficient * math.pow(water_level, exponent)
|
Kray/qmdc | refs/heads/master | src/connection.py | 1 |
def parseDict(line):
    """Parse a protocol line of space-separated 'key=value' tokens.

    A token without '=' is stored under the key "id" (e.g. the leading
    method name of a message line). Only the first '=' of a token splits
    key from value; later '=' characters stay in the value.

    BUG FIX: previously a key-less token at the *end* of the line was stored
    under the empty-string key "" instead of "id", inconsistently with
    key-less tokens followed by a space.
    """
    result = {}
    key = ""
    tmp = ""
    for c in line:
        if c == '=' and not key:
            # First '=' of this token: everything so far is the key.
            key = tmp
            tmp = ""
        elif c == ' ':
            # Token finished; key-less tokens map to "id".
            result[key or "id"] = tmp
            key = ""
            tmp = ""
        else:
            tmp += c
    # Flush the trailing token, if any.
    if key or tmp:
        result[key or "id"] = tmp
    return result
class Status:
    """Status of a deferred server resource (album image, lyrics, ...)."""

    # Resource states returned by Connection.albumimg()/lyrics().
    OK = 0
    RETRY = 1         # resource not ready yet; ask again later
    UNAVAILABLE = -1  # resource does not exist / cannot be produced

    def __init__(self, status):
        # BUG FIX: the original signature was __init__(status) -- it was
        # missing 'self', so instantiating Status raised a NameError.
        self.status = status
class Connection(socket.socket):
    """Client connection to a musicd server.

    All protocol traffic is serialized through self.mutex (re-entrant, since
    receive() is called from methods that already hold it). Messages received
    while waiting for a different message type are parked in self.queue.
    """

    def __init__(self, host, port):
        socket.socket.__init__(self, socket.AF_INET, socket.SOCK_STREAM)
        self.mutex = threading.RLock()
        self.running = False   # True while the packet-pump thread is alive
        self.buffer = b''      # raw bytes received but not yet parsed
        self.queue = []        # parsed messages awaiting a consumer
        self.connect((host, port))
        # Handshake: announce the client and the protocol version we speak.
        self.send(u"musicd\nprotocol=3\n\n".encode("utf-8"))
        msg = self.receive(["musicd"])
        if "codecs" in msg[1]:
            self.codecs = msg[1]["codecs"].split(",")
        else:
            self.codecs = []
        if "protocol" in msg[1]:
            # NOTE(review): this stores the wire value as a string while the
            # fallback below is the int 0 -- confirm how callers compare it.
            self.protocol = msg[1]["protocol"]
        else:
            # No protocol information?
            self.protocol = 0
        self.transcoding = {}  # optional {'codec': ..., 'bitrate': ...} for open()

    def auth(self, user, passw):
        """Authenticate with the server; raises MdError on rejection."""
        with self.mutex:
            self.send(u"auth\nuser={}\npassword={}\n\n".format(user, passw).encode("utf-8"))
            try:
                line = self.receive(["auth"])
            except MdError:
                # Older protocol will give us unauthorized error because of our handshake, retry
                self.send(u"auth\nuser={}\npassword={}\n\n".format(user, passw).encode("utf-8"))
                line = self.receive(["auth"])

    def search(self, string):
        """Run a track search; returns a list of track-property dicts."""
        with self.mutex:
            self.send(u"search\nquery={}\n\n".format(string).encode("utf-8"))
            result = []
            while 1:
                # 'track' messages are results; 'search' terminates the list.
                msg = self.receive(["track", "search"])
                if msg[0] == "search":
                    break
                result.append(msg[1])
            return result

    def randomid(self):
        """Ask the server for a random track id."""
        with self.mutex:
            self.send(u"randomid\n\n".encode("utf-8"))
            msg = self.receive(["randomid"])
            return int(msg[1].get("id"))

    def open(self, trackid):
        """Open a track stream; returns (stream_properties, track_properties).

        Stops any running packet-pump thread first, then starts a new one.
        """
        if self.running:
            self.running = False
            self.thread.join()
        with self.mutex:
            # Request server-side transcoding only for codecs it advertised.
            if self.transcoding.get("codec") in self.codecs:
                self.send("open\nid={}\ncodec={}\nbitrate={}\n\n".format(trackid, self.transcoding.get("codec"), self.transcoding.get("bitrate")).encode("utf-8"))
            else:
                self.send("open\nid={}\n\n".format(trackid).encode("utf-8"))
            stream = {}
            track = {}
            while 1:
                msg = self.receive(["track", "open"])
                if msg[0] == "open":
                    stream = msg[1]
                    break
                elif msg[0] == "track":
                    track = msg[1]
            # Discard stale queued messages and restart the packet pump.
            self.queue = []
            self.thread = threading.Thread(target=self.process)
            self.thread.start()
            return stream, track

    def seek(self, position):
        """Seek within the open track and flush buffered audio."""
        with self.mutex:
            self.send("seek\nposition={}\n\n".format(position).encode("utf-8"))
            msg = self.receive(["seek"])
            self.queue = []
            mdc.flush()
            # Restart the packet pump if it exited (e.g. end of stream).
            if self.running == False:
                self.thread = threading.Thread(target=self.process)
                self.thread.start()

    def albumimg(self, albumid, size):
        """Fetch album art; returns image bytes or a Status constant."""
        with self.mutex:
            self.send("albumimg\nalbum={}\nsize={}\n\n".format(albumid, size).encode("utf-8"))
            msg = self.receive(["albumimg"])
            if not "image" in msg[1]:
                if msg[1]["status"] == "retry":
                    return Status.RETRY
                elif msg[1]["status"] == "unavailable":
                    return Status.UNAVAILABLE
                # NOTE(review): unknown status values fall through to an
                # implicit None return -- confirm callers tolerate that.
            else:
                return msg[1]["image"]

    def lyrics(self, trackid):
        """Fetch lyrics; returns the text or a Status constant."""
        with self.mutex:
            self.send("lyrics\ntrack={}\n\n".format(trackid).encode("utf-8"))
            msg = self.receive(["lyrics"])
            if not "lyrics" in msg[1]:
                if msg[1]["status"] == "retry":
                    return Status.RETRY
                elif msg[1]["status"] == "unavailable":
                    return Status.UNAVAILABLE
            else:
                return msg[1]["lyrics"]

    def process(self):
        """Packet-pump thread body: feed audio packets to the decoder."""
        self.running = True
        while self.running:
            msg = self.receive(["packet"])
            # An empty payload marks end of stream.
            if len(msg[1]["payload"]) == 0:
                self.running = False
                return
            mdc.packet(msg[1]["payload"])

    def receive(self, methods):
        """Return the next message whose method is in `methods`.

        Messages of other types received meanwhile are queued; MdError is
        raised when the server sends an 'error' message.
        """
        with self.mutex:
            # First serve a matching message parked by an earlier caller.
            for item in self.queue:
                if item[0] in methods:
                    # NOTE(review): this always deletes queue[0], not the
                    # matched item -- only correct if the match is first.
                    del self.queue[0]
                    return item
            while 1:
                msg = self._receive_next()
                if msg[0] == "error":
                    raise MdError(msg[1].get("name"))
                if msg[0] in methods:
                    return msg
                self.queue.append(msg)

    def _receive_next(self):
        """Read and parse the next '\\n\\n'-terminated message from the wire.

        Returns [method, {key: value}]; a 'key:' header (colon suffix) means
        the value is a binary blob of the given length that follows.
        """
        with self.mutex:
            msgs = self.buffer.split(b'\n\n')
            # Keep reading until the buffer holds at least one full message.
            if len(self.buffer) == 0 or (len(msgs) == 1 and self.buffer[-2:] != b"\n\n"):
                data = self.recv(1024)
                self.buffer += data
                if len(data) == 0:
                    QMessageBox.critical(qmdc, "Error", "Connection terminated.")
                    raise MdError("Connection terminated.")
                return self._receive_next()
            msg = msgs[0].decode("utf-8", errors="replace")
            msgs.pop(0)
            self.buffer = b"\n\n".join(msgs)
            result = []
            split = msg.split("\n")
            result.append(split[0])  # first line is the method name
            split.pop(0)
            result.append({})
            for property in split:
                parts = property.split("=")
                if len(parts) == 1 or len(parts[0]) == 0:
                    continue
                if parts[0][-1] == ":":
                    # 'name:=<size>' header: read <size> raw bytes that follow.
                    result[1][parts[0][:-1]] = self._receive_bin(int(parts[1]))
                else:
                    result[1][parts[0]] = "=".join(parts[1:])
            return result

    def _receive_bin(self, size):
        """Consume exactly `size` raw bytes from the socket/buffer."""
        with self.mutex:
            if len(self.buffer) < size:
                data = self.recv(size - len(self.buffer))
                if len(data) == 0:
                    QMessageBox.critical(qmdc, "Error", "Connection terminated.")
                    raise MdError("Connection terminated.")
                self.buffer += data
                return self._receive_bin(size)
            result = self.buffer[:size]
            self.buffer = self.buffer[size:]
            return result
|
AmrThabet/CouchPotatoServer | refs/heads/master | couchpotato/core/media/_base/providers/torrent/passthepopcorn.py | 7 | import htmlentitydefs
import json
import re
import time
import traceback
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
from dateutil.parser import parse
import six
log = CPLog(__name__)
class Base(TorrentProvider):
    # Endpoint URLs for the PassThePopcorn tracker (TLS host).
    urls = {
        'domain': 'https://tls.passthepopcorn.me',
        'detail': 'https://tls.passthepopcorn.me/torrents.php?torrentid=%s',
        'torrent': 'https://tls.passthepopcorn.me/torrents.php',
        'login': 'https://tls.passthepopcorn.me/ajax.php?action=login',
        'login_check': 'https://tls.passthepopcorn.me/ajax.php?action=login',
        'search': 'https://tls.passthepopcorn.me/search/%s/0/7/%d'
    }

    # Minimum number of seconds between consecutive HTTP calls (rate limit).
    http_time_between_calls = 2
    def _search(self, media, quality, results):
        """Query PTP's JSON search API and append matching releases to `results`."""
        movie_title = getTitle(media)
        quality_id = quality['identifier']
        params = mergeDicts(self.quality_search_params[quality_id].copy(), {
            'order_by': 'relevance',
            'order_way': 'descending',
            'searchstr': getIdentifier(media)
        })
        url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
        res = self.getJsonData(url)

        try:
            if not 'Movies' in res:
                return

            # Auth/pass keys are needed to build per-user download URLs.
            authkey = res['AuthKey']
            passkey = res['PassKey']

            for ptpmovie in res['Movies']:
                if not 'Torrents' in ptpmovie:
                    log.debug('Movie %s (%s) has NO torrents', (ptpmovie['Title'], ptpmovie['Year']))
                    continue

                log.debug('Movie %s (%s) has %d torrents', (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents'])))
                for torrent in ptpmovie['Torrents']:
                    torrent_id = tryInt(torrent['Id'])
                    torrentdesc = '%s %s %s' % (torrent['Resolution'], torrent['Source'], torrent['Codec'])
                    torrentscore = 0

                    # Boost scores for golden-popcorn / scene releases when
                    # the user prefers them.
                    if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                        torrentdesc += ' HQ'
                        if self.conf('prefer_golden'):
                            torrentscore += 5000
                    if 'Scene' in torrent and torrent['Scene']:
                        torrentdesc += ' Scene'
                        if self.conf('prefer_scene'):
                            torrentscore += 2000
                    if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                        torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])

                    torrentdesc += ' (%s)' % quality_id
                    torrent_name = re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) - %s' % (movie_title, ptpmovie['Year'], torrentdesc))

                    # Deferred filter applied by the core when ranking results.
                    def extra_check(item):
                        return self.torrentMeetsQualitySpec(item, quality_id)

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        'Source': torrent['Source'],
                        'Checked': 'true' if torrent['Checked'] else 'false',
                        'Resolution': torrent['Resolution'],
                        'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())),
                        'size': tryInt(torrent['Size']) / (1024 * 1024),
                        'seeders': tryInt(torrent['Seeders']),
                        'leechers': tryInt(torrent['Leechers']),
                        'score': torrentscore,
                        'extra_check': extra_check,
                    })

        # NOTE(review): bare 'except:' also swallows SystemExit and
        # KeyboardInterrupt; 'except Exception:' would be safer.
        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def torrentMeetsQualitySpec(self, torrent, quality):
    """Apply the post-search filters configured for `quality` to one torrent.

    A torrent is rejected (False) when a negative rule ('!value') matches a
    field, or when a field has positive rules and none of them match.
    Quality levels without filters, and fields missing from the torrent,
    are accepted.
    """
    if quality not in self.post_search_filters:
        return True

    requirements = self.post_search_filters[quality].copy()
    if self.conf('require_approval'):
        log.debug('Config: Require staff-approval activated')
        requirements['Checked'] = ['true']

    for field, rules in requirements.items():
        if field not in torrent:
            log.debug('Torrent with ID %s has no field "%s"; cannot apply post-search-filter for quality "%s"', (torrent['id'], field, quality))
            continue

        positive_seen = False
        positive_matched = False
        for rule in rules:
            if rule[:1] == '!':
                # A negative rule: the torrent is rejected outright when the
                # field equals the forbidden value.
                if torrent[field] == rule[1:]:
                    return False
            else:
                # A positive rule: at least one listed value must equal the
                # field for the torrent to survive this filter.
                log.debug('Checking if torrents field %s equals %s' % (field, rule))
                positive_seen = True
                if torrent[field] == rule:
                    log.debug('Torrent satisfied %s == %s' % (field, rule))
                    positive_matched = True

        if positive_seen and not positive_matched:
            log.debug('Torrent did not satisfy requirements, ignoring')
            return False

    return True
def htmlToUnicode(self, text):
    """Replace HTML entities in `text` with their unicode characters.

    Handles decimal (&#NNN;) and hex (&#xNN;) character references as well
    as named entities (&amp; etc.); anything unresolvable is left as-is.
    """

    def _replace_entity(match):
        entity = match.group(0)
        if entity[:2] == "&#":
            # Numeric character reference, hex (&#x..;) or decimal (&#..;).
            try:
                if entity[:3] == "&#x":
                    return unichr(int(entity[3:-1], 16))
                return unichr(int(entity[2:-1]))
            except ValueError:
                pass
        else:
            # Named entity; resolve through the standard entity table.
            try:
                entity = unichr(htmlentitydefs.name2codepoint[entity[1:-1]])
            except KeyError:
                pass
        return entity  # leave as is

    return re.sub("&#?\w+;", _replace_entity, six.u('%s') % text)
def unicodeToASCII(self, text):
    """Strip accents: NFKD-decompose `text`, then drop combining marks (Mn)."""
    import unicodedata
    decomposed = unicodedata.normalize('NFKD', text)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
def htmlToASCII(self, text):
    """Convert HTML-entity text to plain ASCII: decode entities, then strip accents."""
    as_unicode = self.htmlToUnicode(text)
    return self.unicodeToASCII(as_unicode)
def getLoginParams(self):
    """Build the POST payload for the PassThePopcorn login request."""
    params = {
        'keeplogged': '1',
        'login': 'Login',
    }
    # Credentials come straight from this provider's settings.
    for key in ('username', 'password', 'passkey'):
        params[key] = self.conf(key)
    return params
def loginSuccess(self, output):
    """Return True when the login response body reports Result == 'Ok'.

    :param output: raw response body (expected to be a JSON object).
    Any malformed or unexpected payload counts as a failed login rather
    than raising.
    """
    try:
        return json.loads(output).get('Result', '').lower() == 'ok'
    # Only swallow the failures a bad payload can produce: invalid/None
    # input (TypeError/ValueError) or a JSON document that is not a dict
    # with a string 'Result' (AttributeError). A bare `except:` here would
    # also hide programming errors and even KeyboardInterrupt.
    except (TypeError, ValueError, AttributeError):
        return False

loginCheckSuccess = loginSuccess
# Static CouchPotato settings descriptor for the PassThePopcorn provider.
# The UI renders each entry in 'options' as one input field; the values are
# read back at runtime through self.conf('<option name>').
config = [{
    'name': 'passthepopcorn',
    'groups': [
        {
            # Where the provider appears in the settings UI.
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'PassThePopcorn',
            'description': '<a href="https://passthepopcorn.me">PassThePopcorn.me</a>',
            'wizard': True,
            # Base64-encoded 16x16 PNG shown next to the provider name.
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAARklEQVQoz2NgIAP8BwMiGWRpIN1JNWn/t6T9f532+W8GkNt7vzz9UkfarZVpb68BuWlbnqW1nU7L2DMx7eCoBlpqGOppCQB83zIgIg+wWQAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False
                },
                {
                    'name': 'domain',
                    'advanced': True,
                    'label': 'Proxy server',
                    'description': 'Domain for requests (HTTPS only!), keep empty to use default (tls.passthepopcorn.me).',
                },
                # Site credentials used by getLoginParams().
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'passkey',
                    'default': '',
                },
                # Scoring preferences consulted during search result ranking.
                {
                    'name': 'prefer_golden',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer golden',
                    'default': 1,
                    'description': 'Favors Golden Popcorn-releases over all other releases.'
                },
                {
                    'name': 'prefer_scene',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer scene',
                    'default': 0,
                    'description': 'Favors scene-releases over non-scene releases.'
                },
                # Consulted by torrentMeetsQualitySpec() to require Checked == 'true'.
                {
                    'name': 'require_approval',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Require approval',
                    'default': 0,
                    'description': 'Require staff-approval for releases to be accepted.'
                },
                # Seeding thresholds passed to the download client.
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        }
    ]
}]
|
gertingold/scipy | refs/heads/master | scipy/linalg/decomp_schur.py | 15 | """Schur decomposition functions."""
from __future__ import division, print_function, absolute_import
import numpy
from numpy import asarray_chkfinite, single, asarray, array
from numpy.linalg import norm
from scipy._lib.six import callable
# Local imports.
from .misc import LinAlgError, _datacopied
from .lapack import get_lapack_funcs
from .decomp import eigvals
__all__ = ['schur', 'rsf2csf']

# dtype characters that are promoted to double-precision complex ('D') when a
# complex Schur form is requested in schur(); everything else promotes to 'F'.
_double_precision = ['i', 'l', 'd']
def schur(a, output='real', lwork=None, overwrite_a=False, sort=None,
          check_finite=True):
    """
    Compute Schur decomposition of a matrix.

    The Schur decomposition is::

        A = Z T Z^H

    where Z is unitary and T is either upper-triangular, or for real
    Schur decomposition (output='real'), quasi-upper triangular. In
    the quasi-triangular form, 2x2 blocks describing complex-valued
    eigenvalue pairs may extrude from the diagonal.

    Parameters
    ----------
    a : (M, M) array_like
        Matrix to decompose
    output : {'real', 'complex'}, optional
        Construct the real or complex Schur decomposition (for real matrices).
    lwork : int, optional
        Work array size. If None or -1, it is automatically computed.
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance).
    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given a eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True).
        Alternatively, string parameters may be used::

            'lhp'   Left-hand plane (x.real < 0.0)
            'rhp'   Right-hand plane (x.real > 0.0)
            'iuc'   Inside the unit circle (x*x.conjugate() <= 1.0)
            'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)

        Defaults to None (no sorting).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    T : (M, M) ndarray
        Schur form of A. It is real-valued for the real Schur decomposition.
    Z : (M, M) ndarray
        An unitary Schur transformation matrix for A.
        It is real-valued for the real Schur decomposition.
    sdim : int
        If and only if sorting was requested, a third return value will
        contain the number of eigenvalues satisfying the sort condition.

    Raises
    ------
    LinAlgError
        Error raised under three conditions:

        1. The algorithm failed due to a failure of the QR algorithm to
           compute all eigenvalues
        2. If eigenvalue sorting was requested, the eigenvalues could not be
           reordered due to a failure to separate eigenvalues, usually because
           of poor conditioning
        3. If eigenvalue sorting was requested, roundoff errors caused the
           leading eigenvalues to no longer satisfy the sorting condition

    See also
    --------
    rsf2csf : Convert real Schur form to complex Schur form

    Examples
    --------
    >>> from scipy.linalg import schur, eigvals
    >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
    >>> T, Z = schur(A)
    >>> T
    array([[ 2.65896708,  1.42440458, -1.92933439],
           [ 0.        , -0.32948354, -0.49063704],
           [ 0.        ,  1.31178921, -0.32948354]])
    >>> Z
    array([[0.72711591, -0.60156188,  0.33079564],
           [0.52839428,  0.79801892,  0.28976765],
           [0.43829436,  0.03590414, -0.89811411]])

    >>> T2, Z2 = schur(A, output='complex')
    >>> T2
    array([[ 2.65896708, -1.22839825+1.32378589j,  0.42590089+1.51937378j],
           [ 0.        , -0.32948354+0.80225456j, -0.59877807+0.56192146j],
           [ 0.        ,  0.        , -0.32948354-0.80225456j]])
    >>> eigvals(T2)
    array([2.65896708, -0.32948354+0.80225456j, -0.32948354-0.80225456j])

    An arbitrary custom eig-sorting condition, having positive imaginary part,
    which is satisfied by only one eigenvalue

    >>> T3, Z3, sdim = schur(A, output='complex', sort=lambda x: x.imag > 0)
    >>> sdim
    1
    """
    if output not in ['real', 'complex', 'r', 'c']:
        raise ValueError("argument must be 'real', or 'complex'")
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
        raise ValueError('expected square matrix')
    typ = a1.dtype.char
    if output in ['complex', 'c'] and typ not in ['F', 'D']:
        # Promote real input to the complex dtype of matching precision.
        # ('i', 'l', 'd' are the double-precision characters.)
        if typ in ('i', 'l', 'd'):
            a1 = a1.astype('D')
            typ = 'D'
        else:
            a1 = a1.astype('F')
            typ = 'F'
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    gees, = get_lapack_funcs(('gees',), (a1,))
    if lwork is None or lwork == -1:
        # Workspace query: LAPACK returns the optimal size as a (possibly
        # complex) scalar in result[-2][0]. Use the builtin int() here:
        # `.astype(numpy.int)` relied on the deprecated numpy.int alias,
        # which was removed in NumPy 1.24 and now raises AttributeError.
        result = gees(lambda x: None, a1, lwork=-1)
        lwork = int(result[-2][0].real)
    if sort is None:
        sort_t = 0
        sfunction = lambda x: None
    else:
        sort_t = 1
        if callable(sort):
            sfunction = sort
        elif sort == 'lhp':
            sfunction = lambda x: (x.real < 0.0)
        elif sort == 'rhp':
            sfunction = lambda x: (x.real >= 0.0)
        elif sort == 'iuc':
            sfunction = lambda x: (abs(x) <= 1.0)
        elif sort == 'ouc':
            sfunction = lambda x: (abs(x) > 1.0)
        else:
            raise ValueError("'sort' parameter must either be 'None', or a "
                             "callable, or one of ('lhp','rhp','iuc','ouc')")
    result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a,
                  sort_t=sort_t)
    # result layout: (T, sdim, ..., Z, work, info); info is the LAPACK
    # status code, with n+1 / n+2 signalling reordering failures.
    info = result[-1]
    if info < 0:
        raise ValueError('illegal value in {}-th argument of internal gees'
                         ''.format(-info))
    elif info == a1.shape[0] + 1:
        raise LinAlgError('Eigenvalues could not be separated for reordering.')
    elif info == a1.shape[0] + 2:
        raise LinAlgError('Leading eigenvalues do not satisfy sort condition.')
    elif info > 0:
        raise LinAlgError("Schur form not found. Possibly ill-conditioned.")
    if sort_t == 0:
        return result[0], result[-3]
    else:
        # result[1] is sdim, the count of eigenvalues satisfying `sort`.
        return result[0], result[-3], result[1]
eps = numpy.finfo(float).eps
feps = numpy.finfo(single).eps
_array_kind = {'b': 0, 'h': 0, 'B': 0, 'i': 0, 'l': 0,
'f': 0, 'd': 0, 'F': 1, 'D': 1}
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
_array_type = [['f', 'd'], ['F', 'D']]
def _commonType(*arrays):
kind = 0
precision = 0
for a in arrays:
t = a.dtype.char
kind = max(kind, _array_kind[t])
precision = max(precision, _array_precision[t])
return _array_type[kind][precision]
def _castCopy(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.char == type:
cast_arrays = cast_arrays + (a.copy(),)
else:
cast_arrays = cast_arrays + (a.astype(type),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def rsf2csf(T, Z, check_finite=True):
    """
    Convert real Schur form to complex Schur form.

    Convert a quasi-diagonal real-valued Schur form to the upper triangular
    complex-valued Schur form.

    Parameters
    ----------
    T : (M, M) array_like
        Real Schur form of the original array
    Z : (M, M) array_like
        Schur transformation matrix
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    T : (M, M) ndarray
        Complex Schur form of the original array
    Z : (M, M) ndarray
        Schur transformation matrix corresponding to the complex form

    See Also
    --------
    schur : Schur decomposition of an array

    Examples
    --------
    >>> from scipy.linalg import schur, rsf2csf
    >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
    >>> T, Z = schur(A)
    >>> T
    array([[ 2.65896708,  1.42440458, -1.92933439],
           [ 0.        , -0.32948354, -0.49063704],
           [ 0.        ,  1.31178921, -0.32948354]])
    >>> Z
    array([[0.72711591, -0.60156188,  0.33079564],
           [0.52839428,  0.79801892,  0.28976765],
           [0.43829436,  0.03590414, -0.89811411]])
    >>> T2 , Z2 = rsf2csf(T, Z)
    >>> T2
    array([[2.65896708+0.j, -1.64592781+0.743164187j, -1.21516887+1.00660462j],
           [0.+0.j , -0.32948354+8.02254558e-01j, -0.82115218-2.77555756e-17j],
           [0.+0.j , 0.+0.j, -0.32948354-0.802254558j]])
    >>> Z2
    array([[0.72711591+0.j,  0.28220393-0.31385693j,  0.51319638-0.17258824j],
           [0.52839428+0.j,  0.24720268+0.41635578j, -0.68079517-0.15118243j],
           [0.43829436+0.j, -0.76618703+0.01873251j, -0.03063006+0.46857912j]])
    """
    if check_finite:
        Z, T = map(asarray_chkfinite, (Z, T))
    else:
        Z, T = map(asarray, (Z, T))

    # Both inputs must be square and of matching size.
    for ind, X in enumerate([Z, T]):
        if X.ndim != 2 or X.shape[0] != X.shape[1]:
            raise ValueError("Input '{}' must be square.".format('ZT'[ind]))
    if T.shape[0] != Z.shape[0]:
        raise ValueError("Input array shapes must match: Z: {} vs. T: {}"
                         "".format(Z.shape, T.shape))
    N = T.shape[0]
    # Promote both arrays to a common complex dtype and work on copies.
    t = _commonType(Z, T, array([3.0], 'F'))
    Z, T = _castCopy(t, Z, T)

    # Walk the subdiagonal bottom-up, zeroing each non-negligible entry with
    # a complex Givens rotation (which splits the corresponding 2x2 block's
    # complex-conjugate eigenvalue pair onto the diagonal).
    for m in range(N-1, 0, -1):
        # Treat T[m, m-1] as zero when it is small relative to the adjacent
        # diagonal entries (eps is the double-precision machine epsilon).
        if abs(T[m, m-1]) > eps*(abs(T[m-1, m-1]) + abs(T[m, m])):
            # Shifted eigenvalues of the trailing 2x2 block.
            mu = eigvals(T[m-1:m+1, m-1:m+1]) - T[m, m]
            r = norm([mu[0], T[m, m-1]])
            c = mu[0] / r
            s = T[m, m-1] / r
            # 2x2 unitary Givens rotation chosen to annihilate T[m, m-1].
            G = array([[c.conj(), s], [-s, c]], dtype=t)

            # Similarity transform: G from the left on rows m-1:m+1, G^H from
            # the right on columns m-1:m+1, and accumulate G^H into Z.
            T[m-1:m+1, m-1:] = G.dot(T[m-1:m+1, m-1:])
            T[:m+1, m-1:m+1] = T[:m+1, m-1:m+1].dot(G.conj().T)
            Z[:, m-1:m+1] = Z[:, m-1:m+1].dot(G.conj().T)

        # Force exact zero below the diagonal regardless of the branch taken.
        T[m, m-1] = 0.0
    return T, Z
|
rockyzhang/zhangyanhit-python-for-android-mips | refs/heads/master | python3-alpha/python3-src/Lib/nturl2path.py | 56 | """Convert a NT pathname to a file URL and vice versa."""
def url2pathname(url):
    """OS-specific conversion from a relative URL of the 'file' scheme
    to a file system path; not recommended for general use."""
    # e.g.
    #   ///C|/foo/bar/spam.foo
    # becomes
    #   C:\foo\bar\spam.foo
    import string, urllib.parse
    # Windows itself uses ":" even in URLs.
    url = url.replace(':', '|')
    if '|' not in url:
        # No drive specifier: just turn slashes into backslashes.
        if url[:4] == '////':
            # ////host/path/on/remote/host becomes \\host\path\on\remote\host
            # (note the halving of slashes at the start of the path)
            url = url[2:]
        # Unquote only after the separator swap, so quoted slashes survive.
        return urllib.parse.unquote(url.replace('/', '\\'))
    comp = url.split('|')
    if len(comp) != 2 or comp[0][-1] not in string.ascii_letters:
        raise IOError('Bad URL: ' + url)
    drive = comp[0][-1].upper()
    path = drive + ':' + ''.join(
        '\\' + urllib.parse.unquote(part)
        for part in comp[1].split('/') if part)
    # Issue #11474 - handing url such as |c/|
    if path.endswith(':') and url.endswith('/'):
        path += '\\'
    return path
def pathname2url(p):
    """OS-specific conversion from a file system path to a relative URL
    of the 'file' scheme; not recommended for general use."""
    # e.g.
    #   C:\foo\bar\spam.foo
    # becomes
    #   ///C|/foo/bar/spam.foo
    import urllib.parse
    if ':' not in p:
        # No drive specifier: convert backslashes and percent-quote the name.
        if p[:2] == '\\\\':
            # \\host\path\on\remote\host becomes ////host/path/on/remote/host
            # (note the doubling of slashes at the start of the path)
            p = '\\\\' + p
        return urllib.parse.quote(p.replace('\\', '/'))
    comp = p.split(':')
    if len(comp) != 2 or len(comp[0]) > 1:
        raise IOError('Bad path: ' + p)
    drive = urllib.parse.quote(comp[0].upper())
    tail = ''.join(
        '/' + urllib.parse.quote(part)
        for part in comp[1].split('\\') if part)
    return '///' + drive + ':' + tail
|
siosio/intellij-community | refs/heads/master | python/testData/codeInsight/smartEnter/func.py | 83 | def <caret>foo(*a)
if a
pass |
drhoet/avr-remote | refs/heads/master | main.py | 1 | import sys
# Guard: the code below relies on Python >= 3.6 features; bail out early with
# a clear message instead of failing later with a syntax/runtime error.
if sys.version_info < (3,6):
    print("This script requires Python version >= 3.6")
    sys.exit(1)
import os
import json
import argparse
from avrremote.default_config import default_config as config
from avrremote.server import Server
def _load_config(config_file=None):
    """Merge a JSON config file into the module-level `config` dict.

    Falls back to the AVRREMOTE_SETTINGS environment variable when no path is
    given; when neither is set, the default configuration stays untouched.
    """
    if config_file is None:
        # try the env variable as a fallback
        config_file = os.getenv('AVRREMOTE_SETTINGS', None)
    if config_file is None:
        return
    with open(config_file) as fp:
        config.update(json.load(fp))
def create_app(loop):
    """ Entry point used by the developer tools of asyncio. """
    # Load config (path taken from AVRREMOTE_SETTINGS, if set), then serve on
    # all interfaces with the static files handled by the app itself.
    _load_config()
    dev_server = Server('0.0.0.0', 5000, config, serve_static=True)
    dev_server.start()
if __name__ == '__main__':
    # Command-line entry point: parse options, load the configuration, then
    # start the server (avrremote.server.Server).
    parser = argparse.ArgumentParser()
    # With socket activation, systemd supplies the listening socket, so the
    # --host/--port options are ignored in that mode.
    parser.add_argument('--socket-activation', action="store_true", help="Use the port number assigned by systemd socket activation")
    parser.add_argument('--config-file', help="Specify the config file to use. If not specified, will check if a config file path is defined in environment variable AVRREMOTE_SETTINGS. If also this is not specified, the default configuration is used.")
    parser.add_argument('--port', type=int, default=8080, help="The port to listen on (8080 if omitted). Ignored when --socket-activation is specified.")
    parser.add_argument('--host', default='127.0.0.1', help="The IP address to listen on (127.0.0.1 if omitted). Ignored when --socket-activation is specified.")
    parser.add_argument('--serve-static', '--static', action='store_true', help='Serve the static content on /static. You should be using some web server to do this.')
    args = parser.parse_args()
    # _load_config falls back to AVRREMOTE_SETTINGS when --config-file is omitted.
    _load_config(args.config_file)
    server = Server(args.host, args.port, config, socket_activation=args.socket_activation, serve_static=args.serve_static)
    server.start()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.