repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
playfire/django-email-from-template | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
from setuptools import setup, find_packages

# Package metadata gathered in one mapping so it is easy to scan and extend.
PACKAGE_METADATA = dict(
    name='django-email-from-template',
    description="Send emails generated entirely from Django templates.",
    version='0.1',
    url='http://code.playfire.com/',
    author='Playfire.com',
    author_email='tech@playfire.com',
    license='BSD',
    packages=find_packages(),
    # Ship every template file bundled with any discovered package.
    package_data={'': ['templates/*/*']},
)

setup(**PACKAGE_METADATA)
|
diablo1281/MissionPlanner | refs/heads/master | ExtLibs/Mavlink/pymavlink/generator/mavgen_csharp.py | 6 | #!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a C# implementation
Copyright Michael Oborne 2016
Released under GNU GPL version 3 or later
'''
import sys, textwrap, os, time, re
from . import mavparse, mavtemplate
# Module-level template engine shared by every generate_* helper below.
t = mavtemplate.MAVTemplate()
def generate_message_header(f, xml):
    """Render the C# file preamble for one dialect.

    Emits protocol constants, the MAVLINK_MESSAGE_INFOS table, the
    message_info struct and the MAVLINK_MSG_ID enum into *f*.
    """
    # Map parsed XML flags onto the template variables referenced below.
    if xml.little_endian:
        xml.mavlink_endian = "MAVLINK_LITTLE_ENDIAN"
    else:
        xml.mavlink_endian = "MAVLINK_BIG_ENDIAN"
    if xml.crc_extra:
        xml.crc_extra_define = "1"
    else:
        xml.crc_extra_define = "0"
    if xml.command_24bit:
        xml.command_24bit_define = "1"
    else:
        xml.command_24bit_define = "0"
    if xml.sort_fields:
        xml.aligned_fields_define = "1"
    else:
        xml.aligned_fields_define = "0"
    # work out the included headers
    xml.include_list = []
    for i in xml.include:
        base = i[:-4]  # drop the trailing ".xml"
        xml.include_list.append(mav_include(base))
    xml.message_names_enum = ''
    # and message CRCs array
    xml.message_infos_array = ''
    if xml.command_24bit:
        # we sort with primary key msgid, secondary key dialect
        for msgid in sorted(xml.message_names.keys()):
            name = xml.message_names[msgid]
            xml.message_infos_array += ' new message_info(%u, "%s", %u, %u, %u, typeof( mavlink_%s_t )),\n' % (msgid,
                name,
                xml.message_crcs[msgid],
                xml.message_min_lengths[msgid],
                xml.message_lengths[msgid],
                name.lower())
            xml.message_names_enum += '%s = %u,\n' % (name, msgid)
    else:
        # MAVLink1 path: message ids fit in one byte, so walk the full 0-255 range.
        for msgid in range(256):
            crc = xml.message_crcs.get(msgid, None)
            name = xml.message_names.get(msgid, None)
            length = xml.message_lengths.get(msgid, None)
            if name is not None:
                # NOTE(review): *length* is passed for both the min-length and
                # length slots here (MAVLink1 has no separate minimum).
                xml.message_infos_array += ' new message_info(%u, "%s", %u, %u, %u, typeof( mavlink_%s_t )),\n' % (msgid,
                    name,
                    crc,
                    length,
                    length,
                    name.lower())
                xml.message_names_enum += '%s = %u,\n' % (name, msgid)
    # add some extra field attributes for convenience with arrays
    for m in xml.enum:
        # Flatten descriptions to a single line so they fit in C# /// comments.
        m.description = m.description.replace("\n", " ")
        m.description = m.description.replace("\r", " ")
        for fe in m.entry:
            fe.description = fe.description.replace("\n", " ")
            fe.description = fe.description.replace("\r", " ")
            # Shorten entry names by stripping redundant prefixes.
            fe.name = fe.name.replace(m.name + "_", "")
            fe.name = fe.name.replace("NAV_", "")
            # C# identifiers may not start with a digit; prefix with "_".
            firstchar = re.search('^([0-9])', fe.name)
            if firstchar != None and firstchar.group():
                fe.name = '_%s' % fe.name
    t.write(f, '''
using System;
using System.Collections.Generic;
using System.Text;
using System.Runtime.InteropServices;
public partial class MAVLink
{
public const string MAVLINK_BUILD_DATE = "${parse_time}";
public const string MAVLINK_WIRE_PROTOCOL_VERSION = "${wire_protocol_version}";
public const int MAVLINK_MAX_PAYLOAD_LEN = ${largest_payload};
public const byte MAVLINK_CORE_HEADER_LEN = 9;///< Length of core header (of the comm. layer)
public const byte MAVLINK_CORE_HEADER_MAVLINK1_LEN = 5;///< Length of MAVLink1 core header (of the comm. layer)
public const byte MAVLINK_NUM_HEADER_BYTES = (MAVLINK_CORE_HEADER_LEN + 1);///< Length of all header bytes, including core and stx
public const byte MAVLINK_NUM_CHECKSUM_BYTES = 2;
public const byte MAVLINK_NUM_NON_PAYLOAD_BYTES = (MAVLINK_NUM_HEADER_BYTES + MAVLINK_NUM_CHECKSUM_BYTES);
public const int MAVLINK_MAX_PACKET_LEN = (MAVLINK_MAX_PAYLOAD_LEN + MAVLINK_NUM_NON_PAYLOAD_BYTES + MAVLINK_SIGNATURE_BLOCK_LEN);///< Maximum packet length
public const byte MAVLINK_SIGNATURE_BLOCK_LEN = 13;
public const int MAVLINK_LITTLE_ENDIAN = 1;
public const int MAVLINK_BIG_ENDIAN = 0;
public const byte MAVLINK_STX = ${protocol_marker};
public const byte MAVLINK_STX_MAVLINK1 = 0xFE;
public const byte MAVLINK_ENDIAN = ${mavlink_endian};
public const bool MAVLINK_ALIGNED_FIELDS = (${aligned_fields_define} == 1);
public const byte MAVLINK_CRC_EXTRA = ${crc_extra_define};
public const byte MAVLINK_COMMAND_24BIT = ${command_24bit_define};
public const bool MAVLINK_NEED_BYTE_SWAP = (MAVLINK_ENDIAN == MAVLINK_LITTLE_ENDIAN);
// msgid, name, crc, length, type
public static readonly message_info[] MAVLINK_MESSAGE_INFOS = new message_info[] {
${message_infos_array}
};
public const byte MAVLINK_VERSION = ${version};
public const byte MAVLINK_IFLAG_SIGNED= 0x01;
public const byte MAVLINK_IFLAG_MASK = 0x01;
public struct message_info
{
public uint msgid { get; internal set; }
public string name { get; internal set; }
public byte crc { get; internal set; }
public uint minlength { get; internal set; }
public uint length { get; internal set; }
public Type type { get; internal set; }
public message_info(uint msgid, string name, byte crc, uint minlength, uint length, Type type)
{
this.msgid = msgid;
this.name = name;
this.crc = crc;
this.minlength = minlength;
this.length = length;
this.type = type;
}
public override string ToString()
{
return String.Format("{0} - {1}",name,msgid);
}
}
public enum MAVLINK_MSG_ID
{
${message_names_enum}
}
''', xml)
def generate_message_enums(f, xml):
    """Emit one C# enum per MAVLink enum defined in *xml* into *f*."""
    # add some extra field attributes for convenience with arrays
    for m in xml.enum:
        # Flatten descriptions so they fit in single-line C# /// comments.
        m.description = m.description.replace("\n", " ")
        m.description = m.description.replace("\r", " ")
        for fe in m.entry:
            fe.description = fe.description.replace("\n", " ")
            fe.description = fe.description.replace("\r", " ")
            # Strip the redundant enum-name prefix from each entry name.
            fe.name = fe.name.replace(m.name + "_", "")
            # C# identifiers may not start with a digit; prefix with "_".
            firstchar = re.search('^([0-9])', fe.name)
            if firstchar != None and firstchar.group():
                fe.name = '_%s' % fe.name
    t.write(f, '''
${{enum:
///<summary> ${description} </summary>
public enum ${name}
{
${{entry: ///<summary> ${description} |${{param:${description}| }} </summary>
${name}=${value},
}}
};
}}
''', xml)
def generate_message_footer(f, xml):
    """Emit the closing brace of the MAVLink partial class and close *f*.

    NOTE: this helper closes the file handle opened by generate();
    callers must not write to *f* afterwards.
    """
    t.write(f, '''
}
''', xml)
    f.close()
def generate_message_h(f, directory, m):
    '''generate per-message header for a XML file'''
    # Each message becomes one packed C# struct whose layout mirrors the
    # MAVLink wire format (Pack=1, explicit Size).
    # NOTE(review): *directory* is unused here; kept for signature parity.
    t.write(f, '''
[StructLayout(LayoutKind.Sequential,Pack=1,Size=${wire_length})]
public struct mavlink_${name_lower}_t
{
${{ordered_fields: /// <summary> ${description} </summary>
${array_prefix} ${type} ${name}${array_suffix};
}}
};
''', m)
class mav_include(object):
    """Lightweight record for one included MAVLink XML file.

    Holds the include's base name (its filename with the ".xml"
    extension already stripped by the caller).
    """

    def __init__(self, base):
        # Base name of the included dialect, e.g. "common".
        self.base = base
def generate_one(fh, basename, xml):
    '''generate headers for one XML file'''
    directory = os.path.join(basename, xml.basename)
    print("Generating CSharp implementation in directory %s" % directory)
    mavparse.mkdir_p(directory)
    # add some extra field attributes for convenience with arrays
    for m in xml.message:
        m.msg_name = m.name
        if xml.crc_extra:
            m.crc_extra_arg = ", %s" % m.crc_extra
        else:
            m.crc_extra_arg = ""
        m.msg_nameid = "MAVLINK_MSG_ID_${name} = ${id}"
        for f in m.fields:
            # Flatten descriptions so they fit on one C# comment line.
            f.description = f.description.replace("\n", " ")
            f.description = f.description.replace("\r", "")
            if f.array_length != 0:
                # Array field: marshalled as a fixed-size inline array.
                f.array_suffix = ''
                f.array_prefix = '[MarshalAs(UnmanagedType.ByValArray,SizeConst=%u)]\n\t\tpublic' % f.array_length
                f.array_arg = ', %u' % f.array_length
                f.array_return_arg = '%u, ' % (f.array_length)
                f.array_tag = ''
                f.array_const = 'const '
                f.decode_left = "%s.%s = " % (m.name_lower, f.name)
                f.decode_right = ''
                f.return_type = 'void'
                f.return_value = 'void'
                # Map MAVLink wire types to C# array types.
                if f.type == 'char':
                    f.type = "byte[]"
                    f.array_tag = 'System.Text.ASCIIEncoding.ASCII.GetString(msg,%u,%u); //' % (f.wire_offset, f.array_length)
                    f.return_type = 'byte[]'
                    f.c_test_value = ".ToCharArray()";
                elif f.type == 'uint8_t':
                    f.type = "byte[]";
                    f.array_tag = 'getBytes'
                    f.return_type = 'byte[]'
                elif f.type == 'int8_t':
                    f.type = "byte[]";
                    f.array_tag = 'getBytes'
                    f.return_type = 'byte[]'
                elif f.type == 'int16_t':
                    f.type = "Int16[]";
                    f.array_tag = 'getBytes'
                    f.return_type = 'Int16[]'
                elif f.type == 'uint16_t':
                    f.type = "UInt16[]";
                    f.array_tag = 'getBytes'
                    f.return_type = 'UInt16[]'
                elif f.type == 'float':
                    f.type = "float[]";
                    f.array_tag = 'getBytes'
                    f.return_type = 'float[]'
                else:
                    # Unhandled array element type: build a C-style test value
                    # and emit a conspicuous "!!!" marker into the output.
                    test_strings = []
                    for v in f.test_value:
                        test_strings.append(str(v))
                    f.c_test_value = '{ %s }' % ', '.join(test_strings)
                    f.array_tag = '!!!%s' % f.type
                f.get_arg = ', %s %s' % (f.type, f.name)
            else:
                # Scalar field: map MAVLink wire type to the C# value type.
                if f.type == 'char':
                    f.type = "byte";
                elif f.type == 'uint8_t':
                    f.type = "byte";
                elif f.type == 'int8_t':
                    f.type = "byte";
                elif f.type == 'int16_t':
                    f.type = "Int16";
                elif f.type == 'uint16_t':
                    f.type = "UInt16";
                elif f.type == 'uint32_t':
                    f.type = "UInt32";
                elif f.type == 'int16_t':
                    # NOTE(review): duplicate of the int16_t arm above --
                    # this branch is unreachable dead code.
                    f.type = "Int16";
                elif f.type == 'int32_t':
                    f.type = "Int32";
                elif f.type == 'uint64_t':
                    f.type = "UInt64";
                elif f.type == 'int64_t':
                    f.type = "Int64";
                elif f.type == 'float':
                    f.type = "Single";
                else:
                    f.c_test_value = f.test_value
                f.array_suffix = ''
                f.array_prefix = 'public '
                f.array_tag = 'BitConverter.To%s' % f.type
                if f.type == 'byte':
                    f.array_tag = 'getByte'
                if f.name == 'fixed':  # this is a keyword
                    f.name = '@fixed'
                f.array_arg = ''
                f.array_return_arg = ''
                f.array_const = ''
                f.decode_left = "%s.%s = " % (m.name_lower, f.name)
                f.decode_right = ''
                f.get_arg = ''
                f.c_test_value = f.test_value
                f.return_type = f.type
    # cope with uint8_t_mavlink_version
    for m in xml.message:
        # Pre-split fields into argument / array / scalar groups for templates.
        m.arg_fields = []
        m.array_fields = []
        m.scalar_fields = []
        for f in m.ordered_fields:
            if f.array_length != 0:
                m.array_fields.append(f)
            else:
                m.scalar_fields.append(f)
        for f in m.fields:
            if not f.omit_arg:
                m.arg_fields.append(f)
                f.putname = f.name
            else:
                # Omitted arguments are emitted with their constant value.
                f.putname = f.const_value
    # Finally emit one packed struct per message.
    for m in xml.message:
        generate_message_h(fh, directory, m)
def generate(basename, xml_list):
    '''generate complete MAVLink Csharp implemenation'''
    primary = xml_list[0]
    out_dir = os.path.join(basename, primary.basename)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_file = open(os.path.join(out_dir, "mavlink.cs"), mode='w')
    # Header comes from the primary dialect only; enums and per-message
    # structs are emitted for every dialect in the list.
    generate_message_header(out_file, primary)
    for dialect in xml_list:
        generate_message_enums(out_file, dialect)
    for dialect in xml_list:
        generate_one(out_file, basename, dialect)
    # generate_message_footer() also closes the file handle.
    generate_message_footer(out_file, primary)
|
ChristineLaMuse/mozillians | refs/heads/master | vendor-local/lib/python/tablib/packages/odf/elementtypes.py | 83 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import *
# Inline element don't cause a box
# They are analogous to the HTML elements SPAN, B, I etc.
# Every inline element lives in the text namespace, so the table is
# generated from the bare local names.
inline_elements = tuple(
    (TEXTNS, localname)
    for localname in (
        u'a',
        u'author-initials',
        u'author-name',
        u'bibliography-mark',
        u'bookmark-ref',
        u'chapter',
        u'character-count',
        u'conditional-text',
        u'creation-date',
        u'creation-time',
        u'creator',
        u'database-display',
        u'database-name',
        u'database-next',
        u'database-row-number',
        u'database-row-select',
        u'date',
        u'dde-connection',
        u'description',
        u'editing-cycles',
        u'editing-duration',
        u'execute-macro',
        u'expression',
        u'file-name',
        u'hidden-paragraph',
        u'hidden-text',
        u'image-count',
        u'initial-creator',
        u'keywords',
        u'measure',
        u'modification-date',
        u'modification-time',
        u'note-ref',
        u'object-count',
        u'page-continuation',
        u'page-count',
        u'page-number',
        u'page-variable-get',
        u'page-variable-set',
        u'paragraph-count',
        u'placeholder',
        u'print-date',
        u'printed-by',
        u'print-time',
        u'reference-ref',
        u'ruby',
        u'ruby-base',
        u'ruby-text',
        u'script',
        u'sender-city',
        u'sender-company',
        u'sender-country',
        u'sender-email',
        u'sender-fax',
        u'sender-firstname',
        u'sender-initials',
        u'sender-lastname',
        u'sender-phone-private',
        u'sender-phone-work',
        u'sender-position',
        u'sender-postal-code',
        u'sender-state-or-province',
        u'sender-street',
        u'sender-title',
        u'sequence',
        u'sequence-ref',
        u'sheet-name',
        u'span',
        u'subject',
        u'table-count',
        u'table-formula',
        u'template-name',
        u'text-input',
        u'time',
        u'title',
        u'user-defined',
        u'user-field-get',
        u'user-field-input',
        u'variable-get',
        u'variable-input',
        u'variable-set',
        u'word-count',
    )
)
# It is almost impossible to determine what elements are block elements.
# There are so many that don't fit the form
# All known block elements are in the text namespace.
block_elements = tuple(
    (TEXTNS, localname)
    for localname in (u'h', u'p', u'list', u'list-item', u'section')
)
# Declarative elements: declarations/templates that carry no directly
# rendered content. Grouped by namespace, then flattened into (ns, name)
# pairs in the original order.
declarative_elements = tuple(
    (ns, localname)
    for ns, localnames in (
        (OFFICENS, (u'font-face-decls',)),
        (PRESENTATIONNS, (u'date-time-decl', u'footer-decl', u'header-decl')),
        (TABLENS, (u'table-template',)),
        (TEXTNS, (
            u'alphabetical-index-entry-template',
            u'alphabetical-index-source',
            u'bibliography-entry-template',
            u'bibliography-source',
            u'dde-connection-decls',
            u'illustration-index-entry-template',
            u'illustration-index-source',
            u'index-source-styles',
            u'index-title-template',
            u'note-continuation-notice-backward',
            u'note-continuation-notice-forward',
            u'notes-configuration',
            u'object-index-entry-template',
            u'object-index-source',
            u'sequence-decls',
            u'table-index-entry-template',
            u'table-index-source',
            u'table-of-content-entry-template',
            u'table-of-content-source',
            u'user-field-decls',
            u'user-index-entry-template',
            u'user-index-source',
            u'variable-decls',
        )),
    )
    for localname in localnames
)
# Elements that are always empty (no child content). Grouped by namespace,
# then flattened into (ns, name) pairs preserving the original order.
empty_elements = tuple(
    (ns, localname)
    for ns, localnames in (
        (ANIMNS, (
            u'animate', u'animateColor', u'animateMotion', u'animateTransform',
            u'audio', u'param', u'set', u'transitionFilter',
        )),
        (CHARTNS, (
            u'categories', u'data-point', u'domain', u'error-indicator',
            u'floor', u'grid', u'legend', u'mean-value', u'regression-curve',
            u'stock-gain-marker', u'stock-loss-marker', u'stock-range-line',
            u'symbol-image', u'wall',
        )),
        (DR3DNS, (u'cube', u'extrude', u'light', u'rotate', u'sphere')),
        (DRAWNS, (
            u'contour-path', u'contour-polygon', u'equation', u'fill-image',
            u'floating-frame', u'glue-point', u'gradient', u'handle',
            u'hatch', u'layer', u'marker', u'opacity', u'page-thumbnail',
            u'param', u'stroke-dash',
        )),
        (FORMNS, (u'connection-resource', u'list-value', u'property')),
        (MANIFESTNS, (u'algorithm', u'key-derivation')),
        (METANS, (
            u'auto-reload', u'document-statistic', u'hyperlink-behaviour',
            u'template',
        )),
        (NUMBERNS, (
            u'am-pm', u'boolean', u'day', u'day-of-week', u'era',
            u'fraction', u'hours', u'minutes', u'month', u'quarter',
            u'scientific-number', u'seconds', u'text-content',
            u'week-of-year', u'year',
        )),
        (OFFICENS, (u'dde-source',)),
        (PRESENTATIONNS, (
            u'date-time', u'footer', u'header', u'placeholder', u'play',
            u'show', u'sound',
        )),
        (SCRIPTNS, (u'event-listener',)),
        (STYLENS, (
            u'column', u'column-sep', u'drop-cap', u'footnote-sep',
            u'list-level-properties', u'map', u'ruby-properties',
            u'table-column-properties', u'tab-stop', u'text-properties',
        )),
        (SVGNS, (
            u'definition-src', u'font-face-format', u'font-face-name',
            u'stop',
        )),
        (TABLENS, (
            u'body', u'cell-address', u'cell-range-source',
            u'change-deletion', u'consolidation', u'database-source-query',
            u'database-source-sql', u'database-source-table',
            u'data-pilot-display-info', u'data-pilot-field-reference',
            u'data-pilot-group-member', u'data-pilot-layout-info',
            u'data-pilot-member', u'data-pilot-sort-info',
            u'data-pilot-subtotal', u'dependency', u'error-macro',
            u'even-columns', u'even-rows', u'filter-condition',
            u'first-column', u'first-row', u'highlighted-range',
            u'insertion-cut-off', u'iteration', u'label-range',
            u'last-column', u'last-row', u'movement-cut-off',
            u'named-expression', u'named-range', u'null-date',
            u'odd-columns', u'odd-rows', u'operation', u'scenario',
            u'sort-by', u'sort-groups', u'source-range-address',
            u'source-service', u'subtotal-field', u'table-column',
            u'table-source', u'target-range-address',
        )),
        (TEXTNS, (
            u'alphabetical-index-auto-mark-file', u'alphabetical-index-mark',
            u'alphabetical-index-mark-end', u'alphabetical-index-mark-start',
            u'bookmark', u'bookmark-end', u'bookmark-start', u'change',
            u'change-end', u'change-start', u'dde-connection-decl',
            u'index-entry-bibliography', u'index-entry-chapter',
            u'index-entry-link-end', u'index-entry-link-start',
            u'index-entry-page-number', u'index-entry-tab-stop',
            u'index-entry-text', u'index-source-style', u'line-break',
            u'page', u'reference-mark', u'reference-mark-end',
            u'reference-mark-start', u's', u'section-source',
            u'sequence-decl', u'soft-page-break', u'sort-key', u'tab',
            u'toc-mark', u'toc-mark-end', u'toc-mark-start',
            u'user-field-decl', u'user-index-mark', u'user-index-mark-end',
            u'user-index-mark-start', u'variable-decl',
        )),
    )
    for localname in localnames
)
|
qbj/git_FuXiaotong | refs/heads/master | Projects/House_Price/house_price_RF_v2/evaluation.py | 1 |
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt

########################################################################
# DATA
########################################################################
# **************************** read test data ****************************************
# NOTE(review): hard-coded absolute Windows path; adjust to your checkout.
df_train_data = pd.read_csv('C:\\Users\\fuxt2\\Documents\\code\\python\\house_price\\data\\train.csv')

# **************************************** pick features ****************************************
# Column indices of the raw frame, presumably ordered by feature ranking
# from a previous run -- TODO confirm ordering semantics.
# NOTE(review): the first list is dead code -- it is immediately overwritten
# by the second assignment. Kept as-is for reference.
all_feature_indices = [71,5,72,9,14,39,75,69,64,45,74,73,35,48,29,6,78,55,36,63,40,28,16,68,42,31,25,50,10,1,33,32,21,24,7,13,8,77,65,22,52,15,79,76,47,58,23,60,11,18,2,41,70,4,56,44,54,51,30,67,3,12,0,34,38,59,43,57,37,66,26,20,62,53,49,19,46,27,17,61]
all_feature_indices = [72,9,71,5,74,69,39,75,45,64,42,13,6,73,14,55,29,36,35,31,65,16,40,10,28,25,68,50,78,7,52,63,79,33,15,1,48,8,70,32,77,24,41,60,21,22,23,2,51,11,18,58,47,0,76,67,44,4,30,3,12,59,66,54,57,34,37,38,56,20,43,26,62,53,19,49,27,46,17,61]
df_X = df_train_data.iloc[:, all_feature_indices].copy()

# **************************************** encode to number ****************************************
for i in df_X:
    # df_X[i].replace(np.NaN, 0, inplace=True)  # replace NaN with 0
    if df_X[i].dtypes != np.float64:
        # Non-numeric column: label-encode its string representation.
        df_X[i] = df_X[i].astype(str)  # convert to string
        encoder = LabelEncoder()
        encoder.fit(df_X[i])
        df_X[i] = encoder.transform(df_X[i])
    df_X[i].replace(np.NaN, np.mean(df_X[i]), inplace=True)  # replace NaN with mean

# **************************************** standardizing ****************************************
# Min-max scale every column into [0, 1].
f_min_max = lambda x: (x - np.min(x)) / (np.max(x) - np.min(x))
df_X = df_X.apply(f_min_max)

# **************************************** slice train & test data ****************************************
# Sequential (non-shuffled) 70/30 split.
percent_test = 0.3
max_ncol = len(df_train_data.columns)
max_nrow = df_train_data.__len__() + 1
mid_nrow = round(max_nrow * (1 - percent_test))
df_X_train = df_X[:mid_nrow].copy()
df_X_test = df_X[mid_nrow:].copy()
# Target is the last column of the raw frame (SalePrice in this dataset).
fl_y_train = df_train_data.iloc[:mid_nrow, max_ncol - 1:max_ncol]
fl_y_test = df_train_data.iloc[mid_nrow:, max_ncol - 1:max_ncol]

#######################################################################
# EVALUATION
#######################################################################
# **************************************** metric ****************************************
fl_true_y = fl_y_test.values.ravel()
# NOTE(review): this is RMSE normalised by mean(y) (i.e. NRMSE), not plain
# RMSE as the name suggests.
RMSE = lambda x, y: np.sqrt(np.mean((x - y)**2)) / np.mean(y)  # RMSE

# **************************************** error & tree depth ****************************************
# Sweep the number of selected features (2, 4, ..., 78) and record the
# test error for each run.
parameters = []
errors = []
min_para = 2
max_para = 79
n_feature_index = len(all_feature_indices)
df_X_train_org = df_X_train.copy()
df_X_test_org = df_X_test.copy()
for i in range(min_para, max_para, 2):
    # NOTE(review): these values index the ORIGINAL frame's columns but are
    # applied positionally to the already-reordered df_X_* frames -- the
    # selected columns may not be the intended top-i features; verify.
    pick_feature_indices = all_feature_indices[- i : n_feature_index]
    df_X_train = df_X_train_org.iloc[:, pick_feature_indices].copy()
    df_X_test = df_X_test_org.iloc[:, pick_feature_indices].copy()
    # para
    RF_regression_model = RandomForestRegressor(max_depth=16,  # bigger, more precise
                                                random_state=0,
                                                n_estimators=160,  # bigger, more precise
                                                # min_samples_leaf = i,  # bigger, less noise
                                                n_jobs=-1
                                                )
    # fitting
    RF_regression_model.fit(X=df_X_train, y=fl_y_train.values.ravel())
    fl_predict_y = RF_regression_model.predict(df_X_test)
    error = RMSE(fl_predict_y, fl_true_y)
    parameters.append(i)
    errors.append(error)

# plot
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(parameters, errors)
for xy in zip(parameters, errors):
    ax.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
plt.grid()
plt.show()

# conclusion:
# depth = 10
|
qiuzhong/crosswalk-test-suite | refs/heads/master | tools/build/pack_deb.py | 4 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yin, Haichao <haichaox.yin@intel.com>
import os
import shutil
import glob
import time
import sys
import stat
import random
import json
import logging
import zipfile
import signal
import fnmatch
import subprocess
import re
from optparse import OptionParser
# Python 2 only: re-expose sys.setdefaultencoding (hidden at interpreter
# startup) so the default string encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')

TOOL_VERSION = "v0.1"  # version tag of this packing tool
VERSION_FILE = "VERSION"  # name of the version file written into packages
DEFAULT_CMD_TIMEOUT = 600  # seconds before a shelled-out command is killed
PKG_TYPES = ["deb"]  # package formats supported by this tool

# Globals populated during startup / prepareBuildRoot(); None until then.
PKG_NAME = None
BUILD_PARAMETERS = None  # parsed command-line options
BUILD_ROOT = None  # scratch directory, e.g. /tmp/<random>
BUILD_ROOT_SRC = None
BUILD_ROOT_SRC_PKG = None
BUILD_ROOT_SRC_PKG_APP = None
BUILD_ROOT_SRC_SUB_APP = None
BUILD_ROOT_PKG = None
BUILD_ROOT_PKG_APP = None

LOG = None  # module logger, configured at startup
LOG_LEVEL = logging.DEBUG
# Build date stamp, e.g. "20240101".
BUILD_TIME = time.strftime('%Y%m%d', time.localtime(time.time()))
class ColorFormatter(logging.Formatter):
    """Logging formatter that ANSI-colors messages by level.

    Messages starting with "+" are rendered bold (prefix stripped);
    messages starting with "=" are rendered inverse. INFO/DEBUG/WARNING/
    ERROR are wrapped in green/blue/yellow/red respectively.

    NOTE: format() mutates record.msg, so the colored text is also what
    any other handler attached to the same logger will see.
    """

    def __init__(self, msg):
        logging.Formatter.__init__(self, msg)

    def format(self, record):
        red, green, yellow, blue = range(4)
        colors = {'INFO': green, 'DEBUG': blue,
                  'WARNING': yellow, 'ERROR': red}
        msg = record.msg
        # Fix: guard against empty messages (msg[0] raised IndexError).
        if msg and msg[0] == "+":
            msg = "\33[01m" + msg[1:] + "\033[0m"
        elif msg and msg[0] == "=":
            msg = "\33[07m" + msg + "\033[0m"
        levelname = record.levelname
        if levelname in colors:
            msg_color = "\033[0;%dm" % (
                31 + colors[levelname]) + msg + "\033[0m"
            record.msg = msg_color
        else:
            # Fix: previously the "+"/"=" transform was silently dropped for
            # levels without a color (e.g. CRITICAL); keep the transform.
            record.msg = msg
        return logging.Formatter.format(self, record)
def iterfindfiles(path, fnexp):
    """Yield the full path of every file under *path* whose basename
    matches the glob pattern *fnexp*."""
    for dirpath, _subdirs, filenames in os.walk(path):
        for matched in fnmatch.filter(filenames, fnexp):
            yield os.path.join(dirpath, matched)
def replaceUserString(path, fnexp, old_s, new_s):
    """Replace every literal occurrence of *old_s* with *new_s* in files
    under *path* whose names match the glob pattern *fnexp*.

    Files that cannot be read or written are logged and skipped.

    Fix: the original checked for old_s literally (str.find) but replaced
    with re.sub, which treats old_s as a regex -- inconsistent and unsafe
    for strings containing regex metacharacters. Replacement is now
    literal via str.replace, matching how callers use it.
    """
    for root, dirs, files in os.walk(path):
        for filename in fnmatch.filter(files, fnexp):
            sub_file = os.path.join(root, filename)
            try:
                with open(sub_file, 'r') as sub_read_obj:
                    read_string = sub_read_obj.read()
            except IOError as err:
                LOG.error("Read %s Error : " % sub_file + str(err))
                continue
            if read_string.find(old_s) >= 0:
                try:
                    with open(sub_file, 'w') as sub_write_obj:
                        sub_write_obj.write(
                            read_string.replace(old_s, new_s))
                except IOError as err:
                    LOG.error("Modify %s Error : " % sub_file + str(err))
                    continue
def isWindows():
    """Return True when running under Windows or Cygwin."""
    plat = sys.platform
    return plat.startswith("win") or plat == "cygwin"
def killProcesses(ppid=None):
    """Kill process *ppid* and (on POSIX) its descendants with SIGKILL.

    On Windows the whole tree is handled by a single TASKKILL call
    (implicitly returns None); on POSIX returns True/False.
    """
    if isWindows():
        subprocess.check_call("TASKKILL /F /PID %s /T" % ppid)
    else:
        ppid = str(ppid)
        pidgrp = []

        def GetChildPids(ppid):
            # List direct children of ppid by shelling out to ps/awk.
            command = "ps -ef | awk '{if ($3 ==%s) print $2;}'" % str(ppid)
            pids = os.popen(command).read()
            pids = pids.split()
            return pids

        # Collect the full descendant tree breadth-first (the list is
        # extended while being iterated, which walks each new level).
        pidgrp.extend(GetChildPids(ppid))
        for pid in pidgrp:
            pidgrp.extend(GetChildPids(pid))
        # Root pid goes first so it is killed last (pop() takes from the end).
        pidgrp.insert(0, ppid)
        while len(pidgrp) > 0:
            pid = pidgrp.pop()
            try:
                os.kill(int(pid), signal.SIGKILL)
                # NOTE(review): returning here exits after the FIRST
                # successful kill, leaving the rest of pidgrp running --
                # looks unintended; confirm before changing.
                return True
            except OSError:
                try:
                    os.popen("kill -9 %d" % int(pid))
                    return True
                except Exception:
                    return False
def safelyGetValue(origin_json=None, key=None):
    """Return origin_json[key] when both arguments are truthy and the key
    is present; otherwise return None (never raises KeyError)."""
    if not origin_json or not key:
        return None
    if key not in origin_json:
        return None
    return origin_json[key]
def checkContains(origin_str=None, key_str=None):
    """Case-insensitively report whether *key_str* occurs in *origin_str*."""
    haystack = origin_str.upper()
    needle = key_str.upper()
    return needle in haystack
def getRandomStr():
    """Return a random 15-character string drawn from lowercase letters
    and digits."""
    str_pool = "abcdefghijklmnopqrstuvwxyz1234567890"
    return "".join(random.choice(str_pool) for _ in range(15))
def zipDir(dir_path, zip_file):
    """Zip the contents of dir_path into zip_file (members stored with
    paths relative to dir_path). Returns True on success, False otherwise.
    """
    try:
        # Replace any pre-existing archive and ensure its parent dir exists.
        if os.path.exists(zip_file):
            if not doRemove([zip_file]):
                return False
        if not os.path.exists(os.path.dirname(zip_file)):
            os.makedirs(os.path.dirname(zip_file))
        z_file = zipfile.ZipFile(zip_file, "w")
        # chdir into dir_path so archive member names are relative.
        orig_dir = os.getcwd()
        os.chdir(dir_path)
        for root, dirs, files in os.walk("."):
            for i_file in files:
                LOG.info("zip %s" % os.path.join(root, i_file))
                z_file.write(os.path.join(root, i_file))
        z_file.close()
        os.chdir(orig_dir)
    except Exception as e:
        # NOTE(review): if an exception fires after os.chdir(dir_path),
        # the process cwd is not restored here.
        LOG.error("Fail to pack %s to %s: %s" % (dir_path, zip_file, e))
        return False
    LOG.info("Done to zip %s to %s" % (dir_path, zip_file))
    return True
def overwriteCopy(src, dest, symlinks=False, ignore=None):
    """Recursively copy *src* into *dest*, overwriting existing files.

    Unlike shutil.copytree, *dest* may already exist. The symlinks and
    ignore parameters mirror shutil.copytree's contract.
    """
    if not os.path.exists(dest):
        os.makedirs(dest)
    shutil.copystat(src, dest)
    sub_list = os.listdir(src)
    if ignore:
        # ignore(src, names) returns the names to exclude, same contract
        # as shutil.ignore_patterns().
        excl = ignore(src, sub_list)
        sub_list = [x for x in sub_list if x not in excl]
    for i_sub in sub_list:
        s_path = os.path.join(src, i_sub)
        d_path = os.path.join(dest, i_sub)
        if symlinks and os.path.islink(s_path):
            # Re-create the symlink instead of copying its target.
            if os.path.lexists(d_path):
                os.remove(d_path)
            os.symlink(os.readlink(s_path), d_path)
            try:
                s_path_s = os.lstat(s_path)
                s_path_mode = stat.S_IMODE(s_path_s.st_mode)
                # os.lchmod only exists on some platforms (not Linux);
                # failures are deliberately ignored.
                os.lchmod(d_path, s_path_mode)
            except Exception:
                pass
        elif os.path.isdir(s_path):
            overwriteCopy(s_path, d_path, symlinks, ignore)
        else:
            shutil.copy2(s_path, d_path)
def doCopy(src_item=None, dest_item=None):
    """Copy a file or a whole directory tree, creating parent directories
    as needed. Returns True on success, False on failure (logged)."""
    LOG.info("Copying %s to %s" % (src_item, dest_item))
    try:
        if os.path.isdir(src_item):
            # Directory: merge into dest, preserving symlinks.
            overwriteCopy(src_item, dest_item, symlinks=True)
        else:
            if not os.path.exists(os.path.dirname(dest_item)):
                LOG.info("Create non-existent dir: %s" %
                         os.path.dirname(dest_item))
                os.makedirs(os.path.dirname(dest_item))
            shutil.copy2(src_item, dest_item)
    except Exception as e:
        LOG.error("Fail to copy file %s: %s" % (src_item, e))
        return False
    return True
def doRemove(target_file_list=None):
    """Delete every path in *target_file_list* (directories recursively).

    Stops and returns False on the first failure; True when all removed.
    """
    for target in target_file_list:
        LOG.info("Removing %s" % target)
        # Directories need a recursive delete; plain files a simple unlink.
        remover = shutil.rmtree if os.path.isdir(target) else os.remove
        try:
            remover(target)
        except Exception as e:
            LOG.error("Fail to remove file %s: %s" % (target, e))
            return False
    return True
def updateCopylistPrefix(src_default, dest_default, src_sub, dest_sub):
    """Resolve a copylist (src, dest) entry pair.

    Entries beginning with the PACK-TOOL-ROOT tag are rebased onto the
    pack-tools dir / build root respectively; all others are joined onto
    the given defaults. Returns (resolved_src, resolved_dest).
    """
    PACK_TOOL_TAG = "PACK-TOOL-ROOT"
    if src_sub.startswith(PACK_TOOL_TAG):
        src_new = src_sub.replace(PACK_TOOL_TAG, BUILD_PARAMETERS.pkgpacktools)
    else:
        src_new = os.path.join(src_default, src_sub)
    if dest_sub.startswith(PACK_TOOL_TAG):
        dest_new = dest_sub.replace(PACK_TOOL_TAG, BUILD_ROOT)
    else:
        dest_new = os.path.join(dest_default, dest_sub)
    return (src_new, dest_new)
def buildSRC(src=None, dest=None, build_json=None):
    """Copy *src* to *dest*, then apply the suite's blacklist (delete
    matching globs) and copylist (extra copies) from *build_json*.

    Returns True on success or when src does not exist; False on failure.
    """
    if not os.path.exists(src):
        LOG.info("+Src dir does not exist, skip build src process ...")
        return True
    if not doCopy(src, dest):
        return False
    if "blacklist" in build_json:
        # NOTE(review): remove("") drops only ONE empty entry even when
        # count("") > 1 -- confirm lists never carry duplicates.
        if build_json["blacklist"].count("") > 0:
            build_json["blacklist"].remove("")
        black_file_list = []
        for i_black in build_json["blacklist"]:
            # Each blacklist entry is a glob relative to dest.
            black_file_list = black_file_list + \
                glob.glob(os.path.join(dest, i_black))
        # De-duplicate before removal.
        black_file_list = list(set(black_file_list))
        if not doRemove(black_file_list):
            return False
    if "copylist" in build_json:
        for i_s_key in build_json["copylist"].keys():
            if i_s_key and build_json["copylist"][i_s_key]:
                # Keys/values may carry the PACK-TOOL-ROOT prefix; resolve it.
                (src_updated, dest_updated) = updateCopylistPrefix(
                    src, dest, i_s_key, build_json["copylist"][i_s_key])
                if not doCopy(src_updated, dest_updated):
                    return False
    return True
def exitHandler(return_code=1):
    """Clean up the temporary build root (unless --notclean) and exit the
    process with *return_code*. Never returns."""
    LOG.info("+Cleaning build root folder ...")
    if not BUILD_PARAMETERS.bnotclean and os.path.exists(BUILD_ROOT):
        if not doRemove([BUILD_ROOT]):
            LOG.error("Fail to clean build root, exit ...")
            sys.exit(1)
    if return_code == 0:
        LOG.info("================ DONE ================")
    else:
        LOG.error(
            "================ Found Something Wrong !!! ================")
    sys.exit(return_code)
def prepareBuildRoot():
    """Create a fresh random build root under /tmp, derive the path
    globals from it, copy the suite source in, substitute the
    TESTER-HOME-DIR placeholder, and drop stale pkg zips.

    Returns True on success, False when a copy/remove step fails.
    """
    LOG.info("+Preparing build root folder ...")
    global BUILD_ROOT  # The build root directory, like as "/tmp/randomName"
    # The source code in the tmp directory, like as
    # "/tmp/randomName/crosswalk-test"
    global BUILD_ROOT_SRC
    # The source of the zip operate for all package, like as
    # "/tmp/randomName/pkg"
    global BUILD_ROOT_SRC_PKG
    # The source of the app_package operate for all package, like as
    # "/tmp/randomName/pkg-app"
    global BUILD_ROOT_SRC_PKG_APP
    # The source of the sub app_package operate for all package, like as
    # "/tmp/randomName/sub-app"
    global BUILD_ROOT_SRC_SUB_APP
    global BUILD_ROOT_PKG  # BUILD_ROOT_SRC_PKG + "opt" + PKG_NAME
    global BUILD_ROOT_PKG_APP  # BUILD_ROOT_SRC_PKG_APP + "opt" + PKG_NAME
    # Retry until an unused random directory name is found.
    while True:
        BUILD_ROOT = os.path.join("/tmp", getRandomStr())
        if os.path.exists(BUILD_ROOT):
            continue
        else:
            break
    BUILD_ROOT_SRC = os.path.join(BUILD_ROOT, PKG_NAME)
    BUILD_ROOT_SRC_PKG = os.path.join(BUILD_ROOT, "pkg")
    BUILD_ROOT_SRC_PKG_APP = os.path.join(BUILD_ROOT, "pkg-app")
    BUILD_ROOT_SRC_SUB_APP = os.path.join(BUILD_ROOT, "sub-app")
    BUILD_ROOT_PKG = os.path.join(BUILD_ROOT, "pkg", "opt", PKG_NAME)
    BUILD_ROOT_PKG_APP = os.path.join(BUILD_ROOT, "pkg-app", "opt", PKG_NAME)
    if not doCopy(BUILD_PARAMETERS.srcdir, BUILD_ROOT_SRC):
        return False
    else:
        # Rewrite the TESTER-HOME-DIR placeholder to the real home dir.
        replaceUserString(
            BUILD_ROOT_SRC,
            '*',
            'TESTER-HOME-DIR',
            "/home/%s" %
            BUILD_PARAMETERS.user)
    if not doRemove(
            glob.glob(os.path.join(BUILD_ROOT_SRC, "%s*.zip" % PKG_NAME))):
        return False
    return True
def doCMD(cmd, time_out=DEFAULT_CMD_TIMEOUT, no_check_return=False):
    """Run a shell command, polling every 2s until it exits or time_out
    seconds elapse (then the whole process tree is killed).

    Returns True on success; with no_check_return=True a non-zero exit
    status is still treated as success.
    """
    LOG.info("Doing CMD: [ %s ]" % cmd)
    pre_time = time.time()
    cmd_proc = subprocess.Popen(args=cmd, shell=True)
    while True:
        cmd_exit_code = cmd_proc.poll()
        elapsed_time = time.time() - pre_time
        if cmd_exit_code is None:
            # Still running: enforce the timeout.
            if elapsed_time >= time_out:
                killProcesses(ppid=cmd_proc.pid)
                LOG.error("Timeout to exe CMD")
                return False
        else:
            if not no_check_return and cmd_exit_code != 0:
                LOG.error("Fail to exe CMD")
                return False
            break
        time.sleep(2)
    return True
def packDEB(build_json=None, app_src=None, app_dest=None, app_name=None):
    """Build a .deb in app_src via `crosswalk-app build`, rename the
    produced file(s) to <pkg_name>.deb, and copy app_src/pkg into
    app_dest. Returns True on success.

    NOTE(review): this chdirs into app_src (and then app_src/pkg) and
    only restores the original cwd on the rename-failure path, so the
    caller's working directory changes on success -- confirm callers do
    not depend on cwd afterwards.
    """
    pack_cmd = "crosswalk-app build"
    LOG.info("Packing cmd : %s" % pack_cmd)
    orig_dir = os.getcwd()
    os.chdir(app_src)
    LOG.info("Change dir to : %s" % app_src)
    # Ensure a reverse-DNS style package name for the crosswalk tooling.
    if app_name.find("org") == -1:
        pkg_name = "org.test." + app_name.replace("-", "")
    else:
        pkg_name = app_name
    if doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
        for parent, dirnames, filenames in os.walk(
                os.path.join(app_src, "pkg")):
            LOG.info("ReName source file is : %s" % filenames)
            if app_name:
                os.chdir(os.path.join(app_src, "pkg"))
                for filename in filenames:
                    rename_cmd = "mv %s %s" % (filename, pkg_name + ".deb")
                    if not doCMD(rename_cmd, DEFAULT_CMD_TIMEOUT):
                        os.chdir(orig_dir)
                        return False
    else:
        return False
    # After build successfully, copy the .deb from app_src+"pkg" to app_dest
    if not doCopy(
            os.path.join(app_src, "pkg"),
            app_dest):
        return False
    return True
def packAPP(build_json=None, app_src=None, app_dest=None, app_name=None):
    """Pack one app from app_src into app_dest, dispatching on the
    configured package type. Returns True on success."""
    LOG.info("Packing %s(%s)" % (app_name, app_src))
    if not os.path.exists(app_dest):
        try:
            os.makedirs(app_dest)
        except Exception as e:
            LOG.error("Fail to init package install dest dir: %s" % e)
            return False
    # Guard clause: only DEB packaging is supported here.
    if not checkContains(BUILD_PARAMETERS.pkgtype, "DEB"):
        LOG.error("Got wrong pkg type: %s" % BUILD_PARAMETERS.pkgtype)
        return False
    if not packDEB(build_json, app_src, app_dest, app_name):
        return False
    LOG.info("Success to pack APP: %s" % app_name)
    return True
def createIndexFile(index_file_path=None, hosted_app=None):
    """Write a redirect index.html at index_file_path pointing at the
    packaged webrunner test suite.

    hosted_app is accepted for interface compatibility but unused here.
    Returns True on success, False when the file cannot be written.
    """
    try:
        index_url = "opt/%s/webrunner/index.html?testsuite=../tests.xml" \
                    "&testprefix=../../../.." % PKG_NAME
        html_content = "<!doctype html><head><meta http-equiv='Refresh' " \
                       "content='1; url=%s'></head>" % index_url
        # with-statement guarantees the handle is closed even if write()
        # raises (the original leaked the handle on that path).
        with open(index_file_path, "w") as index_file:
            index_file.write(html_content)
    except Exception as e:
        LOG.error("Fail to create index.html for top-app: %s" % e)
        return False
    LOG.info("Success to create index file %s" % index_file_path)
    return True
def buildSubAPP(app_dir=None, build_json=None, app_dest_default=None):
    """Create and pack sub app(s) described by build_json under
    BUILD_ROOT_SRC_SUB_APP, installing the result(s) into
    app_dest_default (or its "install-path" subdir).

    With "all-apps": "true" every directory under the created project is
    packed individually; otherwise the single app is packed.
    Returns True on success, False otherwise.
    """
    app_dir_inside = safelyGetValue(build_json, "app-dir")
    if app_dir_inside:
        # The json entry overrides the caller-supplied app dir key.
        app_dir = app_dir_inside
    LOG.info("+Building sub APP(s) from %s ..." % app_dir)
    app_dir = os.path.join(BUILD_ROOT_SRC, app_dir)
    app_name = safelyGetValue(build_json, "app-name")
    if not app_name:
        app_name = os.path.basename(app_dir)
    app_src = os.path.join(BUILD_ROOT_SRC_SUB_APP, app_name)
    pkg_name = app_name
    LOG.info("+Change dir to %s: " % BUILD_ROOT_SRC_SUB_APP)
    if not os.path.exists(BUILD_ROOT_SRC_SUB_APP):
        LOG.info("Create BUILD_ROOT_SRC_SUB_APP dir: %s" %
                 BUILD_ROOT_SRC_SUB_APP)
        os.makedirs(BUILD_ROOT_SRC_SUB_APP)
    os.chdir(BUILD_ROOT_SRC_SUB_APP)
    pack_cmd = "crosswalk-app create " + pkg_name
    orig_dir = os.getcwd()
    if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    # copy source to BUILD_ROOT_SRC_SUB_APP/pkg_name/app
    if buildSRC(app_dir, os.path.join(app_src, "app"), build_json):
        app_dest = safelyGetValue(build_json, "install-path")
        if app_dest:
            app_dest = os.path.join(app_dest_default, app_dest)
        else:
            app_dest = app_dest_default
        if safelyGetValue(build_json, "all-apps") == "true":
            app_dirs = os.listdir(app_src)
            apps_num = 0
            for i_app_dir in app_dirs:
                if os.path.isdir(os.path.join(app_src, i_app_dir)):
                    i_app_name = os.path.basename(i_app_dir)
                    if not packAPP(
                            build_json, os.path.join(app_src, i_app_name),
                            app_dest, i_app_name):
                        return False
                    else:
                        apps_num = apps_num + 1
            if apps_num > 0:
                LOG.info("Totally packed %d apps in %s" % (apps_num, app_dir))
                return True
        else:
            return packAPP(build_json, app_src, app_dest, app_name)
    return False
def buildPKGAPP(build_json=None):
    """Build the top-level package app: create the crosswalk project,
    copy icon/manifest, generate the redirect index, copy the suite
    sources (blacklist applied), normalize tests.xml, build sub apps,
    and pack everything into BUILD_ROOT_PKG.

    Returns True on success, False on a step failure; exits the process
    on unexpected exceptions.
    """
    try:
        LOG.info("+Building package APP ...")
        if not os.path.exists(
                os.path.join(BUILD_ROOT_SRC, "crosswalk-app-tools-deb")):
            try:
                os.makedirs(
                    os.path.join(
                        BUILD_ROOT_SRC,
                        "crosswalk-app-tools-deb"))
            except Exception as e:
                LOG.error(
                    "Fail to make the crosswalk-app-tools-deb dir: %s" %
                    e)
                return False
        pkg_name = "org.test." + PKG_NAME.replace("-", "")
        pack_cmd = "crosswalk-app create " + pkg_name
        orig_dir = os.getcwd()
        LOG.info(
            "+Change dir to %s: " %
            os.path.join(
                BUILD_ROOT_SRC,
                "crosswalk-app-tools-deb"))
        os.chdir(os.path.join(BUILD_ROOT_SRC, "crosswalk-app-tools-deb"))
        if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
            os.chdir(orig_dir)
            return False
        if not doCopy(os.path.join(BUILD_ROOT_SRC, "crosswalk-app-tools-deb", pkg_name),
                      os.path.join(BUILD_ROOT_SRC_PKG_APP, pkg_name)):
            return False
        if not doCopy(os.path.join(BUILD_ROOT_SRC, "icon.png"),
                      os.path.join(BUILD_ROOT_SRC_PKG_APP, pkg_name, "app", "icon.png")):
            return False
        if not doCopy(os.path.join(BUILD_ROOT_SRC, "manifest.json"),
                      os.path.join(BUILD_ROOT_SRC_PKG_APP, pkg_name, "app", "manifest.json")):
            return False
        if not createIndexFile(
                os.path.join(BUILD_ROOT_SRC_PKG_APP, pkg_name, "app", "index.html")):
            return False
        if "blacklist" not in build_json:
            build_json.update({"blacklist": []})
        build_json["blacklist"].extend(PKG_BLACK_LIST)
        # NOTE(review): this binds a *local* BUILD_ROOT_PKG_APP (no
        # `global` statement), shadowing the module-level path for the
        # rest of this function only -- confirm that is intended.
        BUILD_ROOT_PKG_APP = os.path.join(
            BUILD_ROOT_SRC_PKG_APP,
            pkg_name,
            "app",
            "opt",
            PKG_NAME)
        if not buildSRC(BUILD_ROOT_SRC, BUILD_ROOT_PKG_APP, build_json):
            return False
        comXML = os.path.join(BUILD_ROOT_PKG_APP, "tests.xml")
        linuxXML = os.path.join(BUILD_ROOT_PKG_APP, "tests.linux.xml")
        # Prefer the linux-specific test list when present.
        if os.path.exists(linuxXML):
            if not doCMD("rm -rf %s" % comXML):
                return False
            if not doCMD("mv %s %s" % (linuxXML, comXML)):
                return False
        if "subapp-list" in build_json:
            for i_sub_app in build_json["subapp-list"].keys():
                if not buildSubAPP(
                        i_sub_app, build_json["subapp-list"][i_sub_app],
                        BUILD_ROOT_PKG_APP):
                    return False
        if not packAPP(
                build_json, os.path.join(BUILD_ROOT_SRC_PKG_APP, pkg_name), BUILD_ROOT_PKG, PKG_NAME):
            return False
        return True
    except Exception as e:
        LOG.error("Got wrong options: %s, exit ..." % e)
        sys.exit(1)
def buildPKG(build_json=None):
    """Build the whole package into BUILD_ROOT_PKG: suite sources (with
    the global blacklist applied), optional sub apps, and the optional
    top-level pkg-app. Returns True on success."""
    if "blacklist" not in build_json:
        build_json.update({"blacklist": []})
    build_json["blacklist"].extend(PKG_BLACK_LIST)
    if not buildSRC(BUILD_ROOT_SRC, BUILD_ROOT_PKG, build_json):
        return False
    if "subapp-list" in build_json:
        for i_sub_app in build_json["subapp-list"].keys():
            if not buildSubAPP(
                    i_sub_app, build_json["subapp-list"][i_sub_app],
                    BUILD_ROOT_PKG):
                return False
    if "pkg-app" in build_json:
        if not buildPKGAPP(build_json["pkg-app"]):
            return False
    return True
def replaceCopy(readfile, writefile, content, newContent):
    """Copy readfile to writefile line by line, applying the regex
    substitution content -> newContent only on lines that mention
    'org.xwalk.embedding.test'; all other lines are copied unchanged.

    The original leaked the read handle (never closed) and could leak
    the write handle on an exception; with-statements fix both.
    """
    with open(readfile, "r") as ffrom, open(writefile, "w") as fto:
        for line in ffrom:
            if 'org.xwalk.embedding.test' in line:
                fto.write(re.sub(content, newContent, line))
            else:
                fto.write(line)
def findVersionFile(pathFile=None):
    """Walk upward from srcdir (starting three levels up) looking for
    VERSION_FILE; return its path, or None when the search reaches "..".

    Bug fix: the original discarded the result of the recursive call, so
    any version file found after the first step was reported as None.

    NOTE(review): pathFile[:-3] assumes a 1-char path separator plus
    ".." -- POSIX only; confirm the tool never runs on Windows.
    """
    if not pathFile:
        pathFile = os.path.join("..", "..", "..")
    if not os.path.exists(
            os.path.join(BUILD_PARAMETERS.srcdir, pathFile, VERSION_FILE)):
        pathFile = pathFile[:-3]
        if pathFile != "..":
            # Propagate the recursive result (was silently discarded).
            return findVersionFile(pathFile)
    else:
        pkg_version_file_path = os.path.join(
            BUILD_PARAMETERS.srcdir,
            pathFile,
            VERSION_FILE)
        return (pkg_version_file_path)
def main():
    """CLI entry point: configure logging, parse options, resolve the
    package version and config json, prepare the build root, build the
    package, and zip the result into destdir. Exits non-zero on any
    failure (mostly via exitHandler)."""
    global LOG
    LOG = logging.getLogger("pack-tool")
    LOG.setLevel(LOG_LEVEL)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(LOG_LEVEL)
    stream_formatter = ColorFormatter("[%(asctime)s] %(message)s")
    stream_handler.setFormatter(stream_formatter)
    LOG.addHandler(stream_handler)
    try:
        usage = "Usage: ./%prog -t deb"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-c",
            "--cfg",
            dest="pkgcfg",
            help="specify the path of config json file")
        opts_parser.add_option(
            "-t",
            "--type",
            dest="pkgtype",
            help="specify the pkg type, e.g. deb ...")
        opts_parser.add_option(
            "-d",
            "--dest",
            dest="destdir",
            help="specify the installation folder for packed package")
        opts_parser.add_option(
            "-s",
            "--src",
            dest="srcdir",
            help="specify the path of pkg resource for packing")
        opts_parser.add_option(
            "--tools",
            dest="pkgpacktools",
            help="specify the parent folder of pack tools")
        opts_parser.add_option(
            "--notclean",
            dest="bnotclean",
            action="store_true",
            help="disable the build root clean after the packing")
        opts_parser.add_option(
            "-v",
            "--version",
            dest="bversion",
            action="store_true",
            help="show this pack tool's version")
        opts_parser.add_option(
            "-u",
            "--user",
            dest="user",
            help="specify the user in inst.py")
        opts_parser.add_option(
            "--pkg-version",
            dest="pkgversion",
            help="specify the pkg version, e.g. 0.0.0.1")
        if len(sys.argv) == 1:
            # No args at all: show the help text instead of doing nothing.
            sys.argv.append("-h")
        global BUILD_PARAMETERS
        (BUILD_PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        LOG.error("Got wrong options: %s, exit ..." % e)
        sys.exit(1)
    if BUILD_PARAMETERS.bversion:
        print "Version: %s" % TOOL_VERSION
        sys.exit(0)
    if not BUILD_PARAMETERS.srcdir:
        BUILD_PARAMETERS.srcdir = os.getcwd()
    BUILD_PARAMETERS.srcdir = os.path.expanduser(BUILD_PARAMETERS.srcdir)
    if not BUILD_PARAMETERS.user:
        BUILD_PARAMETERS.user = "app"
    pkg_version_file_path = findVersionFile()
    try:
        # Version defaults, overridden by --pkg-version or the version file.
        pkg_main_version = 0
        pkg_release_version = 1
        if BUILD_PARAMETERS.pkgversion:
            LOG.info("Using %s as pkg version " % BUILD_PARAMETERS.pkgversion)
            pkg_main_version = BUILD_PARAMETERS.pkgversion
        else:
            if pkg_version_file_path is not None:
                LOG.info(
                    "Using pkg version by file: %s" %
                    pkg_version_file_path)
                with open(pkg_version_file_path, "rt") as pkg_version_file:
                    pkg_version_raw = pkg_version_file.read()
                    pkg_version_file.close()
                    pkg_version_json = json.loads(pkg_version_raw)
                    pkg_main_version = pkg_version_json["main-version"]
                    pkg_release_version = pkg_version_json["release-version"]
    except Exception as e:
        LOG.error("Fail to read pkg version file: %s, exit ..." % e)
        sys.exit(1)
    if not BUILD_PARAMETERS.pkgtype:
        LOG.error("No pkg type provided, exit ...")
        sys.exit(1)
    elif not BUILD_PARAMETERS.pkgtype in PKG_TYPES:
        LOG.error("Wrong pkg type, only support: %s, exit ..." %
                  PKG_TYPES)
        sys.exit(1)
    elif not BUILD_PARAMETERS.destdir:
        BUILD_PARAMETERS.destdir = BUILD_PARAMETERS.srcdir
    BUILD_PARAMETERS.destdir = os.path.expanduser(BUILD_PARAMETERS.destdir)
    if not BUILD_PARAMETERS.pkgpacktools:
        BUILD_PARAMETERS.pkgpacktools = os.path.join(
            BUILD_PARAMETERS.srcdir, "..", "..", "tools")
    BUILD_PARAMETERS.pkgpacktools = os.path.expanduser(
        BUILD_PARAMETERS.pkgpacktools)
    config_json = None
    if BUILD_PARAMETERS.pkgcfg:
        config_json_file_path = BUILD_PARAMETERS.pkgcfg
    else:
        config_json_file_path = os.path.join(
            BUILD_PARAMETERS.srcdir, "suite.json")
    try:
        LOG.info("Using config json file: %s" % config_json_file_path)
        with open(config_json_file_path, "rt") as config_json_file:
            config_raw = config_json_file.read()
            config_json_file.close()
            config_json = json.loads(config_raw)
    except Exception as e:
        LOG.error("Fail to read config json file: %s, exit ..." % e)
        sys.exit(1)
    global PKG_NAME
    PKG_NAME = safelyGetValue(config_json, "pkg-name")
    if not PKG_NAME:
        PKG_NAME = os.path.basename(BUILD_PARAMETERS.srcdir)
        LOG.warning(
            "Due to fail to read pkg name from json that "
            "using src dir name as pkg name ...")
    LOG.info("================= %s (%s-%s) ================" %
             (PKG_NAME, pkg_main_version, pkg_release_version))
    if not safelyGetValue(config_json, "pkg-list"):
        LOG.error("Fail to read pkg-list, exit ...")
        sys.exit(1)
    pkg_json = None
    # pkg-list keys are comma separated type lists; pick the matching one.
    for i_pkg in config_json["pkg-list"].keys():
        i_pkg_list = i_pkg.replace(" ", "").split(",")
        if BUILD_PARAMETERS.pkgtype in i_pkg_list:
            pkg_json = config_json["pkg-list"][i_pkg]
    if not pkg_json:
        LOG.error("Fail to read pkg json, exit ...")
        sys.exit(1)
    if not prepareBuildRoot():
        exitHandler(1)
    global PKG_BLACK_LIST
    PKG_BLACK_LIST = []
    if "pkg-blacklist" in config_json:
        PKG_BLACK_LIST.extend(config_json["pkg-blacklist"])
    if not buildPKG(pkg_json):
        exitHandler(1)
    LOG.info("+Building package ...")
    pkg_file = os.path.join(
        BUILD_PARAMETERS.destdir,
        "%s-%s-%s.%s.zip" %
        (PKG_NAME,
         pkg_main_version,
         pkg_release_version,
         BUILD_PARAMETERS.pkgtype))
    if not zipDir(BUILD_ROOT_SRC_PKG, pkg_file):
        exitHandler(1)
if __name__ == "__main__":
    main()
    # main() exits via exitHandler/sys.exit on failure; reaching this
    # point means the whole pack succeeded.
    exitHandler(0)
|
SurfasJones/icecream-info | refs/heads/master | icecream/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/eucjpprober.py | 2918 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP: feeds bytes through a coding state
    machine and scores them with context and distribution analyses."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()
    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        return "EUC-JP"
    def feed(self, aBuf):
        """Feed a chunk of bytes to the prober; returns the detection
        state (eDetecting / eFoundIt / eNotMe)."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # First byte of this chunk pairs with the last byte
                    # remembered from the previous chunk.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: stop early once confidence is convincing.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        # Confidence is the better of the two analyses.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
|
xingyepei/edx-platform | refs/heads/release | common/test/acceptance/pages/lms/video/__init__.py | 12133432 | |
mrunge/horizon | refs/heads/master | openstack_dashboard/dashboards/project/routers/extensions/routerrules/__init__.py | 12133432 | |
chubbymaggie/amoco | refs/heads/release | amoco/ui/graphics/kivy_/__init__.py | 12133432 | |
johnnykv/heralding | refs/heads/master | heralding/reporting/__init__.py | 12133432 | |
DalikarFT/CFVOP | refs/heads/master | venv/Lib/site-packages/pip/commands/search.py | 343 | from __future__ import absolute_import
import logging
import sys
import textwrap
from pip.basecommand import Command, SUCCESS
from pip.compat import OrderedDict
from pip.download import PipXmlrpcTransport
from pip.models import PyPI
from pip.utils import get_terminal_size
from pip.utils.logging import indent_log
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor import pkg_resources
from pip._vendor.six.moves import xmlrpc_client
logger = logging.getLogger(__name__)
class SearchCommand(Command):
    """Search for PyPI packages whose name or summary contains <query>."""
    name = 'search'
    usage = """
      %prog [options] <query>"""
    summary = 'Search PyPI for packages.'
    def __init__(self, *args, **kw):
        super(SearchCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '-i', '--index',
            dest='index',
            metavar='URL',
            default=PyPI.pypi_url,
            help='Base URL of Python Package Index (default %default)')
        self.parser.insert_option_group(0, self.cmd_opts)
    def run(self, options, args):
        # Entry point: query the index, regroup hits per package, print.
        if not args:
            raise CommandError('Missing required argument (search query).')
        query = args
        pypi_hits = self.search(query, options)
        hits = transform_hits(pypi_hits)
        terminal_width = None
        if sys.stdout.isatty():
            # Only wrap output when attached to a real terminal.
            terminal_width = get_terminal_size()[0]
        print_results(hits, terminal_width=terminal_width)
        if pypi_hits:
            return SUCCESS
        return NO_MATCHES_FOUND
    def search(self, query, options):
        """Query the index XML-RPC API, matching on name OR summary."""
        index_url = options.index
        with self._build_session(options) as session:
            transport = PipXmlrpcTransport(index_url, session)
            pypi = xmlrpc_client.ServerProxy(index_url, transport)
            hits = pypi.search({'name': query, 'summary': query}, 'or')
            return hits
def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.
    """
    packages = OrderedDict()
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']
        entry = packages.get(name)
        if entry is None:
            packages[name] = {
                'name': name,
                'summary': summary,
                'versions': [version],
            }
            continue
        entry['versions'].append(version)
        # the highest version seen so far owns the summary slot
        if version == highest_version(entry['versions']):
            entry['summary'] = summary
    return list(packages.values())
def print_results(hits, name_column_width=None, terminal_width=None):
    """Log one line per package ("name (version) - summary"), wrapping
    the summary to the terminal width and annotating locally installed
    packages with their installed vs. latest versions."""
    if not hits:
        return
    if name_column_width is None:
        # Widest "name (version)" cell plus padding.
        name_column_width = max([
            len(hit['name']) + len(hit.get('versions', ['-'])[-1])
            for hit in hits
        ]) + 4
    installed_packages = [p.project_name for p in pkg_resources.working_set]
    for hit in hits:
        name = hit['name']
        summary = hit['summary'] or ''
        version = hit.get('versions', ['-'])[-1]
        if terminal_width is not None:
            target_width = terminal_width - name_column_width - 5
            if target_width > 10:
                # wrap and indent summary to fit terminal
                summary = textwrap.wrap(summary, target_width)
                summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
        line = '%-*s - %s' % (name_column_width,
                              '%s (%s)' % (name, version), summary)
        try:
            logger.info(line)
            if name in installed_packages:
                dist = pkg_resources.get_distribution(name)
                with indent_log():
                    latest = highest_version(hit['versions'])
                    if dist.version == latest:
                        logger.info('INSTALLED: %s (latest)', dist.version)
                    else:
                        logger.info('INSTALLED: %s', dist.version)
                        logger.info('LATEST: %s', latest)
        except UnicodeEncodeError:
            # Terminal encoding cannot represent the line; skip it.
            pass
def highest_version(versions):
    # Greatest version per PEP 440 ordering (not lexicographic).
    return max(versions, key=parse_version)
|
PhillipNordwall/FileXor | refs/heads/master | setup.py | 1 | """The setup for File Xor Stream"""
import setuptools
import codecs
import os
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'README.rst'), encoding='UTF-8') as f:
long_description = f.read()
setuptools.setup(
name='FileXor',
version='0.2.2a1',
description='',
long_description='',
url='',
author='Phillip Nordwall',
author_email='Phillip.Nordwall+filexor@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
],
keywords='random generator encryption',
packages=setuptools.find_packages(exclude=['tests', 'venv']),
install_requires=['docopt'],
extras_require={
'test': ['pytest'],
},
entry_points={
'console_scripts': [
'FileXor=FileXor.__main__:cli',
]
}
)
|
slackhq/python-slackclient | refs/heads/main | slack_sdk/socket_mode/builtin/__init__.py | 1 | from .client import SocketModeClient # noqa
|
wagtail/wagtail | refs/heads/stable/2.13.x | wagtail/contrib/simple_translation/forms.py | 4 | from django import forms
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy, ngettext
from wagtail.core.models import Locale, Page
class CheckboxSelectMultipleWithDisabledOptions(forms.CheckboxSelectMultiple):
    """Checkbox multi-select widget that renders any option whose value
    appears in ``disabled_values`` with the HTML ``disabled`` attribute."""
    option_template_name = "simple_translation/admin/input_option.html"
    # Values to render as disabled; the form assigns this on the widget
    # instance, so the class-level list is only a default.
    disabled_values = []
    def create_option(self, *args, **kwargs):
        option = super().create_option(*args, **kwargs)
        if option["value"] in self.disabled_values:
            option["attrs"]["disabled"] = True
        return option
class SubmitTranslationForm(forms.Form):
    """Form for choosing the locales to translate an object into.

    For pages, locales whose parent page is untranslated are rendered
    disabled, an optional "include subtree" checkbox is shown, and the
    submit button is suppressed when no locale is selectable.
    """
    # Note: We don't actually use select_all in Python, it is just the
    # easiest way to add the widget to the form. It's controlled in JS.
    select_all = forms.BooleanField(label=gettext_lazy("Select all"), required=False)
    locales = forms.ModelMultipleChoiceField(
        label=gettext_lazy("Locales"),
        queryset=Locale.objects.none(),
        widget=CheckboxSelectMultipleWithDisabledOptions,
    )
    include_subtree = forms.BooleanField(
        required=False, help_text=gettext_lazy("All child pages will be created.")
    )
    def __init__(self, instance, *args, **kwargs):
        super().__init__(*args, **kwargs)
        hide_include_subtree = True
        self.show_submit = True
        if isinstance(instance, Page):
            descendant_count = instance.get_descendants().count()
            if descendant_count > 0:
                # Only offer the subtree checkbox when there are children.
                hide_include_subtree = False
                self.fields["include_subtree"].label = ngettext(
                    "Include subtree ({} page)",
                    "Include subtree ({} pages)",
                    descendant_count,
                ).format(descendant_count)
        if hide_include_subtree:
            self.fields["include_subtree"].widget = forms.HiddenInput()
        # Locales the object does not yet have a translation in.
        untranslated_locales = Locale.objects.exclude(
            id__in=instance.get_translations(inclusive=True).values_list(
                "locale_id", flat=True
            )
        )
        self.fields["locales"].queryset = untranslated_locales
        # For snippets, hide select all if there is one option.
        # Using len() instead of count() here as we're going to evaluate this queryset
        # anyway and it gets cached so it'll only have one query in the end.
        hide_select_all = len(untranslated_locales) < 2
        if isinstance(instance, Page):
            parent = instance.get_parent()
            # Find allowed locale options.
            if parent.is_root():
                # All locale options are allowed.
                allowed_locale_ids = Locale.objects.all().values_list("id", flat=True)
            else:
                # Only the locale options that have a translated parent are allowed.
                allowed_locale_ids = (
                    instance.get_parent()
                    .get_translations(inclusive=True)
                    .values_list("locale_id", flat=True)
                )
            # Get and set the locale options that are disabled.
            disabled_locales = Locale.objects.exclude(
                id__in=allowed_locale_ids
            ).values_list("id", flat=True)
            self.fields["locales"].widget.disabled_values = disabled_locales
            if disabled_locales:
                # Display a help text.
                url = reverse(
                    "simple_translation:submit_page_translation", args=[parent.id]
                )
                help_text = ngettext(
                    "A locale is disabled because a parent page is not translated.",
                    "Some locales are disabled because some parent pages are not translated.",
                    len(disabled_locales),
                )
                help_text += "<br>"
                help_text += '<a href="{}">'.format(url)
                help_text += ngettext(
                    "Translate the parent page.",
                    "Translate the parent pages.",
                    len(disabled_locales),
                )
                help_text += "</a>"
                self.fields["locales"].help_text = mark_safe(help_text)
            # For pages, if there is one locale or all locales are disabled.
            hide_select_all = (
                len(untranslated_locales) == 1
                or len(untranslated_locales) - len(disabled_locales) == 0
            )
            # Hide the submit if all untranslated locales are disabled.
            # This property is used in the template.
            if len(untranslated_locales) == len(disabled_locales):
                self.show_submit = False
        if hide_select_all:
            self.fields["select_all"].widget = forms.HiddenInput()
|
Noviat/odoo | refs/heads/8.0 | addons/hw_escpos/escpos/printer.py | 101 | #!/usr/bin/python
import usb.core
import usb.util
import serial
import socket
from escpos import *
from constants import *
from exceptions import *
from time import sleep
class Usb(Escpos):
    """ Define USB printer """
    def __init__(self, idVendor, idProduct, interface=0, in_ep=0x82, out_ep=0x01):
        """
        @param idVendor : Vendor ID
        @param idProduct : Product ID
        @param interface : USB device interface
        @param in_ep : Input end point
        @param out_ep : Output end point
        """
        self.errorText = "ERROR PRINTER\n\n\n\n\n\n"+PAPER_FULL_CUT
        self.idVendor = idVendor
        self.idProduct = idProduct
        self.interface = interface
        self.in_ep = in_ep
        self.out_ep = out_ep
        self.open()
    def open(self):
        """ Search device on USB tree and set is as escpos device """
        self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
        if self.device is None:
            raise NoDeviceError()
        try:
            # Detach the kernel driver (if any) so we can claim the
            # interface for raw writes.
            if self.device.is_kernel_driver_active(self.interface):
                self.device.detach_kernel_driver(self.interface)
            self.device.set_configuration()
            usb.util.claim_interface(self.device, self.interface)
        except usb.core.USBError as e:
            raise HandleDeviceError(e)
    def close(self):
        """ Release the interface back to the kernel driver; retries for
        roughly a second before giving up. Returns True on success,
        False after 10 failed attempts. """
        i = 0
        while True:
            try:
                if not self.device.is_kernel_driver_active(self.interface):
                    usb.util.release_interface(self.device, self.interface)
                    self.device.attach_kernel_driver(self.interface)
                    usb.util.dispose_resources(self.device)
                else:
                    self.device = None
                    return True
            except usb.core.USBError as e:
                i += 1
                if i > 10:
                    return False
            sleep(0.1)
    def _raw(self, msg):
        """ Print any command sent in raw format """
        # Short write: emit an error banner on the ticket and raise.
        if len(msg) != self.device.write(self.out_ep, msg, self.interface):
            self.device.write(self.out_ep, self.errorText, self.interface)
            raise TicketNotPrinted()
    def __extract_status(self):
        # Poll the IN endpoint until a status byte arrives; bail out
        # after 10000 empty reads.
        maxiterate = 0
        rep = None
        while rep == None:
            maxiterate += 1
            if maxiterate > 10000:
                raise NoStatusError()
            r = self.device.read(self.in_ep, 20, self.interface).tolist()
            while len(r):
                rep = r.pop()
        return rep
    def get_printer_status(self):
        """ Query the four DLE EOT status bytes (printer, offline, error,
        paper) and decode them into a nested dict of booleans. """
        status = {
            'printer': {},
            'offline': {},
            'error' : {},
            'paper' : {},
        }
        self.device.write(self.out_ep, DLE_EOT_PRINTER, self.interface)
        printer = self.__extract_status()
        self.device.write(self.out_ep, DLE_EOT_OFFLINE, self.interface)
        offline = self.__extract_status()
        self.device.write(self.out_ep, DLE_EOT_ERROR, self.interface)
        error = self.__extract_status()
        self.device.write(self.out_ep, DLE_EOT_PAPER, self.interface)
        paper = self.__extract_status()
        # Bitmask decoding below follows the ESC/POS DLE EOT status
        # layout; (x & 147) == 18 checks the fixed marker bits.
        status['printer']['status_code'] = printer
        status['printer']['status_error'] = not ((printer & 147) == 18)
        status['printer']['online'] = not bool(printer & 8)
        status['printer']['recovery'] = bool(printer & 32)
        status['printer']['paper_feed_on'] = bool(printer & 64)
        status['printer']['drawer_pin_high'] = bool(printer & 4)
        status['offline']['status_code'] = offline
        status['offline']['status_error'] = not ((offline & 147) == 18)
        status['offline']['cover_open'] = bool(offline & 4)
        status['offline']['paper_feed_on'] = bool(offline & 8)
        status['offline']['paper'] = not bool(offline & 32)
        status['offline']['error'] = bool(offline & 64)
        status['error']['status_code'] = error
        status['error']['status_error'] = not ((error & 147) == 18)
        status['error']['recoverable'] = bool(error & 4)
        status['error']['autocutter'] = bool(error & 8)
        status['error']['unrecoverable'] = bool(error & 32)
        status['error']['auto_recoverable'] = not bool(error & 64)
        status['paper']['status_code'] = paper
        status['paper']['status_error'] = not ((paper & 147) == 18)
        status['paper']['near_end'] = bool(paper & 12)
        status['paper']['present'] = not bool(paper & 96)
        return status
    def __del__(self):
        """ Release USB interface """
        if self.device:
            self.close()
        self.device = None
self.device = None
class Serial(Escpos):
    """ Define Serial printer """
    def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1):
        """
        @param devfile : Device file under dev filesystem
        @param baudrate : Baud rate for serial transmission
        @param bytesize : Serial buffer size
        @param timeout : Read/Write timeout
        """
        self.devfile = devfile
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.timeout = timeout
        self.open()
    def open(self):
        """ Setup serial port and set is as escpos device """
        # 8-N-1 with DSR/DTR flow control.
        self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate, bytesize=self.bytesize, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=self.timeout, dsrdtr=True)
        if self.device is not None:
            print "Serial printer enabled"
        else:
            print "Unable to open serial printer on: %s" % self.devfile
    def _raw(self, msg):
        """ Print any command sent in raw format """
        self.device.write(msg)
    def __del__(self):
        """ Close Serial interface """
        if self.device is not None:
            self.device.close()
class Network(Escpos):
    """ Define Network printer """
    def __init__(self,host,port=9100):
        """
        @param host : Printer's hostname or IP address
        @param port : Port to write to (9100 is the usual raw-print port)
        """
        self.host = host
        self.port = port
        self.open()
    def open(self):
        """ Open TCP socket and set it as escpos device """
        self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.device.connect((self.host, self.port))
        if self.device is None:
            print "Could not open socket for %s" % self.host
    def _raw(self, msg):
        # Send raw ESC/POS bytes straight over the socket.
        self.device.send(msg)
    def __del__(self):
        """ Close TCP connection """
        self.device.close()
|
refeed/coala-bears | refs/heads/master | tests/python/vulture_test_files/used_variable.py | 7 | x = 2
print(x)
|
lukasjuhrich/sipa | refs/heads/develop | manage.py | 1 | """Python helper for generic sipa tasks
The purpose of this module is to provide a generic interface of
often-used functions which usually require some sort of setup or are
accessible at many different locations.
It somewhat is like a makefile, but the usage of Flask-Script makes
the usage of python-like commands (run unittests, run the app,
configure the app, make the translations, etc.pp.) easier.
Usage:
$ python manage.py <command>
$ python manage.py test
"""
import importlib
import os
from subprocess import call
from flask_script import Manager, prompt_bool
from sipa import create_app
basedir = os.path.dirname(os.path.abspath(__file__))
manager = Manager(create_app)
def large_message(message, title="INFO", width=80, fill='='):
    """Print *message* underneath a centered, fill-padded banner line.

    The banner is ``title`` upper-cased and centered in a field of
    ``width`` characters padded with ``fill``.
    """
    banner = "{0:{fill}^{width}}".format(title.upper(), fill=fill, width=width)
    print("\n{0}\n{1}\n".format(banner, message))
def run_tests_unittest():
    """Discover the tests under ``<basedir>/tests`` and run them.

    Returns the :class:`unittest.TestResult` produced by a plain
    ``TextTestRunner``.
    """
    import unittest
    suite = unittest.TestLoader().discover(os.path.join(basedir, 'tests'))
    runner = unittest.TextTestRunner()
    return runner.run(suite)
def run_tests_nose():
    """Check if nosetests is installed and call it.

    If the `nose` package is not available, prompt the user whether to
    fall back to the plain unittest runner instead.

    Returns the subprocess exit status of nosetests, the unittest
    result, or 255 if the user aborted.
    """
    if importlib.util.find_spec("nose") is None:
        # BUG FIX: message used to read "It You don't have nosetests
        # installed." -- garbled wording, now matches the phrasing used
        # for the Flask-Testing hint in test() below.
        large_message("It seems you don't have nosetests installed.")
        if not prompt_bool("Shall I fall back to unittest?", default=True):
            print("Aborting.")
            result = 255
        else:
            result = run_tests_unittest()
        return result

    return call(["nosetests", "--verbose", "--rednose", "--with-coverage",
                 "--cover-erase", "--cover-branches", "--cover-package=sipa"])
@manager.option('-u', '--force-unittest', dest='force_unittest',
                required=False, default=False, action="store_true")
def test(force_unittest):
    """Try to run the tests.

    If Flask-Testing does not exist, a hint is displayed.  When a
    connection timeout is configured via the environment, required TCP
    endpoints are probed first and the command exits with 254 if any of
    them is unreachable.  Exits with the test runner's result.
    """
    spec = importlib.util.find_spec("flask_testing")
    if spec is None:
        large_message("It seems Flask-Testing is missing. "
                      "Are you sure you are in the "
                      "correct environment?")
        if not prompt_bool("Continue?", default=False):
            print("Aborting.")
            exit(255)

    # BUG FIX: the variable was read from the misspelled name
    # 'CONNETION_TIMEOUT' (twice) and then never used.  Read the
    # correctly spelled name first, fall back to the old misspelling for
    # backward compatibility, and actually pass the value on.
    timeout = os.getenv('CONNECTION_TIMEOUT') or os.getenv('CONNETION_TIMEOUT')
    if timeout:
        connections = [('postgres', 5432), ('ldap_hss', 389)]
        if not wait_until_ready(connections, timeout=float(timeout)):
            exit(254)

    if not force_unittest:
        result = run_tests_nose()
    else:
        result = run_tests_unittest()
    exit(result)
def wait_until_ready(connections_to_test, timeout=5):
    """Wait until each connection can be established or the timeout is reached.

    :param connections_to_test: A list of `(host, port)` tuples
    :param timeout: Timeout in seconds (applies per endpoint)
    :return: False if the timeout is reached for any endpoint, True else
    """
    import socket
    import time

    print("Starting connectivity test...")
    print("Given TCP endpoints:",
          " ".join("{}:{}".format(*host_port) for host_port in connections_to_test))

    for conn_tuple in connections_to_test:
        print("Trying to connect to {}:{}...".format(*conn_tuple), end='')
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                # create_connection applies the timeout per attempt and
                # returns a connected socket, which is closed right away
                # (the original leaked one socket per attempt and mutated
                # the process-wide default timeout).
                socket.create_connection(conn_tuple, timeout=timeout).close()
            except OSError:
                # OSError covers ConnectionRefusedError AND socket.timeout,
                # which previously escaped the handler.  Sleep briefly to
                # avoid a busy loop.
                time.sleep(0.1)
            else:
                print(" SUCCESS")
                break
        else:
            # inner while ran out of time for this endpoint
            print(" FAIL")
            break
    else:
        # every endpoint answered
        return True
    return False
if __name__ == '__main__':
    # Entry point: let Flask-Script dispatch the subcommand given on the
    # command line (e.g. `python manage.py test`).
    manager.run()
|
uglyboxer/linear_neuron | refs/heads/master | net-p3/lib/python3.5/site-packages/scipy/special/generate_ufuncs.py | 14 | #!/usr/bin/python
"""
generate_ufuncs.py
Generate Ufunc definition source files for scipy.special. Produces
files '_ufuncs.c' and '_ufuncs_cxx.c' by first producing Cython.
This will generate both calls to PyUFunc_FromFuncAndData and the
required ufunc inner loops.
The syntax in the ufunc signature list is
<line>: <ufunc_name> '--' <kernels> '--' <headers>
<kernels>: <function> [',' <function>]*
<function>: <name> ':' <input> '*' <output>
'->' <retval> '*' <ignored_retval>
<input>: <typecode>*
<output>: <typecode>*
<retval>: <typecode>?
<ignored_retval>: <typecode>?
<headers>: <header_name> [',' <header_name>]*
The input parameter types are denoted by single character type
codes, according to
'f': 'float'
'd': 'double'
'g': 'long double'
'F': 'float complex'
'D': 'double complex'
'G': 'long double complex'
'i': 'int'
'l': 'long'
'v': 'void'
If multiple kernel functions are given for a single ufunc, the one
which is used is determined by the standard ufunc mechanism. Kernel
functions that are listed first are also matched first against the
ufunc input types, so functions listed earlier take precedence.
In addition, versions with casted variables, such as d->f,D->F and
i->d are automatically generated.
There should be either a single header that contains all of the kernel
functions listed, or there should be one header for each kernel
function. Cython pxd files are allowed in addition to .h files.
Cython functions may use fused types, but the names in the list
should be the specialized ones, such as 'somefunc[float]'.
Function coming from C++ should have ``++`` appended to the name of
the header.
Floating-point exceptions inside these Ufuncs are converted to
special function errors --- which are separately controlled by the
user, and off by default, as they are usually not especially useful
for the user.
The C++ module
--------------
In addition to ``_ufuncs`` module, a second module ``_ufuncs_cxx`` is
generated. This module only exports function pointers that are to be
used when constructing some of the ufuncs in ``_ufuncs``. The function
pointers are exported via Cython's standard mechanism.
This mainly avoids build issues --- Python distutils has no way to
figure out what to do if you want to link both C++ and Fortran code in
the same shared library.
"""
from __future__ import division, print_function, absolute_import
#---------------------------------------------------------------------------------
# Ufunc listing
#---------------------------------------------------------------------------------
#
#
# Ufuncs without C++
UFUNCS = """
sph_harm -- sph_harmonic: iidd->D, sph_harmonic_unsafe: dddd->D -- sph_harm.pxd, _legacy.pxd
_lambertw -- lambertw_scalar: Dld->D -- lambertw.pxd
_ellip_harm -- ellip_harmonic: ddiiddd->d, ellip_harmonic_unsafe: ddddddd->d --_ellip_harm.pxd, _legacy.pxd
logit -- logitf: f->f, logit: d->d, logitl: g->g -- _logit.h
expit -- expitf: f->f, expit: d->d, expitl: g->g -- _logit.h
bdtrc -- bdtrc: iid->d, bdtrc_unsafe: ddd->d -- cephes.h, _legacy.pxd
bdtr -- bdtr: iid->d, bdtr_unsafe: ddd->d -- cephes.h, _legacy.pxd
bdtri -- bdtri: iid->d, bdtri_unsafe: ddd->d -- cephes.h, _legacy.pxd
binom -- binom: dd->d -- orthogonal_eval.pxd
btdtr -- btdtr: ddd->d -- cephes.h
btdtri -- incbi: ddd->d -- cephes.h
fdtrc -- fdtrc: ddd->d -- cephes.h
fdtr -- fdtr: ddd->d -- cephes.h
fdtri -- fdtri: ddd->d -- cephes.h
gdtrc -- gdtrc: ddd->d -- cephes.h
gdtr -- gdtr: ddd->d -- cephes.h
hyp2f1 -- hyp2f1: dddd->d, chyp2f1_wrap: dddD->D -- cephes.h, specfun_wrappers.h
hyp1f1 -- hyp1f1_wrap: ddd->d, chyp1f1_wrap: ddD->D -- specfun_wrappers.h
hyperu -- hypU_wrap: ddd->d -- specfun_wrappers.h
hyp2f0 -- hyp2f0: dddi*d->d, hyp2f0_unsafe: dddd*d->d -- cephes.h, _legacy.pxd
hyp1f2 -- onef2: dddd*d->d -- cephes.h
hyp3f0 -- threef0: dddd*d->d -- cephes.h
betainc -- incbet: ddd->d -- cephes.h
betaincinv -- incbi: ddd->d -- cephes.h
nbdtrc -- nbdtrc: iid->d, nbdtrc_unsafe: ddd->d -- cephes.h, _legacy.pxd
nbdtr -- nbdtr: iid->d, nbdtr_unsafe: ddd->d -- cephes.h, _legacy.pxd
nbdtri -- nbdtri: iid->d, nbdtri_unsafe: ddd->d -- cephes.h, _legacy.pxd
beta -- beta: dd->d -- cephes.h
betaln -- lbeta: dd->d -- cephes.h
cbrt -- cbrt: d->d -- cephes.h
chdtrc -- chdtrc: dd->d -- cephes.h
chdtr -- chdtr: dd->d -- cephes.h
chdtri -- chdtri: dd->d -- cephes.h
ellipeinc -- ellie: dd->d -- cephes.h
ellipkinc -- ellik: dd->d -- cephes.h
ellipe -- ellpe: d->d -- cephes.h
ellipkm1 -- ellpk: d->d -- cephes.h
eval_jacobi -- eval_jacobi[double]: dddd->d, eval_jacobi[double complex]: dddD->D, eval_jacobi_l: lddd->d -- orthogonal_eval.pxd
eval_sh_jacobi -- eval_sh_jacobi[double]: dddd->d, eval_sh_jacobi[double complex]: dddD->D, eval_sh_jacobi_l: lddd->d -- orthogonal_eval.pxd
eval_gegenbauer -- eval_gegenbauer[double]: ddd->d, eval_gegenbauer[double complex]: ddD->D, eval_gegenbauer_l: ldd->d -- orthogonal_eval.pxd
eval_chebyt -- eval_chebyt[double]: dd->d, eval_chebyt[double complex]: dD->D, eval_chebyt_l: ld->d -- orthogonal_eval.pxd
eval_chebyu -- eval_chebyu[double]: dd->d, eval_chebyu[double complex]: dD->D, eval_chebyu_l: ld->d -- orthogonal_eval.pxd
eval_chebyc -- eval_chebyc[double]: dd->d, eval_chebyc[double complex]: dD->D, eval_chebyc_l: ld->d -- orthogonal_eval.pxd
eval_chebys -- eval_chebys[double]: dd->d, eval_chebys[double complex]: dD->D, eval_chebys_l: ld->d -- orthogonal_eval.pxd
eval_sh_chebyt -- eval_sh_chebyt[double]: dd->d, eval_sh_chebyt[double complex]: dD->D, eval_sh_chebyt_l:ld->d -- orthogonal_eval.pxd
eval_sh_chebyu -- eval_sh_chebyu[double]: dd->d, eval_sh_chebyu[double complex]: dD->D, eval_sh_chebyu_l:ld->d -- orthogonal_eval.pxd
eval_legendre -- eval_legendre[double]: dd->d, eval_legendre[double complex]: dD->D, eval_legendre_l: ld->d -- orthogonal_eval.pxd
eval_sh_legendre -- eval_sh_legendre[double]: dd->d, eval_sh_legendre[double complex]: dD->D, eval_sh_legendre_l:ld->d -- orthogonal_eval.pxd
eval_genlaguerre -- eval_genlaguerre[double]: ddd->d, eval_genlaguerre[double complex]: ddD->D, eval_genlaguerre_l:ldd->d -- orthogonal_eval.pxd
eval_laguerre -- eval_laguerre[double]: dd->d, eval_laguerre[double complex]: dD->D, eval_laguerre_l:ld->d -- orthogonal_eval.pxd
eval_hermite -- eval_hermite: ld->d -- orthogonal_eval.pxd
eval_hermitenorm -- eval_hermitenorm: ld->d -- orthogonal_eval.pxd
exp10 -- exp10: d->d -- cephes.h
exp2 -- exp2: d->d -- cephes.h
gamma -- Gamma: d->d, cgamma_wrap: D->D -- cephes.h, specfun_wrappers.h
gammaln -- lgam: d->d, clngamma_wrap: D->D -- cephes.h, specfun_wrappers.h
gammasgn -- gammasgn: d->d -- c_misc/misc.h
i0 -- i0: d->d -- cephes.h
i0e -- i0e: d->d -- cephes.h
i1 -- i1: d->d -- cephes.h
i1e -- i1e: d->d -- cephes.h
gammaincc -- igamc: dd->d -- cephes.h
gammainc -- igam: dd->d -- cephes.h
gammaincinv -- gammaincinv: dd->d -- cephes.h
gammainccinv -- igami: dd->d -- cephes.h
iv -- iv: dd->d, cbesi_wrap: dD->D -- cephes.h, amos_wrappers.h
ive -- cbesi_wrap_e_real: dd->d, cbesi_wrap_e: dD->D -- amos_wrappers.h
ellipj -- ellpj: dd*dddd->*i -- cephes.h
expn -- expn: id->d, expn_unsafe: dd->d -- cephes.h, _legacy.pxd
exp1 -- exp1_wrap: d->d, cexp1_wrap: D->D -- specfun_wrappers.h
expi -- expi_wrap: d->d, cexpi_wrap: D->D -- specfun_wrappers.h
kn -- cbesk_wrap_real_int: id->d, kn_unsafe: dd->d -- cephes.h, _legacy.pxd
pdtrc -- pdtrc: id->d, pdtrc_unsafe: dd->d -- cephes.h, _legacy.pxd
pdtr -- pdtr: id->d, pdtr_unsafe: dd->d -- cephes.h, _legacy.pxd
pdtri -- pdtri: id->d, pdtri_unsafe: dd->d -- cephes.h, _legacy.pxd
yn -- yn: id->d, yn_unsafe: dd->d -- cephes.h, _legacy.pxd
smirnov -- smirnov: id->d, smirnov_unsafe: dd->d -- cephes.h, _legacy.pxd
smirnovi -- smirnovi: id->d, smirnovi_unsafe: dd->d -- cephes.h, _legacy.pxd
airy -- airy_wrap: d*dddd->*i, cairy_wrap: D*DDDD->*i -- amos_wrappers.h
itairy -- itairy_wrap: d*dddd->*i -- specfun_wrappers.h
airye -- cairy_wrap_e_real: d*dddd->*i, cairy_wrap_e: D*DDDD->*i -- amos_wrappers.h
fresnel -- fresnl: d*dd->*i, cfresnl_wrap: D*DD->*i -- cephes.h, specfun_wrappers.h
shichi -- shichi: d*dd->*i -- cephes.h
sici -- sici: d*dd->*i -- cephes.h
itj0y0 -- it1j0y0_wrap: d*dd->*i -- specfun_wrappers.h
it2j0y0 -- it2j0y0_wrap: d*dd->*i -- specfun_wrappers.h
iti0k0 -- it1i0k0_wrap: d*dd->*i -- specfun_wrappers.h
it2i0k0 -- it2i0k0_wrap: d*dd->*i -- specfun_wrappers.h
j0 -- j0: d->d -- cephes.h
y0 -- y0: d->d -- cephes.h
j1 -- j1: d->d -- cephes.h
y1 -- y1: d->d -- cephes.h
jv -- cbesj_wrap_real: dd->d, cbesj_wrap: dD->D -- amos_wrappers.h
jve -- cbesj_wrap_e_real: dd->d, cbesj_wrap_e: dD->D -- amos_wrappers.h
yv -- cbesy_wrap_real: dd->d, cbesy_wrap: dD->D -- amos_wrappers.h
yve -- cbesy_wrap_e_real: dd->d, cbesy_wrap_e: dD->D -- amos_wrappers.h
k0 -- k0: d->d -- cephes.h
k0e -- k0e: d->d -- cephes.h
k1 -- k1: d->d -- cephes.h
k1e -- k1e: d->d -- cephes.h
kv -- cbesk_wrap_real: dd->d, cbesk_wrap: dD->D -- amos_wrappers.h
kve -- cbesk_wrap_e_real: dd->d, cbesk_wrap_e: dD->D -- amos_wrappers.h
hankel1 -- cbesh_wrap1: dD->D -- amos_wrappers.h
hankel1e -- cbesh_wrap1_e: dD->D -- amos_wrappers.h
hankel2 -- cbesh_wrap2: dD->D -- amos_wrappers.h
hankel2e -- cbesh_wrap2_e: dD->D -- amos_wrappers.h
ndtr -- ndtr: d->d -- cephes.h
log_ndtr -- log_ndtr: d->d -- cephes.h
ndtri -- ndtri: d->d -- cephes.h
psi -- psi: d->d, cpsi_wrap: D->D -- cephes.h, specfun_wrappers.h
rgamma -- rgamma: d->d, crgamma_wrap: D->D -- cephes.h, specfun_wrappers.h
round -- round: d->d -- cephes.h
sindg -- sindg: d->d -- cephes.h
cosdg -- cosdg: d->d -- cephes.h
radian -- radian: ddd->d -- cephes.h
tandg -- tandg: d->d -- cephes.h
cotdg -- cotdg: d->d -- cephes.h
log1p -- log1p: d->d -- cephes.h
expm1 -- expm1: d->d -- cephes.h
cosm1 -- cosm1: d->d -- cephes.h
spence -- spence: d->d -- cephes.h
zetac -- zetac: d->d -- cephes.h
struve -- struve_h: dd->d -- misc.h
modstruve -- struve_l: dd->d -- misc.h
_struve_power_series -- struve_power_series: ddi*d->d -- misc.h
_struve_asymp_large_z -- struve_asymp_large_z: ddi*d->d -- misc.h
_struve_bessel_series -- struve_bessel_series: ddi*d->d -- misc.h
itstruve0 -- itstruve0_wrap: d->d -- specfun_wrappers.h
it2struve0 -- it2struve0_wrap: d->d -- specfun_wrappers.h
itmodstruve0 -- itmodstruve0_wrap: d->d -- specfun_wrappers.h
kelvin -- kelvin_wrap: d*DDDD->*i -- specfun_wrappers.h
ber -- ber_wrap: d->d -- specfun_wrappers.h
bei -- bei_wrap: d->d -- specfun_wrappers.h
ker -- ker_wrap: d->d -- specfun_wrappers.h
kei -- kei_wrap: d->d -- specfun_wrappers.h
berp -- berp_wrap: d->d -- specfun_wrappers.h
beip -- beip_wrap: d->d -- specfun_wrappers.h
kerp -- kerp_wrap: d->d -- specfun_wrappers.h
keip -- keip_wrap: d->d -- specfun_wrappers.h
zeta -- zeta: dd->d -- cephes.h
kolmogorov -- kolmogorov: d->d -- cephes.h
kolmogi -- kolmogi: d->d -- cephes.h
besselpoly -- besselpoly: ddd->d -- c_misc/misc.h
btdtria -- cdfbet3_wrap: ddd->d -- cdf_wrappers.h
btdtrib -- cdfbet4_wrap: ddd->d -- cdf_wrappers.h
bdtrik -- cdfbin2_wrap: ddd->d -- cdf_wrappers.h
bdtrin -- cdfbin3_wrap: ddd->d -- cdf_wrappers.h
chdtriv -- cdfchi3_wrap: dd->d -- cdf_wrappers.h
chndtr -- cdfchn1_wrap: ddd->d -- cdf_wrappers.h
chndtrix -- cdfchn2_wrap: ddd->d -- cdf_wrappers.h
chndtridf -- cdfchn3_wrap: ddd->d -- cdf_wrappers.h
chndtrinc -- cdfchn4_wrap: ddd->d -- cdf_wrappers.h
fdtridfd -- cdff4_wrap: ddd->d -- cdf_wrappers.h
ncfdtr -- cdffnc1_wrap: dddd->d -- cdf_wrappers.h
ncfdtri -- cdffnc2_wrap: dddd->d -- cdf_wrappers.h
ncfdtridfn -- cdffnc3_wrap: dddd->d -- cdf_wrappers.h
ncfdtridfd -- cdffnc4_wrap: dddd->d -- cdf_wrappers.h
ncfdtrinc -- cdffnc5_wrap: dddd->d -- cdf_wrappers.h
gdtrix -- cdfgam2_wrap: ddd->d -- cdf_wrappers.h
gdtrib -- cdfgam3_wrap: ddd->d -- cdf_wrappers.h
gdtria -- cdfgam4_wrap: ddd->d -- cdf_wrappers.h
nbdtrik -- cdfnbn2_wrap: ddd->d -- cdf_wrappers.h
nbdtrin -- cdfnbn3_wrap: ddd->d -- cdf_wrappers.h
nrdtrimn -- cdfnor3_wrap: ddd->d -- cdf_wrappers.h
nrdtrisd -- cdfnor4_wrap: ddd->d -- cdf_wrappers.h
pdtrik -- cdfpoi2_wrap: dd->d -- cdf_wrappers.h
stdtr -- cdft1_wrap: dd->d -- cdf_wrappers.h
stdtrit -- cdft2_wrap: dd->d -- cdf_wrappers.h
stdtridf -- cdft3_wrap: dd->d -- cdf_wrappers.h
nctdtr -- cdftnc1_wrap: ddd->d -- cdf_wrappers.h
nctdtrit -- cdftnc2_wrap: ddd->d -- cdf_wrappers.h
nctdtridf -- cdftnc3_wrap: ddd->d -- cdf_wrappers.h
nctdtrinc -- cdftnc4_wrap: ddd->d -- cdf_wrappers.h
tklmbda -- tukeylambdacdf: dd->d -- cdf_wrappers.h
mathieu_a -- cem_cva_wrap: dd->d -- specfun_wrappers.h
mathieu_b -- sem_cva_wrap: dd->d -- specfun_wrappers.h
mathieu_cem -- cem_wrap: ddd*dd->*i -- specfun_wrappers.h
mathieu_sem -- sem_wrap: ddd*dd->*i -- specfun_wrappers.h
mathieu_modcem1 -- mcm1_wrap: ddd*dd->*i -- specfun_wrappers.h
mathieu_modcem2 -- mcm2_wrap: ddd*dd->*i -- specfun_wrappers.h
mathieu_modsem1 -- msm1_wrap: ddd*dd->*i -- specfun_wrappers.h
mathieu_modsem2 -- msm2_wrap: ddd*dd->*i -- specfun_wrappers.h
lpmv -- pmv_wrap: ddd->d -- specfun_wrappers.h
pbwa -- pbwa_wrap: dd*dd->*i -- specfun_wrappers.h
pbdv -- pbdv_wrap: dd*dd->*i -- specfun_wrappers.h
pbvv -- pbvv_wrap: dd*dd->*i -- specfun_wrappers.h
pro_cv -- prolate_segv_wrap: ddd->d -- specfun_wrappers.h
obl_cv -- oblate_segv_wrap: ddd->d -- specfun_wrappers.h
pro_ang1_cv -- prolate_aswfa_wrap: ddddd*dd->*i -- specfun_wrappers.h
pro_rad1_cv -- prolate_radial1_wrap: ddddd*dd->*i -- specfun_wrappers.h
pro_rad2_cv -- prolate_radial2_wrap: ddddd*dd->*i -- specfun_wrappers.h
obl_ang1_cv -- oblate_aswfa_wrap: ddddd*dd->*i -- specfun_wrappers.h
obl_rad1_cv -- oblate_radial1_wrap: ddddd*dd->*i -- specfun_wrappers.h
obl_rad2_cv -- oblate_radial2_wrap: ddddd*dd->*i -- specfun_wrappers.h
pro_ang1 -- prolate_aswfa_nocv_wrap: dddd*d->d -- specfun_wrappers.h
pro_rad1 -- prolate_radial1_nocv_wrap: dddd*d->d -- specfun_wrappers.h
pro_rad2 -- prolate_radial2_nocv_wrap: dddd*d->d -- specfun_wrappers.h
obl_ang1 -- oblate_aswfa_nocv_wrap: dddd*d->d -- specfun_wrappers.h
obl_rad1 -- oblate_radial1_nocv_wrap: dddd*d->d -- specfun_wrappers.h
obl_rad2 -- oblate_radial2_nocv_wrap: dddd*d->d -- specfun_wrappers.h
modfresnelp -- modified_fresnel_plus_wrap: d*DD->*i -- specfun_wrappers.h
modfresnelm -- modified_fresnel_minus_wrap: d*DD->*i -- specfun_wrappers.h
wofz -- faddeeva_w: D->D -- _faddeeva.h++
erfc -- erfc: d->d, faddeeva_erfc: D->D -- cephes.h, _faddeeva.h++
erf -- erf: d->d, faddeeva_erf: D->D -- cephes.h, _faddeeva.h++
dawsn -- faddeeva_dawsn: d->d, faddeeva_dawsn_complex: D->D -- _faddeeva.h++
erfcx -- faddeeva_erfcx: d->d, faddeeva_erfcx_complex: D->D -- _faddeeva.h++
erfi -- faddeeva_erfi: d->d, faddeeva_erfi_complex: D->D -- _faddeeva.h++
xlogy -- xlogy[double]: dd->d, xlogy[double_complex]: DD->D -- _xlogy.pxd
xlog1py -- xlog1py: dd->d -- _xlogy.pxd
poch -- poch: dd->d -- c_misc/misc.h
boxcox -- boxcox: dd->d -- _boxcox.pxd
boxcox1p -- boxcox1p: dd->d -- _boxcox.pxd
inv_boxcox -- inv_boxcox: dd->d -- _boxcox.pxd
inv_boxcox1p -- inv_boxcox1p: dd->d -- _boxcox.pxd
entr -- entr: d->d -- _convex_analysis.pxd
kl_div -- kl_div: dd->d -- _convex_analysis.pxd
rel_entr -- rel_entr: dd->d -- _convex_analysis.pxd
huber -- huber: dd->d -- _convex_analysis.pxd
pseudo_huber -- pseudo_huber: dd->d -- _convex_analysis.pxd
"""
#---------------------------------------------------------------------------------
# Extra code
#---------------------------------------------------------------------------------
EXTRA_CODE_COMMON = """\
# This file is automatically generated by generate_ufuncs.py.
# Do not edit manually!
cdef extern from "_complexstuff.h":
# numpy/npy_math.h doesn't have correct extern "C" declarations,
# so we must include a wrapped version first
pass
cdef extern from "numpy/npy_math.h":
double NPY_NAN
cimport numpy as np
from numpy cimport (
npy_float, npy_double, npy_longdouble,
npy_cfloat, npy_cdouble, npy_clongdouble,
npy_int, npy_long,
NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE,
NPY_INT, NPY_LONG)
ctypedef double complex double_complex
cdef extern from "numpy/ufuncobject.h":
int PyUFunc_getfperr() nogil
cdef public int wrap_PyUFunc_getfperr() nogil:
\"\"\"
Call PyUFunc_getfperr in a context where PyUFunc_API array is initialized;
this avoids messing with the UNIQUE_SYMBOL #defines
\"\"\"
return PyUFunc_getfperr()
cimport libc
cimport sf_error
np.import_array()
np.import_ufunc()
cdef int _set_errprint(int flag) nogil:
return sf_error.set_print(flag)
"""
EXTRA_CODE = """
cimport scipy.special._ufuncs_cxx
def errprint(inflag=None):
\"\"\"
errprint(inflag=None)
Sets or returns the error printing flag for special functions.
Parameters
----------
inflag : bool, optional
Whether warnings concerning evaluation of special functions in
scipy.special are shown. If omitted, no change is made to the
current setting.
Returns
-------
old_flag
Previous value of the error flag
\"\"\"
if inflag is not None:
scipy.special._ufuncs_cxx._set_errprint(int(bool(inflag)))
return sf_error.set_print(int(bool(inflag)))
else:
return sf_error.get_print()
"""
EXTRA_CODE_BOTTOM = """\
#
# Aliases
#
jn = jv
"""
#---------------------------------------------------------------------------------
# Code generation
#---------------------------------------------------------------------------------
import os
import optparse
import re
import textwrap
add_newdocs = __import__('add_newdocs')
# Mapping: 1-character type code -> Cython-level C type name.
CY_TYPES = {
    'f': 'float',
    'd': 'double',
    'g': 'long double',
    'F': 'float complex',
    'D': 'double complex',
    'G': 'long double complex',
    'i': 'int',
    'l': 'long',
    'v': 'void',
}

# Mapping: 1-character type code -> numpy C typedef name.
C_TYPES = {
    'f': 'npy_float',
    'd': 'npy_double',
    'g': 'npy_longdouble',
    'F': 'npy_cfloat',
    'D': 'npy_cdouble',
    'G': 'npy_clongdouble',
    'i': 'npy_int',
    'l': 'npy_long',
    'v': 'void',
}

# Mapping: 1-character type code -> numpy type-number macro.
# (no entry for 'v'/void -- void is never a ufunc element type)
TYPE_NAMES = {
    'f': 'NPY_FLOAT',
    'd': 'NPY_DOUBLE',
    'g': 'NPY_LONGDOUBLE',
    'F': 'NPY_CFLOAT',
    'D': 'NPY_CDOUBLE',
    'G': 'NPY_CLONGDOUBLE',
    'i': 'NPY_INT',
    'l': 'NPY_LONG',
}
def cast_order(c):
    """Map each type code in *c* to its rank in the casting order.

    Lower rank means "earlier" in the int -> long -> float -> ... ->
    long double complex widening chain; used as a sort key so that loop
    variants are ordered by input type.
    """
    order = 'ilfdgFDG'
    return [order.index(code) for code in c]
# These downcasts will cause the function to return NaNs, unless the
# values happen to coincide exactly.
# (set literal instead of set([...]) -- identical contents, idiomatic form)
DANGEROUS_DOWNCAST = {
    ('F', 'i'), ('F', 'l'), ('F', 'f'), ('F', 'd'), ('F', 'g'),
    ('D', 'i'), ('D', 'l'), ('D', 'f'), ('D', 'd'), ('D', 'g'),
    ('G', 'i'), ('G', 'l'), ('G', 'f'), ('G', 'd'), ('G', 'g'),
    ('f', 'i'), ('f', 'l'),
    ('d', 'i'), ('d', 'l'),
    ('g', 'i'), ('g', 'l'),
    ('l', 'i'),
}
# Value emitted into generated code when a dangerous cast check fails:
# NaN for real/complex floating types, a recognizable sentinel bit
# pattern for the integer types (which have no NaN).
NAN_VALUE = {
    'f': 'NPY_NAN',
    'd': 'NPY_NAN',
    'g': 'NPY_NAN',
    'F': 'NPY_NAN',
    'D': 'NPY_NAN',
    'G': 'NPY_NAN',
    'i': '0xbad0bad0',
    'l': '0xbad0bad0',
}
def generate_loop(func_inputs, func_outputs, func_retval,
                  ufunc_inputs, ufunc_outputs):
    """
    Generate a UFunc loop function that calls a function given as its
    data parameter with the specified input and output arguments and
    return value.

    This function can be passed to PyUFunc_FromFuncAndData.

    Parameters
    ----------
    func_inputs, func_outputs, func_retval : str
        Signature of the function to call, given as type codes of the
        input, output and return value arguments. These 1-character
        codes are given according to the CY_TYPES and TYPE_NAMES
        lists above.

        The corresponding C function signature to be called is:

            retval func(intype1 iv1, intype2 iv2, ..., outtype1 *ov1, ...);

        If len(ufunc_outputs) == len(func_outputs)+1, the return value
        is treated as the first output argument. Otherwise, the return
        value is ignored.

    ufunc_inputs, ufunc_outputs : str
        Ufunc input and output signature.

        This does not have to exactly match the function signature,
        as long as the type casts work out on the C level.

    Returns
    -------
    loop_name
        Name of the generated loop function.
    loop_body
        Generated Cython code for the loop.
    """
    if len(func_inputs) != len(ufunc_inputs):
        raise ValueError("Function and ufunc have different number of inputs")
    if len(func_outputs) != len(ufunc_outputs) and not (
            func_retval != "v" and len(func_outputs)+1 == len(ufunc_outputs)):
        raise ValueError("Function retval and ufunc outputs don't match")
    # Loop name encodes the full function-signature -> ufunc-signature mapping.
    name = "loop_%s_%s_%s_As_%s_%s" % (
        func_retval, func_inputs, func_outputs, ufunc_inputs, ufunc_outputs
        )
    body = "cdef void %s(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:\n" % name
    body += " cdef np.npy_intp i, n = dims[0]\n"
    # The ufunc 'data' pointer is a 2-element array:
    # [function pointer, function name (for error reporting)].
    body += " cdef void *func = (<void**>data)[0]\n"
    body += " cdef char *func_name = <char*>(<void**>data)[1]\n"
    # One raw char pointer per ufunc input/output array.
    for j in range(len(ufunc_inputs)):
        body += " cdef char *ip%d = args[%d]\n" % (j, j)
    for j in range(len(ufunc_outputs)):
        body += " cdef char *op%d = args[%d]\n" % (j, j + len(ufunc_inputs))
    # Build the C argument types and call expressions for the kernel call.
    ftypes = []
    fvars = []
    outtypecodes = []
    for j in range(len(func_inputs)):
        ftypes.append(CY_TYPES[func_inputs[j]])
        fvars.append("<%s>(<%s*>ip%d)[0]" % (
            CY_TYPES[func_inputs[j]],
            CY_TYPES[ufunc_inputs[j]], j))
    if len(func_outputs)+1 == len(ufunc_outputs):
        # Retval of the kernel is used as the first ufunc output.
        func_joff = 1
        outtypecodes.append(func_retval)
        body += " cdef %s ov0\n" % (CY_TYPES[func_retval],)
    else:
        func_joff = 0
    for j, outtype in enumerate(func_outputs):
        body += " cdef %s ov%d\n" % (CY_TYPES[outtype], j+func_joff)
        ftypes.append("%s *" % CY_TYPES[outtype])
        fvars.append("&ov%d" % (j+func_joff))
        outtypecodes.append(outtype)
    body += " for i in range(n):\n"
    if len(func_outputs)+1 == len(ufunc_outputs):
        rv = "ov0 = "
    else:
        rv = ""
    funcall = " %s(<%s(*)(%s) nogil>func)(%s)\n" % (
        rv, CY_TYPES[func_retval], ", ".join(ftypes), ", ".join(fvars))
    # Cast-check inputs and call function
    input_checks = []
    for j in range(len(func_inputs)):
        if (ufunc_inputs[j], func_inputs[j]) in DANGEROUS_DOWNCAST:
            # Round-trip compare: only call the kernel when the downcast
            # preserves the value exactly.
            chk = "<%s>(<%s*>ip%d)[0] == (<%s*>ip%d)[0]" % (
                CY_TYPES[func_inputs[j]], CY_TYPES[ufunc_inputs[j]], j,
                CY_TYPES[ufunc_inputs[j]], j)
            input_checks.append(chk)
    if input_checks:
        body += " if %s:\n" % (" and ".join(input_checks))
        body += " " + funcall
        body += " else:\n"
        body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid input argument\")\n"
        # On a failed input check, fill every output with NaN/sentinel.
        for j, outtype in enumerate(outtypecodes):
            body += " ov%d = <%s>%s\n" % (
                j, CY_TYPES[outtype], NAN_VALUE[outtype])
    else:
        body += funcall
    # Assign and cast-check output values
    for j, (outtype, fouttype) in enumerate(zip(ufunc_outputs, outtypecodes)):
        if (fouttype, outtype) in DANGEROUS_DOWNCAST:
            body += " if ov%d == <%s>ov%d:\n" % (j, CY_TYPES[outtype], j)
            body += " (<%s *>op%d)[0] = <%s>ov%d\n" % (
                CY_TYPES[outtype], j, CY_TYPES[outtype], j)
            body += " else:\n"
            body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid output\")\n"
            body += " (<%s *>op%d)[0] = <%s>%s\n" % (
                CY_TYPES[outtype], j, CY_TYPES[outtype], NAN_VALUE[outtype])
        else:
            body += " (<%s *>op%d)[0] = <%s>ov%d\n" % (
                CY_TYPES[outtype], j, CY_TYPES[outtype], j)
    # Advance all array pointers by their strides.
    for j in range(len(ufunc_inputs)):
        body += " ip%d += steps[%d]\n" % (j, j)
    for j in range(len(ufunc_outputs)):
        body += " op%d += steps[%d]\n" % (j, j + len(ufunc_inputs))
    body += " sf_error.check_fpe(func_name)\n"
    return name, body
def iter_variants(inputs, outputs):
    """
    Generate variants of UFunc signatures, by changing variable types,
    within the limitation that the corresponding C type casts still
    work out.

    This does not generate all possibilities, just the ones required
    for the ufunc to work properly with the most common data types.

    Parameters
    ----------
    inputs, outputs : str
        UFunc input and output signature strings

    Yields
    ------
    new_input, new_output : str
        One substituted signature pair per substitution rule.  When a
        rule changes nothing (no matching type codes), the yielded pair
        equals the original.
    """
    # Base rule: always use long instead of int (more common type on
    # 64-bit), plus a float32-preserving variant of each rule.
    rules = [('i', 'l')]
    rules += [(src + 'dD', dst + 'fF') for src, dst in rules]

    for src, dst in rules:
        inp, outp = inputs, outputs
        for old, new in zip(src, dst):
            inp = inp.replace(old, new)
            outp = outp.replace(old, new)
        yield inp, outp
class Ufunc(object):
    """
    Ufunc signature, restricted format suitable for special functions.

    Parameters
    ----------
    name
        Name of the ufunc to create
    signature
        String of form 'func: fff*ff->f, func2: ddd->*i' describing
        the C-level functions and types of their input arguments
        and return values.

        The syntax is 'function_name: inputparams*outputparams->output_retval*ignored_retval'

    Attributes
    ----------
    name : str
        Python name for the Ufunc
    signatures : list of (func_name, inarg_spec, outarg_spec, ret_spec, header_name)
        List of parsed signatures
    doc : str
        Docstring, obtained from add_newdocs
    function_name_overrides : dict of str->str
        Overrides for the function names in signatures
    """

    def __init__(self, name, signatures, headers):
        self.name = name
        self.signatures = self._parse_signatures(signatures, headers)
        self.doc = add_newdocs.get("scipy.special." + name)
        if self.doc is None:
            raise ValueError("No docstring for ufunc %r" % name)
        self.doc = textwrap.dedent(self.doc).strip()
        self.function_name_overrides = {}

    def _parse_signatures(self, sigs_str, headers_str):
        """Parse comma-separated signature and header lists in parallel.

        A single header is shared by all signatures; otherwise the two
        lists must have the same length.
        """
        sigs = [x.strip() for x in sigs_str.split(",") if x.strip()]
        headers = [x.strip() for x in headers_str.split(",") if x.strip()]
        if len(headers) == 1:
            headers = headers * len(sigs)
        if len(headers) != len(sigs):
            raise ValueError("%s: Number of headers and signatures doesn't match: %r -- %r" % (
                self.name, sigs_str, headers_str))
        return [self._parse_signature(x) + (h,) for x, h in zip(sigs, headers)]

    def _parse_signature(self, sig):
        """Parse one 'func: in*out->ret' spec into (func, inarg, outarg, ret).

        FIX: the regex patterns are now raw strings -- '\\s' inside a
        plain string literal is an invalid escape sequence and a
        DeprecationWarning/SyntaxWarning in modern Python.
        """
        # Form with explicit output arguments: 'func: in*out->ret'
        m = re.match(r"\s*(.*):\s*([fdgFDGil]*)\s*\*\s*([fdgFDGil]*)\s*->\s*([*fdgFDGil]*)\s*$", sig)
        if m:
            func, inarg, outarg, ret = [x.strip() for x in m.groups()]
            if ret.count('*') > 1:
                raise ValueError("%s: Invalid signature: %r" % (self.name, sig))
            return (func, inarg, outarg, ret)
        # Simple form: 'func: in->ret'
        m = re.match(r"\s*(.*):\s*([fdgFDGil]*)\s*->\s*([fdgFDGil]?)\s*$", sig)
        if m:
            func, inarg, ret = [x.strip() for x in m.groups()]
            return (func, inarg, "", ret)
        raise ValueError("%s: Invalid signature: %r" % (self.name, sig))

    def _get_signatures_and_loops(self, all_loops):
        """Build all loop variants; return (variants, inarg_num, outarg_num)."""
        inarg_num = None
        outarg_num = None

        seen = set()
        variants = []

        def add_variant(func_name, inarg, outarg, ret, inp, outp):
            # Deduplicate by ufunc input signature; first registration wins.
            if inp in seen:
                return
            seen.add(inp)

            sig = (func_name, inp, outp)
            if "v" in outp:
                raise ValueError("%s: void signature %r" % (self.name, sig))
            if len(inp) != inarg_num or len(outp) != outarg_num:
                raise ValueError("%s: signature %r does not have %d/%d input/output args" % (
                    self.name, sig,
                    inarg_num, outarg_num))

            loop_name, loop = generate_loop(inarg, outarg, ret, inp, outp)
            all_loops[loop_name] = loop
            variants.append((func_name, loop_name, inp, outp))

        # First add base variants
        for func_name, inarg, outarg, ret, header in self.signatures:
            outp = re.sub(r'\*.*', '', ret) + outarg
            ret = ret.replace('*', '')
            if inarg_num is None:
                inarg_num = len(inarg)
                outarg_num = len(outp)

            inp, outp = list(iter_variants(inarg, outp))[0]
            add_variant(func_name, inarg, outarg, ret, inp, outp)

        # Then the supplementary ones
        for func_name, inarg, outarg, ret, header in self.signatures:
            outp = re.sub(r'\*.*', '', ret) + outarg
            ret = ret.replace('*', '')
            for inp, outp in iter_variants(inarg, outp):
                add_variant(func_name, inarg, outarg, ret, inp, outp)

        # Then sort variants to input argument cast order
        # -- the sort is stable, so functions earlier in the signature list
        #    are still preferred
        variants.sort(key=lambda v: cast_order(v[2]))

        return variants, inarg_num, outarg_num

    def cython_func_name(self, c_name, specialized=False, prefix="_func_",
                         override=True):
        """Return the Cython-side name used for kernel function *c_name*."""
        # act on function name overrides
        if override and c_name in self.function_name_overrides:
            c_name = self.function_name_overrides[c_name]
            prefix = ""

        # support fused types, e.g. 'eval_chebyt[double complex]'
        m = re.match(r'^(.*?)(\[.*\])$', c_name)
        if m:
            c_base_name, fused_part = m.groups()
        else:
            c_base_name, fused_part = c_name, ""
        if specialized:
            return "%s%s%s" % (prefix, c_base_name, fused_part.replace(' ', '_'))
        else:
            return "%s%s" % (prefix, c_base_name,)

    def get_prototypes(self):
        """Return (func_name, c_prototype, cython_prototype, header) tuples."""
        prototypes = []
        for func_name, inarg, outarg, ret, header in self.signatures:
            ret = ret.replace('*', '')
            c_args = ([C_TYPES[x] for x in inarg]
                      + [C_TYPES[x] + ' *' for x in outarg])
            cy_args = ([CY_TYPES[x] for x in inarg]
                       + [CY_TYPES[x] + ' *' for x in outarg])
            c_proto = "%s (*)(%s)" % (C_TYPES[ret], ", ".join(c_args))
            cy_proto = "%s (*)(%s) nogil" % (CY_TYPES[ret], ", ".join(cy_args))
            prototypes.append((func_name, c_proto, cy_proto, header))
        return prototypes

    def generate(self, all_loops):
        """Emit the Cython code that registers this ufunc at module init."""
        toplevel = ""

        variants, inarg_num, outarg_num = self._get_signatures_and_loops(all_loops)

        loops = []
        funcs = []
        types = []

        for func_name, loop_name, inputs, outputs in variants:
            for x in inputs:
                types.append(TYPE_NAMES[x])
            for x in outputs:
                types.append(TYPE_NAMES[x])
            loops.append(loop_name)
            funcs.append(func_name)

        toplevel += "cdef np.PyUFuncGenericFunction ufunc_%s_loops[%d]\n" % (self.name, len(loops))
        toplevel += "cdef void *ufunc_%s_ptr[%d]\n" % (self.name, 2*len(funcs))
        toplevel += "cdef void *ufunc_%s_data[%d]\n" % (self.name, len(funcs))
        toplevel += "cdef char ufunc_%s_types[%d]\n" % (self.name, len(types))
        toplevel += 'cdef char *ufunc_%s_doc = (\n "%s")\n' % (
            self.name,
            self.doc.replace("\\", "\\\\").replace('"', '\\"').replace('\n', '\\n\"\n "')
            )

        for j, function in enumerate(loops):
            toplevel += "ufunc_%s_loops[%d] = <np.PyUFuncGenericFunction>%s\n" % (self.name, j, function)
        # FIX: loop variable renamed from 'type' -- it shadowed the builtin.
        for j, typename in enumerate(types):
            toplevel += "ufunc_%s_types[%d] = <char>%s\n" % (self.name, j, typename)
        for j, func in enumerate(funcs):
            toplevel += "ufunc_%s_ptr[2*%d] = <void*>%s\n" % (self.name, j,
                                                              self.cython_func_name(func, specialized=True))
            toplevel += "ufunc_%s_ptr[2*%d+1] = <void*>(<char*>\"%s\")\n" % (self.name, j,
                                                                             self.name)
        for j, func in enumerate(funcs):
            toplevel += "ufunc_%s_data[%d] = &ufunc_%s_ptr[2*%d]\n" % (
                self.name, j, self.name, j)

        # FIX: use floor division -- with `from __future__ import division`
        # in effect, '/' would produce a float here.
        toplevel += ('@ = np.PyUFunc_FromFuncAndData(ufunc_@_loops, '
                     'ufunc_@_data, ufunc_@_types, %d, %d, %d, 0, '
                     '"@", ufunc_@_doc, 0)\n' % (len(types)//(inarg_num+outarg_num),
                                                 inarg_num, outarg_num)
                     ).replace('@', self.name)

        return toplevel

    @classmethod
    def parse_all(cls, ufunc_str):
        """Parse the UFUNCS listing into a name-sorted list of Ufunc objects."""
        ufuncs = []

        lines = ufunc_str.splitlines()
        lines.sort()

        for line in lines:
            line = line.strip()
            if not line:
                continue
            m = re.match(r"^([a-z0-9_]+)\s*--\s*(.*?)\s*--(.*)$", line)
            if not m:
                raise ValueError("Unparseable line %r" % line)
            ufuncs.append(cls(m.group(1), m.group(2), m.group(3)))
        return ufuncs
def get_declaration(ufunc, c_name, c_proto, cy_proto, header, proto_h_filename):
    """
    Construct a Cython declaration of a function coming either from a
    pxd or a header file. Do sufficient tricks to enable compile-time
    type checking against the signature expected by the ufunc.

    Returns (cython declaration lines, C header lines, sanitized name).
    """
    cython_decls = []
    header_decls = []
    var_name = c_name.replace('[', '_').replace(']', '_').replace(' ', '_')
    if header.endswith('.pxd'):
        # cimport the function from the pxd under its prefixed alias.
        cython_decls.append("from %s cimport %s as %s" % (
            header[:-4], ufunc.cython_func_name(c_name, prefix=""),
            ufunc.cython_func_name(c_name)))
        # check function signature at compile time
        proto_name = '_proto_%s_t' % var_name
        cython_decls.append("ctypedef %s" % (cy_proto.replace('(*)', proto_name)))
        cython_decls.append("cdef %s *%s_var = &%s" % (
            proto_name, proto_name, ufunc.cython_func_name(c_name, specialized=True)))
    else:
        # redeclare the function, so that the assumed
        # signature is checked at compile time
        new_name = "%s \"%s\"" % (ufunc.cython_func_name(c_name), c_name)
        cython_decls.append("cdef extern from \"%s\":" % proto_h_filename)
        cython_decls.append(" cdef %s" % (cy_proto.replace('(*)', new_name)))
        header_decls.append("#include \"%s\"" % header)
        header_decls.append("%s;" % (c_proto.replace('(*)', c_name)))
    return cython_decls, header_decls, var_name
def generate(filename, cxx_fn_prefix, ufuncs):
    """Write out the generated Cython sources: the main _ufuncs pyx plus the
    companion C++ module (pyx/pxd) and the two prototype headers."""
    proto_h_filename = os.path.splitext(filename)[0] + '_defs.h'
    cxx_proto_h_filename = cxx_fn_prefix + '_defs.h'
    cxx_pyx_filename = cxx_fn_prefix + ".pyx"
    cxx_pxd_filename = cxx_fn_prefix + ".pxd"
    toplevel = ""
    # for _ufuncs*
    defs = []
    defs_h = []
    all_loops = {}
    # for _ufuncs_cxx*
    cxx_defs = []
    cxx_pxd_defs = ["cdef int _set_errprint(int flag) nogil"]
    cxx_defs_h = []
    # Sort so the generated code is deterministic across runs.
    ufuncs.sort(key=lambda u: u.name)
    for ufunc in ufuncs:
        # generate function declaration and type checking snippets
        cfuncs = ufunc.get_prototypes()
        for c_name, c_proto, cy_proto, header in cfuncs:
            if header.endswith('++'):
                # A '++' suffix marks a C++ source: route it through the
                # separate _ufuncs_cxx module.
                header = header[:-2]
                # for the CXX module
                item_defs, item_defs_h, var_name = get_declaration(ufunc, c_name, c_proto, cy_proto,
                                                                   header, cxx_proto_h_filename)
                cxx_defs.extend(item_defs)
                cxx_defs_h.extend(item_defs_h)
                cxx_defs.append("cdef void *_export_%s = <void*>%s" % (
                    var_name, ufunc.cython_func_name(c_name, specialized=True, override=False)))
                cxx_pxd_defs.append("cdef void *_export_%s" % (var_name,))
                # let cython grab the function pointer from the c++ shared library
                ufunc.function_name_overrides[c_name] = "scipy.special._ufuncs_cxx._export_" + var_name
            else:
                # usual case
                item_defs, item_defs_h, _ = get_declaration(ufunc, c_name, c_proto, cy_proto, header,
                                                            proto_h_filename)
                defs.extend(item_defs)
                defs_h.extend(item_defs_h)
        # ufunc creation code snippet
        t = ufunc.generate(all_loops)
        toplevel += t + "\n"
    # Produce output
    toplevel = "\n".join(list(all_loops.values()) + defs + [toplevel])
    # NOTE(review): files are opened 'wb' but written str objects -- a
    # Python 2 idiom; under Python 3 these writes would need bytes.
    # EXTRA_CODE* are module-level template constants defined elsewhere
    # in this file.
    f = open(filename, 'wb')
    f.write(EXTRA_CODE_COMMON)
    f.write(EXTRA_CODE)
    f.write(toplevel)
    f.write(EXTRA_CODE_BOTTOM)
    f.close()
    # De-duplicate #includes before emitting the prototype headers.
    defs_h = unique(defs_h)
    f = open(proto_h_filename, 'wb')
    f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
    f.write("\n".join(defs_h))
    f.write("\n#endif\n")
    f.close()
    cxx_defs_h = unique(cxx_defs_h)
    f = open(cxx_proto_h_filename, 'wb')
    f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
    f.write("\n".join(cxx_defs_h))
    f.write("\n#endif\n")
    f.close()
    f = open(cxx_pyx_filename, 'wb')
    f.write(EXTRA_CODE_COMMON)
    f.write("\n".join(cxx_defs))
    f.write("\n# distutils: language = c++\n")
    f.close()
    f = open(cxx_pxd_filename, 'wb')
    f.write("\n".join(cxx_pxd_defs))
    f.close()
def unique(lst):
    """Return *lst* with duplicates removed, keeping only the first
    occurrence of each entry and preserving the original order."""
    encountered = set()
    result = []
    for entry in lst:
        if entry not in encountered:
            encountered.add(entry)
            result.append(entry)
    return result
def main():
    # Command-line entry point: accepts no positional arguments, then
    # regenerates the _ufuncs / _ufuncs_cxx sources from the UFUNCS
    # description string defined at module level.
    p = optparse.OptionParser(usage=__doc__.strip())
    options, args = p.parse_args()
    if len(args) != 0:
        p.error('invalid number of arguments')
    ufuncs = Ufunc.parse_all(UFUNCS)
    generate("_ufuncs.pyx", "_ufuncs_cxx", ufuncs)
if __name__ == "__main__":
main()
|
overtherain/scriptfile | refs/heads/master | software/googleAppEngine/google/appengine/_internal/django/core/management/commands/startproject.py | 23 | from google.appengine._internal.django.core.management.base import copy_helper, CommandError, LabelCommand
from google.appengine._internal.django.utils.importlib import import_module
import os
import re
from random import choice
class Command(LabelCommand):
    """Create the directory skeleton for a new Django project and seed its
    settings.py with a freshly generated SECRET_KEY."""
    help = "Creates a Django project directory structure for the given project name in the current directory."
    args = "[projectname]"
    label = 'project name'
    requires_model_validation = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = False
    def handle_label(self, project_name, **options):
        """Create the project layout for *project_name* in the cwd.

        Raises CommandError if *project_name* shadows an importable module.
        """
        # Determine the project_name a bit naively -- by looking at the name of
        # the parent directory.
        directory = os.getcwd()
        # Check that the project_name cannot be imported.
        try:
            import_module(project_name)
        except ImportError:
            pass
        else:
            raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as a project name. Please try another name." % project_name)
        copy_helper(self.style, 'project', project_name, directory)
        # Create a random SECRET_KEY hash, and put it in the main settings.
        main_settings_file = os.path.join(directory, project_name, 'settings.py')
        # BUG FIX: the original used bare open() calls (the read handle was
        # never closed, and the writer had no try/finally); context managers
        # guarantee both handles are closed even on error.
        with open(main_settings_file, 'r') as fp:
            settings_contents = fp.read()
        # NOTE(review): random.choice is not a CSPRNG; acceptable for a
        # scaffolding default, but random.SystemRandom().choice would be
        # preferable for SECRET_KEY material -- confirm before hardening.
        secret_key = ''.join(choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50))
        settings_contents = re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents)
        with open(main_settings_file, 'w') as fp:
            fp.write(settings_contents)
|
kuza55/dns-digger | refs/heads/master | src/core/archiver.py | 1 | __author__ = "Kevin Warrick"
__email__ = "kwarrick@uga.edu"
import sys
import json
import logging
import logging.handlers
import collections
import pika
class Archiver(object):
    """Write JSON responses to disk as CSV file.
    Archiver is a simple consumer which uses Python's logging module and the
    TimedRotatingFileHandler to archive raw JSON messages to disk as CSVs.
    """
    def __init__(self, queue, config):
        # queue: name of the RabbitMQ queue to consume from.
        # config: ConfigParser-style object with a [rabbitmq] section.
        self.queue = queue
        self.config = config
        self.rabbit_conn, self.rabbit_channel = self.connect()
    def connect(self):
        """Open a blocking RabbitMQ connection and channel using the
        host/port/user/password from the [rabbitmq] config section."""
        rabbit_config = dict(self.config.items('rabbitmq'))
        creds = pika.PlainCredentials(rabbit_config['user'], rabbit_config['password'])
        params = pika.ConnectionParameters(host=rabbit_config['host'],
                                           port=int(rabbit_config['port']),
                                           credentials=creds)
        rabbit_conn = pika.BlockingConnection(params)
        return rabbit_conn, rabbit_conn.channel()
    def message_handler(self, channel, method, properties, body):
        """Log one JSON message as a CSV line, then ack it.

        Messages whose 'flags' value contains 'ignore' are acked but not
        written.
        """
        message = collections.defaultdict(str, json.loads(body))
        if 'ignore' not in message.get('flags', ''):
            # Embedded newlines would corrupt the one-record-per-line output.
            message['response'] = message['response'].replace("\n", '')
            # NOTE(review): %(time)d raises TypeError if 'time' is missing
            # (the defaultdict supplies '', not an int); assumed always
            # present upstream -- confirm.
            self.logger.info("%(domain)s,%(resolver)s,%(time)d,%(response)s,%(error)s" % message)
        channel.basic_ack(delivery_tag=method.delivery_tag)
    def run(self):
        """Consume messages until KeyboardInterrupt; returns 0."""
        self.logger = logging.getLogger('archiver')
        self.logger.setLevel(logging.INFO)
        handler = logging.handlers.TimedRotatingFileHandler('log/responses', when='midnight')
        # BUG FIX: this was handler.setFormatter('') -- a str is not a
        # Formatter, and it only worked because '' is falsy, making logging
        # fall back to its default formatter. Make the bare-message format
        # explicit instead.
        handler.setFormatter(logging.Formatter('%(message)s'))
        self.logger.addHandler(handler)
        try:
            self.rabbit_channel.basic_consume(self.message_handler, queue=self.queue)
            self.rabbit_channel.start_consuming()
        except KeyboardInterrupt:
            self.rabbit_channel.stop_consuming()
            self.rabbit_conn.close()
        return 0
|
joram/sickbeard-orange | refs/heads/ThePirateBay | lib/bs4/element.py | 438 | import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
# Encoding used when serializing a tree without an explicit encoding.
DEFAULT_OUTPUT_ENCODING = "utf-8"
# True when running under Python 3.
PY3K = (sys.version_info[0] > 2)
# Matches runs of whitespace. Raw string: "\s" in a plain literal is an
# invalid escape (DeprecationWarning since 3.6, an error on 3.12+).
whitespace_re = re.compile(r"\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self):
return setattr(self, attr)
return alias
class NamespacedAttribute(unicode):
    """A (Python 2) unicode subclass for an attribute name that may carry a
    namespace prefix; the string value is "prefix:name" and the parts are
    kept as .prefix, .name and .namespace attributes."""
    def __new__(cls, prefix, name, namespace=None):
        if name is None:
            # Only a prefix was supplied; the string value is the prefix.
            obj = unicode.__new__(cls, prefix)
        elif prefix is None:
            # Not really namespaced.
            obj = unicode.__new__(cls, name)
        else:
            obj = unicode.__new__(cls, prefix + ":" + name)
        obj.prefix = prefix
        obj.name = name
        obj.namespace = namespace
        return obj
class AttributeValueWithCharsetSubstitution(unicode):
    """A stand-in object for a character encoding specified in HTML."""
    # Marker base class (Python 2 ``unicode`` subclass); subclasses override
    # encode() so the serializer can substitute the real output encoding.
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'charset' attribute.
    When Beautiful Soup parses the markup '<meta charset="utf8">', the
    value of the 'charset' attribute will be one of these objects.
    """
    def __new__(cls, original_value):
        # Behaves as the original string but remembers it, so encode()
        # can later be asked for a substituted value.
        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj
    def encode(self, encoding):
        # The "encoded" form of a charset attribute is simply the name of
        # the target encoding itself.
        return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'content' attribute.
    When Beautiful Soup parses the markup:
    <meta http-equiv="content-type" content="text/html; charset=utf8">
    The value of the 'content' attribute will be one of these objects.
    """
    # Matches the "charset=<encoding>" clause of a content-type value;
    # group 3 captures the encoding name.  Raw string literal: "\s" in a
    # plain literal is an invalid escape (deprecated on Python 3, an error
    # on 3.12+).
    CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)
    def __new__(cls, original_value):
        match = cls.CHARSET_RE.search(original_value)
        if match is None:
            # No substitution necessary -- deliberately return a plain
            # ``unicode`` object (not ``cls``), so encode() below is never
            # consulted for this value.
            return unicode.__new__(unicode, original_value)
        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj
    def encode(self, encoding):
        """Return the value with its charset clause rewritten to *encoding*."""
        def rewrite(match):
            return match.group(1) + encoding
        return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):
    """Entity substitution rules that are aware of some HTML quirks.
    Specifically, the contents of <script> and <style> tags should not
    undergo entity substitution.
    Incoming NavigableString objects are checked to see if they're the
    direct children of a <script> or <style> tag.
    """
    # Tags whose text content is treated as CDATA (left untouched).
    cdata_containing_tags = set(["script", "style"])
    # NOTE(review): not referenced within this class; presumably consumed
    # elsewhere (e.g. by prettify logic) -- confirm against callers.
    preformatted_tags = set(["pre"])
    @classmethod
    def _substitute_if_appropriate(cls, ns, f):
        # Apply substitution function ``f`` unless ``ns`` is a string that
        # sits directly inside a <script>/<style> tag.
        if (isinstance(ns, NavigableString)
            and ns.parent is not None
            and ns.parent.name in cls.cdata_containing_tags):
            # Do nothing.
            return ns
        # Substitute.
        return f(ns)
    @classmethod
    def substitute_html(cls, ns):
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_html)
    @classmethod
    def substitute_xml(cls, ns):
        return cls._substitute_if_appropriate(
            ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
    def setup(self, parent=None, previous_element=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        # Doubly link this element into the document-order chain.
        self.previous_element = previous_element
        if previous_element is not None:
            self.previous_element.next_element = self
        self.next_element = None
        # Sibling links start empty; filled in below or by later inserts.
        self.previous_sibling = None
        self.next_sibling = None
        if self.parent is not None and self.parent.contents:
            # This element follows the parent's current last child.
            self.previous_sibling = self.parent.contents[-1]
            self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
    def extract(self):
        """Destructively rips this element out of the tree.

        Detaches this element (and its subtree) from its parent and from
        both the document-order chain and the sibling chain, then returns
        it so it can be re-inserted elsewhere."""
        if self.parent is not None:
            del self.parent.contents[self.parent.index(self)]
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        last_child = self._last_descendant()
        next_element = last_child.next_element
        if self.previous_element is not None:
            self.previous_element.next_element = next_element
        if next_element is not None:
            next_element.previous_element = self.previous_element
        # Sever the subtree's outward document-order links.
        self.previous_element = None
        last_child.next_element = None
        self.parent = None
        # Stitch the sibling chain closed around the gap.
        if self.previous_sibling is not None:
            self.previous_sibling.next_sibling = self.next_sibling
        if self.next_sibling is not None:
            self.next_sibling.previous_sibling = self.previous_sibling
        self.previous_sibling = self.next_sibling = None
        return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"Finds the last element beneath this object to be parsed."
if is_initialized and self.next_sibling:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child == self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
    def insert(self, position, new_child):
        """Insert *new_child* into this element's contents at *position*,
        rewiring parent, sibling and document-order links.  Plain strings
        are wrapped in NavigableString; an element already in a tree is
        extracted first."""
        if new_child is self:
            raise ValueError("Cannot insert a tag into itself.")
        if (isinstance(new_child, basestring)
            and not isinstance(new_child, NavigableString)):
            new_child = NavigableString(new_child)
        # Clamp out-of-range positions to an append.
        position = min(position, len(self.contents))
        if hasattr(new_child, 'parent') and new_child.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if new_child.parent is self:
                current_index = self.index(new_child)
                if current_index < position:
                    # We're moving this element further down the list
                    # of this object's children. That means that when
                    # we extract this element, our target index will
                    # jump down one.
                    position -= 1
            new_child.extract()
        new_child.parent = self
        previous_child = None
        if position == 0:
            new_child.previous_sibling = None
            new_child.previous_element = self
        else:
            previous_child = self.contents[position - 1]
            new_child.previous_sibling = previous_child
            new_child.previous_sibling.next_sibling = new_child
            # In document order, the element before new_child is the
            # deepest last descendant of the preceding sibling.
            new_child.previous_element = previous_child._last_descendant(False)
        if new_child.previous_element is not None:
            new_child.previous_element.next_element = new_child
        new_childs_last_element = new_child._last_descendant(False)
        if position >= len(self.contents):
            new_child.next_sibling = None
            # Walk up the ancestors to find what follows this tag in
            # document order.
            parent = self
            parents_next_sibling = None
            while parents_next_sibling is None and parent is not None:
                parents_next_sibling = parent.next_sibling
                parent = parent.parent
                if parents_next_sibling is not None:
                    # We found the element that comes next in the document.
                    break
            if parents_next_sibling is not None:
                new_childs_last_element.next_element = parents_next_sibling
            else:
                # The last element of this tag is the last element in
                # the document.
                new_childs_last_element.next_element = None
        else:
            next_child = self.contents[position]
            new_child.next_sibling = next_child
            if new_child.next_sibling is not None:
                new_child.next_sibling.previous_sibling = new_child
            new_childs_last_element.next_element = next_child
        if new_childs_last_element.next_element is not None:
            new_childs_last_element.next_element.previous_element = new_childs_last_element
        self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
    def _find_all(self, name, attrs, text, limit, generator, **kwargs):
        """Iterates over a generator looking for things that match.

        ``generator`` yields candidate elements (e.g. next_elements,
        parents); ``limit``, when truthy, caps the number of results.
        Returns a ResultSet."""
        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        # Fast paths avoid running the full SoupStrainer machinery when
        # only a tag name (or no criterion at all) was given.
        if text is None and not limit and not attrs and not kwargs:
            if name is True or name is None:
                # Optimization to find all tags.
                result = (element for element in generator
                          if isinstance(element, Tag))
                return ResultSet(strainer, result)
            elif isinstance(name, basestring):
                # Optimization to find all tags with a given name.
                result = (element for element in generator
                          if isinstance(element, Tag)
                          and element.name == name)
                return ResultSet(strainer, result)
        results = ResultSet(strainer)
        while True:
            try:
                i = next(generator)
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                # Stop as soon as the requested number of matches is found.
                if limit and len(results) >= limit:
                    break
        return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
    def _attr_value_as_string(self, value, default=None):
        """Force an attribute value into a string representation.
        A multi-valued attribute will be converted into a
        space-separated string.

        Note: the ``value`` parameter is actually the attribute *name*;
        the value is looked up on this element via get().
        """
        value = self.get(value, default)
        if isinstance(value, list) or isinstance(value, tuple):
            value =" ".join(value)
        return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
# string represenation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
@property
def name(self):
return None
@name.setter
def name(self, name):
raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'?>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
    """An SGML declaration, rendered as '<!' + content + SUFFIX."""
    PREFIX = u'<!'
    # NOTE(review): this emits '<!foo!>' -- SGML declarations normally end
    # with a bare '>'. Flagged for confirmation rather than changed, since
    # altering SUFFIX changes rendered output.
    SUFFIX = u'!>'
class Doctype(PreformattedString):
    """A '<!DOCTYPE ...>' declaration."""
    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        # Build the doctype text from its components, e.g.
        # ('html', pub, sys) -> <!DOCTYPE html PUBLIC "pub" "sys">
        value = name or ''
        if pub_id is not None:
            value += ' PUBLIC "%s"' % pub_id
            # With a public ID, the system ID follows with no keyword.
            if system_id is not None:
                value += ' "%s"' % system_id
        elif system_id is not None:
            # A system ID alone requires the SYSTEM keyword.
            value += ' SYSTEM "%s"' % system_id
        return Doctype(value)
    PREFIX = u'<!DOCTYPE '
    SUFFIX = u'>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif attrs and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
    """Yield all strings of certain classes, possibly stripping them.

    By default, yields only NavigableString and CData objects. So
    no comments, processing instructions, etc.
    """
    for descendant in self.descendants:
        # types=None means "plain NavigableString instances only";
        # otherwise the descendant's exact type must be in `types`.
        if (
            (types is None and not isinstance(descendant, NavigableString))
            or
            (types is not None and type(descendant) not in types)):
            continue
        if strip:
            descendant = descendant.strip()
            if len(descendant) == 0:
                # Skip whitespace-only strings when stripping.
                continue
        yield descendant

# All matching strings in the subtree, unstripped.
strings = property(_all_strings)

@property
def stripped_strings(self):
    # Same as .strings, but stripped and with blank strings skipped.
    for string in self._all_strings(True):
        yield string
def get_text(self, separator=u"", strip=False,
             types=(NavigableString, CData)):
    """Concatenate every matching child string of this tag.

    The strings are joined with *separator*; *strip* and *types*
    are forwarded to ``_all_strings``.
    """
    return separator.join(self._all_strings(strip, types=types))
getText = get_text        # BS3 name
text = property(get_text)
def decompose(self):
    """Recursively destroys the contents of this tree."""
    # Detach from the parent tree first.
    self.extract()
    i = self
    while i is not None:
        next = i.next_element
        # Wipe each element's attributes so reference cycles are broken
        # and the objects can be garbage-collected.
        i.__dict__.clear()
        i.contents = []
        i = next
def clear(self, decompose=False):
    """Remove every child of this tag.

    When *decompose* is True, Tag children are destroyed with
    decompose(); all other children are merely extracted.
    """
    # Iterate over a copy, since extract()/decompose() mutate contents.
    for child in self.contents[:]:
        if decompose and isinstance(child, Tag):
            child.decompose()
        else:
            child.extract()
def index(self, element):
    """Return the position of *element* among this tag's children.

    Comparison is by identity, not equality, so this avoids the
    problem of list.index() matching any merely-equal element.
    """
    for position, child in enumerate(self.contents):
        if child is element:
            return position
    raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
    """Returns the value of the 'key' attribute for the tag, or
    the value given for 'default' if it doesn't have that
    attribute."""
    return self.attrs.get(key, default)

def has_attr(self, key):
    # True if this tag defines the given attribute.
    return key in self.attrs
def __hash__(self):
    # NOTE(review): hashes the full rendered markup, so the hash is
    # expensive and changes whenever the tree is mutated -- confirm
    # callers never use Tags as dict keys across mutations.
    return str(self).__hash__()

def __getitem__(self, key):
    """tag[key] returns the value of the 'key' attribute for the tag,
    and throws an exception if it's not there."""
    return self.attrs[key]

def __iter__(self):
    "Iterating over a tag iterates over its contents."
    return iter(self.contents)

def __len__(self):
    "The length of a tag is the length of its list of contents."
    return len(self.contents)

def __contains__(self, x):
    # Membership checks only direct children, not all descendants.
    return x in self.contents

def __nonzero__(self):
    "A tag is non-None even if it has no contents."
    return True

def __setitem__(self, key, value):
    """Setting tag[key] sets the value of the 'key' attribute for the
    tag."""
    self.attrs[key] = value

def __delitem__(self, key):
    "Deleting tag[key] deletes all 'key' attributes for the tag."
    # pop with default: deleting a missing attribute is a no-op.
    self.attrs.pop(key, None)

def __call__(self, *args, **kwargs):
    """Calling a tag like a function is the same as calling its
    find_all() method. Eg. tag('a') returns a list of all the A tags
    found within this tag."""
    return self.find_all(*args, **kwargs)

def __getattr__(self, tag):
    #print "Getattr %s.%s" % (self.__class__, tag)
    if len(tag) > 3 and tag.endswith('Tag'):
        # BS3: soup.aTag -> "soup.find("a")
        tag_name = tag[:-3]
        warnings.warn(
            '.%sTag is deprecated, use .find("%s") instead.' % (
                tag_name, tag_name))
        return self.find(tag_name)
    # We special case contents to avoid recursion.
    elif not tag.startswith("__") and not tag=="contents":
        return self.find(tag)
    raise AttributeError(
        "'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
    """Returns true iff this tag has the same name, the same attributes,
    and the same contents (recursively) as the given tag."""
    if self is other:
        return True
    # Anything without name/attrs/contents can't be equal to a tag.
    if not (hasattr(other, 'name')
            and hasattr(other, 'attrs')
            and hasattr(other, 'contents')):
        return False
    if (self.name != other.name
            or self.attrs != other.attrs
            or len(self) != len(other)):
        return False
    # Lengths match, so a pairwise comparison covers every child.
    return all(mine == theirs
               for mine, theirs in zip(self.contents, other.contents))

def __ne__(self, other):
    """Returns true iff this tag is not identical to the other tag,
    as defined in __eq__."""
    return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
    """Renders this tag as a string."""
    return self.encode(encoding)

def __unicode__(self):
    # Unicode rendering (Python 2's unicode()).
    return self.decode()

def __str__(self):
    return self.encode()

if PY3K:
    # Under Python 3, str() and repr() must return text, not bytes.
    __str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
           indent_level=None, formatter="minimal",
           errors="xmlcharrefreplace"):
    """Render this tag (and its contents) as an encoded bytestring."""
    # Turn the data structure into Unicode, then encode the
    # Unicode.
    u = self.decode(indent_level, encoding, formatter)
    return u.encode(encoding, errors)

def _should_pretty_print(self, indent_level):
    """Should this tag be pretty-printed?"""
    # Preformatted HTML tags (e.g. <pre>) keep their whitespace;
    # XML has no preformatted tags, so XML is always eligible.
    return (
        indent_level is not None and
        (self.name not in HTMLAwareEntitySubstitution.preformatted_tags
         or self._is_xml))
def decode(self, indent_level=None,
           eventual_encoding=DEFAULT_OUTPUT_ENCODING,
           formatter="minimal"):
    """Returns a Unicode representation of this tag and its contents.

    :param eventual_encoding: The tag is destined to be
       encoded into this encoding. This method is _not_
       responsible for performing that encoding. This information
       is passed in so that it can be substituted in if the
       document contains a <META> tag that mentions the document's
       encoding.
    """
    # First off, turn a string formatter into a function. This
    # will stop the lookup from happening over and over again.
    if not callable(formatter):
        formatter = self._formatter_for_name(formatter)

    # Render the attributes, sorted by name for stable output.
    attrs = []
    if self.attrs:
        for key, val in sorted(self.attrs.items()):
            if val is None:
                # Valueless attribute, e.g. <input disabled>.
                decoded = key
            else:
                if isinstance(val, list) or isinstance(val, tuple):
                    # Multi-valued attribute (e.g. class).
                    val = ' '.join(val)
                elif not isinstance(val, basestring):
                    val = unicode(val)
                elif (
                    isinstance(val, AttributeValueWithCharsetSubstitution)
                    and eventual_encoding is not None):
                    # e.g. a <meta charset> value that must reflect the
                    # eventual document encoding.
                    val = val.encode(eventual_encoding)

                text = self.format_string(val, formatter)
                decoded = (
                    unicode(key) + '='
                    + EntitySubstitution.quoted_attribute_value(text))
            attrs.append(decoded)
    close = ''
    closeTag = ''

    prefix = ''
    if self.prefix:
        prefix = self.prefix + ":"

    if self.is_empty_element:
        # Self-closing form: <br/>.
        close = '/'
    else:
        closeTag = '</%s%s>' % (prefix, self.name)

    pretty_print = self._should_pretty_print(indent_level)
    space = ''
    indent_space = ''
    if indent_level is not None:
        indent_space = (' ' * (indent_level - 1))
    if pretty_print:
        space = indent_space
        indent_contents = indent_level + 1
    else:
        indent_contents = None
    contents = self.decode_contents(
        indent_contents, eventual_encoding, formatter)

    if self.hidden:
        # This is the 'document root' object: no tags of its own.
        s = contents
    else:
        s = []
        attribute_string = ''
        if attrs:
            attribute_string = ' ' + ' '.join(attrs)
        if indent_level is not None:
            # Even if this particular tag is not pretty-printed,
            # we should indent up to the start of the tag.
            s.append(indent_space)
        s.append('<%s%s%s%s>' % (
            prefix, self.name, attribute_string, close))
        if pretty_print:
            s.append("\n")
        s.append(contents)
        if pretty_print and contents and contents[-1] != "\n":
            s.append("\n")
        if pretty_print and closeTag:
            s.append(space)
        s.append(closeTag)
        if indent_level is not None and closeTag and self.next_sibling:
            # Even if this particular tag is not pretty-printed,
            # we're now done with the tag, and we should add a
            # newline if appropriate.
            s.append("\n")
        s = ''.join(s)
    return s
def prettify(self, encoding=None, formatter="minimal"):
    """Render this tag indented, one element per line.

    Returns Unicode when *encoding* is None, otherwise a bytestring.
    """
    # Passing True as indent_level switches on pretty-printing.
    if encoding is None:
        return self.decode(True, formatter=formatter)
    else:
        return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
                    eventual_encoding=DEFAULT_OUTPUT_ENCODING,
                    formatter="minimal"):
    """Renders the contents of this tag as a Unicode string.

    :param eventual_encoding: The tag is destined to be
       encoded into this encoding. This method is _not_
       responsible for performing that encoding. This information
       is passed in so that it can be substituted in if the
       document contains a <META> tag that mentions the document's
       encoding.
    """
    # First off, turn a string formatter into a function. This
    # will stop the lookup from happening over and over again.
    if not callable(formatter):
        formatter = self._formatter_for_name(formatter)

    pretty_print = (indent_level is not None)
    s = []
    for c in self:
        text = None
        if isinstance(c, NavigableString):
            text = c.output_ready(formatter)
        elif isinstance(c, Tag):
            # Child tags render themselves recursively.
            s.append(c.decode(indent_level, eventual_encoding,
                              formatter))
        # Inside <pre>, whitespace is significant -- never strip/indent.
        if text and indent_level and not self.name == 'pre':
            text = text.strip()
        if text:
            if pretty_print and not self.name == 'pre':
                s.append(" " * (indent_level - 1))
            s.append(text)
            if pretty_print and not self.name == 'pre':
                s.append("\n")
    return ''.join(s)
def encode_contents(
    self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
    formatter="minimal"):
    """Renders the contents of this tag as a bytestring."""
    contents = self.decode_contents(indent_level, encoding, formatter)
    return contents.encode(encoding)

# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    # Translate the BS3 argument convention onto encode_contents().
    if not prettyPrint:
        indentLevel = None
    return self.encode_contents(
        indent_level=indentLevel, encoding=encoding)
#Soup methods

def find(self, name=None, attrs={}, recursive=True, text=None,
         **kwargs):
    """Return only the first child of this Tag matching the given
    criteria."""
    # Delegate to find_all with limit=1 and unwrap the result.
    matches = self.find_all(name, attrs, recursive, text, 1, **kwargs)
    if matches:
        return matches[0]
    return None
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
             limit=None, **kwargs):
    """Extracts a list of Tag objects that match the given
    criteria.  You can specify the name of the Tag and any
    attributes you want the Tag to have.

    The value of a key-value pair in the 'attrs' map can be a
    string, a list of strings, a regular expression object, or a
    callable that takes a string and returns whether or not the
    string matches for some custom definition of 'matches'. The
    same is true of the tag name.
    """
    # Search the whole subtree by default; only direct children
    # when recursive=False.
    generator = self.descendants
    if not recursive:
        generator = self.children
    return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all       # BS3
findChildren = find_all  # BS2
#Generator methods
@property
def children(self):
    """Iterate over this tag's direct children only."""
    # return iter() to make the purpose of the method clear
    return iter(self.contents)  # XXX This seems to be untested.

@property
def descendants(self):
    """Yield every element below this tag, in document order."""
    if not len(self.contents):
        return
    # Follow next_element links until we step past the subtree's
    # last descendant.
    stopNode = self._last_descendant().next_element
    current = self.contents[0]
    while current is not stopNode:
        yield current
        current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token in enumerate(tokens):
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
if tag_name:
check = "[any]"
else:
check = tag_name
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
# Old names for backwards compatibility
def childGenerator(self):
    # BS3 alias for .children.
    return self.children

def recursiveChildGenerator(self):
    # BS3 alias for .descendants.
    return self.descendants

def has_key(self, key):
    """This was kind of misleading because has_key() (attributes)
    was different from __in__ (contents). has_key() is gone in
    Python 3, anyway."""
    warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
        key))
    return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        # name/text/attr values may each be a string, bytestring, regex,
        # callable, boolean, or list of such; normalize them all.
        self.name = self._normalize_search_value(name)
        if not isinstance(attrs, dict):
            # Treat a non-dict value for attrs as a search for the 'class'
            # attribute.
            kwargs['class'] = attrs
            attrs = None

        if 'class_' in kwargs:
            # Treat class_="foo" as a search for the 'class'
            # attribute, overriding any non-dict value for attrs.
            kwargs['class'] = kwargs['class_']
            del kwargs['class_']

        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        # NOTE(review): if a non-dict attrs is given with no kwargs,
        # attrs is None here and .items() would raise -- presumably
        # callers never hit that path; confirm.
        normalized_attrs = {}
        for key, value in attrs.items():
            normalized_attrs[key] = self._normalize_search_value(value)

        self.attrs = normalized_attrs
        self.text = self._normalize_search_value(text)

    def _normalize_search_value(self, value):
        # Leave it alone if it's a Unicode string, a callable, a
        # regular expression, a boolean, or None.
        if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
            or isinstance(value, bool) or value is None):
            return value

        # If it's a bytestring, convert it to Unicode, treating it as UTF-8.
        if isinstance(value, bytes):
            return value.decode("utf8")

        # If it's listlike, convert it into a list of strings.
        if hasattr(value, '__iter__'):
            new_value = []
            for v in value:
                if (hasattr(v, '__iter__') and not isinstance(v, bytes)
                    and not isinstance(v, unicode)):
                    # This is almost certainly the user's mistake. In the
                    # interests of avoiding infinite loops, we'll let
                    # it through as-is rather than doing a recursive call.
                    new_value.append(v)
                else:
                    new_value.append(self._normalize_search_value(v))
            return new_value

        # Otherwise, convert it into a Unicode string.
        # The unicode(str()) thing is so this will do the same thing on Python 2
        # and Python 3.
        return unicode(str(value))

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def search_tag(self, markup_name=None, markup_attrs={}):
        """Check whether a tag (given by name+attrs, or as a Tag object)
        matches this strainer; return the matched object or None."""
        found = None
        markup = None
        if isinstance(markup_name, Tag):
            markup = markup_name
            markup_attrs = markup
        call_function_with_tag_data = (
            isinstance(self.name, collections.Callable)
            and not isinstance(markup_name, Tag))

        if ((not self.name)
            or call_function_with_tag_data
            or (markup and self._matches(markup, self.name))
            or (not markup and self._matches(markup_name, self.name))):
            if call_function_with_tag_data:
                match = self.name(markup_name, markup_attrs)
            else:
                match = True
                markup_attr_map = None
                for attr, match_against in list(self.attrs.items()):
                    if not markup_attr_map:
                        # Build a dict-like view of the tag's attributes
                        # lazily, only when there is something to match.
                        if hasattr(markup_attrs, 'get'):
                            markup_attr_map = markup_attrs
                        else:
                            markup_attr_map = {}
                            for k, v in markup_attrs:
                                markup_attr_map[k] = v
                    attr_value = markup_attr_map.get(attr)
                    if not self._matches(attr_value, match_against):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markup_name
        if found and self.text and not self._matches(found.string, self.text):
            found = None
        return found
    searchTag = search_tag

    def search(self, markup):
        """Match this strainer against an element (tag, string, or a list
        of either); return the matched element or None."""
        # print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text or self.name or self.attrs:
                found = self.search_tag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if not self.name and not self.attrs and self._matches(markup, self.text):
                found = markup
        else:
            raise Exception(
                "I don't know how to match against a %s" % markup.__class__)
        return found

    def _matches(self, markup, match_against):
        # print u"Matching %s against %s" % (markup, match_against)
        result = False
        if isinstance(markup, list) or isinstance(markup, tuple):
            # This should only happen when searching a multi-valued attribute
            # like 'class'.
            if (isinstance(match_against, unicode)
                and ' ' in match_against):
                # A bit of a special case. If they try to match "foo
                # bar" on a multivalue attribute's value, only accept
                # the literal value "foo bar"
                #
                # XXX This is going to be pretty slow because we keep
                # splitting match_against. But it shouldn't come up
                # too often.
                return (whitespace_re.split(match_against) == markup)
            else:
                for item in markup:
                    if self._matches(item, match_against):
                        return True
                return False

        if match_against is True:
            # True matches any non-None value.
            return markup is not None

        if isinstance(match_against, collections.Callable):
            return match_against(markup)

        # Custom callables take the tag as an argument, but all
        # other ways of matching match the tag name as a string.
        if isinstance(markup, Tag):
            markup = markup.name

        # Ensure that `markup` is either a Unicode string, or None.
        markup = self._normalize_search_value(markup)

        if markup is None:
            # None matches None, False, an empty string, an empty list, and so on.
            return not match_against

        if isinstance(match_against, unicode):
            # Exact string match
            return markup == match_against

        if hasattr(match_against, 'match'):
            # Regexp match
            return match_against.search(markup)

        if hasattr(match_against, '__iter__'):
            # The markup must be an exact match against something
            # in the iterable.
            return markup in match_against
class ResultSet(list):
    """A plain list that additionally remembers the SoupStrainer
    that produced it (available as ``.source``)."""

    def __init__(self, source, result=()):
        # Seed the list with the initial results, then record the strainer.
        list.__init__(self, result)
        self.source = source
|
nojhan/weboob-devel | refs/heads/master | modules/allrecipes/test.py | 1 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
import itertools
class AllrecipesTest(BackendTest):
    """Integration test for the allrecipes weboob module."""
    MODULE = 'allrecipes'

    def test_recipe(self):
        # Take at most the first 20 search results, then check that the
        # first fully-fetched recipe carries the essential fields.
        recipes = list(itertools.islice(self.backend.iter_recipes('french fries'), 0, 20))
        assert len(recipes)
        full_recipe = self.backend.get_recipe(recipes[0].id)
        assert full_recipe.instructions
        assert full_recipe.ingredients
        assert full_recipe.title
|
edhuckle/statsmodels | refs/heads/master | docs/source/plots/var_plot_fevd.py | 39 | from var_plots import plot_fevd
plot_fevd()
|
ging/keystone | refs/heads/master | keystone/openstack/common/config/__init__.py | 12133432 | |
raoanirudh/rmtk | refs/heads/master | rmtk/vulnerability/capacity.py | 12133432 | |
izonder/intellij-community | refs/heads/master | python/testData/formatter/alignListComprehensionInDict.py | 80 | def foo():
return {field.key: field for key, field in inspect.getmembers(instance)
if isinstance(field, QueryableAttribute)
and isinstance(field.property, ColumnProperty)
or field.foreign_keys} |
mmolero/pcloudpy | refs/heads/master | pcloudpy/gui/MainWindowBase.py | 1 | """
Template MainWindowBase.py
"""
#Author: Miguel Molero <miguel.molero@gmail.com>
import sys
import os
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal as Signal
import markdown2
import yaml
import pprint
#own components
from pcloudpy.gui.resources_rc import *
#from pcloudpy.gui.graphics.QVTKWidget import QVTKWidget
from pcloudpy.gui.AppObject import AppObject
from pcloudpy.gui.utils.qhelpers import *
from pcloudpy.gui.components.ViewWidget import ViewWidget
from pcloudpy.gui.components.TabViewWidget import TabViewWidget
from pcloudpy.gui.components.ToolboxesWidget import ToolBoxesWidget
from pcloudpy.gui.components.DatasetsWidget import DatasetsWidget
from pcloudpy.gui.components.ObjectInspectorWidget import ObjectInspectorWidget
from pcloudpy.gui.components.FilterWidget import FilterWidget
#from shell.PythonConsole import PythonConsole
#from shell.IPythonConsole import IPythonConsole
#from shell.CodeEdit import CodeEdit
NAME = "pcloudpy"
class Info(object):
    """Static version metadata shown in the About dialog."""
    # version string
    version = "0.10"
    # release date, DD-MM-YYYY
    date = "27-10-2015"
class MainWindowBase(QMainWindow):
"""
Base Class for the MainWindow Object. This class should inherit its attributes and methods to a MainWindow Class
"""
def __init__(self, parent = None):
super(MainWindowBase, self).__init__(parent)
self.setLocale((QLocale(QLocale.English, QLocale.UnitedStates)))
self._software_name = NAME
self.App = AppObject()
self.init()
self.create_menus()
self.create_toolbars()
self.setup_docks()
self.setup_graphicsview()
self.setup_statusbar()
self.setup_connections()
self.init_settings()
self.init_toolboxes()
QTimer.singleShot(0,self.load_initial_file)
@property
def software_name(self):
return self._software_name
@software_name.setter
def software_name(self, name):
self._software_name = name
def init(self):
self.Info = Info()
self.dirty = False
self.reset = False
self.filename = None
self.recent_files = []
self.dir_path = os.getcwd()
self.setGeometry(100,100,900,600)
self.setMinimumSize(400,400)
self.setMaximumSize(2000,1500)
self.setWindowFlags(self.windowFlags())
self.setWindowTitle(self.software_name)
#Put here your init code
def set_title(self, fname=None):
title = os.path.basename(fname)
self.setWindowTitle("%s:%s"%(self.softwareName,title))
def load_initial_file(self):
settings = QSettings()
fname = settings.value("LastFile")
if fname and QFile.exists(fname):
self.load_file(fname)
def load_file(self, fname=None):
if fname is None:
action = self.sender()
if isinstance(action, QAction):
fname = action.data()
if not self.ok_to_Continue():
return
else:
return
if fname:
self.filename = None
self.add_recent_file(fname)
self.filename = fname
self.dirty = False
self.set_title(fname)
#Add More actions
#
#
def add_recent_file(self, fname):
if fname is None:
return
if not self.recentFiles.count(fname):
self.recentFiles.insert(0,fname)
while len(self.recentFiles)>9:
self.recentFiles.pop()
def create_menus(self):
self.menubar = self.menuBar()
file_menu = self.menubar.addMenu(self.tr('&File'))
help_menu = self.menubar.addMenu(self.tr("&Help"))
file_open_action = createAction(self, "&Open Dataset[s]", self.file_open)
file_open_action.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))
help_about_action = createAction(self, "&About %s"%self._software_name, self.help_about, icon="pcloudpy.png")
addActions(file_menu, (file_open_action,))
addActions(help_menu, (help_about_action,))
def setup_connections(self):
#Main Window
self.workspaceLineEdit.textEdited.connect(self.editWorkSpace)
#self.code_edit.codeRequested.connect(self.console_widget.execute_code)
def setup_docks(self):
#Toolboxes
self.toolboxes_widget = ToolBoxesWidget()
self.toolboxes_dockwidget = QDockWidget(self.tr("Toolboxes"))
self.toolboxes_dockwidget.setObjectName("Toolboxes-Dock")
self.toolboxes_dockwidget.setWidget(self.toolboxes_widget)
self.toolboxes_dockwidget.setAllowedAreas(Qt.RightDockWidgetArea)
self.addDockWidget(Qt.RightDockWidgetArea, self.toolboxes_dockwidget)
#Datasets
self.datasets_widget = DatasetsWidget()
self.datasets_dockwidget = QDockWidget(self.tr("Datasets"))
self.datasets_dockwidget.setObjectName("Datasets-Dock")
self.datasets_dockwidget.setWidget(self.datasets_widget)
self.datasets_dockwidget.setAllowedAreas(Qt.LeftDockWidgetArea)
self.addDockWidget(Qt.LeftDockWidgetArea, self.datasets_dockwidget)
#Object Inspector
self.object_inspector_widget = ObjectInspectorWidget()
self.object_inspector_dockwidget = QDockWidget(self.tr("Object Inspector"))
self.object_inspector_dockwidget.setObjectName("Object-Inspector-Dock")
self.object_inspector_dockwidget.setWidget(self.object_inspector_widget)
self.object_inspector_dockwidget.setAllowedAreas(Qt.LeftDockWidgetArea)
self.addDockWidget(Qt.LeftDockWidgetArea, self.object_inspector_dockwidget)
#Filter Widget
self.filter_widget = FilterWidget()
self.filter_widget_dockwidget = QDockWidget(self.tr("Filter Setup"))
self.filter_widget_dockwidget.setObjectName("Filter-Setup-Dock")
self.filter_widget_dockwidget.setWidget(self.filter_widget)
self.filter_widget_dockwidget.setAllowedAreas(Qt.RightDockWidgetArea)
self.addDockWidget(Qt.RightDockWidgetArea, self.filter_widget_dockwidget)
#Console
self.tab_console = QTabWidget()
#self.console_widget = IPythonConsole(self, self.App)
#self.code_edit = CodeEdit()
#self.tab_console.addTab(self.console_widget, "Console")
#self.tab_console.addTab(self.code_edit, "Editor")
#self.console_widget_dockwidget = QDockWidget(self.tr("IPython"))
#self.console_widget_dockwidget.setObjectName("Console-Dock")
#self.console_widget_dockwidget.setWidget(self.tab_console)
#self.console_widget_dockwidget.setAllowedAreas(Qt.BottomDockWidgetArea)
#self.addDockWidget(Qt.BottomDockWidgetArea, self.console_widget_dockwidget)
def create_toolbars(self):
self.actionOpen_WorkSpace = createAction(self,"Set Workspace", self.setWorkSpace)
self.actionOpen_WorkSpace.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))
self.first_toolbar = QToolBar(self)
self.first_toolbar.setObjectName("Workspace Toolbar")
self.first_toolbar.setAllowedAreas(Qt.TopToolBarArea | Qt.BottomToolBarArea)
self.workspaceLineEdit = QLineEdit()
self.workspaceLineEdit.setMinimumWidth(200)
self.first_toolbar.addWidget(QLabel("Workspace Dir"))
self.first_toolbar.addWidget(self.workspaceLineEdit)
self.first_toolbar.addAction(self.actionOpen_WorkSpace)
self.addToolBar(self.first_toolbar)
if self.dir_path is None:
self.dir_path = os.getcwd()
self.workspaceLineEdit.setText(self.dir_path)
self.addToolBarBreak()
def setup_graphicsview(self):
self.tab_view = TabViewWidget(self)
view = ViewWidget()
self.tab_view.addTab(view, "Layout #1")
self.setCentralWidget(self.tab_view)
#
self.datasets_widget.init_tree(view.model)
def setup_statusbar(self):
self.status = self.statusBar()
self.status.setSizeGripEnabled(False)
#Add more action
def setWorkSpace(self):
dir = QFileDialog.getExistingDirectory(None, self.tr("Set Workspace directory"), self.dir_path, QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
if dir:
self.dir_path = dir
self.workspaceLineEdit.setText(self.dir_path)
def editWorkSpace(self):
if os.path.isdir(self.workspaceLineEdit.text()):
self.dir_path = self.workspaceLineEdit.text()
def init_settings(self):
settings = QSettings()
self.recentFiles = settings.value("RecentFiles")
size = settings.value("MainWindow/Size",QSize(900,600))
position = settings.value("MainWindow/Position",QPoint(50,50))
self.restoreState(settings.value("MainWindow/State"))
self.dir_path = settings.value("DirPath")
#Retrives more options
if self.recentFiles is None:
self.recentFiles = []
self.resize(size)
self.move(position)
#Add more actions
self.workspaceLineEdit.setText(self.dir_path)
def reset_settings(self):
settings = QSettings()
settings.clear()
self.reset = True
self.close()
def init_toolboxes(self):
if hasattr(sys, 'frozen'):
#http://stackoverflow.com/questions/14750997/load-txt-file-from-resources-in-python
fd = QFile(":/config_toolboxes.yaml")
if fd.open(QIODevice.ReadOnly | QFile.Text):
text = QTextStream(fd).readAll()
fd.close()
data = yaml.load(text)
else:
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path,'resources', 'conf', 'config_toolboxes.yaml'), 'r') as f:
# use safe_load instead load
data = yaml.safe_load(f)
#pp = pprint.PrettyPrinter()
#pp.pprint(data)
self.toolboxes_widget.init_tree(data)
def ok_to_continue(self):
if self.dirty:
reply = QMessageBox.question(self,
"%s - Unsaved Changes"%self.softwareName,
"Save unsaved changes?",
QMessageBox.Yes|QMessageBox.No|QMessageBox.Cancel)
if reply == QMessageBox.Cancel:
return False
elif reply == QMessageBox.Yes:
self.file_save()
return True
    def file_new(self):
        """Create a new document (not yet implemented)."""
        pass
    def file_open(self):
        """Open an existing document (not yet implemented)."""
        pass
    def file_saveAs(self):
        """Save the current document under a new name (not yet implemented)."""
        pass
    def file_save(self):
        """Save the current document (not yet implemented)."""
        pass
def help_about(self):
message = read_file(":/about.md").format(self.Info.version, self.Info.date)
html = markdown2.markdown(str(message))
QMessageBox.about(self, "About %s"%NAME, html)
    def closeEvent(self, event):
        """Persist window state on close, unless a settings reset is pending.

        If the user cancels the unsaved-changes prompt, the close is ignored.
        """
        # After reset_settings() we must not re-save state over the freshly
        # cleared settings, so bail out before touching QSettings again.
        if self.reset:
            return
        if self.ok_to_continue():
            settings = QSettings()
            filename = self.filename if self.filename is not None else None
            settings.setValue("LastFile", filename)
            # Store None rather than an empty list so the key reads back cleanly.
            recentFiles = self.recentFiles if self.recentFiles else None
            settings.setValue("RecentFiles", recentFiles)
            settings.setValue("MainWindow/Size", self.size())
            settings.setValue("MainWindow/Position", self.pos())
            settings.setValue("MainWindow/State", self.saveState())
            settings.setValue("DirPath", self.dir_path)
            # Set more options here.
        else:
            event.ignore()
if __name__=='__main__':
    import sys
    app = QApplication(sys.argv)
    win = MainWindowBase()
    win.show()
    # Propagate Qt's exit status to the shell instead of discarding it.
    sys.exit(app.exec_())
|
cburbridge/mongodb_store | refs/heads/hydro-devel | mongodb_store/scripts/mongodb_server.py | 4 | #!/usr/bin/env python
import rospy
import subprocess
import sys
import os
import re
import signal
import errno
from std_srvs.srv import *
import mongodb_store.util
if not mongodb_store.util.check_for_pymongo():
sys.exit(1)
MongoClient = mongodb_store.util.import_MongoClient()
import pymongo
def is_socket_free(host, port):
    """Return True if nothing is accepting TCP connections on host:port.

    A successful connect (``connect_ex() == 0``) means some process is already
    listening there, so the port is *not* free.
    """
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return sock.connect_ex((host, port)) != 0
    finally:
        # The probe socket was previously leaked; always release the fd.
        sock.close()
class MongoServer(object):
    """ROS node that launches and supervises a MongoDB server process.

    Reads configuration from the ROS parameter server, spawns ``mongod`` as a
    subprocess, relays its log output to rosout, and advertises ROS services
    to shut the database down or to wait until it is ready.

    Note: written for Python 2 (old-style ``except X, e`` syntax).
    """
    def __init__(self):
        rospy.init_node("mongodb_server", anonymous=True)#, disable_signals=True)
        # Has the db already gone down, before the ros node?
        self._gone_down = False
        self._ready = False # is the db ready: when mongo says "waiting for connection"
        test_mode = rospy.get_param("~test_mode", False)
        self.repl_set = rospy.get_param("~repl_set", None)
        if test_mode:
            # Test mode: pick a random free ephemeral port and a throwaway
            # database directory under /tmp.
            import random
            default_host = "localhost"
            default_port = random.randrange(49152,65535)
            count = 0
            while not is_socket_free(default_host, default_port):
                default_port = random.randrange(49152,65535)
                count += 1
                if count > 100:
                    rospy.logerr("Can't find a free port to run the test server on.")
                    sys.exit(1)
            default_path = "/tmp/ros_mongodb_store_%d" % default_port
            os.mkdir(default_path)
        else:
            default_host = "localhost"
            default_port = 27017
            default_path = "/opt/ros/mongodb_store"
        # Get the database path
        self._db_path = rospy.get_param("~database_path", default_path)
        is_master = rospy.get_param("~master", True)
        if is_master:
            # Master instance: publish the chosen host/port as global
            # parameters so clients can find the datacentre.
            self._mongo_host = rospy.get_param("mongodb_host", default_host)
            rospy.set_param("mongodb_host",self._mongo_host)
            self._mongo_port = rospy.get_param("mongodb_port", default_port)
            rospy.set_param("mongodb_port",self._mongo_port)
        else:
            # Slave instance: host/port must be supplied as private parameters.
            self._mongo_host = rospy.get_param("~host")
            self._mongo_port = rospy.get_param("~port")
        rospy.loginfo("Mongo server address: "+self._mongo_host+":"+str(self._mongo_port))
        # Check that mongodb is installed
        try:
            mongov = subprocess.check_output(["mongod","--version"])
            match = re.search("db version v(\d+\.\d+\.\d+)",mongov)
            self._mongo_version=match.group(1)
        except subprocess.CalledProcessError:
            rospy.logerr("Can't find MongoDB executable. Is it installed?\nInstall it with \"sudo apt-get install mongodb\"")
            sys.exit(1)
        rospy.loginfo("Found MongoDB version " + self._mongo_version)
        # Check that the provided db path exists.
        if not os.path.exists(self._db_path):
            rospy.logerr("Can't find database at supplied path " + self._db_path + ". If this is a new DB, create it as an empty directory.")
            sys.exit(1)
        # Advertise ros services for db interaction
        self._shutdown_srv = rospy.Service("/datacentre/shutdown", Empty, self._shutdown_srv_cb)
        self._wait_ready_srv = rospy.Service("/datacentre/wait_ready",Empty,self._wait_ready_srv_cb)
        rospy.on_shutdown(self._on_node_shutdown)
        # Start the mongodb server; this blocks until mongod exits.
        self._mongo_loop()
    def _mongo_loop(self):
        """Spawn mongod and pump its stdout to rosout until the process exits."""
        # Blocker to prevent Ctrl-C being passed to the mongo server
        def block_mongo_kill():
            os.setpgrp()
            # signal.signal(signal.SIGINT, signal.SIG_IGN)
        cmd = ["mongod","--dbpath",self._db_path,"--port",str(self._mongo_port)]
        if self.repl_set is not None:
            cmd.append("--replSet")
            cmd.append(self.repl_set)
        self._mongo_process = subprocess.Popen(cmd,
                                               stdout=subprocess.PIPE,
                                               preexec_fn = block_mongo_kill)
        while self._mongo_process.poll() is None:# and not rospy.is_shutdown():
            try:
                stdout = self._mongo_process.stdout.readline()
            except IOError, e: # probably an interrupt because shutdown cut it up
                if e.errno == errno.EINTR:
                    continue
                else:
                    raise
            if stdout is not None:
                # Forward mongod log lines to the ROS log at a matching level.
                if stdout.find("ERROR") !=-1:
                    rospy.logerr(stdout.strip())
                else:
                    rospy.loginfo(stdout.strip())
                if stdout.find("waiting for connections on port") !=-1:
                    # mongod is now accepting connections; unblock wait_ready.
                    self._ready=True
                    if self.repl_set is not None:
                        try:
                            self.initialize_repl_set()
                        except Exception as e:
                            rospy.logwarn("initialzing replSet failed: %s" % e)
        if not rospy.is_shutdown():
            rospy.logerr("MongoDB process stopped!")
        if self._mongo_process.returncode!=0:
            rospy.logerr("Mongo process error! Exit code="+str(self._mongo_process.returncode))
            self._gone_down = True
        self._ready=False
    def _on_node_shutdown(self):
        """On ROS shutdown, ask the MongoDB server to shut itself down."""
        rospy.loginfo("Shutting down datacentre")
        if self._gone_down:
            rospy.logwarn("It looks like Mongo already died. Watch out as the DB might need recovery time at next run.")
            return
        try:
            c = MongoClient(self._mongo_host,self._mongo_port)
        except pymongo.errors.ConnectionFailure, c:
            pass
        try:
            # The shutdown command drops the connection, so AutoReconnect here
            # is expected and harmless.
            c.admin.command("shutdown")
        except pymongo.errors.AutoReconnect, a:
            pass
    def _shutdown_srv_cb(self,req):
        """Service callback: trigger node (and hence database) shutdown."""
        rospy.signal_shutdown("Shutdown request..")
        return EmptyResponse()
    def _wait_ready_srv_cb(self,req):
        """Service callback: block until mongod reports it accepts connections."""
        while not self._ready:
            rospy.sleep(0.1)
        return EmptyResponse()
    def initialize_repl_set(self):
        """Initiate the configured MongoDB replica set on the local server."""
        c = pymongo.Connection("%s:%d" % (self._mongo_host,self._mongo_port), slave_okay=True)
        c.admin.command("replSetInitiate")
        c.close()
if __name__ == '__main__':
    # Construct the server node; the constructor blocks in the mongod loop.
    MongoServer()
|
kuiwei/edx-platform | refs/heads/master | common/djangoapps/status/status.py | 86 | """
A tiny app that checks for a status message.
"""
from django.conf import settings
import json
import logging
import os
log = logging.getLogger(__name__)
def get_site_status_msg(course_id):
    """Return the status message to display for ``course_id``, or None.

    Looks for a JSON file at ``settings.STATUS_MESSAGE_PATH``. If found:

    * includes the value of the 'global' key, if any, and
    * appends the course-specific message for ``course_id``, if any,
      joined with "<br>".

    If anything goes wrong, returns None ("is there a status msg?" logic is
    not allowed to break the entire site).
    """
    try:
        if not os.path.isfile(settings.STATUS_MESSAGE_PATH):
            return None
        with open(settings.STATUS_MESSAGE_PATH) as f:
            status_dict = json.load(f)
        msg = status_dict.get('global', None)
        if course_id in status_dict:
            # Join the global and course-specific parts with a line break.
            msg = msg + "<br>" if msg else ''
            msg += status_dict[course_id]
        return msg
    except Exception:
        # Narrowed from a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; those should always propagate.
        log.exception("Error while getting a status message.")
        return None
|
grazor/ftwatch | refs/heads/master | web.py | 1 | #!/usr/bin/env python
import os
from ftwatch import app, base_path
from ftwatch.util import init_db
if __name__ == '__main__':
    # Bootstrap the database on first run, then start the development server.
    if not os.path.isfile(app.config['DB_PATH']):
        schema_file = os.path.join(base_path, 'schema.sql')
        init_db(schema_file)
    # Set app port here
    app.run(host='0.0.0.0', port=8000)
|
Brainiq7/Ananse | refs/heads/master | ananse_dl/extractor/crunchyroll.py | 1 | # encoding: utf-8
from __future__ import unicode_literals
import re
import json
import base64
import zlib
import xml.etree.ElementTree
from hashlib import sha1
from math import pow, sqrt, floor
from .subtitles import SubtitlesInfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
bytes_to_intlist,
intlist_to_bytes,
unified_strdate,
urlencode_postdata,
)
from ..aes import (
aes_cbc_decrypt,
inc,
)
from .common import InfoExtractor
class CrunchyrollIE(SubtitlesInfoExtractor):
    """Extractor for individual Crunchyroll video pages.

    Handles optional account login, stream-metadata lookup via Crunchyroll's
    RPC endpoints, and decryption/conversion of the encrypted subtitle XML
    into SRT or ASS format.
    """
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
    _TESTS = [
        # {
        #     'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
        #     'info_dict': {
        #         'id': '645513',
        #         'ext': 'flv',
        #         'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
        #         'description': 'md5:2d17137920c64f2f49981a7797d275ef',
        #         'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
        #         'uploader': 'Yomiuri Telecasting Corporation (YTV)',
        #         'upload_date': '20131013',
        #         'url': 're:(?!.*&)',
        #     },
        #     'params': {
        #         # rtmp
        #         'skip_download': True,
        #     },
        # },
        {
            'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
            'only_matching': True,
        }]

    # Maps requested vertical resolution to (video_encode_quality, video_format)
    # values expected by the RpcApiVideoEncode_GetStreamInfo endpoint.
    _FORMAT_IDS = {
        '360': ('60', '106'),
        '480': ('61', '106'),
        '720': ('62', '106'),
        '1080': ('80', '108'),
    }

    def _login(self):
        """Log in with the configured account, if credentials were supplied."""
        (username, password) = self._get_login_info()
        if username is None:
            return
        self.report_login()
        login_url = 'https://www.crunchyroll.com/?a=formhandler'
        data = urlencode_postdata({
            'formname': 'RpcApiUser_Login',
            'name': username,
            'password': password,
        })
        login_request = compat_urllib_request.Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        self._download_webpage(login_request, None, False, 'Wrong login info')

    def _real_initialize(self):
        self._login()

    def _decrypt_subtitles(self, data, iv, id):
        """Decrypt an encrypted subtitle payload.

        The AES key is derived from the numeric subtitle ``id`` via
        ``obfuscate_key``; the decrypted payload is zlib-compressed XML.
        """
        data = bytes_to_intlist(data)
        iv = bytes_to_intlist(iv)
        id = int(id)

        def obfuscate_key_aux(count, modulo, start):
            # Fibonacci-like sequence reduced mod `modulo`, offset into
            # printable range.
            output = list(start)
            for _ in range(count):
                output.append(output[-1] + output[-2])
            # cut off start values
            output = output[2:]
            output = list(map(lambda x: x % modulo + 33, output))
            return output

        def obfuscate_key(key):
            # Mirrors Crunchyroll's client-side key derivation.
            num1 = int(floor(pow(2, 25) * sqrt(6.9)))
            num2 = (num1 ^ key) << 5
            num3 = key ^ num1
            num4 = num3 ^ (num3 >> 3) ^ num2
            prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
            shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
            # Extend 160 Bit hash to 256 Bit
            return shaHash + [0] * 12

        key = obfuscate_key(id)

        # NOTE(review): this Counter class is never instantiated or used below
        # (decryption is CBC-mode); it appears to be dead code.
        class Counter:
            __value = iv

            def next_value(self):
                temp = self.__value
                self.__value = inc(self.__value)
                return temp
        decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
        return zlib.decompress(decrypted_data)

    def _convert_subtitles_to_srt(self, sub_root):
        """Render the decrypted subtitle XML tree as SRT text."""
        output = ''

        for i, event in enumerate(sub_root.findall('./events/event'), 1):
            # SRT uses comma as the millisecond separator.
            start = event.attrib['start'].replace('.', ',')
            end = event.attrib['end'].replace('.', ',')
            text = event.attrib['text'].replace('\\N', '\n')
            output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
        return output

    def _convert_subtitles_to_ass(self, sub_root):
        """Render the decrypted subtitle XML tree as an ASS (SSA v4+) script."""
        output = ''

        def ass_bool(strvalue):
            # ASS encodes booleans as 0 / -1.
            assvalue = '0'
            if strvalue == '1':
                assvalue = '-1'
            return assvalue

        output = '[Script Info]\n'
        output += 'Title: %s\n' % sub_root.attrib["title"]
        output += 'ScriptType: v4.00+\n'
        output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
        output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
        output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
        output += """ScaledBorderAndShadow: yes

[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
        for style in sub_root.findall('./styles/style'):
            output += 'Style: ' + style.attrib["name"]
            output += ',' + style.attrib["font_name"]
            output += ',' + style.attrib["font_size"]
            output += ',' + style.attrib["primary_colour"]
            output += ',' + style.attrib["secondary_colour"]
            output += ',' + style.attrib["outline_colour"]
            output += ',' + style.attrib["back_colour"]
            output += ',' + ass_bool(style.attrib["bold"])
            output += ',' + ass_bool(style.attrib["italic"])
            output += ',' + ass_bool(style.attrib["underline"])
            output += ',' + ass_bool(style.attrib["strikeout"])
            output += ',' + style.attrib["scale_x"]
            output += ',' + style.attrib["scale_y"]
            output += ',' + style.attrib["spacing"]
            output += ',' + style.attrib["angle"]
            output += ',' + style.attrib["border_style"]
            output += ',' + style.attrib["outline"]
            output += ',' + style.attrib["shadow"]
            output += ',' + style.attrib["alignment"]
            output += ',' + style.attrib["margin_l"]
            output += ',' + style.attrib["margin_r"]
            output += ',' + style.attrib["margin_v"]
            output += ',' + style.attrib["encoding"]
            output += '\n'

        output += """
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
        for event in sub_root.findall('./events/event'):
            output += 'Dialogue: 0'
            output += ',' + event.attrib["start"]
            output += ',' + event.attrib["end"]
            output += ',' + event.attrib["style"]
            output += ',' + event.attrib["name"]
            output += ',' + event.attrib["margin_l"]
            output += ',' + event.attrib["margin_r"]
            output += ',' + event.attrib["margin_v"]
            output += ',' + event.attrib["effect"]
            output += ',' + event.attrib["text"]
            output += '\n'

        return output

    def _real_extract(self, url):
        """Extract title, metadata, stream formats and subtitles for one video."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        if mobj.group('prefix') == 'm':
            # Mobile URLs: resolve to the canonical desktop page first.
            mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage')
            webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url')
        else:
            webpage_url = 'http://www.' + mobj.group('url')

        webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
        # A trailer notice means the full video is unavailable (e.g. region
        # or subscription restriction).
        note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='')
        if note_m:
            raise ExtractorError(note_m)

        mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage)
        if mobj:
            msg = json.loads(mobj.group('msg'))
            if msg.get('type') == 'error':
                raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)

        video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
        video_title = re.sub(r' {2,}', ' ', video_title)
        video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
        if not video_description:
            video_description = None
        video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
        if video_upload_date:
            video_upload_date = unified_strdate(video_upload_date)
        video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL)

        playerdata_url = compat_urllib_parse.unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
        playerdata_req = compat_urllib_request.Request(playerdata_url)
        playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
        playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')

        stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, 'stream_id')
        video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, 'thumbnail', fatal=False)

        formats = []
        # Each available quality is advertised on the page as a ?pNNN=1 flag.
        for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
            stream_quality, stream_format = self._FORMAT_IDS[fmt]
            video_format = fmt + 'p'
            streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
            # urlencode doesn't work!
            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
            streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
            streamdata = self._download_xml(
                streamdata_req, video_id,
                note='Downloading media info for %s' % video_format)
            video_url = streamdata.find('.//host').text
            video_play_path = streamdata.find('.//file').text
            formats.append({
                'url': video_url,
                'play_path': video_play_path,
                'ext': 'flv',
                'format': video_format,
                'format_id': video_format,
            })

        subtitles = {}
        sub_format = self._downloader.params.get('subtitlesformat', 'srt')
        for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
            sub_page = self._download_webpage(
                'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
                video_id, note='Downloading subtitles for ' + sub_name)
            id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
            iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
            data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
            if not id or not iv or not data:
                continue
            id = int(id)
            iv = base64.b64decode(iv)
            data = base64.b64decode(data)

            subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
            lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
            if not lang_code:
                continue
            sub_root = xml.etree.ElementTree.fromstring(subtitle)
            if sub_format == 'ass':
                subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
            else:
                subtitles[lang_code] = self._convert_subtitles_to_srt(sub_root)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        return {
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'thumbnail': video_thumbnail,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'subtitles': subtitles,
            'formats': formats,
        }
class CrunchyrollShowPlaylistIE(InfoExtractor):
    """Extractor for Crunchyroll show pages, yielding every episode as a playlist."""
    IE_NAME = "crunchyroll:playlist"
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'

    _TESTS = [{
        'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
        'info_dict': {
            'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
            'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
        },
        'playlist_count': 13,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        show_page = self._download_webpage(url, playlist_id)

        playlist_title = self._html_search_regex(
            r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
            show_page, 'title')
        relative_urls = re.findall(
            r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"',
            show_page)
        # The page lists episodes newest-first; emit them oldest-first.
        playlist_entries = [
            self.url_result('http://www.crunchyroll.com' + rel_url, 'Crunchyroll')
            for rel_url in reversed(relative_urls)
        ]

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': playlist_title,
            'entries': playlist_entries,
        }
|
pvtodorov/indra | refs/heads/master | indra/tests/test_preassembler.py | 2 | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
from collections import OrderedDict
from indra.preassembler import Preassembler, render_stmt_graph, \
flatten_evidence, flatten_stmts
from indra.preassembler.hierarchy_manager import HierarchyManager
from indra.sources import trips, reach
from indra.statements import Agent, Phosphorylation, BoundCondition, \
Dephosphorylation, Evidence, ModCondition, \
ActiveForm, MutCondition, Complex, \
Translocation, Activation, Inhibition, \
Deacetylation, Conversion, Concept, Influence, \
IncreaseAmount, DecreaseAmount, Statement
from indra.preassembler.hierarchy_manager import hierarchies
def test_duplicates():
    """Two identical statements collapse into a single unique statement."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    ras = Agent('RAS', db_refs={'FA': '03663'})
    assembler = Preassembler(hierarchies,
                             stmts=[Phosphorylation(src, ras),
                                    Phosphorylation(src, ras)])
    assembler.combine_duplicates()
    assert len(assembler.unique_stmts) == 1
def test_duplicates_copy():
    """combine_duplicates must not mutate the input statement list."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    ras = Agent('RAS', db_refs={'FA': '03663'})
    stmts = [
        Phosphorylation(src, ras, evidence=[Evidence(text='Text 1')]),
        Phosphorylation(src, ras, evidence=[Evidence(text='Text 2')]),
    ]
    assembler = Preassembler(hierarchies, stmts=stmts)
    assembler.combine_duplicates()
    assert len(assembler.unique_stmts) == 1
    # The original statements and their evidence are left untouched.
    assert len(stmts) == 2
    assert len(stmts[0].evidence) == 1
    assert len(stmts[1].evidence) == 1
def test_duplicates_sorting():
    """Deduplication works regardless of interleaved non-duplicate statements."""
    phos = ModCondition('phosphorylation')
    map2k1_generic = Agent('MAP2K1', mods=[phos])
    site_mods = [ModCondition('phosphorylation', 'serine', pos)
                 for pos in ('218', '222', '298')]
    map2k1_sites = Agent('MAP2K1', mods=site_mods)
    mapk3 = Agent('MAPK3')
    stmts = [
        Phosphorylation(map2k1_generic, mapk3, position='218'),
        Phosphorylation(map2k1_sites, mapk3),
        Phosphorylation(map2k1_generic, mapk3, position='218'),
    ]
    assembler = Preassembler(hierarchies, stmts=stmts)
    assembler.combine_duplicates()
    assert len(assembler.unique_stmts) == 2
def test_combine_duplicates():
    """Duplicates are merged and their evidence pooled per unique statement."""
    raf = Agent('RAF1')
    mek = Agent('MEK1')
    erk = Agent('ERK2')
    raf_phos_mek = [Phosphorylation(raf, mek, evidence=Evidence(text=t))
                    for t in ('foo', 'bar', 'baz', 'beep')]
    mek_phos_erk = [Phosphorylation(mek, erk, evidence=Evidence(text='foo2'))]
    mek_dephos_erk = [Dephosphorylation(mek, erk, evidence=Evidence(text=t))
                      for t in ('bar2', 'baz2', 'beep2')]
    src_dephos_kras = [Dephosphorylation(Agent('SRC'), Agent('KRAS'),
                                         evidence=Evidence(text='beep'))]
    stmts = raf_phos_mek + mek_phos_erk + mek_dephos_erk + src_dephos_kras
    assembler = Preassembler(hierarchies, stmts=stmts)
    assembler.combine_duplicates()
    # The unique statements come out sorted by their matches_key.
    unique = assembler.unique_stmts
    assert len(unique) == 4, len(unique)
    ev_counts = [len(s.evidence) for s in unique]
    assert unique[0].matches(mek_dephos_erk[0])   # MEK dephos ERK
    assert ev_counts[0] == 3, ev_counts[0]
    assert unique[1].matches(src_dephos_kras[0])  # SRC dephos KRAS
    assert ev_counts[1] == 1, ev_counts[1]
    assert unique[2].matches(mek_phos_erk[0])     # MEK phos ERK
    assert ev_counts[2] == 1, ev_counts[2]
    assert unique[3].matches(raf_phos_mek[0])     # RAF phos MEK
    assert ev_counts[3] == 4, ev_counts[3]
def test_combine_evidence_exact_duplicates():
    """Evidence with identical text is collapsed when statements are merged."""
    raf = Agent('RAF1')
    mek = Agent('MEK1')
    stmts = [Phosphorylation(raf, mek, evidence=Evidence(text=t))
             for t in ('foo', 'bar', 'bar')]
    assembler = Preassembler(hierarchies, stmts=stmts)
    assembler.combine_duplicates()
    assert len(assembler.unique_stmts) == 1
    merged_evidence = assembler.unique_stmts[0].evidence
    assert len(merged_evidence) == 2
    assert set(ev.text for ev in merged_evidence) == set(['foo', 'bar'])
def test_combine_evidence_exact_duplicates_different_raw_text():
    """Evidence differing only in agent raw text is kept as separate entries."""
    raf_lower = Agent('RAF1', db_refs={'TEXT': 'Raf'})
    raf_upper = Agent('RAF1', db_refs={'TEXT': 'RAF'})
    mek = Agent('MEK1')
    stmts = [
        Phosphorylation(raf_lower, mek, evidence=Evidence(text='foo')),
        Phosphorylation(raf_lower, mek, evidence=Evidence(text='bar')),
        Phosphorylation(raf_upper, mek, evidence=Evidence(text='bar')),
    ]
    assembler = Preassembler(hierarchies, stmts=stmts)
    assembler.combine_duplicates()
    assert len(assembler.unique_stmts) == 1
    merged_evidence = assembler.unique_stmts[0].evidence
    assert len(merged_evidence) == 3
    assert set(ev.text for ev in merged_evidence) == set(['foo', 'bar'])
def test_superfamily_refinement():
    """A gene-level statement should be supported by a family-level
    statement."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
    nras = Agent('NRAS', db_refs={'HGNC': '7989'})
    family_stmt = Phosphorylation(src, ras, 'tyrosine', '32')
    gene_stmt = Phosphorylation(src, nras, 'tyrosine', '32')
    assembler = Preassembler(hierarchies, stmts=[family_stmt, gene_stmt])
    top_level = assembler.combine_related()
    # Only the gene-level statement remains at the top, supported by the
    # family-level one.
    assert len(top_level) == 1
    assert top_level[0].equals(gene_stmt)
    assert len(top_level[0].supported_by) == 1
    assert top_level[0].supported_by[0].equals(family_stmt)
def test_superfamily_refinement_isa_or_partof():
    """A subunit-level statement is supported by a complex/family-level one."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    prkag1 = Agent('PRKAG1', db_refs={'HGNC': '9385'})
    ampk = Agent('AMPK', db_refs={'FPLX': 'AMPK'})
    family_stmt = Phosphorylation(src, ampk, 'tyrosine', '32')
    gene_stmt = Phosphorylation(src, prkag1, 'tyrosine', '32')
    assembler = Preassembler(hierarchies, stmts=[family_stmt, gene_stmt])
    top_level = assembler.combine_related()
    assert len(top_level) == 1
    assert top_level[0].equals(gene_stmt)
    assert len(top_level[0].supported_by) == 1
    assert top_level[0].supported_by[0].equals(family_stmt)
def test_modification_refinement():
    """A site-specific modification is supported by its site-less version."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    nras = Agent('NRAS', db_refs={'HGNC': '7989'})
    specific = Phosphorylation(src, nras, 'tyrosine', '32')
    generic = Phosphorylation(src, nras)
    assembler = Preassembler(hierarchies, stmts=[specific, generic])
    top_level = assembler.combine_related()
    assert len(top_level) == 1
    assert top_level[0].equals(specific)
    assert len(top_level[0].supported_by) == 1
    assert top_level[0].supported_by[0].equals(generic)
def test_modification_refinement_residue_noenz():
    """Residue-specific, enzyme-less phosphorylation refines the generic one."""
    erbb3 = Agent('Erbb3')
    generic = Phosphorylation(None, erbb3)
    residue_specific = Phosphorylation(None, erbb3, 'Y')
    assembler = Preassembler(hierarchies, stmts=[generic, residue_specific])
    assembler.combine_related()
    assert len(assembler.related_stmts) == 1
def test_modification_refinement_noenz():
    """A statement naming the enzyme refines the same statement without one."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    nras = Agent('NRAS', db_refs={'HGNC': '7989'})
    with_enz = Phosphorylation(src, nras, 'tyrosine', '32')
    without_enz = Phosphorylation(None, nras, 'tyrosine', '32')
    assembler = Preassembler(hierarchies, stmts=[with_enz, without_enz])
    top_level = assembler.combine_related()
    assert len(top_level) == 1
    assert top_level[0].equals(with_enz)
    assert len(top_level[0].supported_by) == 1
    assert top_level[0].supported_by[0].equals(without_enz)
    # The supports link points back to the refining statement.
    assert top_level[0].supported_by[0].supports[0].equals(with_enz)
def test_modification_refinement_noenz2():
    """Like test_modification_refinement_noenz, for the case where one agent
    (SIRT1) is part of a hierarchy component while the other (BECN1) is not.
    """
    sirt1 = Agent('SIRT1',
                  db_refs={'HGNC': '14929', 'UP': 'Q96EB6', 'TEXT': 'SIRT1'})
    becn1 = Agent('BECN1',
                  db_refs={'HGNC': '1034', 'UP': 'Q14457', 'TEXT': 'Beclin 1'})
    with_enz = Deacetylation(sirt1, becn1)
    without_enz = Deacetylation(None, becn1)
    assembler = Preassembler(hierarchies, stmts=[with_enz, without_enz])
    top_level = assembler.combine_related()
    assert len(top_level) == 1
    assert top_level[0].equals(with_enz)
    assert len(top_level[0].supported_by) == 1
    assert top_level[0].supported_by[0].equals(without_enz)
    assert top_level[0].supported_by[0].supports[0].equals(with_enz)
def test_modification_norefinement_noenz():
    """No refinement when each statement is more specific in a different way."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    nras = Agent('NRAS', db_refs={'HGNC': '7989'})
    enz_specific = Phosphorylation(src, nras)
    site_specific = Phosphorylation(None, nras, 'Y', '32',
                                    evidence=[Evidence(text='foo')])
    assembler = Preassembler(hierarchies, stmts=[enz_specific, site_specific])
    top_level = assembler.combine_related()
    # One statement has the more specific enzyme, the other the more specific
    # site, so neither refines the other.
    assert len(top_level) == 2
    assert len(top_level[1].evidence) == 1
def test_modification_norefinement_subsfamily():
    """No refinement when substrate specificity and site specificity conflict."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    nras = Agent('NRAS', db_refs={'HGNC': '7989'})
    ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
    gene_substrate = Phosphorylation(src, nras)
    family_substrate = Phosphorylation(src, ras, 'Y', '32',
                                       evidence=[Evidence(text='foo')])
    assembler = Preassembler(hierarchies,
                             stmts=[gene_substrate, family_substrate])
    top_level = assembler.combine_related()
    assert len(top_level) == 2
    assert len(top_level[1].evidence) == 1
def test_modification_norefinement_enzfamily():
    """No refinement when enzyme specificity and site specificity conflict."""
    mek = Agent('MEK')
    raf = Agent('RAF')
    braf = Agent('BRAF')
    family_enz = Phosphorylation(raf, mek, 'Y', '32',
                                 evidence=[Evidence(text='foo')])
    gene_enz = Phosphorylation(braf, mek)
    assembler = Preassembler(hierarchies, stmts=[family_enz, gene_enz])
    top_level = assembler.combine_related()
    assert len(top_level) == 2
    assert len(top_level[1].evidence) == 1
def test_bound_condition_refinement():
    """A statement with a bound condition refines the one without it."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    gtp = Agent('GTP', db_refs={'CHEBI': '15996'})
    nras = Agent('NRAS', db_refs={'HGNC': '7989'})
    nras_gtp = Agent('NRAS', db_refs={'HGNC': '7989'},
                     bound_conditions=[BoundCondition(gtp, True)])
    generic = Phosphorylation(src, nras, 'tyrosine', '32')
    with_context = Phosphorylation(src, nras_gtp, 'tyrosine', '32')
    assembler = Preassembler(hierarchies, stmts=[generic, with_context])
    top_level = assembler.combine_related()
    assert len(top_level) == 1
    assert top_level[0].equals(with_context)
    assert len(top_level[0].supported_by) == 1
    assert top_level[0].supported_by[0].equals(generic)
def test_bound_condition_norefinement():
    """No refinement when bound context and site specificity conflict."""
    src = Agent('SRC', db_refs={'HGNC': '11283'})
    gtp = Agent('GTP', db_refs={'CHEBI': '15996'})
    nras = Agent('NRAS', db_refs={'HGNC': '7989'})
    nras_gtp = Agent('NRAS', db_refs={'HGNC': '7989'},
                     bound_conditions=[BoundCondition(gtp, True)])
    site_specific = Phosphorylation(src, nras, 'tyrosine', '32')
    context_specific = Phosphorylation(src, nras_gtp)
    assembler = Preassembler(hierarchies,
                             stmts=[site_specific, context_specific])
    top_level = assembler.combine_related()
    # The bound condition is more specific in one statement but the
    # modification is less specific, so the two are not combined.
    assert len(top_level) == 2
def test_bound_condition_deep_refinement():
    """Specificity *inside* a bound condition's agent (a modification on the
    bound GTP) should make one statement a refinement of the other."""
    kinase = Agent('SRC', db_refs = {'HGNC': '11283'})
    plain_gtp = Agent('GTP', db_refs = {'CHEBI': '15996'})
    phos_gtp = Agent('GTP', mods=[ModCondition('phosphorylation')],
                     db_refs = {'CHEBI': '15996'})
    nras_plain = Agent('NRAS', db_refs = {'HGNC': '7989'},
                       bound_conditions=[BoundCondition(plain_gtp, True)])
    nras_phos = Agent('NRAS', db_refs = {'HGNC': '7989'},
                      bound_conditions=[BoundCondition(phos_gtp, True)])
    generic = Phosphorylation(kinase, nras_plain, 'tyrosine', '32')
    specific = Phosphorylation(kinase, nras_phos, 'tyrosine', '32')
    pa = Preassembler(hierarchies, stmts=[generic, specific])
    # Expect a single top-level statement: the one binding modified GTP,
    # supported by the unmodified variant.
    toplevel = pa.combine_related()
    assert len(toplevel) == 1
    assert toplevel[0].equals(specific)
    assert len(toplevel[0].supported_by) == 1
    assert toplevel[0].supported_by[0].equals(generic)
def test_complex_refinement():
    """A binary Complex and a ternary Complex over overlapping members are
    not merged by preassembly."""
    ras, raf, mek = Agent('RAS'), Agent('RAF'), Agent('MEK')
    pair = Complex([ras, raf])
    triple = Complex([mek, ras, raf])
    pa = Preassembler(hierarchies, stmts=[pair, triple])
    pa.combine_related()
    assert len(pa.unique_stmts) == 2
    assert len(pa.related_stmts) == 2
def test_complex_agent_refinement():
    """Opposite ubiquitination polarities on a Complex member keep the two
    complexes unrelated."""
    ras = Agent('RAS')
    raf_ub = Agent('RAF',
                   mods=[ModCondition('ubiquitination', None, None, True)])
    raf_not_ub = Agent('RAF',
                       mods=[ModCondition('ubiquitination', None, None, False)])
    pa = Preassembler(hierarchies,
                      stmts=[Complex([ras, raf_ub]), Complex([ras, raf_not_ub])])
    pa.combine_related()
    assert len(pa.unique_stmts) == 2
    assert len(pa.related_stmts) == 2
def test_mod_sites_refinement():
    """A statement with more specific modification context should be supported
    by a less-specific statement."""
    # TODO: not yet implemented; placeholder keeps the intended case visible
    # in the suite.
    assert True
def test_binding_site_refinement():
    """A statement with information about a binding site for an interaction
    between two proteins should be supported by a statement without this
    information."""
    # TODO: not yet implemented; placeholder keeps the intended case visible
    # in the suite.
    assert True
def test_activating_substitution_refinement():
    """Refinement requires both an entity refinement and matching activity
    fields; every other ordered pair of statements must be unrelated."""
    mc_g12d = MutCondition('12', 'G', 'D')
    mc_q61l = MutCondition('61', 'Q', 'L')
    nras_g12d = Agent('NRAS', mutations=[mc_g12d], db_refs = {'HGNC': '7989'})
    nras_q61l = Agent('NRAS', mutations=[mc_q61l], db_refs = {'HGNC': '7989'})
    ras = Agent('RAS', mutations=[mc_g12d], db_refs={'FPLX': 'RAS'})
    st1 = ActiveForm(ras, 'gtpbound', True,
                     evidence=Evidence(text='bar'))
    st2 = ActiveForm(nras_g12d, 'gtpbound', True,
                     evidence=Evidence(text='foo'))
    st3 = ActiveForm(nras_q61l, 'gtpbound', True,
                     evidence=Evidence(text='bar'))
    st4 = ActiveForm(nras_g12d, 'phosphatase', True,
                     evidence=Evidence(text='bar'))
    st5 = ActiveForm(nras_g12d, 'gtpbound', False,
                     evidence=Evidence(text='bar'))
    stmts = [st1, st2, st3, st4, st5]
    # The only refinement pair: st2 (NRAS G12D) refines st1 (RAS family).
    assert st2.refinement_of(st1, hierarchies)
    # Every other ordered, non-identical pair must not be a refinement.
    for more in stmts:
        for less in stmts:
            if more is less or (more is st2 and less is st1):
                continue
            assert not more.refinement_of(less, hierarchies)
def test_translocation():
    """Translocations to two distinct destinations stay separate, while the
    destination-free statement is subsumed."""
    stmts = [Translocation(Agent('AKT'), None, None),
             Translocation(Agent('AKT'), None, 'plasma membrane'),
             Translocation(Agent('AKT'), None, 'nucleus')]
    pa = Preassembler(hierarchies, stmts=stmts)
    pa.combine_related()
    assert len(pa.related_stmts) == 2
def test_grounding_aggregation():
    """Statements over BRAF agents with different groundings must not be
    collapsed as duplicates."""
    variants = [Agent('BRAF', db_refs={'TEXT': 'braf', 'HGNC': '1097'}),
                Agent('BRAF', db_refs={'TEXT': 'BRAF'}),
                Agent('BRAF', db_refs={'TEXT': 'Braf', 'UP': 'P15056'})]
    pa = Preassembler(hierarchies,
                      stmts=[Phosphorylation(None, ag) for ag in variants])
    assert len(pa.combine_duplicates()) == 3
def test_grounding_aggregation_complex():
    """Complexes over differently grounded BRAF variants stay distinct, even
    with member order swapped."""
    mek = Agent('MEK')
    braf_hgnc = Agent('BRAF', db_refs={'TEXT': 'braf', 'HGNC': '1097'})
    braf_text = Agent('BRAF', db_refs={'TEXT': 'BRAF', 'dummy': 'dummy'})
    braf_up = Agent('BRAF', db_refs={'TEXT': 'Braf', 'UP': 'P15056'})
    pa = Preassembler(hierarchies, stmts=[Complex([mek, braf_hgnc]),
                                          Complex([braf_text, mek]),
                                          Complex([mek, braf_up])])
    assert len(pa.combine_duplicates()) == 3
def test_render_stmt_graph():
    """The rendered support graph has one node per statement and one edge for
    every (refined, refining) relation."""
    braf = Agent('BRAF', db_refs={'HGNC': '1097'})
    map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
    mek_family = Agent('MEK', db_refs={'FPLX':'MEK'})
    # p0..p6 in increasing specificity of substrate/site information.
    phos = [Phosphorylation(braf, mek_family),
            Phosphorylation(braf, map2k1),
            Phosphorylation(braf, map2k1, position='218'),
            Phosphorylation(braf, map2k1, position='222'),
            Phosphorylation(braf, map2k1, 'serine'),
            Phosphorylation(braf, map2k1, 'serine', '218'),
            Phosphorylation(braf, map2k1, 'serine', '222')]
    pa = Preassembler(hierarchies, stmts=phos)
    pa.combine_related()
    graph = render_stmt_graph(pa.related_stmts, reduce=False)
    # One node for each statement.
    assert len(graph.nodes()) == 7
    # Support edges: p0 under p1-p6 (6), p1 under p2-p6 (5), p2 under p5 (1),
    # p3 under p6 (1), p4 under p5-p6 (2); p5 and p6 are top-level.
    # Total: 6 + 5 + 1 + 1 + 2 = 15.
    assert len(graph.edges()) == 15
def test_flatten_evidence_hierarchy():
    """flatten_evidence copies supporting evidence up to the top-level
    statement without sharing the underlying Evidence objects."""
    braf = Agent('BRAF')
    mek = Agent('MAP2K1')
    generic = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
    specific = Phosphorylation(braf, mek, 'S', '218',
                               evidence=[Evidence(text='bar')])
    pa = Preassembler(hierarchies, stmts=[generic, specific])
    pa.combine_related()
    assert len(pa.related_stmts) == 1
    flattened = flatten_evidence(pa.related_stmts)
    assert len(flattened) == 1
    top = flattened[0]
    assert len(top.evidence) == 2
    texts = [ev.text for ev in top.evidence]
    assert 'bar' in texts
    assert 'foo' in texts
    assert len(top.supported_by) == 1
    support = top.supported_by[0]
    assert len(support.evidence) == 1
    assert support.evidence[0].text == 'foo'
    # Mutating the supporting statement's evidence must not leak into the
    # flattened copies on the top-level statement.
    support.evidence[0].text = 'changed_foo'
    assert support.evidence[0].text == 'changed_foo'
    assert 'changed_foo' not in [ev.text for ev in top.evidence]
    assert 'foo' in [ev.text for ev in top.evidence]
    assert {ev.annotations.get('support_type') for ev in top.evidence} \
        == {'direct', 'supported_by'}
def test_flatten_evidence_multilevel():
    """Evidence from every level of a two-deep support chain ends up on the
    single top-level statement, annotated with its provenance."""
    braf = Agent('BRAF')
    mek = Agent('MAP2K1')
    stmts = [Phosphorylation(braf, mek, evidence=[Evidence(text='foo')]),
             Phosphorylation(braf, mek, 'S',
                             evidence=[Evidence(text='bar')]),
             Phosphorylation(braf, mek, 'S', '218',
                             evidence=[Evidence(text='baz')])]
    pa = Preassembler(hierarchies, stmts=stmts)
    pa.combine_related()
    assert len(pa.related_stmts) == 1
    flattened = flatten_evidence(pa.related_stmts)
    assert len(flattened) == 1
    top = flattened[0]
    assert len(top.evidence) == 3, len(top.evidence)
    support_types = [ev.annotations['support_type'] for ev in top.evidence]
    assert support_types.count('direct') == 1
    assert support_types.count('supported_by') == 2
def test_flatten_evidence_hierarchy_supports():
    """With collect_from='supports', evidence flows *down* from the refining
    statement onto the statement it supports."""
    braf = Agent('BRAF')
    mek = Agent('MAP2K1')
    generic = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
    specific = Phosphorylation(braf, mek, 'S', '218',
                               evidence=[Evidence(text='bar')])
    pa = Preassembler(hierarchies, stmts=[generic, specific])
    all_stmts = pa.combine_related(return_toplevel=False)
    assert len(all_stmts) == 2
    flattened = flatten_evidence(all_stmts, collect_from='supports')
    assert len(flattened) == 2
    top = flattened[1]
    assert len(top.evidence) == 1
    assert 'bar' in [ev.text for ev in top.evidence]
    assert len(top.supported_by) == 1
    support = top.supported_by[0]
    assert len(support.evidence) == 2
    assert set([ev.text for ev in support.evidence]) == {'foo', 'bar'}
def test_flatten_stmts():
    """flatten_stmts recovers all four unique statements whether it starts
    from the unique list or from the two top-level trees."""
    stmts = [Phosphorylation(Agent('MAP3K5'), Agent('RAF1'), 'S', '338'),
             Phosphorylation(None, Agent('RAF1'), 'S', '338'),
             Phosphorylation(None, Agent('RAF1')),
             Phosphorylation(Agent('PAK1'), Agent('RAF1'), 'S', '338'),
             Phosphorylation(None, Agent('RAF1'),
                             evidence=Evidence(text='foo'))]
    pa = Preassembler(hierarchies, stmts=stmts)
    pa.combine_duplicates()
    pa.combine_related()
    assert len(pa.related_stmts) == 2
    assert len(flatten_stmts(pa.unique_stmts)) == 4
    assert len(flatten_stmts(pa.related_stmts)) == 4
def test_complex_refinement_order():
    """Member order must not matter when relating a Complex to one whose
    member carries an extra modification."""
    plain = Complex([Agent('MED23'), Agent('ELK1')])
    modified = Complex([Agent('ELK1', mods=[ModCondition('phosphorylation')]),
                        Agent('MED23')])
    pa = Preassembler(hierarchies, stmts=[plain, modified])
    pa.combine_duplicates()
    pa.combine_related()
    assert len(pa.related_stmts) == 1
def test_activation_refinement():
    """Activation and Inhibition over the same arguments are neither
    duplicates nor refinements of one another."""
    subj = Agent('alcohol', db_refs={'CHEBI': 'CHEBI:16236',
                                     'HMDB': 'HMDB00108',
                                     'PUBCHEM': '702',
                                     'TEXT': 'alcohol'})
    obj = Agent('endotoxin', db_refs={'TEXT': 'endotoxin'})
    pa = Preassembler(hierarchies, stmts=[Inhibition(subj, obj),
                                          Activation(subj, obj)])
    pa.combine_duplicates()
    assert len(pa.unique_stmts) == 2
    pa.combine_related()
    assert len(pa.related_stmts) == 2
def test_homodimer_refinement():
    """An ERBB2 homodimer is neither a duplicate nor a refinement of an
    ERBB2-EGFR heterodimer."""
    egfr = Agent('EGFR')
    erbb = Agent('ERBB2')
    pa = Preassembler(hierarchies, stmts=[Complex([erbb, erbb]),
                                          Complex([erbb, egfr])])
    pa.combine_duplicates()
    assert len(pa.unique_stmts) == 2
    pa.combine_related()
    assert len(pa.related_stmts) == 2
def test_return_toplevel():
    """combine_related honors return_toplevel: top-level statements only,
    or the full set with supports/supported_by links populated both ways."""
    src = Agent('SRC', db_refs = {'HGNC': '11283'})
    nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
    specific = Phosphorylation(src, nras, 'tyrosine', '32')
    generic = Phosphorylation(src, nras)
    pa = Preassembler(hierarchies, stmts=[specific, generic])
    toplevel = pa.combine_related(return_toplevel=True)
    assert len(toplevel) == 1
    assert len(toplevel[0].supported_by) == 1
    assert len(toplevel[0].supported_by[0].supports) == 1
    all_stmts = pa.combine_related(return_toplevel=False)
    assert len(all_stmts) == 2
    # Ordering of the full list is not guaranteed; locate the specific
    # (residue-bearing) statement by inspection.
    ix = 1 if all_stmts[0].residue else 0
    assert len(all_stmts[1-ix].supported_by) == 1
    assert len(all_stmts[1-ix].supported_by[0].supports) == 1
    assert len(all_stmts[ix].supports) == 1
    assert len(all_stmts[ix].supports[0].supported_by) == 1
def test_multiprocessing():
    """Preassembly with a worker pool and a small size cutoff (so work is
    split between the pool and local execution) matches serial results."""
    braf = Agent('BRAF', db_refs={'HGNC': '1097'})
    map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
    mek_family = Agent('MEK', db_refs={'FPLX':'MEK'})
    stmts = [Phosphorylation(braf, mek_family),
             Phosphorylation(braf, map2k1),
             Phosphorylation(braf, map2k1, position='218'),
             Phosphorylation(braf, map2k1, position='222'),
             Phosphorylation(braf, map2k1, 'serine'),
             Phosphorylation(braf, map2k1, 'serine', '218'),
             Phosphorylation(braf, map2k1, 'serine', '222'),
             Dephosphorylation(braf, map2k1)]
    pa = Preassembler(hierarchies, stmts=stmts)
    # Size cutoff set to a low number so that one group will run remotely
    # and one locally.
    toplevel = pa.combine_related(return_toplevel=True, poolsize=1,
                                  size_cutoff=2)
    assert len(toplevel) == 3, 'Got %d toplevel statements.' % len(toplevel)
def test_conversion_refinement():
    """Conversions refine across the enzyme family hierarchy and treat the
    object lists as order-insensitive."""
    ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
    hras = Agent('HRAS', db_refs={'HGNC': '5173'})
    gtp = Agent('GTP')
    gdp = Agent('GDP')
    conversions = [Conversion(ras, gtp, gdp),
                   Conversion(hras, gtp, gdp),
                   Conversion(hras, [gtp, gdp], gdp),
                   Conversion(hras, [gdp, gtp], gdp)]
    pa = Preassembler(hierarchies, stmts=conversions)
    assert len(pa.combine_related()) == 2
def test_influence_duplicate():
    """Influences with identical subj/obj groundings merge (pooling their
    evidence); the reversed direction stays separate."""
    gov = 'UN/entities/human/government/government_entity'
    agr = 'UN/entities/natural/crop_technology'
    cgov = Concept('government', db_refs={'UN': [(gov, 1.0)]})
    cagr = Concept('agriculture', db_refs={'UN': [(agr, 1.0)]})
    fwd1 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos1')])
    rev = Influence(cagr, cgov, evidence=[Evidence(source_api='eidos2')])
    fwd2 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos3')])
    eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/eidos/eidos_ontology.rdf')
    # Use the Eidos ontology instead of the default biology hierarchies.
    hierarchies = {'entity': HierarchyManager(eidos_ont, True, True)}
    pa = Preassembler(hierarchies, [fwd1, rev, fwd2])
    unique_stmts = pa.combine_duplicates()
    assert len(unique_stmts) == 2
    assert len(unique_stmts[0].evidence) == 2
    assert len(unique_stmts[1].evidence) == 1
    sources = [ev.source_api for ev in unique_stmts[0].evidence]
    assert set(sources) == set(['eidos1', 'eidos3'])
def test_influence_refinement():
    """An Influence whose subject is deeper in the UN ontology refines the
    one with the more general subject; the reversed direction is separate."""
    tran = 'UN/entities/human/infrastructure/transportation'
    truck = 'UN/entities/human/infrastructure/transportation/' + \
        'transportation_methods'
    agr = 'UN/entities/human/livelihood'
    ctran = Concept('transportation', db_refs={'UN': [(tran, 1.0)]})
    ctruck = Concept('trucking', db_refs={'UN': [(truck, 1.0)]})
    cagr = Concept('agriculture', db_refs={'UN': [(agr, 1.0)]})
    influences = [Influence(ctran, cagr,
                            evidence=[Evidence(source_api='eidos1')]),
                  Influence(ctruck, cagr,
                            evidence=[Evidence(source_api='eidos2')]),
                  Influence(cagr, ctran,
                            evidence=[Evidence(source_api='eidos3')])]
    eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/eidos/eidos_ontology.rdf')
    hierarchies = {'entity': HierarchyManager(eidos_ont, True, True)}
    pa = Preassembler(hierarchies, influences)
    rel_stmts = pa.combine_related()
    assert len(rel_stmts) == 2
    truck_stmt = [st for st in rel_stmts if st.subj.name == 'trucking'][0]
    assert len(truck_stmt.supported_by) == 1
    assert truck_stmt.supported_by[0].subj.name == 'transportation'
def test_find_contradicts():
    """find_contradicts pairs up Activation/Inhibition, Increase/Decrease
    amount, and opposite-polarity ActiveForms."""
    act_neg = Inhibition(Agent('a'), Agent('b'))
    act_pos = Activation(Agent('a'), Agent('b'))
    amt_pos = IncreaseAmount(Agent('a'), Agent('b'))
    amt_neg = DecreaseAmount(Agent('a'), Agent('b'))
    af_true = ActiveForm(
        Agent('a', mods=[ModCondition('phosphorylation', None, None, True)]),
        'kinase', True)
    af_false = ActiveForm(
        Agent('a', mods=[ModCondition('phosphorylation', None, None, True)]),
        'kinase', False)
    pa = Preassembler(hierarchies,
                      [act_neg, act_pos, amt_pos, amt_neg, af_true, af_false])
    contradicts = pa.find_contradicts()
    assert len(contradicts) == 3
    for s1, s2 in contradicts:
        assert {s1.uuid, s2.uuid} in ({act_neg.uuid, act_pos.uuid},
                                      {amt_pos.uuid, amt_neg.uuid},
                                      {af_true.uuid, af_false.uuid})
def test_find_contradicts_refinement():
    """Contradictions are found through the family hierarchy: phosphorylation
    of the RAS family conflicts with dephosphorylation of either member."""
    ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
    kras = Agent('KRAS', db_refs={'HGNC': '6407'})
    hras = Agent('HRAS', db_refs={'HGNC': '5173'})
    phos = Phosphorylation(Agent('x'), ras)
    dephos_k = Dephosphorylation(Agent('x'), kras)
    dephos_h = Dephosphorylation(Agent('x'), hras)
    pa = Preassembler(hierarchies, [phos, dephos_k, dephos_h])
    contradicts = pa.find_contradicts()
    assert len(contradicts) == 2
    for s1, s2 in contradicts:
        assert {s1.uuid, s2.uuid} in ({phos.uuid, dephos_k.uuid},
                                      {phos.uuid, dephos_h.uuid})
def test_preassemble_related_complex():
    """Member-order-insensitive deduplication of complexes, then relating
    the family-level Complex to the member-specific one."""
    ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
    kras = Agent('KRAS', db_refs={'HGNC': '6407'})
    hras = Agent('HRAS', db_refs={'HGNC': '5173'})
    complexes = [Complex([kras, hras]), Complex([kras, ras]),
                 Complex([hras, kras]), Complex([ras, kras])]
    pa = Preassembler(hierarchies, complexes)
    assert len(pa.combine_duplicates()) == 2
    assert len(pa.combine_related()) == 1
def test_agent_text_storage():
    """Raw agent text is preserved per-evidence through deduplication, and
    prior statement UUIDs accumulate across successive preassembly runs."""
    A1 = Agent('A', db_refs={'TEXT': 'A'})
    A2 = Agent('A', db_refs={'TEXT': 'alpha'})
    B1 = Agent('B', db_refs={'TEXT': 'bag'})
    B2 = Agent('B', db_refs={'TEXT': 'bug'})
    C = Agent('C')
    D = Agent('D')
    inp = [
        Complex([A1, B1], evidence=Evidence(text='A complex bag.')),
        Complex([B2, A2], evidence=Evidence(text='bug complex alpha once.')),
        Complex([B2, A2], evidence=Evidence(text='bug complex alpha again.')),
        Complex([A1, C, B2], evidence=Evidence(text='A complex C bug.')),
        Phosphorylation(A1, B1, evidence=Evidence(text='A phospo bags.')),
        Phosphorylation(A2, B2, evidence=Evidence(text='alpha phospho bugs.')),
        Conversion(D, [A1, B1], [C, D],
                   evidence=Evidence(text='D: A bag -> C D')),
        Conversion(D, [B1, A2], [C, D],
                   evidence=Evidence(text='D: bag a -> C D')),
        Conversion(D, [B2, A2], [D, C],
                   evidence=Evidence(text='D: bug a -> D C')),
        Conversion(D, [B1, A1], [C, D],
                   evidence=Evidence(text='D: bag A -> C D')),
        Conversion(D, [A1], [A1, C],
                   evidence=Evidence(text='D: A -> A C'))
    ]
    pa = Preassembler(hierarchies, inp)
    unq1 = pa.combine_duplicates()
    assert len(unq1) == 5, len(unq1)
    # After one round, each piece of evidence traces back to exactly one
    # original statement UUID.
    assert all([len(ev.annotations['prior_uuids']) == 1
                for s in unq1 for ev in s.evidence
                if len(s.evidence) > 1]),\
        'There can only be one prior evidence per uuid at this stage.'
    # Map original statement uuid -> agent annotations of its evidence, then
    # confirm the recorded raw text matches the agents' TEXT db_refs.
    ev_uuid_dict = {ev.annotations['prior_uuids'][0]: ev.annotations['agents']
                    for s in unq1 for ev in s.evidence}
    for s in inp:
        raw_text = [ag.db_refs.get('TEXT')
                    for ag in s.agent_list(deep_sorted=True)]
        assert raw_text == ev_uuid_dict[s.uuid]['raw_text'],\
            str(raw_text) + '!=' + str(ev_uuid_dict[s.uuid]['raw_text'])
    # Now run pa on the above corpus plus another statement.
    inp2 = unq1 + [
        Complex([A1, C, B1], evidence=Evidence(text='A complex C bag.'))
    ]
    pa2 = Preassembler(hierarchies, inp2)
    unq2 = pa2.combine_duplicates()
    assert len(unq2) == 5, len(unq2)
    old_ev_list = []
    new_ev = None
    # Partition evidence into the newly added piece vs. carried-over ones.
    for s in unq2:
        for ev in s.evidence:
            if ev.text == inp2[-1].evidence[0].text:
                new_ev = ev
            else:
                old_ev_list.append(ev)
    # Carried-over evidence has accumulated a second prior uuid; the new
    # evidence has only one.
    assert all([len(ev.annotations['prior_uuids']) == 2 for ev in old_ev_list])
    assert new_ev
    assert len(new_ev.annotations['prior_uuids']) == 1
def test_agent_coordinates():
    """Agent raw-text coordinates from REACH evidence survive deduplication,
    with coordinates tracked per original sentence."""
    json_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'reach_coordinates.json')
    stmts = reach.process_json_file(json_path).statements
    pa = Preassembler(hierarchies, stmts)
    unique_stmt = pa.combine_duplicates()[0]
    agent_annots = [ev.annotations['agents'] for ev in unique_stmt.evidence]
    assert all(a['raw_text'] == ['MEK1', 'ERK2'] for a in agent_annots)
    assert {tuple(a['coords']) for a in agent_annots} == {((21, 25), (0, 4)),
                                                          ((0, 4), (15, 19))}
|
AdamantLife/alcustoms | refs/heads/master | alcustoms/winregistry.py | 1 | ## Builtin
import binascii
import functools
import struct
from winreg import *
VALUE_TYPES = {
REG_BINARY:"REG_BINARY",
REG_DWORD:"REG_DWORD",
REG_DWORD_LITTLE_ENDIAN:"REG_DWORD_LITTLE_ENDIAN",
REG_DWORD_BIG_ENDIAN:"REG_DWORD_BIG_ENDIAN",
REG_EXPAND_SZ:"REG_EXPAND_SZ",
REG_LINK:"REG_LINK",
REG_MULTI_SZ:"REG_MULTI_SZ",
REG_NONE:"REG_NONE",
REG_QWORD:"REG_QWORD",
REG_QWORD_LITTLE_ENDIAN:"REG_QWORD_LITTLE_ENDIAN",
REG_RESOURCE_LIST:"REG_RESOURCE_LIST",
REG_FULL_RESOURCE_DESCRIPTOR:"REG_FULL_RESOURCE_DESCRIPTOR",
REG_RESOURCE_REQUIREMENTS_LIST:"REG_RESOURCE_REQUIREMENTS_LIST",
REG_SZ:"REG_SZ"
}
"""
For Reference:
VALUE_TYPES = {
3:REG_BINARY,
4:REG_DWORD,
4:REG_DWORD_LITTLE_ENDIAN,
5:REG_DWORD_BIG_ENDIAN,
2:REG_EXPAND_SZ,
6:REG_LINK,
7:REG_MULTI_SZ,
0:REG_NONE,
11:REG_QWORD,
11:REG_QWORD_LITTLE_ENDIAN,
8:REG_RESOURCE_LIST,
9:REG_FULL_RESOURCE_DESCRIPTOR,
10:REG_RESOURCE_REQUIREMENTS_LIST,
1:REG_SZ
}
"""
def openkey_decorator(function):
    """Decorator for Key methods: fail fast when the key's handle is closed."""
    @functools.wraps(function)
    def wrapper(self, *args, **kwargs):
        if self.closed:
            raise AttributeError("Key is not Open")
        return function(self, *args, **kwargs)
    return wrapper
class Key():
    """Convenience wrapper around a winreg registry key.

    Lazily opens a handle on first use and exposes helpers for enumerating
    the key's values and subkeys.
    """
    def __init__(self, rootkey, keyname):
        # rootkey: an HKEY_* constant or an already-open handle;
        # keyname: path of this key relative to rootkey.
        self.rootkey = rootkey
        self.keyname = keyname
        self._handle = None
    def _openhandle(self):
        """ Creates a new handle to the Key """
        self._handle = OpenKey(self.rootkey, self.keyname)
    def _closehandle(self):
        """ Closes the current handle """
        self._handle.Close()
        self._handle = None
    @property
    def handle(self):
        # Auto-open so attribute access always yields a usable handle.
        if self.closed:
            self.open()
        return self._handle
    @property
    def opened(self):
        # True while a live handle is held.
        return bool(self._handle)
    @property
    def closed(self):
        # Inverse of ``opened``; used by openkey_decorator.
        return not bool(self._handle)
    def open(self):
        """ Opens a Key if it is not already open; otherwise, returns the current valid handle """
        if self.closed:
            self._openhandle()
        return self.handle
    def close(self):
        """ Closes the current handle; no-op when already closed. """
        # Bug fix: previously this tested ``if self.open:`` (the bound method,
        # which is always truthy) and then assigned ``self.handle = None`` to
        # the read-only ``handle`` property, raising AttributeError on every
        # call. ``_closehandle`` already clears ``_handle``.
        if self.opened:
            self._closehandle()
    @openkey_decorator
    def contentcount(self):
        """ Returns a tuple of (subkeys, values) counts for the key.

        Note: winreg.QueryInfoKey returns (number of subkeys, number of
        values, last-modified); the first two are returned here. The previous
        docstring stated the order reversed.
        """
        return QueryInfoKey(self.handle)[:2]
    @openkey_decorator
    def contents(self):
        """ Returns a list of tuples of (type,name) of the contents of the key """
        return [("value", name) for (name, value, valuetype) in self.values()]\
            + [("subkey", subkey) for subkey in self.subkeys()]
    @openkey_decorator
    def rawvalues(self):
        """ Returns a list of tuples of (name,value,valuetype) of the key """
        # contentcount()[1] is the number of values under this key.
        return [EnumValue(self.handle, value)
                for value in range(self.contentcount()[1])]
    @openkey_decorator
    def values(self):
        """ As rawvalues, but attempts to decode the value """
        rawvalues = self.rawvalues()
        ## Converting to list to replace values
        values = [list(value) for value in rawvalues]
        for value in values:
            value[1] = convertvaluebytype(value[1], value[2])
        return [tuple(value) for value in values]
    @openkey_decorator
    def subkeys(self):
        """ Returns a list of subkeys of the Key as Key Objects """
        # contentcount()[0] is the number of subkeys under this key.
        return [Key(self.handle, EnumKey(self.handle, subkey))
                for subkey in range(self.contentcount()[0])]
    @openkey_decorator
    def lastupdated(self):
        """ Returns the key's lastupdated value """
        return QueryInfoKey(self.handle)[-1]
def convertvaluebytype(value, int_type):
    """Convert a raw registry value based on its winreg value type.

    REG_BINARY data (already surfaced by winreg as bytes) is returned
    unchanged; all other types fall through to winreg's own conversion.
    """
    if int_type == REG_BINARY:
        # Bug fix: the previous implementation ran struct.iter_unpack("<Q")
        # over the buffer (raising unless len(value) was a multiple of 8) and
        # then called "".join(...) on the resulting integers, which raised
        # TypeError; it also left a debug print() in place. winreg already
        # returns REG_BINARY values as bytes, so the raw buffer is correct.
        return value
    #if int_type == REG_DWORD: ## Note that REG_DWORD_LITTLE_ENDIAN is the same integer and format (on Windows)
    #    return value
    #return value.decode("UTF-32-LE")
    return value
def explorekey(keyobj,level = 0):
    """Recursively pretty-print a Key's metadata, values, and subkeys.

    level sets the '>' indentation depth for the current key in the output.
    """
    # PyHKEY handles support the context-manager protocol; the handle is
    # closed when the block exits.
    # NOTE(review): keyobj's cached _handle is not cleared here, so it may be
    # left referencing a closed handle afterwards -- confirm.
    with keyobj.handle as key:
        print(">"*level,keyobj.keyname)
        # contentcount() -> (number of subkeys, number of values).
        subkeys,values = keyobj.contentcount()
        lastupdated = keyobj.lastupdated()
        print(">"*level,f"Keys: {subkeys},\tValues: {values}\tLast Updated {lastupdated}")
        print(">"*(level+1),"VALUES:")
        for valuename,valuedata,valuetype in keyobj.values():
            print("----")
            print(">"*(level+2),valuename)
            print(">"*(level+2),valuedata)
            print(">"*(level+2),valuetype)
        print("----")
        print(">"*(level+1),"SUBKEYS:")
        for subkey in keyobj.subkeys():
            # Each recursion level indents its output four markers deeper.
            explorekey(subkey,level + 4)
        print("<"*level)
|
everettjf/iOSBlog | refs/heads/master | web/x123/migrations/0002_auto_20160222_0145.py | 4 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-22 01:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional, nullable 'tag'
    # CharField (max 10 characters) to the Angle, Aspect, and Domain models.
    dependencies = [
        ('x123', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='angle',
            name='tag',
            field=models.CharField(blank=True, max_length=10, null=True, verbose_name='Tag'),
        ),
        migrations.AddField(
            model_name='aspect',
            name='tag',
            field=models.CharField(blank=True, max_length=10, null=True, verbose_name='Tag'),
        ),
        migrations.AddField(
            model_name='domain',
            name='tag',
            field=models.CharField(blank=True, max_length=10, null=True, verbose_name='Tag'),
        ),
    ]
|
wuxianghou/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/main.py | 177 | # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import logging
import sys
import webkitpy.style.checker as checker
from webkitpy.style.patchreader import PatchReader
from webkitpy.style.checker import StyleProcessor
from webkitpy.style.filereader import TextFileReader
from webkitpy.common.host import Host
_log = logging.getLogger(__name__)
def change_directory(filesystem, checkout_root, paths):
    """Change the working directory to the WebKit checkout root, if possible.

    If every path in the paths parameter is below the checkout root (or if
    the paths parameter is empty or None), this method changes the current
    working directory to the checkout root and converts the paths parameter
    as described below.
        This allows the paths being checked to be displayed relative to the
    checkout root, and for path-specific style checks to work as expected.
    Path-specific checks include whether files should be skipped, whether
    custom style rules should apply to certain files, etc.

    Returns:
      paths: A copy of the paths parameter -- possibly converted, as follows.
             If this method changed the current working directory to the
             checkout root, then the list is the paths parameter converted to
             normalized paths relative to the checkout root.

    Args:
      paths: A list of paths to the files that should be checked for style.
             This argument can be None or the empty list if a git commit
             or all changes under the checkout root should be checked.
      checkout_root: The path to the root of the WebKit checkout.

    """
    if paths is not None:
        paths = list(paths)
    if paths:
        # Then try converting all of the paths to paths relative to
        # the checkout root.
        rel_paths = []
        for path in paths:
            rel_path = filesystem.relpath(path, checkout_root)
            if rel_path.startswith(filesystem.pardir):
                # Then the path is not below the checkout root. Since all
                # paths should be interpreted relative to the same root,
                # do not interpret any of the paths as relative to the
                # checkout root. Interpret all of them relative to the
                # current working directory, and do not change the current
                # working directory.
                # Use warning(): Logger.warn() is a deprecated alias of
                # Logger.warning().
                _log.warning(
"""Path-dependent style checks may not work correctly:

  One of the given paths is outside the WebKit checkout of the current
  working directory:

    Path: %s
    Checkout root: %s

  Pass only files below the checkout root to ensure correct results.
  See the help documentation for more info.

"""
                    % (path, checkout_root))
                return paths
            rel_paths.append(rel_path)
        # If we got here, the conversion was successful.
        paths = rel_paths
    _log.debug("Changing to checkout root: " + checkout_root)
    filesystem.chdir(checkout_root)
    return paths
class CheckWebKitStyle(object):
    """Command-line driver for check-webkit-style.

    Wires together the stderr-encoding work-around, option parsing, and the
    style processor, then checks either explicit paths or a patch/diff.
    """
    def _engage_awesome_stderr_hacks(self):
        """Replace sys.stderr with a UTF-8 writer that never raises on
        non-ASCII output; returns the replacement stream."""
        # Change stderr to write with replacement characters so we don't die
        # if we try to print something containing non-ASCII characters.
        stderr = codecs.StreamReaderWriter(sys.stderr,
                                           codecs.getreader('utf8'),
                                           codecs.getwriter('utf8'),
                                           'replace')
        # Setting an "encoding" attribute on the stream is necessary to
        # prevent the logging module from raising an error. See
        # the checker.configure_logging() function for more information.
        stderr.encoding = "UTF-8"
        # FIXME: Change webkitpy.style so that we do not need to overwrite
        #        the global sys.stderr. This involves updating the code to
        #        accept a stream parameter where necessary, and not calling
        #        sys.stderr explicitly anywhere.
        sys.stderr = stderr
        return stderr
    def main(self):
        """Run the style check over CLI-selected paths or a patch.

        Returns a truthy value when style errors were found or when no
        files were checked at all (callers treat this as failure).
        """
        args = sys.argv[1:]
        host = Host()
        host.initialize_scm()
        stderr = self._engage_awesome_stderr_hacks()
        # Checking for the verbose flag before calling check_webkit_style_parser()
        # lets us enable verbose logging earlier.
        is_verbose = "-v" in args or "--verbose" in args
        checker.configure_logging(stream=stderr, is_verbose=is_verbose)
        _log.debug("Verbose logging enabled.")
        parser = checker.check_webkit_style_parser()
        (paths, options) = parser.parse(args)
        configuration = checker.check_webkit_style_configuration(options)
        # Re-root the working directory at the checkout root so that
        # path-specific rules apply correctly.
        paths = change_directory(host.filesystem, checkout_root=host.scm().checkout_root, paths=paths)
        style_processor = StyleProcessor(configuration)
        file_reader = TextFileReader(host.filesystem, style_processor)
        if paths and not options.diff_files:
            file_reader.process_paths(paths)
        else:
            # No explicit paths (or --diff-files): check the patch instead.
            changed_files = paths if options.diff_files else None
            patch = host.scm().create_patch(options.git_commit, changed_files=changed_files)
            patch_checker = PatchReader(file_reader)
            patch_checker.check(patch)
        error_count = style_processor.error_count
        file_count = file_reader.file_count
        delete_only_file_count = file_reader.delete_only_file_count
        _log.info("Total errors found: %d in %d files" % (error_count, file_count))
        # We fail when style errors are found or there are no checked files.
        return error_count > 0 or (file_count == 0 and delete_only_file_count == 0)
|
DimensionDataCBUSydney/plumbery | refs/heads/master | plumbery/actions/information.py | 2 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from plumbery.action import PlumberyAction
from plumbery.plogging import plogging
class InformationAction(PlumberyAction):
    """
    Displays dynamic information for deployed systems

    Each hook below only logs its invocation.
    NOTE(review): hook order (begin -> enter -> process -> end) is inferred
    from the method names; confirm against PlumberyAction.

    :param settings: specific settings for this action
    :type settings: ``dict``

    """
    def begin(self, engine):
        """Log the start of the action."""
        plogging.info("Action: information")
        plogging.info("- begin")
    def enter(self, facility):
        """Log entry into a facility."""
        plogging.info("- enter facility")
    def process(self, blueprint):
        """Log the processing of a blueprint."""
        plogging.info("- process blueprint")
    def end(self):
        """Log the end of the action."""
        plogging.info("- end")
|
raildo/nova | refs/heads/master | nova/tests/unit/virt/libvirt/test_compat.py | 80 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.compute import power_state
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import compat
from nova.virt.libvirt import host
class CompatTestCase(test.NoDBTestCase):
    """Tests for the libvirt compat shim around virDomain.info()."""
    def setUp(self):
        super(CompatTestCase, self).setUp()
        # Substitute the fake libvirt bindings for the real library.
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_domain_info(self, mock_has_min_version):
        test_host = host.Host("qemu:///system")
        domain = mock.MagicMock()
        expected = [power_state.RUNNING, 512, 512, None, None]
        # Simulates the libvirt race where cputime cannot be read while the
        # domain is transitioning.
        race = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'ERR',
            error_code=fakelibvirt.VIR_ERR_OPERATION_FAILED,
            error_message='cannot read cputime for domain')
        # Case 1: new-enough libvirt, no error -- result passed straight
        # through after a single info() call.
        mock_has_min_version.return_value = True
        domain.info.return_value = expected
        actual = compat.get_domain_info(fakelibvirt, test_host, domain)
        self.assertEqual(actual, expected)
        self.assertEqual(domain.info.call_count, 1)
        domain.info.reset_mock()
        # Case 2: new-enough libvirt, race error -- no retry, the error
        # propagates after one call.
        domain.info.side_effect = race
        self.assertRaises(fakelibvirt.libvirtError,
                          compat.get_domain_info,
                          fakelibvirt, test_host, domain)
        self.assertEqual(domain.info.call_count, 1)
        domain.info.reset_mock()
        # Case 3: older libvirt, race then success -- compat retries once
        # and returns the second call's result (two calls total).
        mock_has_min_version.return_value = False
        domain.info.side_effect = [race, expected]
        actual = compat.get_domain_info(fakelibvirt, test_host, domain)
        self.assertEqual(actual, expected)
        self.assertEqual(domain.info.call_count, 2)
        domain.info.reset_mock()
        # Case 4: older libvirt, race on both attempts -- the error is
        # raised after the single retry (two calls total).
        domain.info.side_effect = race
        self.assertRaises(fakelibvirt.libvirtError,
                          compat.get_domain_info,
                          fakelibvirt, test_host, domain)
        self.assertEqual(domain.info.call_count, 2)
|
maxamillion/ansible | refs/heads/devel | lib/ansible/modules/template.py | 24 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: template
version_added: historical
short_description: Template a file out to a target host
options:
follow:
description:
- Determine whether symbolic links should be followed.
- When set to C(yes) symbolic links will be followed, if they exist.
- When set to C(no) symbolic links will not be followed.
- Previous to Ansible 2.4, this was hardcoded as C(yes).
type: bool
default: no
version_added: '2.4'
notes:
- For Windows you can use M(ansible.windows.win_template) which uses '\\r\\n' as C(newline_sequence) by default.
seealso:
- module: ansible.builtin.copy
- module: ansible.windows.win_copy
- module: ansible.windows.win_template
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- backup
- files
- template_common
- validate
'''
EXAMPLES = r'''
- name: Template a file to /etc/file.conf
ansible.builtin.template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: '0644'
- name: Template a file, using symbolic modes (equivalent to 0644)
ansible.builtin.template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: u=rw,g=r,o=r
- name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file
ansible.builtin.template:
src: named.conf_{{ ansible_os_family }}.j2
dest: /etc/named.conf
group: named
setype: named_conf_t
mode: 0640
- name: Create a DOS-style text file from a template
ansible.builtin.template:
src: config.ini.j2
dest: /share/windows/config.ini
newline_sequence: '\r\n'
- name: Copy a new sudoers file into place, after passing validation with visudo
ansible.builtin.template:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -cf %s
- name: Update sshd configuration safely, avoid locking yourself out
ansible.builtin.template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
|
palerdot/calibre | refs/heads/master | src/calibre/ebooks/mobi/writer2/serializer.py | 9 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re, unicodedata
from calibre.ebooks.oeb.base import (OEB_DOCS, XHTML, XHTML_NS, XML_NS,
namespace, prefixname, urlnormalize)
from calibre.ebooks.mobi.mobiml import MBP_NS
from calibre.ebooks.mobi.utils import is_guide_ref_start
from collections import defaultdict
from urlparse import urldefrag
from cStringIO import StringIO
class Serializer(object):
    """Flatten all spine items of an OEB book into a single in-memory HTML
    document, rewriting intra-book links as MOBI ``filepos`` byte offsets.
    The two-pass scheme is: write ten-character placeholders while
    serializing, record their buffer positions, then patch them in
    fixup_links() once every target offset is known.
    """
    NSRMAP = {'': None, XML_NS: 'xml', XHTML_NS: '', MBP_NS: 'mbp'}
    def __init__(self, oeb, images, is_periodical, write_page_breaks_after_item=True):
        '''
        Write all the HTML markup in oeb into a single in memory buffer
        containing a single html document with links replaced by offsets into
        the buffer.

        :param oeb: OEBBook object that encapsulates the document to be
            processed.
        :param images: Mapping of image hrefs (urlnormalized) to image record
            indices.
        :param is_periodical: True when serializing a periodical (adds
            Kindle periodical TOC markup and section/article markers).
        :param write_page_breaks_after_item: If True a MOBIpocket pagebreak tag
            is written after every element of the spine in ``oeb``.
        '''
        self.oeb = oeb
        # Map of image hrefs to image index in the MOBI file
        self.images = images
        self.used_images = set()
        self.logger = oeb.logger
        self.is_periodical = is_periodical
        self.write_page_breaks_after_item = write_page_breaks_after_item
        # If not None, this is a number pointing to the location at which to
        # open the MOBI file on the Kindle
        self.start_offset = None
        # Mapping of hrefs (urlnormalized) to the offset in the buffer where
        # the resource pointed to by the href lives. Used at the end to fill in
        # the correct values into all filepos="..." links.
        self.id_offsets = {}
        # Mapping of hrefs (urlnormalized) to a list of offsets into the buffer
        # where filepos="..." elements are written corresponding to links that
        # point to the href. This is used at the end to fill in the correct values.
        self.href_offsets = defaultdict(list)
        # List of offsets in the buffer of non linear items in the spine. These
        # become uncrossable breaks in the MOBI
        self.breaks = []
        self.find_blocks()
    def find_blocks(self):
        '''
        Mark every item in the spine if it is the start/end of a
        section/article, so that it can be wrapped in divs appropriately.
        '''
        # Reset all markers before recomputing them from the TOC.
        for item in self.oeb.spine:
            item.is_section_start = item.is_section_end = False
            item.is_article_start = item.is_article_end = False
        def spine_item(tocitem):
            # Resolve a TOC entry to its spine item by href (fragment ignored).
            href = urldefrag(tocitem.href)[0]
            for item in self.oeb.spine:
                if item.href == href:
                    return item
        # Pass 1: mark section/article starts from TOC 'section' nodes.
        for item in self.oeb.toc.iterdescendants():
            if item.klass == 'section':
                articles = list(item)
                if not articles: continue
                spine_item(item).is_section_start = True
                for i, article in enumerate(articles):
                    si = spine_item(article)
                    if si is not None:
                        si.is_article_start = True
        # Pass 2: derive end markers -- an item just before the next start
        # is the end of the previous section/article.
        items = list(self.oeb.spine)
        in_sec = in_art = False
        for i, item in enumerate(items):
            try:
                prev_item = items[i-1]
            except:
                prev_item = None
            if in_art and item.is_article_start == True:
                prev_item.is_article_end = True
                in_art = False
            if in_sec and item.is_section_start == True:
                prev_item.is_section_end = True
                in_sec = False
            if item.is_section_start: in_sec = True
            if item.is_article_start: in_art = True
        # After the loop ``item`` is the last spine member; it always closes
        # both the final section and the final article.
        item.is_section_end = item.is_article_end = True
    def __call__(self):
        '''
        Return the document serialized as a single UTF-8 encoded bytestring.
        '''
        buf = self.buf = StringIO()
        buf.write(b'<html>')
        self.serialize_head()
        self.serialize_body()
        buf.write(b'</html>')
        self.end_offset = buf.tell()
        self.fixup_links()
        if self.start_offset is None and not self.is_periodical:
            # If we don't set a start offset, the stupid Kindle will
            # open the book at the location of the first IndexEntry, which
            # could be anywhere. So ensure the book is always opened at the
            # beginning, instead.
            self.start_offset = self.body_start_offset
        return buf.getvalue()
    def serialize_head(self):
        """Write the <head>, which for MOBI only carries the <guide>."""
        buf = self.buf
        buf.write(b'<head>')
        if len(self.oeb.guide) > 0:
            self.serialize_guide()
        buf.write(b'</head>')
    def serialize_guide(self):
        '''
        The Kindle decides where to open a book based on the presence of
        an item in the guide that looks like
        <reference type="text" title="Start" href="chapter-one.xhtml"/>

        Similarly an item with type="toc" controls where the Goto Table of
        Contents operation on the kindle goes.
        '''
        buf = self.buf
        hrefs = self.oeb.manifest.hrefs
        buf.write(b'<guide>')
        for ref in self.oeb.guide.values():
            # Only guide entries that point at actual HTML spine content.
            path = urldefrag(ref.href)[0]
            if path not in hrefs or hrefs[path].media_type not in OEB_DOCS:
                continue
            buf.write(b'<reference type="')
            if ref.type.startswith('other.') :
                self.serialize_text(ref.type.replace('other.',''), quot=True)
            else:
                self.serialize_text(ref.type, quot=True)
            buf.write(b'" ')
            if ref.title is not None:
                buf.write(b'title="')
                self.serialize_text(ref.title, quot=True)
                buf.write(b'" ')
            if is_guide_ref_start(ref):
                # Remember the start reference so fixup_links() can set
                # self.start_offset from it.
                self._start_href = ref.href
            self.serialize_href(ref.href)
            # Space required or won't work, I kid you not
            buf.write(b' />')
        buf.write(b'</guide>')
    def serialize_href(self, href, base=None):
        '''
        Serialize the href attribute of an <a> or <reference> tag. It is
        serialized as filepos="000000000" and a pointer to its location is
        stored in self.href_offsets so that the correct value can be filled in
        at the end.
        '''
        hrefs = self.oeb.manifest.hrefs
        try:
            path, frag = urldefrag(urlnormalize(href))
        except ValueError:
            # Unparseable URL
            return False
        if path and base:
            path = base.abshref(path)
        if path and path not in hrefs:
            # External or unknown target: leave the attribute unserialized.
            return False
        buf = self.buf
        item = hrefs[path] if path else None
        if item and item.spine_position is None:
            return False
        path = item.href if item else base.href
        href = '#'.join((path, frag)) if frag else path
        buf.write(b'filepos=')
        # Record where the placeholder starts; patched in fixup_links().
        self.href_offsets[href].append(buf.tell())
        buf.write(b'0000000000')
        return True
    def serialize_body(self):
        '''
        Serialize all items in the spine of the document. Non linear items are
        moved to the end.
        '''
        buf = self.buf
        def serialize_toc_level(tocref, href=None):
            # add the provided toc level to the output stream
            # if href is provided add a link ref to the toc level output (e.g. feed_0/index.html)
            if href is not None:
                # resolve the section url in id_offsets
                buf.write('<mbp:pagebreak />')
                self.id_offsets[urlnormalize(href)] = buf.tell()
            if tocref.klass == "periodical":
                buf.write('<div> <div height="1em"></div>')
            else:
                t = tocref.title
                if isinstance(t, unicode):
                    t = t.encode('utf-8')
                buf.write('<div></div> <div> <h2 height="1em"><font size="+2"><b>'
                        +t+'</b></font></h2> <div height="1em"></div>')
            buf.write('<ul>')
            for tocitem in tocref.nodes:
                buf.write('<li><a filepos=')
                itemhref = tocitem.href
                if tocref.klass == 'periodical':
                    # This is a section node.
                    # For periodical tocs, the section urls are like r'feed_\d+/index.html'
                    # We dont want to point to the start of the first article
                    # so we change the href.
                    itemhref = re.sub(r'article_\d+/', '', itemhref)
                self.href_offsets[itemhref].append(buf.tell())
                buf.write('0000000000')
                buf.write(' ><font size="+1"><b><u>')
                t = tocitem.title
                if isinstance(t, unicode):
                    t = t.encode('utf-8')
                buf.write(t)
                buf.write('</u></b></font></a></li>')
            buf.write('</ul><div height="1em"></div></div><mbp:pagebreak />')
        self.anchor_offset = buf.tell()
        buf.write(b'<body>')
        self.body_start_offset = buf.tell()
        if self.is_periodical:
            # Top-level periodical TOC is written before any content.
            top_toc = self.oeb.toc.nodes[0]
            serialize_toc_level(top_toc)
        # Linear items first, non-linear items moved to the end.
        spine = [item for item in self.oeb.spine if item.linear]
        spine.extend([item for item in self.oeb.spine if not item.linear])
        for item in spine:
            if self.is_periodical and item.is_section_start:
                for section_toc in top_toc.nodes:
                    if urlnormalize(item.href) == section_toc.href:
                        # create section url of the form r'feed_\d+/index.html'
                        section_url = re.sub(r'article_\d+/', '', section_toc.href)
                        serialize_toc_level(section_toc, section_url)
                        section_toc.href = section_url
                        break
            self.serialize_item(item)
        self.body_end_offset = buf.tell()
        buf.write(b'</body>')
    def serialize_item(self, item):
        '''
        Serialize an individual item from the spine of the input document.
        A reference to this item is stored in self.href_offsets
        '''
        buf = self.buf
        if not item.linear:
            # Non-linear content becomes an uncrossable break in the MOBI.
            self.breaks.append(buf.tell() - 1)
        self.id_offsets[urlnormalize(item.href)] = buf.tell()
        if item.is_section_start:
            buf.write(b'<a ></a> ')
        if item.is_article_start:
            buf.write(b'<a ></a> <a ></a>')
        for elem in item.data.find(XHTML('body')):
            self.serialize_elem(elem, item)
        if self.write_page_breaks_after_item:
            buf.write(b'<mbp:pagebreak/>')
        if item.is_article_end:
            # Kindle periodical article end marker
            buf.write(b'<a ></a> <a ></a>')
        if item.is_section_end:
            buf.write(b' <a ></a>')
        self.anchor_offset = None
    def serialize_elem(self, elem, item, nsrmap=NSRMAP):
        """Recursively serialize one element, rewriting href/src attributes
        to filepos/recindex MOBI equivalents."""
        buf = self.buf
        if not isinstance(elem.tag, basestring) \
            or namespace(elem.tag) not in nsrmap:
                return
        tag = prefixname(elem.tag, nsrmap)
        # Previous layers take care of @name
        id_ = elem.attrib.pop('id', None)
        if id_:
            href = '#'.join((item.href, id_))
            offset = self.anchor_offset or buf.tell()
            key = urlnormalize(href)
            # Only set this id_offset if it wasn't previously seen
            self.id_offsets[key] = self.id_offsets.get(key, offset)
        if self.anchor_offset is not None and \
            tag == 'a' and not elem.attrib and \
            not len(elem) and not elem.text:
                # An empty <a> at a position already covered by a recorded
                # anchor carries no information -- skip it entirely.
                return
        self.anchor_offset = buf.tell()
        buf.write(b'<')
        buf.write(tag.encode('utf-8'))
        if elem.attrib:
            for attr, val in elem.attrib.items():
                if namespace(attr) not in nsrmap:
                    continue
                attr = prefixname(attr, nsrmap)
                buf.write(b' ')
                if attr == 'href':
                    if self.serialize_href(val, item):
                        continue
                elif attr == 'src':
                    href = urlnormalize(item.abshref(val))
                    if href in self.images:
                        # Images become recindex references into the MOBI
                        # image record table.
                        index = self.images[href]
                        self.used_images.add(href)
                        buf.write(b'recindex="%05d"' % index)
                        continue
                buf.write(attr.encode('utf-8'))
                buf.write(b'="')
                self.serialize_text(val, quot=True)
                buf.write(b'"')
        buf.write(b'>')
        if elem.text or len(elem) > 0:
            if elem.text:
                self.anchor_offset = None
                self.serialize_text(elem.text)
            for child in elem:
                self.serialize_elem(child, item)
                if child.tail:
                    self.anchor_offset = None
                    self.serialize_text(child.tail)
        buf.write(b'</%s>' % tag.encode('utf-8'))
    def serialize_text(self, text, quot=False):
        """Entity-escape *text* and append it to the buffer as UTF-8."""
        # Order matters: '&' must be escaped first so the entities written
        # below are not themselves re-escaped.
        text = text.replace('&', '&amp;')
        text = text.replace('<', '&lt;')
        text = text.replace('>', '&gt;')
        text = text.replace(u'\u00AD', '') # Soft-hyphen
        if quot:
            text = text.replace('"', '&quot;')
        if isinstance(text, unicode):
            text = unicodedata.normalize('NFC', text)
        self.buf.write(text.encode('utf-8'))
    def fixup_links(self):
        '''
        Fill in the correct values for all filepos="..." links with the offsets
        of the linked to content (as stored in id_offsets).
        '''
        buf = self.buf
        id_offsets = self.id_offsets
        start_href = getattr(self, '_start_href', None)
        for href, hoffs in self.href_offsets.items():
            is_start = (href and href == start_href)
            # Iterate over all filepos items
            if href not in id_offsets:
                self.logger.warn('Hyperlink target %r not found' % href)
                # Link to the top of the document, better than just ignoring
                href, _ = urldefrag(href)
            if href in self.id_offsets:
                ioff = self.id_offsets[href]
                if is_start:
                    self.start_offset = ioff
                for hoff in hoffs:
                    buf.seek(hoff)
                    # %010d matches the width of the ten-character
                    # '0000000000' placeholders written earlier.
                    buf.write(b'%010d' % ioff)
|
jose36/jmdl5 | refs/heads/master | servers/zinwa.py | 43 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para zinwa
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists(page_url):
    """Always report the video as available; no pre-check is done for zinwa."""
    exists, message = True, ""
    return exists, message
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the direct media URL for a zinwa page.

    Returns a list of ``[label, url]`` pairs (a single entry for this server).
    The premium/user/password arguments are part of the common connector
    interface and are unused here.
    """
    logger.info("[zinwa.py] get_video_url(page_url='%s')" % page_url)
    page_html = scrapertools.cache_page(page_url)
    media_url = scrapertools.get_match(page_html,'file\: "([^"]+)"')
    # Last four characters of the file name (e.g. ".mp4") label the stream.
    extension = scrapertools.get_filename_from_url(media_url)[-4:]
    return [[extension + " [zinwa]", media_url]]
# Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Find zinwa video page URLs embedded in *data*.

    Returns a list of ``[title, url, server_id]`` entries, deduplicated
    while preserving the order of first appearance.
    """
    encontrados = set()
    devuelve = []
    #http://zinwa.com/frap5b3uhesl
    # The dot is escaped so only a literal "zinwa.com" host matches
    # (the previous pattern 'zinwa.com' also matched e.g. 'zinwaXcom').
    patronvideos  = '(zinwa\.com/[a-z0-9]+)'
    logger.info("[zinwa.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for match in matches:
        titulo = "[zinwa]"
        url = "http://"+match
        if url not in encontrados:
            logger.info("    url="+url)
            devuelve.append( [ titulo , url , 'zinwa' ] )
            encontrados.add(url)
        else:
            logger.info("    url duplicada="+url)
    return devuelve
def test():
    """Smoke test: True when at least one stream resolves for a sample page."""
    return len(get_video_url("http://zinwa.com/frap5b3uhesl")) > 0
agutieda/QuantEcon.py | refs/heads/master | examples/cauchy_samples.py | 7 |
import numpy as np
from scipy.stats import cauchy
import matplotlib.pyplot as plt

n = 1000
distribution = cauchy()

fig, ax = plt.subplots()
data = distribution.rvs(n)

if 0:
    # Raw observations as a stem-style plot (disabled by default).
    ax.plot(list(range(n)), data, 'bo', alpha=0.5)
    ax.vlines(list(range(n)), 0, data, lw=0.2)
    ax.set_title("{} observations from the Cauchy distribution".format(n))

if 1:
    # == Compute sample mean at each n == #
    sample_mean = np.empty(n)
    for i in range(n):
        # Mean of the first i + 1 observations.  The original used
        # data[:i], which averages zero observations at i == 0 (NaN plus a
        # RuntimeWarning) and lags by one element thereafter.
        sample_mean[i] = np.mean(data[:i + 1])
    # == Plot == #
    # NOTE: for Cauchy draws the running mean does not settle down -- the
    # distribution has no mean, so the LLN does not apply.
    ax.plot(list(range(n)), sample_mean, 'r-', lw=3, alpha=0.6,
            label=r'$\bar X_n$')
    ax.plot(list(range(n)), [0] * n, 'k--', lw=0.5)
    ax.legend()

fig.show()
|
ehashman/oh-mainline | refs/heads/master | vendor/packages/django-registration/registration/urls.py | 39 | """
URLConf for Django user registration and authentication.
If the default behavior of the registration views is acceptable to
you, simply use a line like this in your root URLConf to set up the
default URLs for registration::
(r'^accounts/', include('registration.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
But if you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead. If you do, it's a
good idea to use the names ``registration_activate``,
``registration_complete`` and ``registration_register`` for the
various steps of the user-signup process.
"""
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.auth import views as auth_views
from registration.views import activate
from registration.views import register
urlpatterns = patterns('',
                       # Activation keys get matched by \w+ instead of the more specific
                       # [a-fA-F0-9]{40} because a bad activation key should still get to the view;
                       # that way it can return a sensible "invalid key" message instead of a
                       # confusing 404.
                       url(r'^activate/(?P<activation_key>\w+)/$',
                           activate,
                           name='registration_activate'),
                       # Stock django.contrib.auth login/logout views, pointed
                       # at templates under registration/.
                       url(r'^login/$',
                           auth_views.login,
                           {'template_name': 'registration/login.html'},
                           name='auth_login'),
                       url(r'^logout/$',
                           auth_views.logout,
                           {'template_name': 'registration/logout.html'},
                           name='auth_logout'),
                       # Password change (for logged-in users) and the
                       # multi-step email-based password reset flow.
                       url(r'^password/change/$',
                           auth_views.password_change,
                           name='auth_password_change'),
                       url(r'^password/change/done/$',
                           auth_views.password_change_done,
                           name='auth_password_change_done'),
                       url(r'^password/reset/$',
                           auth_views.password_reset,
                           name='auth_password_reset'),
                       url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
                           auth_views.password_reset_confirm,
                           name='auth_password_reset_confirm'),
                       url(r'^password/reset/complete/$',
                           auth_views.password_reset_complete,
                           name='auth_password_reset_complete'),
                       url(r'^password/reset/done/$',
                           auth_views.password_reset_done,
                           name='auth_password_reset_done'),
                       # Account signup and its post-signup landing page.
                       url(r'^register/$',
                           register,
                           name='registration_register'),
                       url(r'^register/complete/$',
                           direct_to_template,
                           {'template': 'registration/registration_complete.html'},
                           name='registration_complete'),
                       )
|
Beeblio/django | refs/heads/master | tests/migrations/migrations_test_apps/with_package_model/models/__init__.py | 12133432 | |
UIKit0/marsyas | refs/heads/master | src/django/birdsong/application/birdsong/orchive/__init__.py | 12133432 | |
ChristosChristofidis/bokeh | refs/heads/master | examples/plotting/file/ajax_source_realtime.py | 22 | import numpy as np
from bokeh.plotting import figure, show, output_file
from bokeh.models.sources import AjaxDataSource
output_file("ajax_source_realtime.html", title="ajax_source_realtime.py example")
# Poll http://localhost:5050/data once a second, appending new points and
# keeping at most the last 125.  if_modified=True makes the client send an
# If-Modified-Since header so the server (below) returns only fresh points.
source = AjaxDataSource(data_url='http://localhost:5050/data', mode="append",
                        if_modified=True, polling_interval=1000, max_size=125)
p = figure()
p.line('x', 'y', source=source)
show(p)
import time
from threading import Thread
from collections import namedtuple, deque
# One generated sample: x index, y value, and wall-clock creation time
# (creation is the timestamp the If-Modified-Since filtering keys on).
Entry = namedtuple('Entry', ['x', 'y', 'creation'])
entries = deque(maxlen=120)
def gen_entry():
    """Produce an endless stream of (x, sin(x*pi/10)) samples into ``entries``.

    Intended to run forever on a background thread.  Once past the deque's
    capacity, it sleeps after every tenth point so the browser's 1-second
    polling can observe the data changing over time.
    """
    global entries
    x = 0
    while True:
        last_entry = Entry(x, np.sin(x*np.pi/10), time.time())
        entries.append(last_entry)
        print("Entry generated: %s" % str(last_entry))
        x += 1
        # Throttle only after the initial fill, in bursts of ten points.
        if x > entries.maxlen and x % 10 == 0:
            time.sleep(2)
# Run the data generator in the background so the Flask server below can
# serve requests concurrently; daemon=True lets the process exit cleanly.
t = Thread(target=gen_entry)
t.daemon = True
t.start()
import json
from flask import Flask, Response, request
from bokeh.server.crossdomain import crossdomain
app = Flask(__name__)
@app.route('/data', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", methods=['GET', 'POST'])
def hello_world():
    """Serve the points generated since the client's If-Modified-Since stamp.

    Responds with JSON ``{"x": [...], "y": [...]}`` and echoes the creation
    time of the newest entry back in the Last-Modified header, so the next
    poll only receives fresh data.
    """
    global entries
    try:
        modified_since = float(request.headers.get('If-Modified-Since'))
    except (TypeError, ValueError):
        # TypeError: header absent (get() returned None).
        # ValueError: header present but not a float timestamp (previously
        # this crashed with a 500); either way, send everything.
        modified_since = 0
    new_entries = [e for e in entries if e.creation > modified_since]
    js = json.dumps({'x': [e.x for e in new_entries], 'y': [e.y for e in new_entries]})
    resp = Response(js, status=200, mimetype='application/json')
    if new_entries:
        resp.headers['Last-Modified'] = new_entries[-1].creation
    elif modified_since:
        resp.headers['Last-Modified'] = modified_since
    return resp
if __name__ == "__main__":
    # Serve /data on port 5050 -- the polling URL configured in the
    # AjaxDataSource at the top of this script.
    app.run(port=5050)
|
desecho/tickets | refs/heads/master | tickets_project/tickets/admin.py | 1 | from django.contrib import admin
from .models import (UserProfile, Department, Team, SubscriberType, Type,
Urgence, Ticket, Reason)
class TeamAdmin(admin.ModelAdmin):
    """Admin for Team that lists rows via the model's ``all`` manager."""
    def queryset(self, request):
        # NOTE(review): ``all`` appears to be a custom manager that bypasses
        # the default manager's filtering -- confirm against the Team model.
        return self.model.all.get_query_set()
# Expose every tickets model in the Django admin; Team is registered with
# the customized TeamAdmin defined above, all others use the default admin.
admin.site.register(UserProfile)
admin.site.register(Department)
admin.site.register(SubscriberType)
admin.site.register(Type)
admin.site.register(Team, TeamAdmin)
admin.site.register(Urgence)
admin.site.register(Ticket)
admin.site.register(Reason)
|
Stormwolves/stormwolves-generator | refs/heads/master | stormwolves/plugins/tipuesearch/__init__.py | 371 | from .tipue_search import *
|
socialpercon/anki-1 | refs/heads/master | oldanki/template/__init__.py | 20 | from oldanki.template.template import Template
from oldanki.template.view import View
def render(template, context=None, **kwargs):
    """Render *template* against *context* (copied, never mutated) with any
    keyword arguments layered on top."""
    merged = context.copy() if context else {}
    merged.update(kwargs)
    return Template(template, merged).render()
|
arpitn30/open-event-orga-server | refs/heads/development | app/views/sitemap.py | 8 | from math import ceil
from flask import url_for, render_template, make_response, request, \
Blueprint, abort
from app.settings import get_settings
from app.helpers.data_getter import DataGetter
from app.models.event import Event
from app.models.setting import Environment
sitemaps = Blueprint('sitemaps', __name__)
# INDEX PAGES LIST
# Number of events per paginated event sitemap file.
PER_PAGE_EVENTS = 500
# Event-detail view names: each public event contributes one sitemap URL
# per view listed here.
event_details_pages = [
    'display_event_detail_home',
    'display_event_sessions',
    'display_event_schedule',
    'display_event_cfs',
    'display_event_coc'
]
@sitemaps.route('/sitemap.xml')
def render_sitemap():
    """Serve the sitemap index: the static-pages sitemap plus one paginated
    sitemap per PER_PAGE_EVENTS public events."""
    if get_settings()['app_environment'] == Environment.STAGING:
        urls = []
    else:
        urls = [full_url(url_for('sitemaps.render_pages_sitemap'))]
    # pages sitemap
    # get events pages
    events = get_indexable_events()
    # Ceiling division (Python 2 idiom): number of event sitemap pages.
    pages = int(ceil(len(events) / (PER_PAGE_EVENTS * 1.0)))
    # NOTE(review): on staging the event sitemap URLs are still listed here
    # even though render_event_pages() aborts with 404 there -- confirm
    # whether that is intended.
    for num in range(1, pages + 1):
        urls.append(
            full_url(url_for('sitemaps.render_event_pages', num=num))
        )
    # make sitemap
    sitemap = render_template('sitemap/sitemap_index.xml', sitemaps=urls)
    resp = make_response(sitemap)
    resp.headers['Content-Type'] = 'application/xml'
    return resp
@sitemaps.route('/sitemaps/pages.xml.gz')
def render_pages_sitemap():
    """Sitemap of static pages; URLs that are already absolute pass through
    unchanged, relative ones are routed through the basic-pages view."""
    if get_settings()['app_environment'] == Environment.STAGING:
        abort(404)
    urls = []
    for page in DataGetter.get_all_pages():
        if '://' in page.url:
            urls.append(page.url)
        else:
            urls.append(full_url(url_for('basicpagesview.url_view', url=page.url)))
    return make_sitemap_response(urls)
@sitemaps.route('/sitemaps/events/<int:num>.xml.gz')
def render_event_pages(num):
    """Serve sitemap page *num* (1-based): detail URLs for one slice of
    PER_PAGE_EVENTS public events.  404 on staging or past the last page."""
    if get_settings()['app_environment'] == Environment.STAGING:
        abort(404)
    main_urls = []
    # Half-open slice [start:end) of the id-ordered public events.
    start = (num - 1) * PER_PAGE_EVENTS
    end = PER_PAGE_EVENTS * num
    events = get_indexable_events()[start:end]
    if len(events) == 0:
        abort(404)
    for e in events:
        # One URL per detail view listed in event_details_pages.
        urls = [
            full_url(url_for('event_detail.' + view, identifier=e.identifier))
            for view in event_details_pages
        ]
        main_urls += urls
    return make_sitemap_response(main_urls)
##########
# Helpers
##########
def make_sitemap_response(urls):
    """Render *urls* through the sitemap template as an XML response."""
    body = render_template('sitemap/sitemap.xml', urls=urls)
    response = make_response(body)
    response.headers['Content-Type'] = 'application/xml'
    return response
def get_indexable_events():
    """All public events ordered by id (stable paging for the sitemaps)."""
    return list(Event.query.filter_by(privacy='public').order_by('id'))
def full_url(url):
    """Join an absolute path with the request's scheme and host."""
    # request.url_root ends with '/'; remove it before appending the
    # already-slash-prefixed path.
    root = request.url_root.strip('/')
    return root + url
|
gkadillak/rockstor-core | refs/heads/master | src/rockstor/smart_manager/taplib/probe_config.py | 6 | """
Copyright (c) 2012 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class TapConfig(object):
    """Descriptor for a single probe tap.

    Plain value object: holds the probe's identifier, the tap script
    location, and a human-readable description of what it collects.
    """
    def __init__(self, uuid, location, sdetail):
        self.uuid = uuid
        self.location = location
        self.sdetail = sdetail
    def __repr__(self):
        # Debug-friendly representation; added for logging/inspection and
        # not relied upon by any caller.
        return '%s(uuid=%r, location=%r, sdetail=%r)' % (
            type(self).__name__, self.uuid, self.location, self.sdetail)
# Registry of available taps, keyed by tap id.  Each entry carries the tap
# script 'location', a short user-facing description 'sdetail', and 'cb' --
# presumably the name of the callback that post-processes this tap's
# output; confirm against the code that consumes TAP_MAP.
TAP_MAP = {
    'nfs-1': { 'location': 'nfsd/nfsd_distrib',
               'sdetail': 'All NFS calls',
               'cb': 'process_nfsd_calls',},
    'nfs-2': { 'location': 'nfsd/nfsd_distrib_client',
               'sdetail': 'NFS call distribution over clients',
               'cb': 'process_nfsd_calls',},
    'nfs-3': { 'location': 'nfsd/nfsd_distrib_share',
               'sdetail': 'NFS call distribution over shares',
               'cb': 'share_distribution',},
    'nfs-4': { 'location': 'nfsd/nfsd_distrib_share_client',
               'sdetail': 'NFS call distribution over clients and shares',
               'cb': 'share_client_distribution',},
    'nfs-5': { 'location': 'nfsd/nfsd_distrib_uid_gid',
               'sdetail': 'NFS call distribution over uids and gids',
               'cb': 'nfs_uid_gid_distribution',},
    }
|
rickmendes/ansible-modules-core | refs/heads/devel | network/nxos/nxos_igmp_interface.py | 15 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_igmp_interface
version_added: "2.2"
short_description: Manages IGMP interface configuration.
description:
- Manages IGMP interface configuration settings.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- When C(state=default), supported params will be reset to a default state.
These include C(version), C(startup_query_interval),
C(startup_query_count), C(robustness), C(querier_timeout), C(query_mrt),
C(query_interval), C(last_member_qrt), C(last_member_query_count),
C(group_timeout), C(report_llg), and C(immediate_leave).
- When C(state=absent), all configs for C(oif_prefix), C(oif_source), and
C(oif_routemap) will be removed.
- PIM must be enabled to use this module.
- This module is for Layer 3 interfaces.
- Route-map check not performed (same as CLI) check when configuring
route-map with 'static-oif'
- If restart is set to true with other params set, the restart will happen
last, i.e. after the configuration takes place.
options:
interface:
description:
- The full interface name for IGMP configuration.
e.g. I(Ethernet1/2).
required: true
version:
description:
- IGMP version. It can be 2 or 3.
required: false
default: null
choices: ['2', '3']
startup_query_interval:
description:
- Query interval used when the IGMP process starts up.
The range is from 1 to 18000. The default is 31.
required: false
default: null
startup_query_count:
description:
- Query count used when the IGMP process starts up.
The range is from 1 to 10. The default is 2.
required: false
default: null
robustness:
description:
- Sets the robustness variable. Values can range from 1 to 7.
The default is 2.
required: false
default: null
querier_timeout:
description:
- Sets the querier timeout that the software uses when deciding
to take over as the querier. Values can range from 1 to 65535
seconds. The default is 255 seconds.
required: false
default: null
query_mrt:
description:
- Sets the response time advertised in IGMP queries.
Values can range from 1 to 25 seconds. The default is 10 seconds.
required: false
default: null
query_interval:
description:
- Sets the frequency at which the software sends IGMP host query
messages. Values can range from 1 to 18000 seconds.
he default is 125 seconds.
required: false
default: null
last_member_qrt:
description:
- Sets the query interval waited after sending membership reports
before the software deletes the group state. Values can range
from 1 to 25 seconds. The default is 1 second.
required: false
default: null
last_member_query_count:
description:
- Sets the number of times that the software sends an IGMP query
in response to a host leave message.
Values can range from 1 to 5. The default is 2.
required: false
default: null
group_timeout:
description:
- Sets the group membership timeout for IGMPv2.
Values can range from 3 to 65,535 seconds.
The default is 260 seconds.
required: false
default: null
report_llg:
description:
- Configures report-link-local-groups.
Enables sending reports for groups in 224.0.0.0/24.
Reports are always sent for nonlink local groups.
By default, reports are not sent for link local groups.
required: false
choices: ['true', 'false']
default: false
immediate_leave:
description:
- Enables the device to remove the group entry from the multicast
routing table immediately upon receiving a leave message for
the group. Use this command to minimize the leave latency of
IGMPv2 group memberships on a given IGMP interface because the
device does not send group-specific queries.
The default is disabled.
required: false
choices: ['true', 'false']
default: false
oif_routemap:
description:
- Configure a routemap for static outgoing interface (OIF).
required: false
default: null
oif_prefix:
description:
- Configure a prefix for static outgoing interface (OIF).
required: false
default: null
oif_source:
description:
- Configure a source for static outgoing interface (OIF).
required: false
default: null
restart:
description:
- Restart IGMP.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Manages desired state of the resource.
required: false
default: present
choices: ['present', 'default']
'''
EXAMPLES = '''
- nxos_igmp_interface:
interface: ethernet1/32
startup_query_interval: 30
state: present
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"}
existing:
description: k/v pairs of existing BGP configuration
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "11.11.11.11", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
end_state:
description: k/v pairs of BGP configuration after module execution
returned: always
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "1.1.1.1", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
import collections
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
    """Normalize *val* into a list.

    None -> [], list/tuple -> shallow list copy, anything else -> [val].
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class CustomNetworkConfig(NetworkConfig):
    # Section-aware helpers over the base NetworkConfig tree, used by the
    # migrated nxos modules to read and build hierarchical config.
    def expand_section(self, configobj, S=None):
        """Collect *configobj* and all of its descendants, depth-first.

        S accumulates the result across recursive calls; already-seen
        children are skipped to avoid duplicates/cycles.
        """
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S
    def get_object(self, path):
        """Return the config item whose text and parent chain match *path*
        (a list of line texts from outermost to innermost), or None."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item
    def to_block(self, section):
        """Render a list of config items back into raw config text."""
        return '\n'.join([item.raw for item in section])
    def get_section(self, path):
        """Return the raw text of the section at *path*, or an empty list
        when the path does not exist in the config."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()
    def get_section_objects(self, path):
        """Return the config objects of the section at *path*.

        :raises ValueError: when the path is not present in the config.
        """
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)
    def add(self, lines, parents=None):
        """Adds one or lines of configuration

        Without *parents* the lines are added at the top level; with
        *parents* any missing parent levels are created first and each
        line is attached as a child of the innermost parent.
        """
        ancestors = list()
        offset = 0
        obj = None
        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)
        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)
                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)
            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    # Instantiate the network module: prefer the legacy get_module() factory;
    # when its import failed (see the try/except at the top of the file) the
    # name is undefined, the call raises NameError, and we fall back to the
    # newer NetworkModule class.
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    # Return the device configuration wrapped in CustomNetworkConfig.
    # Uses the user-supplied 'config' param when present, otherwise fetches
    # from the device via the legacy module.get_config() or, failing that,
    # the newer module.config API.
    # NOTE(review): the include_defaults *argument* is never read; the
    # module param of the same name is used instead -- confirm intended.
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
    # Diff *candidate* against the running config and push the resulting
    # commands; optionally save to startup-config. Returns a result dict
    # with 'changed' and (when commands were produced) 'updates'.
    config = get_config(module)
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]
    save_config = module.params['save']
    result = dict(changed=False)
    if commands:
        if not module.check_mode:
            # Support both module APIs: configure() (legacy) / config().
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)
            if save_config:
                # Same duality for saving the configuration.
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])
        result['changed'] = True
        result['updates'] = commands
    return result
# END OF COMMON CODE
def get_cli_body_ssh(command, response, module):
    """Normalize a CLI/SSH response into the same shape NX-API returns.

    Not every command supports '| json' over cli/ssh: an XML-looking reply
    means the command was valid but the resource does not exist yet, and
    'show run' output is passed through as the raw string list. Otherwise
    the echoed command is stripped and the remainder parsed as JSON.
    """
    raw = response[0]
    if 'xml' in raw:
        body = []
    elif 'show run' in command:
        body = response
    else:
        try:
            cleaned = raw.replace(command + '\n\n', '').strip()
            body = [json.loads(cleaned)]
        except ValueError:
            module.fail_json(msg='Command does not support JSON output',
                             command=command)
    return body
def execute_show(cmds, module, command_type=None):
    # Run show commands, supporting both the legacy module.execute() API and
    # the newer module.cli API; the module is failed on shell errors.
    # command_type ('cli_show'/'cli_show_ascii') selects JSON vs text output.
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }
    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # Module object without execute(): fall back to module.cli,
        # translating command_type into the cli 'output' format.
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
def execute_show_command(command, module, command_type='cli_show'):
    # Dispatch a show command over the active transport:
    # - cli: append '| json' and parse the raw SSH output ourselves
    #   (NOTE(review): command_type is ignored on this path -- confirm);
    # - nxapi: let the API format the response per command_type.
    # NOTE(review): any other transport would leave `body` unbound and
    # raise at the return -- presumably transports are restricted upstream.
    if module.params['transport'] == 'cli':
        command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)
    return body
def get_interface_mode(interface, intf_type, module):
    """Return the mode of *interface*: 'layer2', 'layer3' or 'unknown'.

    Ethernet/port-channel interfaces are queried on the device; loopback
    and SVI interfaces are always layer3.
    """
    mode = 'unknown'
    if intf_type in ('ethernet', 'portchannel'):
        command = 'show interface {0}'.format(interface)
        body = execute_show_command(command, module)[0]
        row = body['TABLE_interface']['ROW_interface']
        # A missing eth_mode key indicates a routed (layer3) port.
        mode = str(row.get('eth_mode', 'layer3'))
        if mode in ('access', 'trunk'):
            mode = 'layer2'
    elif intf_type in ('loopback', 'svi'):
        mode = 'layer3'
    return mode
def get_interface_type(interface):
    """Map an interface name to its type keyword based on its prefix."""
    prefix_to_type = (
        ('ET', 'ethernet'),
        ('VL', 'svi'),
        ('LO', 'loopback'),
        ('MG', 'management'),
        ('MA', 'management'),
        ('PO', 'portchannel'),
    )
    upper_name = interface.upper()
    for prefix, intf_type in prefix_to_type:
        if upper_name.startswith(prefix):
            return intf_type
    return 'unknown'
def apply_key_map(key_map, table):
    """Return a copy of *table* with its keys renamed per *key_map*.

    Keys missing from key_map (or mapped to a falsy name) are dropped;
    values are carried over unchanged.
    """
    return dict((new_key, table[old_key])
                for old_key, new_key in key_map.items()
                if new_key and old_key in table)
def flatten_list(command_lists):
    """Flatten one level of nesting: sub-lists are spliced in, scalar
    entries are kept as-is."""
    flat = []
    for entry in command_lists:
        flat.extend(entry if isinstance(entry, list) else [entry])
    return flat
def get_igmp_interface(module, interface):
    # Gather the current IGMP state of *interface* as a normalized dict:
    # scalar settings (renamed via key_map), booleans report_llg /
    # immediate_leave, and either oif_routemap or oif_prefix_source.
    command = 'show ip igmp interface {0}'.format(interface)
    igmp = {}
    # Device JSON attribute name -> module parameter name.
    key_map = {
        'IGMPVersion': 'version',
        'ConfiguredStartupQueryInterval': 'startup_query_interval',
        'StartupQueryCount': 'startup_query_count',
        'RobustnessVariable': 'robustness',
        'QuerierTimeout': 'querier_timeout',
        'ConfiguredMaxResponseTime': 'query_mrt',
        'ConfiguredQueryInterval': 'query_interval',
        'LastMemberMTR': 'last_member_qrt',
        'LastMemberQueryCount': 'last_member_query_count',
        'ConfiguredGroupTimeout': 'group_timeout'
    }
    body = execute_show_command(command, module)[0]
    if body:
        resource = body['TABLE_vrf']['ROW_vrf']['TABLE_if']['ROW_if']
        igmp = apply_key_map(key_map, resource)
        # Device reports booleans as the strings 'true'/'false'.
        report_llg = str(resource['ReportingForLinkLocal'])
        if report_llg == 'true':
            igmp['report_llg'] = True
        elif report_llg == 'false':
            igmp['report_llg'] = False
        immediate_leave = str(resource['ImmediateLeave']) # returns en or dis
        if immediate_leave == 'en':
            igmp['immediate_leave'] = True
        elif immediate_leave == 'dis':
            igmp['immediate_leave'] = False
    # the next block of code is used to retrieve anything with:
    # ip igmp static-oif *** i.e.. could be route-map ROUTEMAP
    # or PREFIX source <ip>, etc.
    command = 'show run interface {0} | inc oif'.format(interface)
    body = execute_show_command(
        command, module, command_type='cli_show_ascii')[0]
    staticoif = []
    if body:
        split_body = body.split('\n')
        route_map_regex = ('.*ip igmp static-oif route-map\s+'
                           '(?P<route_map>\S+).*')
        prefix_source_regex = ('.*ip igmp static-oif\s+(?P<prefix>'
                               '((\d+.){3}\d+))(\ssource\s'
                               '(?P<source>\S+))?.*')
        for line in split_body:
            temp = {}
            # re.match returns None on a miss; the AttributeError from
            # .groupdict() then signals "no match" for that pattern.
            try:
                match_route_map = re.match(route_map_regex, line, re.DOTALL)
                route_map = match_route_map.groupdict()['route_map']
            except AttributeError:
                route_map = ''
            try:
                match_prefix_source = re.match(
                    prefix_source_regex, line, re.DOTALL)
                prefix_source_group = match_prefix_source.groupdict()
                prefix = prefix_source_group['prefix']
                source = prefix_source_group['source']
            except AttributeError:
                prefix = ''
                source = ''
            if route_map:
                temp['route_map'] = route_map
            if prefix:
                temp['prefix'] = prefix
            if source:
                temp['source'] = source
            if temp:
                staticoif.append(temp)
    # A single route-map entry is reported via oif_routemap; any other
    # combination is reported as an oif_prefix_source list.
    igmp['oif_routemap'] = None
    igmp['oif_prefix_source'] = []
    if staticoif:
        if len(staticoif) == 1 and staticoif[0].get('route_map'):
            igmp['oif_routemap'] = staticoif[0]['route_map']
        else:
            igmp['oif_prefix_source'] = staticoif
    return igmp
def config_igmp_interface(delta, found_both, found_prefix):
    """Translate a delta of desired-vs-existing IGMP settings into CLI commands.

    :param delta: dict of param name -> desired value differing from the
        device's current state
    :param found_both: True when the requested oif prefix+source pair already
        exists on the device (suppresses static-oif commands)
    :param found_prefix: True when the requested oif prefix (without source)
        already exists on the device (suppresses static-oif commands)
    :return: list of unique CLI command strings
    """
    CMDS = {
        'version': 'ip igmp version {0}',
        'startup_query_interval': 'ip igmp startup-query-interval {0}',
        'startup_query_count': 'ip igmp startup-query-count {0}',
        'robustness': 'ip igmp robustness-variable {0}',
        'querier_timeout': 'ip igmp querier-timeout {0}',
        'query_mrt': 'ip igmp query-max-response-time {0}',
        'query_interval': 'ip igmp query-interval {0}',
        'last_member_qrt': 'ip igmp last-member-query-response-time {0}',
        'last_member_query_count': 'ip igmp last-member-query-count {0}',
        'group_timeout': 'ip igmp group-timeout {0}',
        'report_llg': 'ip igmp report-link-local-groups',
        'immediate_leave': 'ip igmp immediate-leave',
        'oif_prefix_source': 'ip igmp static-oif {0} source {1} ',
        'oif_routemap': 'ip igmp static-oif route-map {0}',
        'oif_prefix': 'ip igmp static-oif {0}',
    }
    commands = []
    command = None
    # .items() instead of .iteritems(): identical iteration on Python 2
    # and keeps the module importable on Python 3.
    for key, value in delta.items():
        if key == 'oif_source' or found_both or found_prefix:
            # oif_source is rendered together with oif_prefix below; when the
            # prefix(/source) already exists there is nothing to emit.
            pass
        elif key == 'oif_prefix':
            if delta.get('oif_source'):
                command = CMDS.get('oif_prefix_source').format(
                    delta.get('oif_prefix'), delta.get('oif_source'))
            else:
                command = CMDS.get('oif_prefix').format(
                    delta.get('oif_prefix'))
        elif value:
            command = CMDS.get(key).format(value)
        elif not value:
            # Falsy desired value -> negate the command (disable the knob).
            command = 'no {0}'.format(CMDS.get(key).format(value))
        if command:
            if command not in commands:
                commands.append(command)
        command = None
    return commands
def get_igmp_interface_defaults():
    """Return the NX-OS factory-default IGMP interface settings as a dict.

    Values mirror the device defaults (numbers as strings, booleans off).
    """
    version = '2'
    startup_query_interval = '31'
    startup_query_count = '2'
    robustness = '2'
    querier_timeout = '255'
    query_mrt = '10'
    query_interval = '125'
    last_member_qrt = '1'
    last_member_query_count = '2'
    group_timeout = '260'
    report_llg = False
    immediate_leave = False
    args = dict(version=version, startup_query_interval=startup_query_interval,
                startup_query_count=startup_query_count, robustness=robustness,
                querier_timeout=querier_timeout, query_mrt=query_mrt,
                query_interval=query_interval, last_member_qrt=last_member_qrt,
                last_member_query_count=last_member_query_count,
                group_timeout=group_timeout, report_llg=report_llg,
                immediate_leave=immediate_leave)
    # .items() instead of .iteritems() for Python 2/3 compatibility.
    # (The is-not-None filter is currently a no-op since every default
    # above is set; kept for safety.)
    default = dict((param, value) for (param, value) in args.items()
                   if value is not None)
    return default
def config_default_igmp_interface(existing, delta, found_both, found_prefix):
    """Build the commands that return an interface to IGMP factory defaults.

    Diffs the factory defaults against *existing* and renders the commands
    via config_igmp_interface(). The incoming *delta* argument is rebound
    and not used (kept for interface compatibility).
    """
    commands = []
    proposed = get_igmp_interface_defaults()
    # .items() instead of .iteritems() for Python 2/3 compatibility.
    delta = dict(set(proposed.items()).difference(existing.items()))
    if delta:
        command = config_igmp_interface(delta, found_both, found_prefix)
        if command:
            for each in command:
                commands.append(each)
    return commands
def config_remove_oif(existing, existing_oif_prefix_source):
    """Build 'no ip igmp static-oif ...' commands for every configured entry.

    :param existing: state dict from get_igmp_interface() (route-map, if any,
        is stored under the 'oif_routemap' key)
    :param existing_oif_prefix_source: list of {'prefix': ..[, 'source': ..]}
        dicts currently configured on the interface
    :return: list of removal commands

    Fixes two defects in the previous version: the route-map lookup used the
    key 'routemap' although get_igmp_interface() stores 'oif_routemap', so
    the removal command was never generated; and the append happened after
    the loop, so only the last prefix/source entry was ever removed.
    """
    commands = []
    if existing.get('oif_routemap'):
        commands.append('no ip igmp static-oif route-map {0}'.format(
            existing.get('oif_routemap')))
    if existing_oif_prefix_source:
        for each in existing_oif_prefix_source:
            command = None
            if each.get('prefix') and each.get('source'):
                command = 'no ip igmp static-oif {0} source {1} '.format(
                    each.get('prefix'), each.get('source')
                )
            elif each.get('prefix'):
                command = 'no ip igmp static-oif {0}'.format(
                    each.get('prefix')
                )
            if command:
                commands.append(command)
    return commands
def execute_config_command(commands, module):
    # Push configuration commands, supporting both the legacy
    # module.configure() API and the newer module.cli API; the module is
    # failed (fail_json) on shell errors.
    try:
        module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        try:
            # The cli API needs an explicit 'configure' to enter config mode.
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
def main():
    # Entry point: validate parameters, read current IGMP state, compute the
    # delta for the requested state (present/absent/default), push commands,
    # and exit with proposed/existing/updates/changed/end_state.
    argument_spec = dict(
        interface=dict(required=True, type='str'),
        version=dict(required=False, type='str'),
        startup_query_interval=dict(required=False, type='str'),
        startup_query_count=dict(required=False, type='str'),
        robustness=dict(required=False, type='str'),
        querier_timeout=dict(required=False, type='str'),
        query_mrt=dict(required=False, type='str'),
        query_interval=dict(required=False, type='str'),
        last_member_qrt=dict(required=False, type='str'),
        last_member_query_count=dict(required=False, type='str'),
        group_timeout=dict(required=False, type='str'),
        report_llg=dict(type='bool'),
        immediate_leave=dict(type='bool'),
        oif_routemap=dict(required=False, type='str'),
        oif_prefix=dict(required=False, type='str'),
        oif_source=dict(required=False, type='str'),
        restart=dict(type='bool', default=False),
        state=dict(choices=['present', 'absent', 'default'],
                       default='present'),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)
    state = module.params['state']
    interface = module.params['interface']
    oif_prefix = module.params['oif_prefix']
    oif_source = module.params['oif_source']
    oif_routemap = module.params['oif_routemap']
    # Parameter sanity checks: a source needs a prefix, the interface must
    # be routed, and prefix/routemap are mutually exclusive.
    if oif_source:
        if not oif_prefix:
            module.fail_json(msg='oif_prefix required when setting oif_source')
    intf_type = get_interface_type(interface)
    if get_interface_mode(interface, intf_type, module) == 'layer2':
        module.fail_json(msg='this module only works on Layer 3 interfaces')
    if oif_prefix and oif_routemap:
        module.fail_json(msg='cannot use oif_prefix AND oif_routemap.'
                             ' select one.')
    existing = get_igmp_interface(module, interface)
    existing_copy = existing.copy()
    end_state = existing_copy
    # IGMP state is only reported when PIM is enabled on the interface.
    if not existing.get('version'):
        module.fail_json(msg='pim needs to be enabled on the interface')
    existing_oif_prefix_source = existing.get('oif_prefix_source')
    # not json serializable
    existing.pop('oif_prefix_source')
    if oif_routemap and existing_oif_prefix_source:
        module.fail_json(msg='Delete static-oif configurations on this '
                             'interface if you want to use a routemap')
    if oif_prefix and existing.get('oif_routemap'):
        module.fail_json(msg='Delete static-oif route-map configuration '
                             'on this interface if you want to config '
                             'static entries')
    # Parameters that participate in the delta computation below.
    args = [
        'version',
        'startup_query_interval',
        'startup_query_count',
        'robustness',
        'querier_timeout',
        'query_mrt',
        'query_interval',
        'last_member_qrt',
        'last_member_query_count',
        'group_timeout',
        'report_llg',
        'immediate_leave',
        'oif_routemap',
        'oif_prefix',
        'oif_source'
    ]
    changed = False
    commands = []
    proposed = dict((k, v) for k, v in module.params.iteritems()
                    if v is not None and k in args)
    # Only the static-oif parameters can be removed with state=absent.
    CANNOT_ABSENT = ['version', 'startup_query_interval',
                     'startup_query_count', 'robustness', 'querier_timeout',
                     'query_mrt', 'query_interval', 'last_member_qrt',
                     'last_member_query_count', 'group_timeout', 'report_llg',
                     'immediate_leave']
    if state == 'absent':
        for each in CANNOT_ABSENT:
            if each in proposed.keys():
                module.fail_json(msg='only params: oif_prefix, oif_source, '
                                     'oif_routemap can be used when '
                                     'state=absent')
    # delta check for all params except oif_prefix and oif_source
    delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
    # now check to see there is a delta for prefix and source command option
    found_both = False
    found_prefix = False
    if existing_oif_prefix_source:
        if oif_prefix and oif_source:
            for each in existing_oif_prefix_source:
                if (oif_prefix == each.get('prefix') and
                        oif_source == each.get('source')):
                    found_both = True
            if not found_both:
                delta['prefix'] = oif_prefix
                delta['source'] = oif_source
        elif oif_prefix:
            for each in existing_oif_prefix_source:
                if oif_prefix == each.get('prefix') and not each.get('source'):
                    found_prefix = True
            if not found_prefix:
                delta['prefix'] = oif_prefix
    # Render commands per requested state.
    if state == 'present':
        if delta:
            command = config_igmp_interface(delta, found_both, found_prefix)
            if command:
                commands.append(command)
    elif state == 'default':
        command = config_default_igmp_interface(existing, delta,
                                                found_both, found_prefix)
        if command:
            commands.append(command)
    elif state == 'absent':
        command = None
        if existing.get('oif_routemap') or existing_oif_prefix_source:
            command = config_remove_oif(existing, existing_oif_prefix_source)
        if command:
            commands.append(command)
        command = config_default_igmp_interface(existing, delta,
                                                found_both, found_prefix)
        if command:
            commands.append(command)
    if module.params['restart']:
        commands.append('restart igmp')
    cmds = []
    results = {}
    if commands:
        # All commands are scoped to the target interface.
        commands.insert(0, ['interface {0}'.format(interface)])
        cmds = flatten_list(commands)
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            execute_config_command(cmds, module)
            changed = True
            end_state = get_igmp_interface(module, interface)
            if 'configure' in cmds:
                cmds.pop(0)
    results['proposed'] = proposed
    results['existing'] = existing_copy
    results['updates'] = cmds
    results['changed'] = changed
    results['end_state'] = end_state
    module.exit_json(**results)
if __name__ == '__main__':
main() |
meisamhe/GPLshared | refs/heads/master | Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/inorder_traversal_with_parent.py | 1 | from binary_tree_with_parent_prototype import BinaryTreeNode
# @include
def inorder_traversal(tree):
    """Inorder traversal of a binary tree whose nodes carry parent pointers.

    Uses O(1) extra space (no stack): the previously visited node tells us
    whether we arrived from the parent, the left child, or the right child.
    Returns the node data in inorder sequence.
    """
    previous, visited = None, []
    while tree:
        if previous is tree.parent:
            # Arrived from the parent: descend left when possible.
            if tree.left:
                successor = tree.left
            else:
                visited.append(tree.data)
                # No left subtree: visit, then go right (or back up).
                successor = tree.right or tree.parent
        elif tree.left is previous:
            # Finished the left subtree: visit, then go right (or back up).
            visited.append(tree.data)
            successor = tree.right or tree.parent
        else:
            # Finished the right subtree as well: move up.
            successor = tree.parent
        previous, tree = tree, successor
    return visited
# @exclude
def main():
    """Incrementally build the tree below and assert the traversal at
    each step (nodes are wired with parent pointers by hand)."""
    # 3
    # 2 5
    # 1 4 6
    root = BinaryTreeNode(3)
    root.parent = None
    assert inorder_traversal(root) == [3]
    root.left = BinaryTreeNode(2)
    root.left.parent = root
    root.left.left = BinaryTreeNode(1)
    root.left.left.parent = root.left
    assert inorder_traversal(root) == [1, 2, 3]
    root.right = BinaryTreeNode(5)
    root.right.parent = root
    root.right.left = BinaryTreeNode(4)
    root.right.left.parent = root.right
    root.right.right = BinaryTreeNode(6)
    root.right.right.parent = root.right
    assert inorder_traversal(root) == [1, 2, 3, 4, 5, 6]
|
dudeness/webAppSite | refs/heads/development | application/settings/statics.py | 1 | from base import *
from modules import *
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_PATH, 'static')
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# Additional locations of static files
STATICFILES_DIRS = ()
for module in INSTALLED_MODULES:
if DEBUG:
STATICFILES_DIRS += (os.path.join(MODULE_PATH, module, 'static'),)
PIPELINE_JS = {
'vendor': {
'source_filenames': (
'vendor/jquery/*.js',
'vendor/lodash/*.js',
'vendor/angularjs/1.2.7/angular.min.js',
'vendor/angularjs/1.2.7/angular-*.min.js',
),
'output_filename': 'compiled/vendor.min.js',
}
}
for module in INSTALLED_MODULES:
PIPELINE_JS.update({
module: {
'source_filenames': (
module + '/javascript/controller/*.js',
module + '/javascript/directive/*.js',
module + '/javascript/*.js',
),
'output_filename': 'compiled/' + module + '.min.js',
}
})
PIPELINE_CSS = {
'styles': {
'source_filenames': (
'compiled/styles.css',
),
'output_filename': 'compiled/styles.min.css',
},
}
|
devincornell/semanticanlysis | refs/heads/master | __init__.py | 1 | from .documents import *
from .semanticnetwork import *
from .topicmodels import *
from .preprocessing import *
from .visualize import *
from .timelines import *
from .concordance import *
from .dictionary import *
from .util import *
if __name__ == '__main__':
print('This is the semanticanlysis library.')
|
stonegithubs/odoo | refs/heads/8.0 | openerp/models.py | 20 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Object Relational Mapping module:
* Hierarchical structure
* Constraints consistency and validation
* Object metadata depends on its status
* Optimised processing by complex query (multiple actions at once)
* Default field values
* Permissions optimisation
* Persistant object: DB postgresql
* Data conversion
* Multi-level caching system
* Two different inheritance mechanisms
* Rich set of field types:
- classical (varchar, integer, boolean, ...)
- relational (one2many, many2one, many2many)
- functional
"""
import datetime
import functools
import itertools
import logging
import operator
import pickle
import pytz
import re
import time
from collections import defaultdict, MutableMapping
from inspect import getmembers, currentframe
from operator import itemgetter
import babel.dates
import dateutil.relativedelta
import psycopg2
from lxml import etree
import openerp
from . import SUPERUSER_ID
from . import api
from . import tools
from .api import Environment
from .exceptions import except_orm, AccessError, MissingError, ValidationError
from .osv import fields
from .osv.query import Query
from .tools import frozendict, lazy_property, ormcache
from .tools.config import config
from .tools.func import frame_codeinfo
from .tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from .tools.safe_eval import safe_eval as eval
from .tools.translate import _
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
regex_order = re.compile('^(\s*([a-z0-9:_]+|"[a-z0-9:_]+")(\s+(desc|asc))?\s*(,|$))+(?<!,)$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
onchange_v7 = re.compile(r"^(\w+)\((.*)\)$")
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def check_object_name(name):
    """ Check if the given name is a valid openerp object name.

    Model names are restricted (by regex_object_name) to lowercase
    alphanumerics, '_' and '.'; uppercase characters are disallowed
    because table/column names are quoted inconsistently in SQL.
    Returns True when the name is acceptable, False otherwise.
    """
    return regex_object_name.match(name) is not None
def raise_on_invalid_object_name(name):
    """Log an error and raise except_orm when *name* is not a valid
    model name (see check_object_name)."""
    if check_object_name(name):
        return
    msg = "The _name attribute %s is not valid." % name
    _logger.error(msg)
    raise except_orm('ValueError', msg)
POSTGRES_CONFDELTYPES = {
'RESTRICT': 'r',
'NO ACTION': 'a',
'CASCADE': 'c',
'SET NULL': 'n',
'SET DEFAULT': 'd',
}
def intersect(la, lb):
    """Return the elements of ``la`` that also occur in ``lb``, preserving
    the order (and duplicates) of ``la``.

    A list comprehension replaces filter(lambda ...): the result is
    identical on Python 2 and, unlike filter(), is still a list on
    Python 3.
    """
    return [x for x in la if x in lb]
def same_name(f, g):
    """ Test whether functions ``f`` and ``g`` are identical or have the same name """
    if f == g:
        return True
    # Distinct fallback values guarantee a mismatch when either side has
    # no __name__ attribute.
    return getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    # 'f.id' -> 'f/.id' (database id) and 'f:id' -> 'f/id' (external id),
    # then split the whole path on '/'.
    with_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    with_xml_id = re.sub(r'([^/]):id', r'\1/id', with_db_id)
    return with_xml_id.split('/')
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :type int size: varchar size, optional
    :rtype: str
    """
    if not size:
        return 'VARCHAR'
    if not isinstance(size, int):
        raise TypeError("VARCHAR parameter should be an int, got %s"
                        % type(size))
    return 'VARCHAR(%d)' % size if size > 0 else 'VARCHAR'
FIELDS_TO_PGTYPES = {
fields.boolean: 'bool',
fields.integer: 'int4',
fields.text: 'text',
fields.html: 'text',
fields.date: 'date',
fields.datetime: 'timestamp',
fields.binary: 'bytea',
fields.many2one: 'int4',
fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
    """
    :param fields._column f: field to get a Postgres type for
    :param type type_override: use the provided type for dispatching instead of the field's own type
    :returns: (postgres_identification_type, postgres_type_specification)
    :rtype: (str, str)

    Returns None (after logging a warning) for unsupported field types.
    """
    field_type = type_override or type(f)
    if field_type in FIELDS_TO_PGTYPES:
        # Direct mapping (bool, integer, text, date, ...).
        pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
    elif issubclass(field_type, fields.float):
        # Explicit support for "falsy" digits (0, False) to indicate a
        # NUMERIC field with no fixed precision. The values will be saved
        # in the database with all significant digits.
        # FLOAT8 type is still the default when there is no precision because
        # it is faster for most operations (sums, etc.)
        if f.digits is not None:
            pg_type = ('numeric', 'NUMERIC')
        else:
            pg_type = ('float8', 'DOUBLE PRECISION')
    elif issubclass(field_type, (fields.char, fields.reference)):
        pg_type = ('varchar', pg_varchar(f.size))
    elif issubclass(field_type, fields.selection):
        # Integer-keyed selections (or legacy size == -1) use an INTEGER
        # column; other selections are stored as VARCHAR.
        if (f.selection and isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
                or getattr(f, 'size', None) == -1:
            pg_type = ('int4', 'INTEGER')
        else:
            pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
    elif issubclass(field_type, fields.function):
        # Stored function fields: dispatch on the type they emulate.
        if f._type == 'selection':
            pg_type = ('varchar', pg_varchar())
        else:
            pg_type = get_pg_type(f, getattr(fields, f._type))
    else:
        _logger.warning('%s type not supported!', field_type)
        pg_type = None
    return pg_type
class MetaModel(api.Meta):
    """ Metaclass for the models.

    This class is used as the metaclass for the class :class:`BaseModel` to
    discover the models defined in a module (without instantiating them).
    If the automatic discovery is not needed, it is possible to set the model's
    ``_register`` attribute to False.
    """
    # Maps an addon (OpenERP module) name to the model classes it defines.
    module_to_models = {}
    def __init__(self, name, bases, attrs):
        if not self._register:
            # Opted out of discovery once; re-enable for subclasses.
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return
        if not hasattr(self, '_module'):
            # The (OpenERP) module name can be in the ``openerp.addons`` namespace
            # or not. For instance, module ``sale`` can be imported as
            # ``openerp.addons.sale`` (the right way) or ``sale`` (for backward
            # compatibility).
            module_parts = self.__module__.split('.')
            if len(module_parts) > 2 and module_parts[:2] == ['openerp', 'addons']:
                module_name = self.__module__.split('.')[2]
            else:
                module_name = self.__module__.split('.')[0]
            self._module = module_name
        # Remember which models to instanciate for this module.
        if not self._custom:
            self.module_to_models.setdefault(self._module, []).append(self)
        # check for new-api conversion error: leave comma after field definition
        # NOTE(review): `Field` is presumably imported from .fields elsewhere
        # in this file -- not visible in this chunk.
        for key, val in attrs.iteritems():
            if type(val) is tuple and len(val) == 1 and isinstance(val[0], Field):
                _logger.error("Trailing comma after field definition: %s.%s", self, key)
        # transform columns into new-style fields (enables field inheritance)
        for name, column in self._columns.iteritems():
            if name in self.__dict__:
                _logger.warning("In class %s, field %r overriding an existing value", self, name)
            setattr(self, name, column.to_field())
class NewId(object):
    """ Pseudo-ids for new records: always falsy, so truth-testing a
    not-yet-saved record id yields False. """
    def __nonzero__(self):
        return False
    # Python 3 uses __bool__ for truth testing; alias it so NewId stays
    # falsy under both interpreters.
    __bool__ = __nonzero__
IdType = (int, long, basestring, NewId)
# maximum number of prefetched records
PREFETCH_MAX = 200
# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
class BaseModel(object):
""" Base class for OpenERP models.
OpenERP models are created by inheriting from this class' subclasses:
* :class:`Model` for regular database-persisted models
* :class:`TransientModel` for temporary data, stored in the database but
automatically vacuumed every so often
* :class:`AbstractModel` for abstract super classes meant to be shared by
multiple inheriting model
The system automatically instantiates every model once per database. Those
instances represent the available models on each database, and depend on
which modules are installed on that database. The actual class of each
instance is built from the Python classes that create and inherit from the
corresponding model.
Every model instance is a "recordset", i.e., an ordered collection of
records of the model. Recordsets are returned by methods like
:meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
explicit representation: a record is represented as a recordset of one
record.
To create a class that should not be instantiated, the _register class
attribute may be set to False.
"""
__metaclass__ = MetaModel
_auto = True # create database backend
_register = False # Set to false if the model shouldn't be automatically discovered.
_name = None
_columns = {}
_constraints = []
_custom = False
_defaults = {}
_rec_name = None
_parent_name = 'parent_id'
_parent_store = False
_parent_order = False
_date_name = 'date'
_order = 'id'
_sequence = None
_description = None
_needaction = False
_translate = True # set to False to disable translations export for this model
# dict of {field:method}, with method returning the (name_get of records, {id: fold})
# to include in the _read_group, if grouped on this field
_group_by_full = {}
# Transience
_transient = False # True in a TransientModel
# structure:
# { 'parent_model': 'm2o_field', ... }
_inherits = {}
# Mapping from inherits'd field name to triple (m, r, f, n) where m is the
# model from which it is inherits'd, r is the (local) field towards m, f
# is the _column object itself, and n is the original (i.e. top-most)
# parent model.
# Example:
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
# field_column_obj, origina_parent_model), ... }
_inherit_fields = {}
_table = None
_log_create = False
_sql_constraints = []
# model dependencies, for models backed up by sql views:
# {model_name: field_names, ...}
_depends = {}
CONCURRENCY_CHECK_FIELD = '__last_update'
    def log(self, cr, uid, id, message, secondary=False, context=None):
        # Deprecated no-op: only emits a warning pointing users to the
        # OpenChatter notification system; all arguments are ignored.
        return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
    def view_init(self, cr, uid, fields_list, context=None):
        """Override this method to do specific things when a view on the object is opened."""
        # Hook only; intentionally a no-op in the base class.
        pass
def _field_create(self, cr, context=None):
    """ Create entries in ir_model_fields for all the model's fields.

    If necessary, also create an entry in ir_model, and if called from the
    modules loading scheme (by receiving 'module' in the context), also
    create entries in ir_model_data (for the model and the fields).

    - create an entry in ir_model (if there is not already one),
    - create an entry in ir_model_data (if there is not already one, and if
      'module' is in the context),
    - update ir_model_fields with the fields found in _columns
      (TODO there is some redundancy as _columns is updated from
      ir_model_fields in __init__).

    :param cr: database cursor
    :param context: may carry 'module', 'field_state', 'field_name', 'select'
    """
    if context is None:
        context = {}
    # ensure a row exists in ir_model for this model, allocating an id
    # explicitly when we have to create it
    cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
    if not cr.rowcount:
        cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
        model_id = cr.fetchone()[0]
        cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
    else:
        model_id = cr.fetchone()[0]
    # when loading as part of a module, also register an external id for the model
    if 'module' in context:
        name_id = 'model_'+self._name.replace('.', '_')
        cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
        if not cr.rowcount:
            cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                (name_id, context['module'], 'ir.model', model_id)
            )

    # snapshot of the existing ir_model_fields rows, keyed by field name
    cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
    cols = {}
    for rec in cr.dictfetchall():
        cols[rec['name']] = rec

    ir_model_fields_obj = self.pool.get('ir.model.fields')

    # sparse field should be created at the end, as it depends on its serialized field already existing
    model_fields = sorted(self._fields.items(), key=lambda x: 1 if x[1].type == 'sparse' else 0)
    for (k, f) in model_fields:
        # values describing field ``f`` for its ir_model_fields row
        vals = {
            'model_id': model_id,
            'model': self._name,
            'name': k,
            'field_description': f.string,
            'ttype': f.type,
            'relation': f.comodel_name or '',
            'select_level': tools.ustr(int(f.index)),
            'readonly': (f.readonly and 1) or 0,
            'required': (f.required and 1) or 0,
            'selectable': (f.search or f.store and 1) or 0,
            # NOTE(review): precedence makes 'and 1' apply only to the else
            # branch (False and 1 == False); net effect is f.translate's
            # value or 0 — kept as-is to match upstream behavior
            'translate': (f.translate if hasattr(f,'translate') else False and 1) or 0,
            'relation_field': f.inverse_name if hasattr(f, 'inverse_name') else '',
            'serialization_field_id': None,
        }
        if getattr(f, 'serialization_field', None):
            # resolve link to serialization_field if specified by name
            serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
            if not serialization_field_id:
                raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
            vals['serialization_field_id'] = serialization_field_id[0]

        # When it's a custom field, it does not contain f.select
        if context.get('field_state', 'base') == 'manual':
            if context.get('field_name', '') == k:
                vals['select_level'] = context.get('select', '0')
            # setting value to let the problem NOT occur next time
            elif k in cols:
                vals['select_level'] = cols[k]['select_level']

        if k not in cols:
            # new field: insert it with a freshly allocated id
            cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
            id = cr.fetchone()[0]
            vals['id'] = id
            cr.execute("""INSERT INTO ir_model_fields (
                id, model_id, model, name, field_description, ttype,
                relation,state,select_level,relation_field, translate, serialization_field_id
            ) VALUES (
                %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
            )""", (
                id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                vals['relation'], 'base',
                vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
            ))
            if 'module' in context:
                # also register an external id for the new field, suffixing
                # the row id if the plain name is already taken
                name1 = 'field_' + self._table + '_' + k
                cr.execute("select name from ir_model_data where name=%s", (name1,))
                if cr.fetchone():
                    name1 = name1 + "_" + str(id)
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
                    (name1, context['module'], 'ir.model.fields', id)
                )
        else:
            # existing field: rewrite the whole row as soon as any value differs
            for key, val in vals.items():
                if cols[k][key] != vals[key]:
                    cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                    cr.execute("""UPDATE ir_model_fields SET
                        model_id=%s, field_description=%s, ttype=%s, relation=%s,
                        select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
                    WHERE
                        model=%s AND name=%s""", (
                        vals['model_id'], vals['field_description'], vals['ttype'],
                        vals['relation'],
                        vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
                    ))
                    break
    self.invalidate_cache(cr, SUPERUSER_ID)
@classmethod
def _add_field(cls, name, field):
    """Install ``field`` on the class under ``name``.

    The field is exposed both as a class attribute and in ``cls._fields``
    (used for reflection); ``cls._columns`` is refreshed later, once all
    fields are set up.
    """
    # warn when we are about to clobber something that is not a Field
    current = getattr(cls, name, field)
    if not isinstance(current, Field):
        _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
    setattr(cls, name, field)
    cls._fields[name] = field
    # let the field know its owner class and name
    field.set_class_name(cls, name)
@classmethod
def _pop_field(cls, name):
    """Drop the field called ``name`` from the model and return it.

    Only meant to be used for manual (UI-defined) fields.
    """
    removed = cls._fields.pop(name)
    cls._columns.pop(name, None)
    # drop the class attribute mirroring the field, when present
    if hasattr(cls, name):
        delattr(cls, name)
    return removed
@classmethod
def _add_magic_fields(cls):
    """ Introduce magic fields on the current class

    * id is a "normal" field (with a specific getter)
    * create_uid, create_date, write_uid and write_date have become
      "normal" fields
    * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
      method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
      to get the same structure as the previous
      ``(now() at time zone 'UTC')::timestamp``::

          # select (now() at time zone 'UTC')::timestamp;
                    timezone
          ----------------------------
           2013-06-18 08:30:37.292809

          >>> str(datetime.datetime.utcnow())
          '2013-06-18 08:31:32.821177'
    """
    def add(name, field):
        """ add ``field`` with the given ``name`` if it does not exist yet """
        if name not in cls._fields:
            cls._add_field(name, field)

    # cyclic import
    from . import fields

    # this field 'id' must override any other column or field
    cls._add_field('id', fields.Id(automatic=True))

    add('display_name', fields.Char(string='Display Name', automatic=True,
        compute='_compute_display_name'))

    if cls._log_access:
        add('create_uid', fields.Many2one('res.users', string='Created by', automatic=True))
        add('create_date', fields.Datetime(string='Created on', automatic=True))
        add('write_uid', fields.Many2one('res.users', string='Last Updated by', automatic=True))
        add('write_date', fields.Datetime(string='Last Updated on', automatic=True))
        # audit columns exist: derive the concurrency field from them
        last_modified_name = 'compute_concurrency_field_with_access'
    else:
        last_modified_name = 'compute_concurrency_field'

    # this field must override any other column or field
    cls._add_field(cls.CONCURRENCY_CHECK_FIELD, fields.Datetime(
        string='Last Modified on', compute=last_modified_name, automatic=True))
@api.one
def compute_concurrency_field(self):
    """Fill the concurrency-check field with the current UTC timestamp."""
    now_utc = datetime.datetime.utcnow()
    self[self.CONCURRENCY_CHECK_FIELD] = now_utc.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
@api.one
@api.depends('create_date', 'write_date')
def compute_concurrency_field_with_access(self):
    """Concurrency field: last write date, else creation date, else now (UTC)."""
    fallback = datetime.datetime.utcnow().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    self[self.CONCURRENCY_CHECK_FIELD] = self.write_date or self.create_date or fallback
#
# Goal: apply inheritance at the instantiation level and register the
# resulting model instances in the registry (the "pool" variable).
#
@classmethod
def _build_model(cls, pool, cr):
    """ Instantiate a given model.

    This class method instantiates the class of some model (i.e. a class
    deriving from osv or osv_memory). The class might be the class passed
    in argument or, if it inherits from another class, a class constructed
    by combining the two classes.

    :param pool: the registry the model is being built for
    :param cr: database cursor
    :return: the singleton model instance registered in ``pool``
    """
    # The model's class inherits from cls and the classes of the inherited
    # models. All those classes are combined in a flat hierarchy:
    #
    #         Model                 the base class of all models
    #        /  |  \
    #      cls  c2  c1              the classes defined in modules
    #        \  |  /
    #       ModelClass              the final class of the model
    #        /  |  \
    #  model  recordset ...         the class' instances
    #
    # The registry contains the instance ``model``. Its class, ``ModelClass``,
    # carries inferred metadata that is shared between all the model's
    # instances for this registry only. When we '_inherit' from another
    # model, we do not inherit its ``ModelClass``, but this class' parents.
    # This is a limitation of the inheritance mechanism.

    # Keep links to non-inherited constraints in cls; this is useful for
    # instance when exporting translations
    cls._local_constraints = cls.__dict__.get('_constraints', [])
    cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

    # determine inherited models; _inherit may be a single name or a list
    parents = getattr(cls, '_inherit', [])
    parents = [parents] if isinstance(parents, basestring) else (parents or [])

    # determine the model's name
    name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

    # determine the module that introduced the model
    original_module = pool[name]._original_module if name in parents else cls._module

    # determine all the classes the model should inherit from
    bases = [cls]
    hierarchy = cls
    for parent in parents:
        if parent not in pool:
            raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
                'You may need to add a dependency on the parent class\' module.' % (name, parent))
        parent_class = type(pool[parent])
        bases += parent_class.__bases__
        # build a throwaway class only to compute a consistent mro below
        hierarchy = type(name, (hierarchy, parent_class), {'_register': False})

    # order bases following the mro of class hierarchy
    bases = [base for base in hierarchy.mro() if base in bases]

    # determine the attributes of the model's class, merging those of all
    # bases (least-derived first so derived classes win)
    inherits = {}
    depends = {}
    constraints = {}
    sql_constraints = []

    for base in reversed(bases):
        inherits.update(base._inherits)

        for mname, fnames in base._depends.iteritems():
            depends[mname] = depends.get(mname, []) + fnames

        for cons in base._constraints:
            # cons may override a constraint with the same function name
            constraints[getattr(cons[0], '__name__', id(cons[0]))] = cons

        sql_constraints += base._sql_constraints

    # build the actual class of the model
    ModelClass = type(name, tuple(bases), {
        '_name': name,
        '_register': False,
        '_columns': None,           # recomputed in _setup_fields()
        '_defaults': None,          # recomputed in _setup_base()
        '_fields': frozendict(),    # idem
        '_inherits': inherits,
        '_depends': depends,
        '_constraints': constraints.values(),
        '_sql_constraints': sql_constraints,
        '_original_module': original_module,
    })

    # instantiate the model, and initialize it
    model = object.__new__(ModelClass)
    model.__init__(pool, cr)
    return model
@classmethod
def _init_function_fields(cls, pool, cr):
    """ Register old-API function fields on the registry.

    Non-stored function fields are collected in
    ``pool._pure_function_fields`` (for cache invalidation); the ``store``
    triggers of stored ones are normalized into ``pool._store_function``.
    """
    # initialize the list of non-stored function fields for this model
    pool._pure_function_fields[cls._name] = []
    # process store of low-level function fields
    for fname, column in cls._columns.iteritems():
        # filter out existing store about this field
        pool._store_function[cls._name] = [
            stored
            for stored in pool._store_function.get(cls._name, [])
            if (stored[0], stored[1]) != (cls._name, fname)
        ]
        if not isinstance(column, fields.function):
            continue
        if not column.store:
            # register it on the pool for invalidation
            pool._pure_function_fields[cls._name].append(fname)
            continue
        # process store parameter
        store = column.store
        if store is True:
            # plain ``store=True``: recompute whenever this model's own
            # records are written
            get_ids = lambda self, cr, uid, ids, c={}: ids
            store = {cls._name: (get_ids, None, column.priority, None)}
        for model, spec in store.iteritems():
            # a spec is (fnct, fields, priority[, length])
            if len(spec) == 4:
                (fnct, fields2, order, length) = spec
            elif len(spec) == 3:
                (fnct, fields2, order) = spec
                length = None
            else:
                raise except_orm('Error',
                    ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (fname, cls._name)))
            pool._store_function.setdefault(model, [])
            t = (cls._name, fname, fnct, tuple(fields2) if fields2 else None, order, length)
            if t not in pool._store_function[model]:
                pool._store_function[model].append(t)
                # keep the triggers ordered by priority (5th tuple item)
                pool._store_function[model].sort(key=lambda x: x[4])
@classmethod
def _init_manual_fields(cls, cr, partial):
    """ Add custom (manual, typically ``x_``-prefixed) fields stored in
    ir_model_fields to the model.

    :param cr: database cursor
    :param partial: when True, silently skip manual fields whose comodel
        (or inverse field) is not yet available in the registry.
    """
    manual_fields = cls.pool.get_manual_fields(cr, cls._name)
    for name, field in manual_fields.iteritems():
        if name in cls._fields:
            # never override a code-defined field
            continue
        attrs = {
            'manual': True,
            'string': field['field_description'],
            'required': bool(field['required']),
            'readonly': bool(field['readonly']),
        }
        # FIXME: ignore field['serialization_field_id']
        if field['ttype'] in ('char', 'text', 'html'):
            attrs['translate'] = bool(field['translate'])
            attrs['size'] = field['size'] or None
        elif field['ttype'] in ('selection', 'reference'):
            # NOTE(review): eval() of a DB-stored definition; presumably
            # only admins can write ir_model_fields — confirm before
            # exposing this path to less-trusted users
            attrs['selection'] = eval(field['selection'])
        elif field['ttype'] == 'many2one':
            if partial and field['relation'] not in cls.pool:
                continue
            attrs['comodel_name'] = field['relation']
            attrs['ondelete'] = field['on_delete']
            attrs['domain'] = eval(field['domain']) if field['domain'] else None
        elif field['ttype'] == 'one2many':
            # require both the comodel and its inverse field (possibly
            # itself manual) to be resolvable
            if partial and not (
                field['relation'] in cls.pool and (
                field['relation_field'] in cls.pool[field['relation']]._fields or
                field['relation_field'] in cls.pool.get_manual_fields(cr, field['relation'])
            )):
                continue
            attrs['comodel_name'] = field['relation']
            attrs['inverse_name'] = field['relation_field']
            attrs['domain'] = eval(field['domain']) if field['domain'] else None
        elif field['ttype'] == 'many2many':
            if partial and field['relation'] not in cls.pool:
                continue
            attrs['comodel_name'] = field['relation']
            # deterministic name for the implicit m2m relation table
            _rel1 = field['relation'].replace('.', '_')
            _rel2 = field['model'].replace('.', '_')
            attrs['relation'] = 'x_%s_%s_%s_rel' % (_rel1, _rel2, name)
            attrs['column1'] = 'id1'
            attrs['column2'] = 'id2'
            attrs['domain'] = eval(field['domain']) if field['domain'] else None
        cls._add_field(name, Field.by_type[field['ttype']](**attrs))
@classmethod
def _init_constraints_onchanges(cls):
    """Register this model's SQL-constraint error messages on the registry,
    keyed by '<table>_<constraint name>'."""
    for key, _definition, message in cls._sql_constraints:
        cls.pool._sql_error[cls._table + '_' + key] = message
@property
def _constraint_methods(self):
    """ Return a list of methods implementing Python constraints. """
    def _is_constraint(func):
        return callable(func) and hasattr(func, '_constrains')

    cls = type(self)
    result = []
    for _attr, func in getmembers(cls, _is_constraint):
        missing = [name for name in func._constrains if name not in cls._fields]
        if missing:
            _logger.warning("@constrains%r parameters must be field names", func._constrains)
        result.append(func)

    # memoize on the class: the property lookup happens at most once per registry
    cls._constraint_methods = result
    return result
@property
def _onchange_methods(self):
    """ Return a dictionary mapping field names to onchange methods. """
    def _is_onchange(func):
        return callable(func) and hasattr(func, '_onchange')

    cls = type(self)
    result = defaultdict(list)
    for _attr, func in getmembers(cls, _is_onchange):
        for field_name in func._onchange:
            if field_name not in cls._fields:
                _logger.warning("@onchange%r parameters must be field names", func._onchange)
            result[field_name].append(func)

    # memoize on the class: the property lookup happens at most once per registry
    cls._onchange_methods = result
    return result
def __new__(cls):
    """Model classes are never instantiated directly.

    Registration in the server is handled entirely by the metaclass
    MetaModel; actual instances are created by :meth:`_build_model`.
    Calling the class therefore yields ``None``.
    """
    return None
def __init__(self, pool, cr):
    """ Initialize a model and make it part of the given registry.

    - copy the stored fields' functions in the registry,
    - retrieve custom fields and add them in the model,
    - ensure there is a many2one for each _inherits'd parent,
    - update the children's _columns,
    - give a chance to each field to initialize itself.

    :param pool: registry this model instance belongs to
    :param cr: database cursor
    """
    cls = type(self)

    # link the class to the registry, and update the registry
    cls.pool = pool
    cls._model = self              # backward compatibility
    pool.add(cls._name, self)

    # determine description, table, sequence and log_access,
    # defaulting each from _name / _table when unset
    if not cls._description:
        cls._description = cls._name
    if not cls._table:
        cls._table = cls._name.replace('.', '_')
    if not cls._sequence:
        cls._sequence = cls._table + '_id_seq'
    if not hasattr(cls, '_log_access'):
        # If _log_access is not specified, it is the same value as _auto.
        cls._log_access = cls._auto

    # Transience
    if cls.is_transient():
        cls._transient_check_count = 0
        cls._transient_max_count = config.get('osv_memory_count_limit')
        cls._transient_max_hours = config.get('osv_memory_age_limit')
        assert cls._log_access, \
            "TransientModels must have log_access turned on, " \
            "in order to implement their access rights policy"
@api.model
@ormcache()
def _is_an_ordinary_table(self):
    """ Return whether the model is backed by a regular PostgreSQL table
    (relkind 'r'), as opposed to e.g. a SQL view. Result is cached by
    ``@ormcache``. """
    self.env.cr.execute("""\
        SELECT 1
        FROM pg_class
        WHERE relname = %s
        AND relkind = %s""", [self._table, 'r'])
    return bool(self.env.cr.fetchone())
def __export_xml_id(self):
    """ Return a valid xml_id for the record ``self``.

    Reuse the first matching ir.model.data entry when one exists;
    otherwise create a fresh one in the reserved '__export__' module,
    deriving a unique name from the table name and record id.
    """
    if not self._is_an_ordinary_table():
        raise Exception(
            "You can not export the column ID of model %s, because the "
            "table %s is not an ordinary table."
            % (self._name, self._table))
    ir_model_data = self.sudo().env['ir.model.data']
    data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
    if data:
        if data[0].module:
            return '%s.%s' % (data[0].module, data[0].name)
        else:
            return data[0].name
    else:
        # no external id yet: generate '<table>_<id>', suffixing a counter
        # until the name is unique within the '__export__' module
        postfix = 0
        name = '%s_%s' % (self._table, self.id)
        while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
            postfix += 1
            name = '%s_%s_%s' % (self._table, self.id, postfix)
        ir_model_data.create({
            'model': self._name,
            'res_id': self.id,
            'module': '__export__',
            'name': name,
        })
        return '__export__.' + name
@api.multi
def __export_rows(self, fields):
    """ Export fields of the records in ``self``.

    :param fields: list of lists of fields to traverse
    :return: list of lists of corresponding values
    """
    lines = []
    for record in self:
        # main line of record, initially empty
        current = [''] * len(fields)
        lines.append(current)

        # list of primary fields followed by secondary field(s)
        primary_done = []

        # process column by column
        for i, path in enumerate(fields):
            if not path:
                continue

            name = path[0]
            if name in primary_done:
                continue

            if name == '.id':
                current[i] = str(record.id)
            elif name == 'id':
                current[i] = record.__export_xml_id()
            else:
                field = record._fields[name]
                value = record[name]

                # this part could be simpler, but it has to be done this way
                # in order to reproduce the former behavior
                if not isinstance(value, BaseModel):
                    # scalar value: convert and place it directly
                    current[i] = field.convert_to_export(value, self.env)
                else:
                    primary_done.append(name)

                    # This is a special case, its strange behavior is intended!
                    if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                        xml_ids = [r.__export_xml_id() for r in value]
                        current[i] = ','.join(xml_ids) or False
                        continue

                    # recursively export the fields that follow name
                    fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
                    lines2 = value.__export_rows(fields2)
                    if lines2:
                        # merge first line with record's main line
                        for j, val in enumerate(lines2[0]):
                            if val:
                                current[j] = val
                        # check value of current field
                        if not current[i]:
                            # assign xml_ids, and forget about remaining lines
                            xml_ids = [item[1] for item in value.name_get()]
                            current[i] = ','.join(xml_ids)
                        else:
                            # append the other lines at the end
                            lines += lines2[1:]
                    else:
                        current[i] = False
    return lines
@api.multi
def export_data(self, fields_to_export, raw_data=False):
    """ Export fields for selected objects

    :param fields_to_export: list of fields
    :param raw_data: True to return value in native Python type
    :rtype: dictionary with a *datas* matrix

    This method is used when exporting data via client menu
    """
    paths = map(fix_import_export_id_paths, fields_to_export)
    records = self.with_context(export_raw_data=True) if raw_data else self
    return {'datas': records.__export_rows(paths)}
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
"""
.. deprecated:: 7.0
Use :meth:`~load` instead
Import given data in given module
This method is used when importing data via client menu.
Example of fields to import for a sale.order::
.id, (=database_id)
partner_id, (=name_search)
order_line/.id, (=database_id)
order_line/name,
order_line/product_id/id, (=xml id)
order_line/price_unit,
order_line/product_uom_qty,
order_line/product_uom/id (=xml_id)
This method returns a 4-tuple with the following structure::
(return_code, errored_resource, error_message, unused)
* The first item is a return code, it is ``-1`` in case of
import error, or the last imported row number in case of success
* The second item contains the record data dict that failed to import
in case of error, otherwise it's 0
* The third item contains an error message string in case of error,
otherwise it's 0
* The last item is currently unused, with no specific semantics
:param fields: list of fields to import
:param datas: data to import
:param mode: 'init' or 'update' for record creation
:param current_module: module name
:param noupdate: flag for record creation
:param filename: optional file to store partial import state for recovery
:returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
:rtype: (int, dict or 0, str or 0, str or 0)
"""
context = dict(context) if context is not None else {}
context['_import_current_module'] = current_module
fields = map(fix_import_export_id_paths, fields)
ir_model_data_obj = self.pool.get('ir.model.data')
def log(m):
if m['type'] == 'error':
raise Exception(m['message'])
if config.get('import_partial') and filename:
with open(config.get('import_partial'), 'rb') as partial_import_file:
data = pickle.load(partial_import_file)
position = data.get(filename, 0)
position = 0
try:
for res_id, xml_id, res, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, datas,
context=context, log=log),
context=context, log=log):
ir_model_data_obj._update(cr, uid, self._name,
current_module, res, mode=mode, xml_id=xml_id,
noupdate=noupdate, res_id=res_id, context=context)
position = info.get('rows', {}).get('to', 0) + 1
if config.get('import_partial') and filename and (not (position%100)):
with open(config.get('import_partial'), 'rb') as partial_import:
data = pickle.load(partial_import)
data[filename] = position
with open(config.get('import_partial'), 'wb') as partial_import:
pickle.dump(data, partial_import)
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
cr.commit()
except Exception, e:
cr.rollback()
return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
return position, 0, 0, 0
def load(self, cr, uid, fields, data, context=None):
    """
    Attempts to load the data matrix, and returns a list of ids (or
    ``False`` if there was an error and no id could be generated) and a
    list of messages.

    The ids are those of the records created and saved (in database), in
    the same order they were extracted from the file. They can be passed
    directly to :meth:`~read`

    :param fields: list of fields to import, at the same index as the corresponding data
    :type fields: list(str)
    :param data: row-major matrix of data to import
    :type data: list(list(str))
    :param dict context:
    :returns: {ids: list(int)|False, messages: [Message]}
    """
    # outer savepoint: the whole import is rolled back if any row errors out
    cr.execute('SAVEPOINT model_load')
    messages = []

    fields = map(fix_import_export_id_paths, fields)
    # NOTE(review): this relies on ir.model.data's clear_caches() returning
    # the model itself — confirm against the ir.model.data implementation
    ModelData = self.pool['ir.model.data'].clear_caches()
    fg = self.fields_get(cr, uid, context=context)

    mode = 'init'
    current_module = ''
    noupdate = False

    ids = []
    for id, xid, record, info in self._convert_records(cr, uid,
            self._extract_records(cr, uid, fields, data,
                                  context=context, log=messages.append),
            context=context, log=messages.append):
        try:
            # per-row savepoint so a failed row does not poison the transaction
            cr.execute('SAVEPOINT model_load_save')
        except psycopg2.InternalError, e:
            # broken transaction, exit and hope the source error was
            # already logged
            if not any(message['type'] == 'error' for message in messages):
                messages.append(dict(info, type='error',message=
                    u"Unknown database error: '%s'" % e))
            break
        try:
            ids.append(ModelData._update(cr, uid, self._name,
                 current_module, record, mode=mode, xml_id=xid,
                 noupdate=noupdate, res_id=id, context=context))
            cr.execute('RELEASE SAVEPOINT model_load_save')
        except psycopg2.Warning, e:
            messages.append(dict(info, type='warning', message=str(e)))
            cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        except psycopg2.Error, e:
            # translate the pg error into a client-friendly message
            messages.append(dict(
                info, type='error',
                **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
            # Failed to write, log to messages, rollback savepoint (to
            # avoid broken transaction) and keep going
            cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
        except Exception, e:
            message = (_('Unknown error during import:') +
                       ' %s: %s' % (type(e), unicode(e)))
            moreinfo = _('Resolve other errors first')
            messages.append(dict(info, type='error',
                                 message=message,
                                 moreinfo=moreinfo))
            # Failed for some reason, perhaps due to invalid data supplied,
            # rollback savepoint and keep going
            cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
    if any(message['type'] == 'error' for message in messages):
        cr.execute('ROLLBACK TO SAVEPOINT model_load')
        ids = False
    return {'ids': ids, 'messages': messages}
def _add_fake_fields(self, cr, uid, fields, context=None):
    """Inject the pseudo-fields used by the import extractor into ``fields``:
    the name_get column (key ``None``), the external id ('id') and the
    database id ('.id'). Mutates and returns ``fields``."""
    from openerp.fields import Char, Integer
    fields.update({
        None: Char('rec_name'),
        'id': Char('External ID'),
        '.id': Integer('Database ID'),
    })
    return fields
def _extract_records(self, cr, uid, fields_, data,
                     context=None, log=lambda a: None):
    """ Generates record dicts from the data sequence.

    The result is a generator of dicts mapping field names to raw
    (unconverted, unvalidated) values.

    For relational fields, if sub-fields were provided the value will be
    a list of sub-records

    The following sub-fields may be set on the record (by key):
    * None is the name_get for the record (to use with name_create/name_search)
    * "id" is the External ID for the record
    * ".id" is the Database ID for the record

    :param fields_: list of field paths (each path a list of components)
    :param data: row-major matrix of raw cell values
    """
    fields = dict(self._fields)
    # Fake fields to avoid special cases in extractor
    fields = self._add_fake_fields(cr, uid, fields, context=context)
    # m2o fields can't be on multiple lines so exclude them from the
    # is_relational field rows filter, but special-case it later on to
    # be handled with relational fields (as it can have subfields)
    is_relational = lambda field: fields[field].relational
    get_o2m_values = itemgetter_tuple(
        [index for index, field in enumerate(fields_)
         if fields[field[0]].type == 'one2many'])
    get_nono2m_values = itemgetter_tuple(
        [index for index, field in enumerate(fields_)
         if fields[field[0]].type != 'one2many'])

    # Checks if the provided row has any non-empty non-relational field
    def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
        return any(g(row)) and not any(f(row))

    index = 0
    while True:
        if index >= len(data): return

        row = data[index]

        # copy non-relational fields to record dict
        record = dict((field[0], value)
                      for field, value in itertools.izip(fields_, row)
                      if not is_relational(field[0]))

        # Get all following rows which have relational values attached to
        # the current record (no non-relational values)
        record_span = itertools.takewhile(
            only_o2m_values, itertools.islice(data, index + 1, None))
        # stitch record row back on for relational fields
        record_span = list(itertools.chain([row], record_span))
        for relfield in set(
                field[0] for field in fields_
                if is_relational(field[0])):
            # FIXME: how to not use _obj without relying on fields_get?
            Model = self.pool[fields[relfield].comodel_name]

            # get only cells for this sub-field, should be strictly
            # non-empty, field path [None] is for name_get field
            indices, subfields = zip(*((index, field[1:] or [None])
                                       for index, field in enumerate(fields_)
                                       if field[0] == relfield))

            # return all rows which have at least one value for the
            # subfields of relfield
            relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
            # recurse: sub-rows are themselves extracted as records
            record[relfield] = [subrecord
                for subrecord, _subinfo in Model._extract_records(
                    cr, uid, subfields, relfield_data,
                    context=context, log=log)]

        yield record, {'rows': {
            'from': index,
            'to': index + len(record_span) - 1
        }}
        index += len(record_span)
def _convert_records(self, cr, uid, records,
                     context=None, log=lambda a: None):
    """ Converts records from the source iterable (recursive dicts of
    strings) into forms which can be written to the database (via
    self.create or (ir.model.data)._update)

    :returns: a list of triplets of (id, xid, record)
    :rtype: list((int|None, str|None, dict))
    """
    if context is None: context = {}
    Converter = self.pool['ir.fields.converter']
    Translation = self.pool['ir.translation']
    fields = dict(self._fields)
    # translated (user-visible) label for every field, used in messages
    field_names = dict(
        (f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
                                     context.get('lang'))
             or field.string))
        for f, field in fields.iteritems())

    convert = Converter.for_model(cr, uid, self, context=context)

    def _log(base, field, exception):
        type = 'warning' if isinstance(exception, Warning) else 'error'
        # logs the logical (not human-readable) field name for automated
        # processing of response, but injects human readable in message
        record = dict(base, type=type, field=field,
                      message=unicode(exception.args[0]) % base)
        if len(exception.args) > 1 and exception.args[1]:
            record.update(exception.args[1])
        log(record)

    stream = CountingStream(records)
    for record, extras in stream:
        dbid = False
        xid = False
        # name_get/name_create
        if None in record: pass
        # xid
        if 'id' in record:
            xid = record['id']
        # dbid
        if '.id' in record:
            try:
                dbid = int(record['.id'])
            except ValueError:
                # in case of overridden id column
                dbid = record['.id']
            # a dbid pointing to a nonexistent record is reported and dropped
            if not self.search(cr, uid, [('id', '=', dbid)], context=context):
                log(dict(extras,
                    type='error',
                    record=stream.index,
                    field='.id',
                    message=_(u"Unknown database identifier '%s'") % dbid))
                dbid = False

        converted = convert(record, lambda field, err:\
            _log(dict(extras, record=stream.index, field=field_names[field]), field, err))

        yield dbid, xid, converted, dict(extras, record=stream.index)
@api.multi
def _validate_fields(self, field_names):
    """ Run the constraints involving any of ``field_names`` on the
    records of ``self``: first the old-style ``_constraints`` tuples,
    then the new-style ``@constrains`` methods. Raise
    :class:`ValidationError` with the collected messages on failure. """
    field_names = set(field_names)

    # old-style constraint methods
    trans = self.env['ir.translation']
    cr, uid, context = self.env.args
    ids = self.ids
    errors = []
    for fun, msg, names in self._constraints:
        try:
            # validation must be context-independent; call ``fun`` without context
            valid = not (set(names) & field_names) or fun(self._model, cr, uid, ids)
            extra_error = None
        except Exception, e:
            _logger.debug('Exception while validating constraint', exc_info=True)
            valid = False
            extra_error = tools.ustr(e)
        if not valid:
            if callable(msg):
                # the message may itself be computed, possibly returning a
                # (template, params) pair to interpolate
                res_msg = msg(self._model, cr, uid, ids, context=context)
                if isinstance(res_msg, tuple):
                    template, params = res_msg
                    res_msg = template % params
            else:
                res_msg = trans._get_source(self._name, 'constraint', self.env.lang, msg)
            if extra_error:
                res_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
            errors.append(
                _("Field(s) `%s` failed against a constraint: %s") %
                (', '.join(names), res_msg)
            )
    if errors:
        raise ValidationError('\n'.join(errors))

    # new-style constraint methods
    for check in self._constraint_methods:
        if set(check._constrains) & field_names:
            try:
                check(self)
            except ValidationError, e:
                raise
            except Exception, e:
                # wrap any unexpected failure so clients get a ValidationError
                raise ValidationError("Error while validating constraint\n\n%s" % tools.ustr(e))
@api.model
def default_get(self, fields_list):
    """ default_get(fields) -> default_values

    Return default values for the fields in ``fields_list``. Default
    values are determined by the context, user defaults, and the model
    itself.

    Lookup order per field: context key 'default_<name>', ir.values user
    defaults, company-dependent (property) value, the field's own default,
    then delegation to the _inherits parent model.

    :param fields_list: a list of field names
    :return: a dictionary mapping each field name to its corresponding
        default value, if it has one.
    """
    # trigger view init hook
    self.view_init(fields_list)

    defaults = {}
    parent_fields = defaultdict(list)

    for name in fields_list:
        # 1. look up context
        key = 'default_' + name
        if key in self._context:
            defaults[name] = self._context[key]
            continue

        # 2. look up ir_values
        #    Note: performance is good, because get_defaults_dict is cached!
        ir_values_dict = self.env['ir.values'].get_defaults_dict(self._name)
        if name in ir_values_dict:
            defaults[name] = ir_values_dict[name]
            continue

        field = self._fields.get(name)

        # 3. look up property fields
        #    TODO: get rid of this one
        if field and field.company_dependent:
            defaults[name] = self.env['ir.property'].get(name, self._name)
            continue

        # 4. look up field.default
        if field and field.default:
            defaults[name] = field.default(self)
            continue

        # 5. delegate to parent model
        if field and field.inherited:
            field = field.related_field
            parent_fields[field.model_name].append(field.name)

    # convert default values to the right format
    defaults = self._convert_to_cache(defaults, validate=False)
    defaults = self._convert_to_write(defaults)

    # add default values for inherited fields
    for model, names in parent_fields.iteritems():
        defaults.update(self.env[model].default_get(names))

    return defaults
def fields_get_keys(self, cr, user, context=None):
    """Return the names of all fields of this model, including the
    fields inherited through every ``_inherits`` parent."""
    names = list(self._columns)
    # recursively collect the field names of each _inherits parent
    for parent_model in self._inherits:
        names.extend(self.pool[parent_model].fields_get_keys(cr, user, context))
    return names
def _rec_name_fallback(self, cr, uid, context=None):
rec_name = self._rec_name
if rec_name not in self._columns:
rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
return rec_name
#
# Override this method when the window title must depend on the context
#
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
    """Return a custom window title for the requested view, or False to
    keep the default one. Meant to be overridden by models that need a
    context-dependent title."""
    return False
def user_has_groups(self, cr, uid, groups, context=None):
    """Return True if the user is a member of at least one of the given
    groups. Typically used to resolve the ``groups`` attribute in view
    and model definitions.

    :param str groups: comma-separated list of fully-qualified group
                       external IDs, e.g.: ``base.group_user,base.group_system``
    :return: True if the current user is a member of one of the
             given groups
    """
    Users = self.pool['res.users']
    for group_ext_id in groups.split(','):
        if Users.has_group(cr, uid, group_ext_id):
            return True
    return False
def _get_default_form_view(self, cr, user, context=None):
    """ Generates a default single-line form view using all fields
    of the current model except the m2m and o2m ones.

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a form view as an lxml document
    :rtype: etree._Element
    """
    form = etree.Element('form', string=self._description)
    body = etree.SubElement(form, 'group', col="4")
    for field_name, field in self._fields.iteritems():
        # skip technical fields and x2many fields, which do not fit
        # a single-line layout
        if field.automatic or field.type in ('one2many', 'many2many'):
            continue
        etree.SubElement(body, 'field', name=field_name)
        if field.type == 'text':
            # text widgets take a full row: break the line after them
            etree.SubElement(body, 'newline')
    return form
def _get_default_search_view(self, cr, user, context=None):
    """ Generates a single-field search view, based on _rec_name.

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a search view as an lxml document
    :rtype: etree._Element
    """
    root = etree.Element('search', string=self._description)
    # search on the record's display field only
    etree.SubElement(root, 'field', name=self._rec_name_fallback(cr, user, context))
    return root
def _get_default_tree_view(self, cr, user, context=None):
    """ Generates a single-field tree view, based on _rec_name.

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a tree view as an lxml document
    :rtype: etree._Element
    """
    root = etree.Element('tree', string=self._description)
    # one column: the record's display field
    etree.SubElement(root, 'field', name=self._rec_name_fallback(cr, user, context))
    return root
def _get_default_calendar_view(self, cr, user, context=None):
    """ Generates a default calendar view by trying to infer
    calendar fields from a number of pre-set attribute names

    :param cr: database cursor
    :param int user: user id
    :param dict context: connection context
    :returns: a calendar view
    :rtype: etree._Element
    :raise except_orm: if no suitable start/stop date field can be found
    """
    def set_first_of(seq, in_, to):
        """Sets the first value of ``seq`` also found in ``in_`` to
        the ``to`` attribute of the view being closed over.

        Returns whether it's found a suitable value (and set it on
        the attribute) or not
        """
        for item in seq:
            if item in in_:
                view.set(to, item)
                return True
        return False

    view = etree.Element('calendar', string=self._description)
    etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))

    if self._date_name not in self._columns:
        # guess a start-date field among the usual candidate names
        date_found = False
        for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
            if dt in self._columns:
                self._date_name = dt
                date_found = True
                break

        if not date_found:
            raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
    view.set('date_start', self._date_name)

    set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                 self._columns, 'color')

    if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                        self._columns, 'date_stop'):
        if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                            self._columns, 'date_delay'):
            # Fix: interpolate *after* calling _(); interpolating inside the
            # call means the already-formatted string can never match a
            # translation entry, so the message was never translated.
            raise except_orm(
                _('Invalid Object Architecture!'),
                _("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)

    return view
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
    """ fields_view_get([view_id | view_type='form'])

    Get the detailed composition of the requested view like fields, model, view architecture

    :param view_id: id of the view or None
    :param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
    :param toolbar: true to include contextual actions
    :param submenu: deprecated
    :return: dictionary describing the composition of the requested view (including inherited views and extensions)
    :raise AttributeError:
        * if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
        * if some tag other than 'position' is found in parent view
    :raise Invalid ArchitectureError: if there is view type other than form, tree, calendar, search etc defined on the structure
    """
    if context is None:
        context = {}
    View = self.pool['ir.ui.view']

    result = {
        'model': self._name,
        'field_parent': False,
    }

    # try to find a view_id if none provided
    if not view_id:
        # <view_type>_view_ref in context can be used to override the default view
        view_ref_key = view_type + '_view_ref'
        view_ref = context.get(view_ref_key)
        if view_ref:
            if '.' in view_ref:
                # fully-qualified external id: resolve it through ir.model.data
                module, view_ref = view_ref.split('.', 1)
                cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
                view_ref_res = cr.fetchone()
                if view_ref_res:
                    view_id = view_ref_res[0]
            else:
                _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                    'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                    self._name)

        if not view_id:
            # otherwise try to find the lowest priority matching ir.ui.view
            view_id = View.default_view(cr, uid, self._name, view_type, context=context)

    # context for post-processing might be overriden
    ctx = context
    if view_id:
        # read the view with inherited views applied
        root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
        result['arch'] = root_view['arch']
        result['name'] = root_view['name']
        result['type'] = root_view['type']
        result['view_id'] = root_view['id']
        result['field_parent'] = root_view['field_parent']
        # override context from postprocessing
        if root_view.get('model') != self._name:
            ctx = dict(context, base_model_name=root_view.get('model'))
    else:
        # fallback on default views methods if no ir.ui.view could be found
        try:
            get_func = getattr(self, '_get_default_%s_view' % view_type)
            arch_etree = get_func(cr, uid, context)
            result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
            result['type'] = view_type
            result['name'] = 'default'
        except AttributeError:
            # no _get_default_<view_type>_view generator exists for this type
            raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)

    # Apply post processing, groups and modifiers etc...
    xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
    result['arch'] = xarch
    result['fields'] = xfields

    # Add related action information if asked
    if toolbar:
        toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
        def clean(x):
            # keep only the value part of the ir.values triple and strip
            # the heavy report payload keys from it
            x = x[2]
            for key in toclean:
                x.pop(key, None)
            return x
        ir_values_obj = self.pool.get('ir.values')
        resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
        resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
        resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
        resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
        resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
        #When multi="True" set it will display only in More of the list view
        resrelate = [clean(action) for action in resrelate
                     if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]

        for x in itertools.chain(resprint, resaction, resrelate):
            x['string'] = x['name']

        result['toolbar'] = {
            'print': resprint,
            'action': resaction,
            'relate': resrelate
        }
    return result
def get_formview_id(self, cr, uid, id, context=None):
    """ Return the id of the form view to open the document with, or
    False for the default one. This method is meant to be overridden in
    addons that want to give specific view ids for example.

    :param int id: id of the document to open
    """
    return False
def get_formview_action(self, cr, uid, id, context=None):
    """ Return an ``ir.actions.act_window`` action opening the form view
    of the document. This method is meant to be overridden in addons that
    want to give specific view ids for example.

    :param int id: id of the document to open
    """
    form_view_id = self.get_formview_id(cr, uid, id, context=context)
    action = {
        'type': 'ir.actions.act_window',
        'res_model': self._name,
        'view_type': 'form',
        'view_mode': 'form',
        'views': [(form_view_id, 'form')],
        'target': 'current',
        'res_id': id,
        'context': context,
    }
    return action
def get_access_action(self, cr, uid, id, context=None):
    """ Return an action granting access to the document. This method is
    meant to be overridden in addons that want to give specific access to
    the document. By default it opens the formview of the document.

    :param int id: id of the document to open
    """
    return self.get_formview_action(cr, uid, id, context=context)
def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
return self.pool['ir.ui.view'].postprocess_and_fields(
cr, uid, self._name, node, view_id, context=context)
def search_count(self, cr, user, args, context=None):
    """ search_count(args) -> int

    Returns the number of records in the current model matching :ref:`the
    provided domain <reference/orm/domains>`.
    """
    found = self.search(cr, user, args, context=context, count=True)
    # some search() overrides may still return a list of ids here
    return len(found) if isinstance(found, list) else found
@api.returns('self')
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
    """ search(args[, offset=0][, limit=None][, order=None][, count=False])

    Search for records matching the ``args``
    :ref:`search domain <reference/orm/domains>`.

    :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
                 list to match all records.
    :param int offset: number of results to ignore (default: none)
    :param int limit: maximum number of records to return (default: all)
    :param str order: sort string
    :param bool count: if True, only counts and returns the number of matching records (default: False)
    :returns: at most ``limit`` records matching the search criteria
    :raise AccessError: * if user tries to bypass access rules for read on the requested object.
    """
    # thin public wrapper: the real work happens in _search()
    return self._search(cr, user, args, offset=offset, limit=limit,
                        order=order, context=context, count=count)
#
# display_name, name_get, name_create, name_search
#
@api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
def _compute_display_name(self):
    """Compute ``display_name`` from :meth:`name_get` for every record."""
    name_by_id = dict(self.name_get())
    for record in self:
        record.display_name = name_by_id.get(record.id, False)
@api.multi
def name_get(self):
    """ name_get() -> [(id, name), ...]

    Returns a textual representation for the records in ``self``.
    By default this is the value of the ``display_name`` field.

    :return: list of pairs ``(id, text_repr)`` for each records
    :rtype: list(tuple)
    """
    rec_name = self._rec_name
    if rec_name in self._fields:
        # render the _rec_name field through its display-name converter
        convert = self._fields[rec_name].convert_to_display_name
        return [(record.id, convert(record[rec_name], record)) for record in self]
    # no usable _rec_name: fall back to a technical "model,id" label
    return [(record.id, "%s,%s" % (record._name, record.id)) for record in self]
@api.model
def name_create(self, name):
    """ name_create(name) -> record

    Create a new record by calling :meth:`~.create` with only one value
    provided: the display name of the new record.

    The new record will be initialized with any default values
    applicable to this model, or provided through the context. The usual
    behavior of :meth:`~.create` applies.

    :param name: display name of the record to create
    :rtype: tuple
    :return: the :meth:`~.name_get` pair value of the created record
    """
    if not self._rec_name:
        # nothing to store the name into: refuse the quick-create
        _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
        return False
    new_record = self.create({self._rec_name: name})
    return new_record.name_get()[0]
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
    """ name_search(name='', args=None, operator='ilike', limit=100) -> records

    Search for records that have a display name matching the given
    ``name`` pattern when compared with the given ``operator``, while also
    matching the optional search domain (``args``).

    This is used for example to provide suggestions based on a partial
    value for a relational field. Sometimes be seen as the inverse
    function of :meth:`~.name_get`, but it is not guaranteed to be.

    This method is equivalent to calling :meth:`~.search` with a search
    domain based on ``display_name`` and then :meth:`~.name_get` on the
    result of the search.

    :param str name: the name pattern to match
    :param list args: optional search domain (see :meth:`~.search` for
                      syntax), specifying further restrictions
    :param str operator: domain operator for matching ``name``, such as
                         ``'like'`` or ``'='``.
    :param int limit: optional max number of records to return
    :rtype: list
    :return: list of pairs ``(id, text_repr)`` for all matching records.
    """
    # delegate to the private implementation, which supports a separate
    # uid for the name_get part
    return self._name_search(name, args, operator, limit=limit)
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
# private implementation of name_search, allows passing a dedicated user
# for the name_get part to solve some access rights issues
args = list(args or [])
# optimize out the default criterion of ``ilike ''`` that matches everything
if not self._rec_name:
_logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
elif not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
access_rights_uid = name_get_uid or user
ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
res = self.name_get(cr, access_rights_uid, ids, context)
return res
def read_string(self, cr, uid, id, langs, fields=None, context=None):
    """Read the translated labels (``string``) of the given fields for
    each language in ``langs``.

    :param langs: list of language codes, e.g. ``['fr_FR', 'de_DE']``
    :param fields: field names to translate; defaults to all fields
    :return: dict mapping each language code to a dict
        ``{'code': lang, field_name: translated_label, ...}``
    """
    res = {}
    res2 = {}
    self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
    if not fields:
        fields = self._columns.keys() + self._inherit_fields.keys()
    #FIXME: collect all calls to _get_source into one SQL call.
    for lang in langs:
        res[lang] = {'code': lang}
        for f in fields:
            if f in self._columns:
                res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
                if res_trans:
                    res[lang][f] = res_trans
                else:
                    # no translation found: fall back to the source label
                    res[lang][f] = self._columns[f].string
    # merge in the translated labels of the _inherits parents
    for table in self._inherits:
        cols = intersect(self._inherit_fields.keys(), fields)
        res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
        for lang in res2:
            if lang in res:
                res[lang]['code'] = lang
            for f in res2[lang]:
                res[lang][f] = res2[lang][f]
    return res
def write_string(self, cr, uid, id, langs, vals, context=None):
    """Write the translated labels (``string``) of the given fields for
    each language in ``langs``.

    :param langs: list of language codes
    :param vals: dict mapping field names to their translated label
    :return: True
    """
    self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
    #FIXME: try to only call the translation in one SQL
    for lang in langs:
        for field in vals:
            if field in self._columns:
                src = self._columns[field].string
                self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
    # propagate to the _inherits parents owning some of the fields
    for table in self._inherits:
        cols = intersect(self._inherit_fields.keys(), vals)
        if cols:
            self.pool[table].write_string(cr, uid, id, langs, vals, context)
    return True
def _add_missing_default_values(self, cr, uid, values, context=None):
    """Complete ``values`` (a create dict) with default values for every
    column that is missing, including inherited columns, without ever
    overriding the values explicitly provided by the caller.

    :param dict values: field values provided by the caller
    :return: ``values`` completed with the applicable defaults
    """
    # avoid overriding inherited values when parent is set
    avoid_tables = []
    for tables, parent_field in self._inherits.items():
        if parent_field in values:
            avoid_tables.append(tables)

    # compute missing fields
    missing_defaults = set()
    for field in self._columns.keys():
        if not field in values:
            missing_defaults.add(field)
    for field in self._inherit_fields.keys():
        if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
            missing_defaults.add(field)

    # discard magic fields
    missing_defaults -= set(MAGIC_COLUMNS)

    if missing_defaults:
        # override defaults with the provided values, never allow the other way around
        defaults = self.default_get(cr, uid, list(missing_defaults), context)
        for dv in defaults:
            # normalize x2many defaults to the (6,0,ids) / (0,0,vals)
            # command format expected by create()/write()
            if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
                    or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
                    and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
                defaults[dv] = [(6, 0, defaults[dv])]
            if (dv in self._columns and self._columns[dv]._type == 'one2many' \
                    or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
                    and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
                defaults[dv] = [(0, 0, x) for x in defaults[dv]]
        defaults.update(values)
        values = defaults
    return values
def clear_caches(self):
    """ Clear the caches

    This clears the caches associated to methods decorated with
    ``tools.ormcache`` or ``tools.ormcache_multi``, for every entry
    whose key is prefixed by this model's name.
    """
    registry = self.pool
    try:
        registry.cache.clear_prefix((self._name,))
        registry._any_cache_cleared = True
    except AttributeError:
        # the registry may not have a cache yet (e.g. during bootstrap)
        pass
def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
                             aggregated_fields, count_field,
                             read_group_result, read_group_order=None, context=None):
    """Helper method for filling in empty groups for all possible values of
       the field being grouped by"""
    # self._group_by_full should map groupable fields to a method that returns
    # a list of all aggregated values that we want to display for this field,
    # in the form of a m2o-like pair (key,label).
    # This is useful to implement kanban views for instance, where all columns
    # should be displayed even if they don't contain any record.

    # Grab the list of all groups that should be displayed, including all present groups
    present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
    all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
                                                     read_group_order=read_group_order,
                                                     access_rights_uid=openerp.SUPERUSER_ID,
                                                     context=context)

    # template for the synthetic lines of groups without any record
    result_template = dict.fromkeys(aggregated_fields, False)
    result_template[groupby + '_count'] = 0
    if remaining_groupbys:
        result_template['__context'] = {'group_by': remaining_groupbys}

    # Merge the left_side (current results as dicts) with the right_side (all
    # possible values as m2o pairs). Both lists are supposed to be using the
    # same ordering, and can be merged in one pass.
    result = []
    known_values = {}
    def append_left(left_side):
        # append a group coming from the actual read_group results
        grouped_value = left_side[groupby] and left_side[groupby][0]
        if not grouped_value in known_values:
            result.append(left_side)
            known_values[grouped_value] = left_side
        else:
            known_values[grouped_value].update({count_field: left_side[count_field]})
    def append_right(right_side):
        # append an empty group for a value without any matching record
        grouped_value = right_side[0]
        if not grouped_value in known_values:
            line = dict(result_template)
            line[groupby] = right_side
            line['__domain'] = [(groupby,'=',grouped_value)] + domain
            result.append(line)
            known_values[grouped_value] = line
    while read_group_result or all_groups:
        left_side = read_group_result[0] if read_group_result else None
        right_side = all_groups[0] if all_groups else None
        assert left_side is None or left_side[groupby] is False \
             or isinstance(left_side[groupby], (tuple,list)), \
            'M2O-like pair expected, got %r' % left_side[groupby]
        assert right_side is None or isinstance(right_side, (tuple,list)), \
            'M2O-like pair expected, got %r' % right_side
        if left_side is None:
            append_right(all_groups.pop(0))
        elif right_side is None:
            append_left(read_group_result.pop(0))
        elif left_side[groupby] == right_side:
            append_left(read_group_result.pop(0))
            all_groups.pop(0) # discard right_side
        elif not left_side[groupby] or not left_side[groupby][0]:
            # left side == "Undefined" entry, not present on right_side
            append_left(read_group_result.pop(0))
        else:
            append_right(all_groups.pop(0))

    if folded:
        # mark the groups that should be rendered folded (e.g. kanban columns)
        for r in result:
            r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
    return result
def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
    """
    Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
    to the query if order should be computed against m2o field.

    :param orderby: the orderby definition in the form "%(field)s %(order)s"
    :param aggregated_fields: list of aggregated fields in the query
    :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
            These dictionaries contains the qualified name of each groupby
            (fully qualified SQL name for the corresponding field),
            and the (non raw) field name.
    :param osv.Query query: the query under construction
    :return: (groupby_terms, orderby_terms)
    """
    orderby_terms = []
    groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
    groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
    if not orderby:
        return groupby_terms, orderby_terms

    # reject malformed order specifications early (guards the raw SQL below)
    self._check_qorder(orderby)
    for order_part in orderby.split(','):
        order_split = order_part.split()
        order_field = order_split[0]
        if order_field in groupby_fields:
            if self._fields[order_field.split(':')[0]].type == 'many2one':
                # ordering on a m2o groupby follows the related model's own
                # order; this may add JOINs to the query
                order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
                if order_clause:
                    orderby_terms.append(order_clause)
                    # every column used in ORDER BY must also be grouped
                    groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
            else:
                order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
                orderby_terms.append(order)
        elif order_field in aggregated_fields:
            orderby_terms.append(order_part)
        else:
            # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
            _logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
                         self._name, order_part)
    return groupby_terms, orderby_terms
def _read_group_process_groupby(self, gb, query, context):
    """
    Helper method to collect important information about groupbys: raw
    field name, type, time information, qualified name, ...

    :param gb: groupby description, either ``field`` or ``field:function``
    :param query: query under construction (JOINs may be added for
        inherited fields)
    :return: dict with keys ``field``, ``groupby``, ``type``,
        ``display_format``, ``interval``, ``tz_convert``, ``qualified_field``
    """
    split = gb.split(':')
    field_type = self._fields[split[0]].type
    gb_function = split[1] if len(split) == 2 else None
    temporal = field_type in ('date', 'datetime')
    # only convert timezones for datetimes and when the context tz is valid
    tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
    qualified_field = self._inherits_join_calc(self._table, split[0], query)
    if temporal:
        display_formats = {
            # Careful with week/year formats:
            #  - yyyy (lower) must always be used, *except* for week+year formats
            #  - YYYY (upper) must always be used for week+year format
            #         e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
            #                         and W1 2006 for others
            #
            # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
            # such as 2006-01-01 being formatted as "January 2005" in some locales.
            # Cfr: http://babel.pocoo.org/docs/dates/#date-fields
            'day': 'dd MMM yyyy', # yyyy = normal year
            'week': "'W'w YYYY",  # w YYYY = ISO week-year
            'month': 'MMMM yyyy',
            'quarter': 'QQQ yyyy',
            'year': 'yyyy',
        }
        time_intervals = {
            'day': dateutil.relativedelta.relativedelta(days=1),
            'week': datetime.timedelta(days=7),
            'month': dateutil.relativedelta.relativedelta(months=1),
            'quarter': dateutil.relativedelta.relativedelta(months=3),
            'year': dateutil.relativedelta.relativedelta(years=1)
        }
        if tz_convert:
            # truncate dates in the user's timezone, not in UTC
            qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
        qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
    if field_type == 'boolean':
        # group NULL together with False
        qualified_field = "coalesce(%s,false)" % qualified_field
    return {
        'field': split[0],
        'groupby': gb,
        'type': field_type,
        'display_format': display_formats[gb_function or 'month'] if temporal else None,
        'interval': time_intervals[gb_function or 'month'] if temporal else None,
        'tz_convert': tz_convert,
        'qualified_field': qualified_field
    }
def _read_group_prepare_data(self, key, value, groupby_dict, context):
    """
    Helper method to sanitize the data received by read_group. The None
    values are converted to False, and the date/datetime are formatted,
    and corrected according to the timezones.
    """
    value = False if value is None else value
    gb = groupby_dict.get(key)
    if gb and gb['type'] in ('date', 'datetime') and value:
        if isinstance(value, basestring):
            # the SQL layer may return temporal values as strings: parse them back
            dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
            value = datetime.datetime.strptime(value, dt_format)
        if gb['tz_convert']:
            # the value was date_trunc'ed in the user's timezone: re-attach it
            value = pytz.timezone(context['tz']).localize(value)
    return value
def _read_group_get_domain(self, groupby, value):
"""
Helper method to construct the domain corresponding to a groupby and
a given value. This is mostly relevant for date/datetime.
"""
if groupby['type'] in ('date', 'datetime') and value:
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
domain_dt_begin = value
domain_dt_end = value + groupby['interval']
if groupby['tz_convert']:
domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
domain_dt_end = domain_dt_end.astimezone(pytz.utc)
return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
(groupby['field'], '<', domain_dt_end.strftime(dt_format))]
if groupby['type'] == 'many2one' and value:
value = value[0]
return [(groupby['field'], '=', value)]
def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
    """
    Helper method to format the data contained in the dictionary data by
    adding the domain corresponding to its values, the groupbys in the
    context and by properly formatting the date/datetime values.
    """
    # domain selecting exactly the records of this group
    domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
    for k,v in data.iteritems():
        gb = groupby_dict.get(k)
        if gb and gb['type'] in ('date', 'datetime') and v:
            # render temporal group values in the user's language
            data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))

    data['__domain'] = domain_group + domain
    if len(groupby) - len(annotated_groupbys) >= 1:
        # lazy mode: push the not-yet-applied groupbys into the group context
        data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
    # 'id' is a technical min(id) from the SQL query, not meaningful here
    del data['id']
    return data
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
    """
    Get the list of records in list view grouped by the given ``groupby`` fields

    :param cr: database cursor
    :param uid: current user id
    :param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
    :param list fields: list of fields present in the list view specified on the object
    :param list groupby: list of groupby descriptions by which the records will be grouped.
            A groupby description is either a field (then it will be grouped by that field)
            or a string 'field:groupby_function'.  Right now, the only functions supported
            are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
            date/datetime fields.
    :param int offset: optional number of records to skip
    :param int limit: optional max number of records to return
    :param dict context: context arguments, like lang, time zone.
    :param list orderby: optional ``order by`` specification, for
                         overriding the natural sort ordering of the
                         groups, see also :py:meth:`~osv.osv.osv.search`
                         (supported only for many2one fields currently)
    :param bool lazy: if true, the results are only grouped by the first groupby and the
            remaining groupbys are put in the __context key.  If false, all the groupbys are
            done in one call.
    :return: list of dictionaries(one dictionary for each record) containing:

                * the values of fields grouped by the fields in ``groupby`` argument
                * __domain: list of tuples specifying the search criteria
                * __context: dictionary with argument like ``groupby``
    :rtype: [{'field_name_1': value, ...]
    :raise AccessError: * if user has no read rights on the requested object
                        * if user tries to bypass access rules for read on the requested object
    """
    if context is None:
        context = {}
    self.check_access_rights(cr, uid, 'read')
    query = self._where_calc(cr, uid, domain, context=context)
    fields = fields or self._columns.keys()

    # in lazy mode, only the first groupby is applied now; the rest is
    # deferred through __context
    groupby = [groupby] if isinstance(groupby, basestring) else groupby
    groupby_list = groupby[:1] if lazy else groupby
    annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
                          for gb in groupby_list]
    groupby_fields = [g['field'] for g in annotated_groupbys]
    order = orderby or ','.join([g for g in groupby_list])
    groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}

    self._apply_ir_rules(cr, uid, query, 'read', context=context)
    for gb in groupby_fields:
        assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
        groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
        assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
        if not (gb in self._fields):
            # Don't allow arbitrary values, as this would be a SQL injection vector!
            raise except_orm(_('Invalid group_by'),
                             _('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))

    # only stored numeric fields can be aggregated
    aggregated_fields = [
        f for f in fields
        if f not in ('id', 'sequence')
        if f not in groupby_fields
        if f in self._fields
        if self._fields[f].type in ('integer', 'float')
        if getattr(self._fields[f].base_field.column, '_classic_write', False)
    ]

    field_formatter = lambda f: (self._fields[f].group_operator or 'sum', self._inherits_join_calc(self._table, f, query), f)
    select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]

    for gb in annotated_groupbys:
        select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))

    groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
    from_clause, where_clause, where_clause_params = query.get_sql()
    if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
        count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
    else:
        count_field = '_'
    count_field += '_count'

    prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
    prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''

    # limit/offset are coerced to int, and every identifier was validated
    # above, so the string interpolation below is safe
    query = """
        SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s %(extra_fields)s
        FROM %(from)s
        %(where)s
        %(groupby)s
        %(orderby)s
        %(limit)s
        %(offset)s
    """ % {
        'table': self._table,
        'count_field': count_field,
        'extra_fields': prefix_terms(',', select_terms),
        'from': from_clause,
        'where': prefix_term('WHERE', where_clause),
        'groupby': prefix_terms('GROUP BY', groupby_terms),
        'orderby': prefix_terms('ORDER BY', orderby_terms),
        'limit': prefix_term('LIMIT', int(limit) if limit else None),
        # NOTE(review): OFFSET is only emitted when a limit is set -- confirm intended
        'offset': prefix_term('OFFSET', int(offset) if limit else None),
    }
    cr.execute(query, where_clause_params)
    fetched_data = cr.dictfetchall()

    if not groupby_fields:
        return fetched_data

    # resolve many2one group values into (id, display name) pairs via read()
    many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
    if many2onefields:
        data_ids = [r['id'] for r in fetched_data]
        many2onefields = list(set(many2onefields))
        data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
        for d in fetched_data:
            d.update(data_dict[d['id']])

    data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
    result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
    if lazy and groupby_fields[0] in self._group_by_full:
        # Right now, read_group only fill results in lazy mode (by default).
        # If you need to have the empty groups in 'eager' mode, then the
        # method _read_group_fill_results need to be completely reimplemented
        # in a sane way
        result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
                                               aggregated_fields, count_field, result, read_group_order=order,
                                               context=context)
    return result
def _inherits_join_add(self, current_model, parent_model_name, query):
    """
    Ensure ``query`` contains the JOIN needed to reach the given
    ``_inherits`` parent table, adding it only when missing (no duplicates).

    :param current_model: current model object (join origin)
    :param parent_model_name: name of the parent model to reach
    :param query: query object on which the JOIN should be added
    :return: SQL alias of the parent table inside ``query``
    """
    link_field = current_model._inherits[parent_model_name]
    parent_table = self.pool[parent_model_name]._table
    join_spec = (current_model._table, parent_table, link_field, 'id', link_field)
    alias, _alias_statement = query.add_join(join_spec, implicit=True)
    return alias
def _inherits_join_calc(self, alias, field, query):
    """
    Add the table select and join clause(s) needed in ``query`` to reach
    a field coming from an ``_inherits`` parent table (no duplicates).

    :param alias: SQL alias of this model's table in ``query``
    :param field: name of the (possibly inherited) field to reach
    :param query: query object on which the JOIN(s) should be added
    :return: fully qualified column name, usable in a SELECT clause
    """
    # climb the _inherits chain until we reach the model that actually
    # stores `field` as a real column, joining each parent along the way
    current_model = self
    current_alias = alias
    while field in current_model._inherit_fields and field not in current_model._columns:
        parent_name = current_model._inherit_fields[field][0]
        parent = self.pool[parent_name]
        link_field = current_model._inherits[parent_name]
        # JOIN parent._table AS new_alias ON current_alias.link_field = new_alias.id
        current_alias, _ = query.add_join(
            (current_alias, parent._table, link_field, 'id', link_field),
            implicit=True,
        )
        current_model = parent
    return '"%s"."%s"' % (current_alias, field)
def _parent_store_compute(self, cr):
    """Recompute the nested-set ``parent_left``/``parent_right`` values
    for the whole table, via a depth-first traversal from the root
    records (those whose parent field is NULL).

    No-op when the model does not use the parent store.
    """
    if not self._parent_store:
        return
    _logger.info('Computing parent left and right for table %s...', self._table)
    def browse_rec(root, pos=0):
        # Assign interval [pos, pos2] to record `root`, recursing into its
        # children first; returns the next free position.
        # TODO: set order
        where = self._parent_name+'='+str(root)
        if not root:
            # falsy root: select the top-level records instead
            where = self._parent_name+' IS NULL'
        if self._parent_order:
            where += ' order by '+self._parent_order
        cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
        pos2 = pos + 1
        for id in cr.fetchall():
            pos2 = browse_rec(id[0], pos2)
        cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
        return pos2 + 1
    query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
    if self._parent_order:
        query += ' order by ' + self._parent_order
    pos = 0
    cr.execute(query)
    # traverse every root subtree, threading the position counter through
    for (root,) in cr.fetchall():
        pos = browse_rec(root, pos)
    # values were written with raw SQL: drop any cached copies
    self.invalidate_cache(cr, SUPERUSER_ID, ['parent_left', 'parent_right'])
    return True
def _update_store(self, cr, f, k):
    """Recompute and store the values of function field ``k`` for every
    existing record, in batches of AUTOINIT_RECALCULATE_STORED_FIELDS ids.

    :param cr: database cursor
    :param f: the fields.function column object
    :param k: the field name
    """
    _logger.info("storing computed values of fields.function '%s'", k)
    # _symbol_set = (sql placeholder/format, python->db value adapter)
    ss = self._columns[k]._symbol_set
    update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
    cr.execute('select id from '+self._table)
    ids_lst = map(lambda x: x[0], cr.fetchall())
    while ids_lst:
        # chunk the ids to bound the memory used by f.get()
        iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
        ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
        res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
        for key, val in res.items():
            if f._multi:
                # multi-field function: extract this field's value
                val = val[k]
            # if val is a many2one, just write the ID
            if type(val) == tuple:
                val = val[0]
            if val is not False:
                cr.execute(update_query, (ss[1](val), key))
@api.model
def _check_selection_field_value(self, field, value):
    """ Check whether value is among the valid values for the given
    selection/reference field, and raise an exception if not.
    """
    # conversion to cache format performs the validation and raises on
    # invalid values
    self._fields[field].convert_to_cache(value, self)
def _check_removed_columns(self, cr, log=False):
    """Drop NOT NULL constraints of database columns that no longer have a
    corresponding stored column in ``self._columns`` (fields removed, or
    about to be added by another module); otherwise they would break
    INSERTs performed by the ORM.

    :param cr: database cursor
    :param log: when True, log every orphaned column found
    """
    # iterate on the database columns to drop the NOT NULL constraints
    # of fields which were required but have been removed (or will be added by another module)
    # only stored columns: non-stored function fields have no DB column
    columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
    columns += MAGIC_COLUMNS
    # BUGFIX: the original statement ended with a stray trailing comma,
    # turning the expression into a pointless 1-tuple.
    cr.execute("SELECT a.attname, a.attnotnull"
               " FROM pg_class c, pg_attribute a"
               " WHERE c.relname=%s"
               " AND c.oid=a.attrelid"
               " AND a.attisdropped=%s"
               " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
               " AND a.attname NOT IN %s", (self._table, False, tuple(columns)))

    for column in cr.dictfetchall():
        if log:
            _logger.debug("column %s is in the table %s but not in the corresponding object %s",
                          column['attname'], self._table, self._name)
        if column['attnotnull']:
            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
            _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                          self._table, column['attname'])
def _save_constraint(self, cr, constraint_name, type):
    """
    Record the creation of a constraint for this model, to make it possible
    to delete it later when the module is uninstalled. Type can be either
    'f' or 'u' depending on the constraint being a foreign key or not.

    :param cr: database cursor
    :param constraint_name: SQL name of the constraint
    :param type: 'f' (foreign key) or 'u' (other/unique)
    """
    if not self._module:
        # no need to save constraints for custom models as they're not part
        # of any module
        return
    assert type in ('f', 'u')
    # only insert when the (module, constraint) pair is not recorded yet
    cr.execute("""
        SELECT 1 FROM ir_model_constraint, ir_module_module
        WHERE ir_model_constraint.module=ir_module_module.id
            AND ir_model_constraint.name=%s
            AND ir_module_module.name=%s
        """, (constraint_name, self._module))
    if not cr.rowcount:
        cr.execute("""
            INSERT INTO ir_model_constraint
                (name, date_init, date_update, module, model, type)
            VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
                (SELECT id FROM ir_module_module WHERE name=%s),
                (SELECT id FROM ir_model WHERE model=%s), %s)""",
            (constraint_name, self._module, self._name, type))
def _save_relation_table(self, cr, relation_table):
    """
    Record the creation of a many2many for this model, to make it possible
    to delete it later when the module is uninstalled.

    :param cr: database cursor
    :param relation_table: SQL name of the m2m relation table
    """
    # only insert when the (module, relation) pair is not recorded yet
    cr.execute("""
        SELECT 1 FROM ir_model_relation, ir_module_module
        WHERE ir_model_relation.module=ir_module_module.id
            AND ir_model_relation.name=%s
            AND ir_module_module.name=%s
        """, (relation_table, self._module))
    if not cr.rowcount:
        cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
                      VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
                        (SELECT id FROM ir_module_module WHERE name=%s),
                        (SELECT id FROM ir_model WHERE model=%s))""",
                   (relation_table, self._module, self._name))
        # data inserted with raw SQL: drop ORM caches so the new row is seen
        self.invalidate_cache(cr, SUPERUSER_ID)
# checked version: for direct m2o starting from ``self``
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
    """Queue a foreign key for column ``source_field`` of this model's
    table referencing ``dest_model``, after validating transient-model
    constraints. The FK itself is created later by ``_auto_end``.
    """
    assert self.is_transient() or not dest_model.is_transient(), \
        'Many2One relationships from non-transient Model to TransientModel are forbidden'
    effective_ondelete = ondelete
    if self.is_transient() and not dest_model.is_transient():
        # TransientModel relationships to regular Models are annoying
        # usually because they could block deletion due to the FKs.
        # So unless stated otherwise we default them to ondelete=cascade.
        effective_ondelete = effective_ondelete or 'cascade'
    entry = (self._table, source_field, dest_model._table, effective_ondelete or 'set null')
    self._foreign_keys.add(entry)
    _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *entry)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
    """Queue a foreign key from an arbitrary ``source_table`` column to
    ``dest_model``, with no transient-model sanity checks (used for m2m
    relation tables). Created later by ``_auto_end``.
    """
    entry = (source_table, source_field, dest_model._table, ondelete or 'set null')
    self._foreign_keys.add(entry)
    _schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *entry)
def _drop_constraint(self, cr, source_table, constraint_name):
    """Drop the named constraint from ``source_table``.

    NOTE(review): identifiers are interpolated directly into the SQL;
    callers pass names coming from the database catalog itself, not from
    user input.
    """
    cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
    """Ensure the FK backing m2o column ``source_field`` of ``source_table``
    points at ``dest_model`` with the expected ON DELETE rule: drop any
    stale or duplicate constraint, then queue a fresh one (materialized by
    ``_auto_end``). Returns early when the single existing FK is correct.
    """
    # Find FK constraint(s) currently established for the m2o field,
    # and see whether they are stale or not
    cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
                         cl2.relname as foreign_table
                  FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
                       pg_attribute as att1, pg_attribute as att2
                  WHERE con.conrelid = cl1.oid
                    AND cl1.relname = %s
                    AND con.confrelid = cl2.oid
                    AND array_lower(con.conkey, 1) = 1
                    AND con.conkey[1] = att1.attnum
                    AND att1.attrelid = cl1.oid
                    AND att1.attname = %s
                    AND array_lower(con.confkey, 1) = 1
                    AND con.confkey[1] = att2.attnum
                    AND att2.attrelid = cl2.oid
                    AND att2.attname = %s
                    AND con.contype = 'f'""", (source_table, source_field, 'id'))
    constraints = cr.dictfetchall()
    if constraints:
        if len(constraints) == 1:
            # Is it the right constraint?
            cons, = constraints
            # compare the catalog's one-letter action code with the expected one
            if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
                    or cons['foreign_table'] != dest_model._table:
                # Wrong FK: drop it and recreate
                _schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
                              source_table, cons['constraint_name'])
                self._drop_constraint(cr, source_table, cons['constraint_name'])
            else:
                # it's all good, nothing to do!
                return
        else:
            # Multiple FKs found for the same field, drop them all, and re-create
            for cons in constraints:
                _schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
                              source_table, cons['constraint_name'])
                self._drop_constraint(cr, source_table, cons['constraint_name'])

    # (re-)create the FK
    self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
def _set_default_value_on_column(self, cr, column_name, context=None):
    """Fill the NULL cells of ``column_name`` with the field's default
    value via a direct SQL UPDATE (used when initializing new or newly
    required columns during _auto_init).

    :param cr: database cursor
    :param column_name: name of the column to initialize
    """
    # ideally, we should use default_get(), but it fails due to ir.values
    # not being ready

    # get default value
    default = self._defaults.get(column_name)
    if callable(default):
        default = default(self, cr, SUPERUSER_ID, context)

    column = self._columns[column_name]
    ss = column._symbol_set
    # convert the default to its database representation
    db_default = ss[1](default)
    # Write default if non-NULL, except for booleans for which False means
    # the same as NULL - this saves us an expensive query on large tables.
    write_default = (db_default is not None if column._type != 'boolean'
                     else db_default)
    if write_default:
        _logger.debug("Table '%s': setting default value of new column %s to %r",
                      self._table, column_name, default)
        query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (
            self._table, column_name, ss[0], column_name)
        cr.execute(query, (db_default,))
        # this is a disgrace
        cr.commit()
def _auto_init(self, cr, context=None):
    """
    Call _field_create and, unless _auto is False:

    - create the corresponding table in database for the model,
    - possibly add the parent columns in database,
    - possibly add the columns 'create_uid', 'create_date', 'write_uid',
      'write_date' in database if _log_access is True (the default),
    - report on database columns no more existing in _columns,
    - remove no more existing not null constraints,
    - alter existing database columns to match _columns,
    - create database tables to match _columns,
    - add database indices to match _columns,
    - save in self._foreign_keys a list a foreign keys to create (see
      _auto_end).

    :param cr: database cursor
    :param context: optional context; key 'update_custom_fields' forces
        updating manual fields as well
    :return: list of ``(order, callable, args)`` tuples of deferred work
        to run once all models have been initialized
    """
    self._foreign_keys = set()
    raise_on_invalid_object_name(self._name)

    # This prevents anything called by this method (in particular default
    # values) from prefetching a field for which the corresponding column
    # has not been added in database yet!
    context = dict(context or {}, prefetch_fields=False)

    # Make sure an environment is available for get_pg_type(). This is
    # because we access column.digits, which retrieves a cursor from
    # existing environments.
    env = api.Environment(cr, SUPERUSER_ID, context)

    store_compute = False          # whether parent_left/right need recomputing
    stored_fields = []             # new-style stored fields with compute
    todo_end = []                  # deferred (order, callable, args) work items
    update_custom_fields = context.get('update_custom_fields', False)
    self._field_create(cr, context=context)
    create = not self._table_exist(cr)
    if self._auto:

        if create:
            self._create_table(cr)
            has_rows = False
        else:
            # cheap emptiness probe: default-value backfills are skipped on
            # empty tables
            cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
            has_rows = cr.rowcount

        cr.commit()
        if self._parent_store:
            if not self._parent_columns_exist(cr):
                self._create_parent_columns(cr)
                store_compute = True

        self._check_removed_columns(cr, log=False)

        # iterate on the "object columns"
        column_data = self._select_column_data(cr)

        for k, f in self._columns.iteritems():
            if k == 'id': # FIXME: maybe id should be a regular column?
                continue
            # Don't update custom (also called manual) fields
            if f.manual and not update_custom_fields:
                continue

            if isinstance(f, fields.one2many):
                self._o2m_raise_on_missing_reference(cr, f)

            elif isinstance(f, fields.many2many):
                res = self._m2m_raise_or_create_relation(cr, f)
                if res and self._fields[k].depends:
                    stored_fields.append(self._fields[k])

            else:
                res = column_data.get(k)

                # The field is not found as-is in database, try if it
                # exists with an old name.
                if not res and hasattr(f, 'oldname'):
                    res = column_data.get(f.oldname)
                    if res:
                        cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
                        res['attname'] = k
                        column_data[k] = res
                        _schema.debug("Table '%s': renamed column '%s' to '%s'",
                                      self._table, f.oldname, k)

                # The field already exists in database. Possibly
                # change its type, rename it, drop it or change its
                # constraints.
                if res:
                    f_pg_type = res['typname']
                    f_pg_size = res['size']
                    f_pg_notnull = res['attnotnull']
                    if isinstance(f, fields.function) and not f.store and\
                            not getattr(f, 'nodrop', False):
                        _logger.info('column %s (%s) converted to a function, removed from table %s',
                                     k, f.string, self._table)
                        cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
                        cr.commit()
                        _schema.debug("Table '%s': dropped column '%s' with cascade",
                                      self._table, k)
                        f_obj_type = None
                    else:
                        f_obj_type = get_pg_type(f) and get_pg_type(f)[0]

                    if f_obj_type:
                        ok = False
                        # supported in-place conversions:
                        # (current db type, field type, new db type, SQL cast suffix)
                        casts = [
                            ('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
                            ('varchar', 'text', 'TEXT', ''),
                            ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                            ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
                            ('timestamp', 'date', 'date', '::date'),
                            ('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                            ('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                        ]
                        if f_pg_type == 'varchar' and f._type in ('char', 'selection') and f_pg_size and (f.size is None or f_pg_size < f.size):
                            try:
                                with cr.savepoint():
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)), log_exceptions=False)
                            except psycopg2.NotSupportedError:
                                # In place alter table cannot be done because a view is depending of this field.
                                # Do a manual copy. This will drop the view (that will be recreated later)
                                cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
                                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
                                cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
                                cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
                            cr.commit()
                            _schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
                                          self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
                        for c in casts:
                            if (f_pg_type==c[0]) and (f._type==c[1]):
                                if f_pg_type != f_obj_type:
                                    ok = True
                                    # rename-copy-drop dance to convert the column type
                                    cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
                                    cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
                                    cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
                                    cr.commit()
                                    _schema.debug("Table '%s': column '%s' changed type from %s to %s",
                                                  self._table, k, c[0], c[1])
                                break

                        if f_pg_type != f_obj_type:
                            if not ok:
                                # no known conversion: move the data aside to a
                                # free "<k>_moved<i>" column and recreate `k`
                                i = 0
                                while True:
                                    newname = k + '_moved' + str(i)
                                    cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
                                        "WHERE c.relname=%s " \
                                        "AND a.attname=%s " \
                                        "AND c.oid=a.attrelid ", (self._table, newname))
                                    if not cr.fetchone()[0]:
                                        break
                                    i += 1
                                if f_pg_notnull:
                                    cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                                cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
                                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                                cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                                _schema.warning("Table `%s`: column `%s` has changed type (DB=%s, def=%s), data moved to column `%s`",
                                                self._table, k, f_pg_type, f._type, newname)

                        # if the field is required and hasn't got a NOT NULL constraint
                        if f.required and f_pg_notnull == 0:
                            if has_rows:
                                self._set_default_value_on_column(cr, k, context=context)
                            # add the NOT NULL constraint
                            try:
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
                                cr.commit()
                                _schema.debug("Table '%s': column '%s': added NOT NULL constraint",
                                              self._table, k)
                            except Exception:
                                msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                                    "If you want to have it, you should update the records and execute manually:\n"\
                                    "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                _schema.warning(msg, self._table, k, self._table, k)
                            cr.commit()
                        elif not f.required and f_pg_notnull == 1:
                            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                            cr.commit()
                            _schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
                                          self._table, k)
                        # Verify index
                        indexname = '%s_%s_index' % (self._table, k)
                        cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
                        res2 = cr.dictfetchall()
                        if not res2 and f.select:
                            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                            cr.commit()
                            if f._type == 'text':
                                # FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
                                msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
                                    "This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
                                    " because there is a length limit for indexable btree values!\n"\
                                    "Use a search view instead if you simply want to make the field searchable."
                                _schema.warning(msg, self._table, f._type, k)
                        if res2 and not f.select:
                            cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
                            cr.commit()
                            msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
                            _schema.debug(msg, self._table, k, f._type)

                        if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                            dest_model = self.pool[f._obj]
                            if dest_model._auto and dest_model._table != 'ir_actions':
                                self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)

                # The field doesn't exist in database. Create it if necessary.
                else:
                    if not isinstance(f, fields.function) or f.store:
                        # add the missing field
                        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                        cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
                        _schema.debug("Table '%s': added column '%s' with definition=%s",
                                      self._table, k, get_pg_type(f)[1])

                        # initialize it
                        if has_rows:
                            self._set_default_value_on_column(cr, k, context=context)

                        # remember the functions to call for the stored fields
                        if isinstance(f, fields.function):
                            order = 10
                            if f.store is not True: # i.e. if f.store is a dict
                                order = f.store[f.store.keys()[0]][2]
                            todo_end.append((order, self._update_store, (f, k)))

                        # remember new-style stored fields with compute method
                        if k in self._fields and self._fields[k].depends:
                            stored_fields.append(self._fields[k])

                        # and add constraints if needed
                        if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
                            if f._obj not in self.pool:
                                raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
                            dest_model = self.pool[f._obj]
                            ref = dest_model._table
                            # ir_actions is inherited so foreign key doesn't work on it
                            if dest_model._auto and ref != 'ir_actions':
                                self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
                        if f.select:
                            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                        if f.required:
                            try:
                                cr.commit()
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
                                _schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
                                              self._table, k)
                            except Exception:
                                msg = "WARNING: unable to set column %s of table %s not null !\n"\
                                    "Try to re-run: openerp-server --update=module\n"\
                                    "If it doesn't work, update records and execute manually:\n"\
                                    "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
                                _logger.warning(msg, k, self._table, self._table, k, exc_info=True)
                        cr.commit()

    else:
        # _auto is False: only check whether the backing relation exists
        cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
        create = not bool(cr.fetchone())

    cr.commit()     # start a new transaction

    if self._auto:
        self._add_sql_constraints(cr)

    if create:
        self._execute_sql(cr)

    if store_compute:
        self._parent_store_compute(cr)
        cr.commit()

    if stored_fields:
        # trigger computation of new-style stored fields with a compute
        def func(cr):
            _logger.info("Storing computed values of %s fields %s",
                         self._name, ', '.join(sorted(f.name for f in stored_fields)))
            recs = self.browse(cr, SUPERUSER_ID, [], {'active_test': False})
            recs = recs.search([])
            if recs:
                map(recs._recompute_todo, stored_fields)
                recs.recompute()

        todo_end.append((1000, func, ()))

    return todo_end
def _auto_end(self, cr, context=None):
    """Materialize the foreign keys accumulated in ``self._foreign_keys``
    during :meth:`_auto_init`, record each as an 'f' constraint, then
    commit and discard the accumulator.
    """
    for table, column, ref_table, on_delete in self._foreign_keys:
        cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (table, column, ref_table, on_delete))
        self._save_constraint(cr, "%s_%s_fkey" % (table, column), 'f')
    cr.commit()
    del self._foreign_keys
def _table_exist(self, cr):
    """Return a truthy rowcount iff a table or view named
    ``self._table`` exists in the database.
    """
    query = "SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s"
    cr.execute(query, (self._table,))
    return cr.rowcount
def _create_table(self, cr):
    """Create the model's table with its ``id`` serial primary key, and
    attach the model description as the table comment.
    """
    create_sql = 'CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,)
    cr.execute(create_sql)
    comment_sql = "COMMENT ON TABLE \"%s\" IS %%s" % self._table
    cr.execute(comment_sql, (self._description,))
    _schema.debug("Table '%s': created", self._table)
def _parent_columns_exist(self, cr):
    """Return a truthy rowcount iff the table already has the
    ``parent_left`` column (``parent_right`` is assumed to accompany it,
    as both are added together by ``_create_parent_columns``).
    """
    cr.execute("""SELECT c.relname
        FROM pg_class c, pg_attribute a
        WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
        """, (self._table, 'parent_left'))
    return cr.rowcount
def _create_parent_columns(self, cr):
    """Add the ``parent_left``/``parent_right`` columns used by the
    nested-set parent store, and log errors when the model's field
    definitions are missing or misconfigured for parent-store usage.
    """
    cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
    cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
    if 'parent_left' not in self._columns:
        # column added in DB but not declared on the model: ask the
        # developer to declare it (indexed)
        _logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
                      self._table)
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, 'parent_left', 'INTEGER')
    elif not self._columns['parent_left'].select:
        _logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition)',
                      self._table)
    if 'parent_right' not in self._columns:
        _logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
                      self._table)
        _schema.debug("Table '%s': added column '%s' with definition=%s",
                      self._table, 'parent_right', 'INTEGER')
    elif not self._columns['parent_right'].select:
        _logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition)',
                      self._table)
    if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
        _logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
                      self._parent_name, self._name)
    cr.commit()
def _select_column_data(self, cr):
    """Return a mapping of column name to its pg_attribute metadata row
    (typname, attnotnull, computed size, ...) for the model's table.

    :param cr: database cursor
    :return: dict {attname: row-dict}
    """
    # attlen is the number of bytes necessary to represent the type when
    # the type has a fixed size. If the type has a varying size attlen is
    # -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
    cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
       "FROM pg_class c,pg_attribute a,pg_type t " \
       "WHERE c.relname=%s " \
       "AND c.oid=a.attrelid " \
       "AND a.atttypid=t.oid", (self._table,))
    # dict comprehension instead of dict(map(lambda ...)): same result, clearer
    return {row['attname']: row for row in cr.dictfetchall()}
def _o2m_raise_on_missing_reference(self, cr, f):
    """Ensure the inverse column of one2many field ``f`` exists on its
    target model, either as an own column or as an inherited field.

    :param cr: database cursor (unused here, kept for signature symmetry)
    :param f: the fields.one2many column object
    :raises except_orm: when the inverse reference field is missing
    """
    # TODO this check should be a method on fields.one2many.
    if f._obj in self.pool:
        other = self.pool[f._obj]
        # membership test on the mappings directly (no .keys() list) and
        # merged conditions instead of nested ifs
        if f._fields_id not in other._columns and f._fields_id not in other._inherit_fields:
            raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
    """ Create the table for the relation if necessary.
    Return ``True`` if the relation had to be created.

    :param cr: database cursor
    :param f: the fields.many2many column object
    :raises except_orm: when the destination model is not in the registry
    """
    m2m_tbl, col1, col2 = f._sql_names(self)
    # do not create relations for custom fields as they do not belong to a module
    # they will be automatically removed when dropping the corresponding ir.model.field
    # table name for custom relation all starts with x_, see __init__
    if not m2m_tbl.startswith('x_'):
        self._save_relation_table(cr, m2m_tbl)
    cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
    if not cr.dictfetchall():
        if f._obj not in self.pool:
            raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
        dest_model = self.pool[f._obj]
        ref = dest_model._table
        cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
        # create foreign key references with ondelete=cascade, unless the targets are SQL views
        cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
        if not cr.fetchall():
            self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
        cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
        if not cr.fetchall():
            self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')

        # index each side of the relation for fast joins in both directions
        cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
        cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
        cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
        cr.commit()
        _schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
        return True
def _add_sql_constraints(self, cr):
    """
    Modify this model's database table constraints so they match the one in
    _sql_constraints.

    For each declared constraint: record it in ir_model_constraint, then
    add it if missing, or drop-and-recreate it if its definition changed.
    Failures are logged (with a manual-fix hint) and rolled back rather
    than aborting the whole init.
    """
    def unify_cons_text(txt):
        # normalize spacing so textual comparison against
        # pg_get_constraintdef's output works
        return txt.lower().replace(', ',',').replace(' (','(')

    for (key, con, _) in self._sql_constraints:
        conname = '%s_%s' % (self._table, key)

        self._save_constraint(cr, conname, 'u')
        cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
        existing_constraints = cr.dictfetchall()
        sql_actions = {
            'drop': {
                'execute': False,
                'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
                'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
                    self._table, conname, con),
                'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
                'order': 1,
            },
            'add': {
                'execute': False,
                'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
                'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
                'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
                    self._table, con),
                'order': 2,
            },
        }

        if not existing_constraints:
            # constraint does not exists:
            sql_actions['add']['execute'] = True
            sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
        elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
            # constraint exists but its definition has changed:
            sql_actions['drop']['execute'] = True
            sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
            sql_actions['add']['execute'] = True
            sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )

        # we need to add the constraint; run 'drop' before 'add'
        # (sorted() instead of copy-then-sort-in-place)
        pending = sorted(sql_actions.values(), key=lambda x: x['order'])
        for sql_action in [action for action in pending if action['execute']]:
            try:
                cr.execute(sql_action['query'])
                cr.commit()
                _schema.debug(sql_action['msg_ok'])
            except Exception:
                # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are not swallowed; DB errors are logged and rolled back
                _schema.warning(sql_action['msg_err'])
                cr.rollback()
def _execute_sql(self, cr):
    """Run each semicolon-separated statement found in the model's
    ``_sql`` attribute (if defined), committing after every statement.
    """
    if not hasattr(self, "_sql"):
        return
    for raw_statement in self._sql.split(';'):
        statement = raw_statement.replace('\n', '').strip()
        if statement:
            cr.execute(statement)
            cr.commit()
#
# Update objects that uses this one to update their _inherits fields
#
@classmethod
def _init_inherited_fields(cls):
    """Set up the fields inherited through ``_inherits`` delegation.

    Inherited fields are implemented as related fields with specific
    properties: reading them does not bypass access rights
    (``related_sudo=False``) and they are copied iff their original
    field is copied. Locally defined fields always take precedence.
    """
    # collect candidate inherited fields from every _inherits parent
    candidates = {}
    for parent_model, parent_field in cls._inherits.iteritems():
        parent = cls.pool[parent_model]
        for fname, fdef in parent._fields.iteritems():
            candidates[fname] = fdef.new(
                inherited=True,
                related=(parent_field, fname),
                related_sudo=False,
                copy=fdef.copy,
            )

    # register only those not redefined locally
    for fname, fdef in candidates.iteritems():
        if fname not in cls._fields:
            cls._add_field(fname, fdef)
@classmethod
def _inherits_reload(cls):
    """ Recompute the _inherit_fields mapping.

    Entries map field name to
    ``(parent_model, link_field, column, original_parent)``.
    """
    cls._inherit_fields = struct = {}
    for parent_model, parent_field in cls._inherits.iteritems():
        parent = cls.pool[parent_model]
        # refresh the parent's own mapping first so transitive
        # _inherits chains are resolved bottom-up
        parent._inherits_reload()
        # direct columns of the parent
        for name, column in parent._columns.iteritems():
            struct[name] = (parent_model, parent_field, column, parent_model)
        # fields the parent itself inherits; keep the column and the
        # original defining model from the parent's mapping
        for name, source in parent._inherit_fields.iteritems():
            struct[name] = (parent_model, parent_field, source[2], source[3])
@property
def _all_columns(self):
    """ Returns a dict mapping all fields names (self fields and inherited
    field via _inherits) to a ``column_info`` object giving detailed column
    information. This property is deprecated, use ``_fields`` instead.
    """
    result = {}
    # do not inverse for loops, since local fields may hide inherited ones!
    for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
        result[k] = fields.column_info(k, col, parent, m2o, original_parent)
    for k, col in self._columns.iteritems():
        # local columns overwrite inherited entries of the same name
        result[k] = fields.column_info(k, col)
    return result
@classmethod
def _inherits_check(cls):
    """Sanity-check the ``_inherits`` link fields — auto-creating missing
    ones and forcing required + cascade/restrict on-delete — then register
    ``delegate=True`` many2one fields into ``cls._inherits``.
    """
    for table, field_name in cls._inherits.items():
        field = cls._fields.get(field_name)
        if not field:
            # link field not declared: create a sensible default one
            _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, cls._name)
            from .fields import Many2one
            field = Many2one(table, string="Automatically created field to link to parent %s" % table, required=True, ondelete="cascade")
            cls._add_field(field_name, field)
        elif not field.required or field.ondelete.lower() not in ("cascade", "restrict"):
            # link field misconfigured: force a safe configuration
            _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, cls._name)
            field.required = True
            field.ondelete = "cascade"

    # reflect fields with delegate=True in dictionary cls._inherits
    for field in cls._fields.itervalues():
        if field.type == 'many2one' and not field.related and field.delegate:
            if not field.required:
                _logger.warning("Field %s with delegate=True must be required.", field)
                field.required = True
            if field.ondelete.lower() not in ('cascade', 'restrict'):
                field.ondelete = 'cascade'
            cls._inherits[field.comodel_name] = field.name
@api.model
def _prepare_setup(self):
    """Mark the model class as needing a (re)setup of its fields."""
    cls = type(self)
    cls._setup_done = False
@api.model
def _setup_base(self, partial):
    """ Determine the inherited and custom fields of the model.

    :param partial: flag forwarded to manual-field initialization
        (presumably "registry only partially loaded" — TODO confirm)
    """
    cls = type(self)
    if cls._setup_done:
        return

    # 1. determine the proper fields of the model; duplicate them on cls to
    # avoid clashes with inheritance between different models
    for name in getattr(cls, '_fields', {}):
        delattr(cls, name)

    # retrieve fields from parent classes
    cls._fields = {}
    cls._defaults = {}
    for attr, field in getmembers(cls, Field.__instancecheck__):
        # duplicate each Field found on the MRO so models do not share
        # field instances
        cls._add_field(attr, field.new())

    # add magic and custom fields
    cls._add_magic_fields()
    cls._init_manual_fields(self._cr, partial)

    # 2. make sure that parent models determine their own fields, then add
    # inherited fields to cls
    cls._inherits_check()
    for parent in cls._inherits:
        self.env[parent]._setup_base(partial)
    cls._init_inherited_fields()

    cls._setup_done = True
@api.model
def _setup_fields(self):
""" Setup the fields, except for recomputation triggers. """
cls = type(self)
# set up fields, and determine their corresponding column
cls._columns = {}
for name, field in cls._fields.iteritems():
field.setup(self.env)
column = field.to_column()
if column:
cls._columns[name] = column
# determine field.computed_fields
computed_fields = defaultdict(list)
for field in cls._fields.itervalues():
if field.compute:
computed_fields[field.compute].append(field)
for fields in computed_fields.itervalues():
for field in fields:
field.computed_fields = fields
@api.model
def _setup_complete(self):
""" Setup recomputation triggers, and complete the model setup. """
cls = type(self)
# set up field triggers
for field in cls._fields.itervalues():
field.setup_triggers(self.env)
# add invalidation triggers on model dependencies
if cls._depends:
triggers = [(field, None) for field in cls._fields.itervalues()]
for model_name, field_names in cls._depends.iteritems():
model = self.env[model_name]
for field_name in field_names:
field = model._fields[field_name]
for trigger in triggers:
field.add_trigger(trigger)
# determine old-api structures about inherited fields
cls._inherits_reload()
# register stuff about low-level function fields
cls._init_function_fields(cls.pool, self._cr)
# register constraints and onchange methods
cls._init_constraints_onchanges()
# check defaults
for name in cls._defaults:
assert name in cls._fields, \
"Model %s has a default for nonexiting field %s" % (cls._name, name)
# validate rec_name
if cls._rec_name:
assert cls._rec_name in cls._fields, \
"Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
elif 'name' in cls._fields:
cls._rec_name = 'name'
elif 'x_name' in cls._fields:
cls._rec_name = 'x_name'
    def fields_get(self, cr, user, allfields=None, context=None, write_access=True, attributes=None):
        """ fields_get([fields][, attributes])

        Return the definition of each field.

        The returned value is a dictionary (indexed by field name) of
        dictionaries. The _inherits'd fields are included. The string, help,
        and selection (if present) attributes are translated.

        :param allfields: list of fields to document, all if empty or not provided
        :param attributes: list of description attributes to return for each field, all if empty or not provided
        """
        recs = self.browse(cr, user, [], context)

        # a user lacking both write and create access gets read-only
        # descriptions, with any 'states' overrides cleared
        has_access = functools.partial(recs.check_access_rights, raise_exception=False)
        readonly = not (has_access('write') or has_access('create'))

        res = {}
        for fname, field in self._fields.iteritems():
            if allfields and fname not in allfields:
                continue
            if not field.setup_done:
                # skip fields whose setup is not complete yet
                continue
            if field.groups and not recs.user_has_groups(field.groups):
                # hide fields restricted to groups the user does not belong to
                continue

            description = field.get_description(recs.env)
            if readonly:
                description['readonly'] = True
                description['states'] = {}
            if attributes:
                # keep only the requested description attributes
                description = {k: v for k, v in description.iteritems()
                               if k in attributes}
            res[fname] = description
        return res
def get_empty_list_help(self, cr, user, help, context=None):
""" Generic method giving the help message displayed when having
no result to display in a list or kanban view. By default it returns
the help given in parameter that is generally the help message
defined in the action.
"""
return help
def check_field_access_rights(self, cr, user, operation, fields, context=None):
"""
Check the user access rights on the given fields. This raises Access
Denied if the user does not have the rights. Otherwise it returns the
fields (as is if the fields is not falsy, or the readable/writable
fields if fields is falsy).
"""
if user == SUPERUSER_ID:
return fields or list(self._fields)
def valid(fname):
""" determine whether user has access to field ``fname`` """
field = self._fields.get(fname)
if field and field.groups:
return self.user_has_groups(cr, user, groups=field.groups, context=context)
else:
return True
if not fields:
fields = filter(valid, self._fields)
else:
invalid_fields = set(filter(lambda name: not valid(name), fields))
if invalid_fields:
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
operation, user, self._name, ', '.join(invalid_fields))
raise AccessError(
_('The requested operation cannot be completed due to security restrictions. '
'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
return fields
    # add explicit old-style implementation to read()
    @api.v7
    def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
        # delegate to the new-style read() implementation
        records = self.browse(cr, user, ids, context)
        result = BaseModel.read(records, fields, load=load)
        # old API convention: a single id (not a list) yields a single
        # record dict, or False when nothing was read
        return result if isinstance(ids, list) else (bool(result) and result[0])
    # new-style implementation of read()
    @api.v8
    def read(self, fields=None, load='_classic_read'):
        """ read([fields])

        Reads the requested fields for the records in ``self``, low-level/RPC
        method. In Python code, prefer :meth:`~.browse`.

        :param fields: list of field names to return (default is all fields)
        :param load: '_classic_read' converts many2one values with name_get
        :return: a list of dictionaries mapping field names to their values,
                 with one dictionary per record
        :raise AccessError: if user has no read rights on some of the given
                records
        """
        # check access rights
        self.check_access_rights('read')
        fields = self.check_field_access_rights('read', fields)

        # split fields into stored and computed fields
        stored, inherited, computed = [], [], []
        for name in fields:
            if name in self._columns:
                stored.append(name)
            elif name in self._fields:
                computed.append(name)
                field = self._fields[name]
                if field.inherited and field.base_field.column:
                    # inherited fields backed by a column can be fetched via SQL
                    inherited.append(name)
            else:
                _logger.warning("%s.read() with unknown field '%s'", self._name, name)

        # fetch stored fields from the database to the cache
        self._read_from_database(stored, inherited)

        # retrieve results from records; this takes values from the cache and
        # computes remaining fields
        result = []
        name_fields = [(name, self._fields[name]) for name in (stored + computed)]
        use_name_get = (load == '_classic_read')
        for record in self:
            try:
                values = {'id': record.id}
                for name, field in name_fields:
                    values[name] = field.convert_to_read(record[name], use_name_get)
                result.append(values)
            except MissingError:
                # records deleted meanwhile are silently skipped
                pass

        return result
    @api.multi
    def _prefetch_field(self, field):
        """ Read from the database in order to fetch ``field`` (:class:`Field`
        instance) for ``self`` in cache.

        Also opportunistically fetches other prefetchable fields for all
        records of this model currently missing them in cache.
        """
        # fetch the records of this model without field_name in their cache
        records = self._in_cache_without(field)

        # cap the number of prefetched records, making sure self is kept
        if len(records) > PREFETCH_MAX:
            records = records[:PREFETCH_MAX] | self

        # determine which fields can be prefetched
        if not self.env.in_draft and \
                self._context.get('prefetch_fields', True) and \
                self._columns[field.name]._prefetch:
            # prefetch all classic and many2one fields that the user can access
            fnames = {fname
                for fname, fcolumn in self._columns.iteritems()
                if fcolumn._prefetch
                if not fcolumn.groups or self.user_has_groups(fcolumn.groups)
            }
        else:
            fnames = {field.name}

        # important: never prefetch fields to recompute!
        get_recs_todo = self.env.field_todo
        for fname in list(fnames):
            if get_recs_todo(self._fields[fname]):
                if fname == field.name:
                    # drop the records whose value is pending recomputation
                    records -= get_recs_todo(field)
                else:
                    fnames.discard(fname)

        # fetch records with read()
        assert self in records and field.name in fnames
        result = []
        try:
            result = records.read(list(fnames), load='_classic_write')
        except AccessError:
            # not all records may be accessible, try with only current record
            result = self.read(list(fnames), load='_classic_write')

        # check the cache, and update it if necessary
        if field not in self._cache:
            for values in result:
                record = self.browse(values.pop('id'))
                record._cache.update(record._convert_to_cache(values, validate=False))
            if not self._cache.contains(field):
                # the value could not be fetched: store an access error instead
                e = AccessError("No value found for %s.%s" % (self, field.name))
                self._cache[field] = FailedValue(e)
    @api.multi
    def _read_from_database(self, field_names, inherited_field_names=[]):
        """ Read the given fields of the records in ``self`` from the database,
        and store them in cache. Access errors are also stored in cache.

        :param field_names: list of column names of model ``self``; all those
            fields are guaranteed to be read
        :param inherited_field_names: list of column names from parent
            models; some of those fields may not be read
        """
        # NOTE(review): mutable default argument; safe here only because the
        # list is never mutated — confirm before refactoring
        env = self.env
        cr, user, context = env.args

        # make a query object for selecting ids, and apply security rules to it
        query = Query(['"%s"' % self._table], ['"%s".id IN %%s' % self._table], [])
        self._apply_ir_rules(query, 'read')
        order_str = self._generate_order_by(None, query)

        # determine the fields that are stored as columns in tables;
        # for the sake of simplicity, discard inherited translated fields
        fields = map(self._fields.get, field_names + inherited_field_names)
        fields_pre = [
            field
            for field in fields
            if field.base_field.column._classic_write
            if not (field.inherited and field.base_field.column.translate)
        ]

        # the query may involve several tables: we need fully-qualified names
        def qualify(field):
            col = field.name
            if field.inherited:
                # join through the parent table(s) for inherited columns
                res = self._inherits_join_calc(self._table, field.name, query)
            else:
                res = '"%s"."%s"' % (self._table, col)
            if field.type == 'binary' and (context.get('bin_size') or context.get('bin_size_' + col)):
                # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
                res = 'pg_size_pretty(length(%s)::bigint) as "%s"' % (res, col)
            return res

        # always select the id column, deduplicating with a set
        qual_names = map(qualify, set(fields_pre + [self._fields['id']]))

        # determine the actual query to execute
        from_clause, where_clause, where_params = query.get_sql()
        query_str = """ SELECT %(qual_names)s FROM %(from_clause)s
                        WHERE %(where_clause)s %(order_str)s
                    """ % {
                        'qual_names': ",".join(qual_names),
                        'from_clause': from_clause,
                        'where_clause': where_clause,
                        'order_str': order_str,
                    }

        # run the query in id chunks and collect all rows
        result = []
        for sub_ids in cr.split_for_in_conditions(self.ids):
            cr.execute(query_str, [tuple(sub_ids)] + where_params)
            result.extend(cr.dictfetchall())

        ids = [vals['id'] for vals in result]

        if ids:
            # translate the fields if necessary
            if context.get('lang'):
                ir_translation = env['ir.translation']
                for field in fields_pre:
                    if not field.inherited and field.column.translate:
                        f = field.name
                        #TODO: optimize out of this loop
                        res_trans = ir_translation._get_ids(
                            '%s,%s' % (self._name, f), 'model', context['lang'], ids)
                        for vals in result:
                            vals[f] = res_trans.get(vals['id'], False) or vals[f]

            # apply the symbol_get functions of the fields we just read
            for field in fields_pre:
                symbol_get = field.base_field.column._symbol_get
                if symbol_get:
                    f = field.name
                    for vals in result:
                        vals[f] = symbol_get(vals[f])

            # store result in cache for POST fields
            for vals in result:
                record = self.browse(vals['id'])
                record._cache.update(record._convert_to_cache(vals, validate=False))

            # determine the fields that must be processed now;
            # for the sake of simplicity, we ignore inherited fields
            fields_post = [f for f in field_names if not self._columns[f]._classic_write]

            # Compute POST fields, grouped by multi
            by_multi = defaultdict(list)
            for f in fields_post:
                by_multi[self._columns[f]._multi].append(f)

            for multi, fs in by_multi.iteritems():
                if multi:
                    # multi-fields: one get() call computes several fields at once
                    res2 = self._columns[fs[0]].get(cr, self._model, ids, fs, user, context=context, values=result)
                    assert res2 is not None, \
                        'The function field "%s" on the "%s" model returned None\n' \
                        '(a dictionary was expected).' % (fs[0], self._name)
                    for vals in result:
                        # TOCHECK : why got string instead of dict in python2.6
                        # if isinstance(res2[vals['id']], str): res2[vals['id']] = eval(res2[vals['id']])
                        multi_fields = res2.get(vals['id'], {})
                        if multi_fields:
                            for f in fs:
                                vals[f] = multi_fields.get(f, [])
                else:
                    for f in fs:
                        res2 = self._columns[f].get(cr, self._model, ids, f, user, context=context, values=result)
                        for vals in result:
                            if res2:
                                vals[f] = res2[vals['id']]
                            else:
                                vals[f] = []

        # Warn about deprecated fields now that fields_pre and fields_post are computed
        for f in field_names:
            column = self._columns[f]
            if column.deprecated:
                _logger.warning('Field %s.%s is deprecated: %s', self._name, f, column.deprecated)

        # store result in cache
        for vals in result:
            record = self.browse(vals.pop('id'))
            record._cache.update(record._convert_to_cache(vals, validate=False))

        # store failed values in cache for the records that could not be read
        fetched = self.browse(ids)
        missing = self - fetched
        if missing:
            extras = fetched - self
            if extras:
                # fetched rows that were not requested indicate id corruption
                raise AccessError(
                    _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
                        ', '.join(map(repr, missing._ids)),
                        ', '.join(map(repr, extras._ids)),
                    ))
            # store an access error exception in existing records
            exc = AccessError(
                _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                (self._name, 'read')
            )
            forbidden = missing.exists()
            forbidden._cache.update(FailedValue(exc))
            # store a missing error exception in non-existing records
            exc = MissingError(
                _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
            )
            (missing - forbidden)._cache.update(FailedValue(exc))
    @api.multi
    def get_metadata(self):
        """
        Returns some metadata about the given records.

        :return: list of ownership dictionaries for each requested record
        :rtype: list of dictionaries with the following keys:

            * id: object id
            * create_uid: user who created the record
            * create_date: date when the record was created
            * write_uid: last user who changed the record
            * write_date: date of the last change to the record
            * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
            * noupdate: A boolean telling if the record will be updated or not
        """
        fields = ['id']
        if self._log_access:
            # audit columns only exist when _log_access is enabled
            fields += ['create_uid', 'create_date', 'write_uid', 'write_date']

        quoted_table = '"%s"' % self._table
        fields_str = ",".join('%s.%s' % (quoted_table, field) for field in fields)
        # LEFT JOIN on ir_model_data to retrieve the xmlid, when one exists
        query = '''SELECT %s, __imd.noupdate, __imd.module, __imd.name
                   FROM %s LEFT JOIN ir_model_data __imd
                       ON (__imd.model = %%s and __imd.res_id = %s.id)
                   WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
        self._cr.execute(query, (self._name, tuple(self.ids)))
        res = self._cr.dictfetchall()

        # resolve uid values to (id, display name) pairs in one batch
        uids = set(r[k] for r in res for k in ['write_uid', 'create_uid'] if r.get(k))
        names = dict(self.env['res.users'].browse(uids).name_get())

        for r in res:
            for key in r:
                # normalize NULLs to False, old-API style
                value = r[key] = r[key] or False
                if key in ('write_uid', 'create_uid') and value in names:
                    r[key] = (value, names[value])
            r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
            del r['name'], r['module']
        return res
def _check_concurrency(self, cr, ids, context):
if not context:
return
if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
return
check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
for sub_ids in cr.split_for_in_conditions(ids):
ids_to_check = []
for id in sub_ids:
id_ref = "%s,%s" % (self._name, id)
update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
if update_date:
ids_to_check.extend([id, update_date])
if not ids_to_check:
continue
cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
res = cr.fetchone()
if res:
# mention the first one only to keep the error message readable
raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
    def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
        """Verify the returned rows after applying record rules matches
        the length of ``ids``, and raise an appropriate exception if it does not.

        :raise except_orm: when some ids exist but are hidden by record rules
        :raise MissingError: when some ids no longer exist in the database
            (for operations other than read/unlink)
        """
        if context is None:
            context = {}
        ids, result_ids = set(ids), set(result_ids)
        missing_ids = ids - result_ids
        if missing_ids:
            # Attempt to distinguish record rule restriction vs deleted records,
            # to provide a more specific error message - check if the missing
            # ids are still present in the database
            cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
            forbidden_ids = [x[0] for x in cr.fetchall()]
            if forbidden_ids:
                # the missing ids are (at least partially) hidden by access rules
                if uid == SUPERUSER_ID:
                    return
                _logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
                raise except_orm(_('Access Denied'),
                                 _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                                    (self._description, operation))
            else:
                # If we get here, the missing_ids are not in the database
                if operation in ('read','unlink'):
                    # No need to warn about deleting an already deleted record.
                    # And no error when reading a record that was deleted, to prevent spurious
                    # errors for non-transactional search/read sequences coming from clients
                    return
                _logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
                raise MissingError(
                    _('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
"""Verifies that the operation given by ``operation`` is allowed for the user
according to the access rights."""
return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """Verifies that the operation given by ``operation`` is allowed for the user
        according to ir.rules.

        :param operation: one of ``write``, ``unlink``
        :raise except_orm: * if current ir.rules do not permit this operation.
        :return: None if the operation is allowed
        """
        # the superuser bypasses all record rules
        if uid == SUPERUSER_ID:
            return

        if self.is_transient():
            # Only one single implicit access rule for transient models: owner only!
            # This is ok to hardcode because we assert that TransientModels always
            # have log_access enabled so that the create_uid column is always there.
            # And even with _inherits, these fields are always present in the local
            # table too, so no need for JOINs.
            cr.execute("""SELECT distinct create_uid
                          FROM %s
                          WHERE id IN %%s""" % self._table, (tuple(ids),))
            uids = [x[0] for x in cr.fetchall()]
            if len(uids) != 1 or uids[0] != uid:
                raise except_orm(_('Access Denied'),
                                 _('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
        else:
            # apply the ir.rule domain for this operation as a SQL filter and
            # compare the filtered ids against the requested ones
            where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
            if where_clause:
                where_clause = ' and ' + ' and '.join(where_clause)
                for sub_ids in cr.split_for_in_conditions(ids):
                    cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
                               ' WHERE ' + self._table + '.id IN %s' + where_clause,
                               [sub_ids] + where_params)
                    returned_ids = [x['id'] for x in cr.dictfetchall()]
                    self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
def create_workflow(self, cr, uid, ids, context=None):
"""Create a workflow instance for each given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_create(uid, self._name, res_id, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return True
def delete_workflow(self, cr, uid, ids, context=None):
"""Delete the workflow instances bound to the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_delete(uid, self._name, res_id, cr)
self.invalidate_cache(cr, uid, context=context)
return True
def step_workflow(self, cr, uid, ids, context=None):
"""Reevaluate the workflow instances of the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_write(uid, self._name, res_id, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return True
def signal_workflow(self, cr, uid, ids, signal, context=None):
"""Send given workflow signal and return a dict mapping ids to workflow results"""
from openerp import workflow
result = {}
for res_id in ids:
result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
# self.invalidate_cache(cr, uid, context=context) ?
return result
def redirect_workflow(self, cr, uid, old_new_ids, context=None):
""" Rebind the workflow instance bound to the given 'old' record IDs to
the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
"""
from openerp import workflow
for old_id, new_id in old_new_ids:
workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
self.invalidate_cache(cr, uid, context=context)
return True
    def unlink(self, cr, uid, ids, context=None):
        """ unlink()

        Deletes the records of the current set

        :raise AccessError: * if user has no unlink rights on the requested object
                            * if user tries to bypass access rules for unlink on the requested object
        :raise UserError: if the record is default property for other records
        """
        if not ids:
            return True
        if isinstance(ids, (int, long)):
            ids = [ids]

        # snapshot stored/function-field recomputation info before deleting
        result_store = self._store_get_values(cr, uid, ids, self._fields.keys(), context)

        # for recomputing new-style fields
        recs = self.browse(cr, uid, ids, context)
        recs.modified(self._fields)

        self._check_concurrency(cr, ids, context)

        self.check_access_rights(cr, uid, 'unlink')

        ir_property = self.pool.get('ir.property')

        # Check if the records are used as default properties.
        domain = [('res_id', '=', False),
                  ('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
                 ]
        if ir_property.search(cr, uid, domain, context=context):
            raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))

        # Delete the records' properties.
        property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
        ir_property.unlink(cr, uid, property_ids, context=context)

        self.delete_workflow(cr, uid, ids, context=context)

        self.check_access_rule(cr, uid, ids, 'unlink', context=context)
        pool_model_data = self.pool.get('ir.model.data')
        ir_values_obj = self.pool.get('ir.values')
        ir_attachment_obj = self.pool.get('ir.attachment')
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('delete from ' + self._table + ' ' \
                       'where id IN %s', (sub_ids,))

            # Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
            # as these are not connected with real database foreign keys, and would be dangling references.
            # Note: following steps performed as admin to avoid access rights restrictions, and with no context
            #       to avoid possible side-effects during admin calls.
            # Step 1. Calling unlink of ir_model_data only for the affected IDS
            reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
            # Step 2. Marching towards the real deletion of referenced records
            if reference_ids:
                pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)

            # For the same reason, removing the record relevant to ir_values
            ir_value_ids = ir_values_obj.search(cr, uid,
                    ['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
                    context=context)
            if ir_value_ids:
                ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)

            # For the same reason, removing the record relevant to ir_attachment
            # The search is performed with sql as the search method of ir_attachment is overridden to hide attachments of deleted records
            cr.execute('select id from ir_attachment where res_model = %s and res_id in %s', (self._name, sub_ids))
            ir_attachment_ids = [ir_attachment[0] for ir_attachment in cr.fetchall()]
            if ir_attachment_ids:
                ir_attachment_obj.unlink(cr, uid, ir_attachment_ids, context=context)

        # invalidate the *whole* cache, since the orm does not handle all
        # changes made in the database, like cascading delete!
        recs.invalidate_cache()

        # recompute stored function fields that depended on the deleted records
        for order, obj_name, store_ids, fields in result_store:
            if obj_name == self._name:
                # the deleted ids themselves no longer need recomputation
                effective_store_ids = set(store_ids) - set(ids)
            else:
                effective_store_ids = store_ids
            if effective_store_ids:
                obj = self.pool[obj_name]
                cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
                rids = map(lambda x: x[0], cr.fetchall())
                if rids:
                    obj._store_set_values(cr, uid, rids, fields, context)

        # recompute new-style fields
        recs.recompute()

        return True
#
# TODO: Validate
#
    @api.multi
    def write(self, vals):
        """ write(vals)

        Updates all records in the current set with the provided values.

        :param dict vals: fields to update and the value to set on them e.g::

                {'foo': 1, 'bar': "Qux"}

            will set the field ``foo`` to ``1`` and the field ``bar`` to
            ``"Qux"`` if those are valid (otherwise it will trigger an error).

        :raise AccessError: * if user has no write rights on the requested object
                            * if user tries to bypass access rules for write on the requested object
        :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)

        * For numeric fields (:class:`~openerp.fields.Integer`,
          :class:`~openerp.fields.Float`) the value should be of the
          corresponding type
        * For :class:`~openerp.fields.Boolean`, the value should be a
          :class:`python:bool`
        * For :class:`~openerp.fields.Selection`, the value should match the
          selection values (generally :class:`python:str`, sometimes
          :class:`python:int`)
        * For :class:`~openerp.fields.Many2one`, the value should be the
          database identifier of the record to set
        * Other non-relational fields use a string for value

          .. danger::

              for historical and compatibility reasons,
              :class:`~openerp.fields.Date` and
              :class:`~openerp.fields.Datetime` fields use strings as values
              (written and read) rather than :class:`~python:datetime.date` or
              :class:`~python:datetime.datetime`. These date strings are
              UTC-only and formatted according to
              :const:`openerp.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
              :const:`openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`
        * .. _openerp/models/relationals/format:

          :class:`~openerp.fields.One2many` and
          :class:`~openerp.fields.Many2many` use a special "commands" format to
          manipulate the set of records stored in/associated with the field.

          This format is a list of triplets executed sequentially, where each
          triplet is a command to execute on the set of records. Not all
          commands apply in all situations. Possible commands are:

          ``(0, _, values)``
              adds a new record created from the provided ``value`` dict.
          ``(1, id, values)``
              updates an existing record of id ``id`` with the values in
              ``values``. Can not be used in :meth:`~.create`.
          ``(2, id, _)``
              removes the record of id ``id`` from the set, then deletes it
              (from the database). Can not be used in :meth:`~.create`.
          ``(3, id, _)``
              removes the record of id ``id`` from the set, but does not
              delete it. Can not be used on
              :class:`~openerp.fields.One2many`. Can not be used in
              :meth:`~.create`.
          ``(4, id, _)``
              adds an existing record of id ``id`` to the set. Can not be
              used on :class:`~openerp.fields.One2many`.
          ``(5, _, _)``
              removes all records from the set, equivalent to using the
              command ``3`` on every record explicitly. Can not be used on
              :class:`~openerp.fields.One2many`. Can not be used in
              :meth:`~.create`.
          ``(6, _, ids)``
              replaces all existing records in the set by the ``ids`` list,
              equivalent to using the command ``5`` followed by a command
              ``4`` for each ``id`` in ``ids``. Can not be used on
              :class:`~openerp.fields.One2many`.

          .. note:: Values marked as ``_`` in the list above are ignored and
                    can be anything, generally ``0`` or ``False``.
        """
        if not self:
            return True

        self._check_concurrency(self._ids)
        self.check_access_rights('write')

        # No user-driven update of these columns
        for field in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
            vals.pop(field, None)

        # split up fields into old-style and pure new-style ones; a field may
        # appear in both groups (column-backed AND with an inverse method)
        old_vals, new_vals, unknown = {}, {}, []
        for key, val in vals.iteritems():
            field = self._fields.get(key)
            if field:
                if field.column or field.inherited:
                    old_vals[key] = val
                if field.inverse and not field.inherited:
                    new_vals[key] = val
            else:
                unknown.append(key)

        if unknown:
            _logger.warning("%s.write() with unknown fields: %s", self._name, ', '.join(sorted(unknown)))

        # write old-style fields with (low-level) method _write
        if old_vals:
            self._write(old_vals)

        # put the values of pure new-style fields into cache, and inverse them
        if new_vals:
            for record in self:
                record._cache.update(record._convert_to_cache(new_vals, update=True))
            for key in new_vals:
                self._fields[key].determine_inverse(self)

        return True
def _write(self, cr, user, ids, vals, context=None):
    # Low-level implementation of write(): updates SQL columns in place,
    # delegates _inherits fields to the parent model, maintains the
    # parent_left/parent_right nested-set tree, recomputes stored function
    # fields and finally steps the attached workflows.
    if not context:
        context = {}

    readonly = None
    self.check_field_access_rights(cr, user, 'write', vals.keys())
    # ids of o2m/m2m sub-records removed with command 2 (delete); their
    # stored-function recomputation is skipped later since they are gone
    deleted_related = defaultdict(list)
    for field in vals.keys():
        fobj = None
        if field in self._columns:
            fobj = self._columns[field]
        elif field in self._inherit_fields:
            fobj = self._inherit_fields[field][2]
        if not fobj:
            continue
        if fobj._type in ['one2many', 'many2many'] and vals[field]:
            for wtuple in vals[field]:
                if isinstance(wtuple, (tuple, list)) and wtuple[0] == 2:
                    deleted_related[fobj._obj].append(wtuple[1])
        groups = fobj.write

        if groups:
            # field restricted by groups: silently drop the value unless
            # the user belongs to at least one of the allowed groups
            edit = False
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
                    edit = True
                    break

            if not edit:
                vals.pop(field)

    # store triggers to run for the fields being written (computed before
    # the write, as triggers may depend on the pre-write values)
    result = self._store_get_values(cr, user, ids, vals.keys(), context) or []

    # for recomputing new-style fields
    recs = self.browse(cr, user, ids, context)
    modified_fields = list(vals)
    if self._log_access:
        modified_fields += ['write_date', 'write_uid']
    recs.modified(modified_fields)

    parents_changed = []
    parent_order = self._parent_order or self._order
    if self._parent_store and (self._parent_name in vals) and not context.get('defer_parent_store_computation'):
        # The parent_left/right computation may take up to
        # 5 seconds. No need to recompute the values if the
        # parent is the same.
        # Note: to respect parent_order, nodes must be processed in
        # order, so ``parents_changed`` must be ordered properly.
        parent_val = vals[self._parent_name]
        if parent_val:
            query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
                    (self._table, self._parent_name, self._parent_name, parent_order)
            cr.execute(query, (tuple(ids), parent_val))
        else:
            query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
                    (self._table, self._parent_name, parent_order)
            cr.execute(query, (tuple(ids),))
        parents_changed = map(operator.itemgetter(0), cr.fetchall())

    updates = []            # list of (column, expr) or (column, pattern, value)
    upd_todo = []           # columns with a custom 'set' (non classic-write)
    updend = []             # fields inherited via _inherits, written on parent
    direct = []             # fields written directly by the UPDATE below
    totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
    for field in vals:
        ffield = self._fields.get(field)
        if ffield and ffield.deprecated:
            _logger.warning('Field %s.%s is deprecated: %s', self._name, field, ffield.deprecated)
        if field in self._columns:
            column = self._columns[field]
            if hasattr(column, 'selection') and vals[field]:
                self._check_selection_field_value(cr, user, field, vals[field], context=context)
            if column._classic_write and not hasattr(column, '_fnct_inv'):
                if (not totranslate) or not column.translate:
                    # translated columns are written via ir.translation below
                    updates.append((field, '%s', column._symbol_set[1](vals[field])))
                direct.append(field)
            else:
                upd_todo.append(field)
        else:
            updend.append(field)

    if self._log_access:
        updates.append(('write_uid', '%s', user))
        updates.append(('write_date', "(now() at time zone 'UTC')"))
        direct.append('write_uid')
        direct.append('write_date')

    if updates:
        self.check_access_rule(cr, user, ids, 'write', context=context)
        query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
            self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
        )
        params = tuple(u[2] for u in updates if len(u) > 2)
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute(query, params + (sub_ids,))
            if cr.rowcount != len(sub_ids):
                # a concurrent transaction removed some of the records
                raise MissingError(_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)

        if totranslate:
            # TODO: optimize
            for f in direct:
                if self._columns[f].translate:
                    src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
                    if not src_trans:
                        src_trans = vals[f]
                        # Inserting value to DB
                        context_wo_lang = dict(context, lang=None)
                        self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
                    self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)

    # invalidate and mark new-style fields to recompute; do this before
    # setting other fields, because it can require the value of computed
    # fields, e.g., a one2many checking constraints on records
    recs.modified(direct)

    # call the 'set' method of fields which are not classic_write
    upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)

    # default element in context must be removed when call a one2many or many2many
    rel_context = context.copy()
    for c in context.items():
        if c[0].startswith('default_'):
            del rel_context[c[0]]

    for field in upd_todo:
        for id in ids:
            result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []

    # for recomputing new-style fields
    recs.modified(upd_todo)

    # remaining values belong to _inherits parents: forward them per table
    unknown_fields = updend[:]
    for table in self._inherits:
        col = self._inherits[table]
        nids = []
        for sub_ids in cr.split_for_in_conditions(ids):
            cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
                       'where id IN %s', (sub_ids,))
            nids.extend([x[0] for x in cr.fetchall()])

        v = {}
        for val in updend:
            if self._inherit_fields[val][0] == table:
                v[val] = vals[val]
                unknown_fields.remove(val)
        if v:
            self.pool[table].write(cr, user, nids, v, context)

    if unknown_fields:
        _logger.warning(
            'No such field(s) in model %s: %s.',
            self._name, ', '.join(unknown_fields))

    # check Python constraints
    recs._validate_fields(vals)

    # TODO: use _order to set dest at the right position and not first node of parent
    # We can't defer parent_store computation because the stored function
    # fields that are computed may refer (directly or indirectly) to
    # parent_left/right (via a child_of domain)
    if parents_changed:
        if self.pool._init:
            # during registry initialization, defer the whole tree rebuild
            self.pool._init_parent[self._name] = True
        else:
            order = self._parent_order or self._order
            parent_val = vals[self._parent_name]
            if parent_val:
                clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
            else:
                clause, params = '%s IS NULL' % (self._parent_name,), ()

            for id in parents_changed:
                cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
                pleft, pright = cr.fetchone()
                # width of the subtree being moved
                distance = pright - pleft + 1

                # Positions of current siblings, to locate proper insertion point;
                # this can _not_ be fetched outside the loop, as it needs to be refreshed
                # after each update, in case several nodes are sequentially inserted one
                # next to the other (i.e computed incrementally)
                cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
                parents = cr.fetchall()

                # Find Position of the element
                position = None
                for (parent_pright, parent_id) in parents:
                    if parent_id == id:
                        break
                    position = parent_pright and parent_pright + 1 or 1

                # It's the first node of the parent
                if not position:
                    if not parent_val:
                        position = 1
                    else:
                        cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
                        position = cr.fetchone()[0] + 1

                if pleft < position <= pright:
                    # the new position falls inside the moved subtree itself
                    raise except_orm(_('UserError'), _('Recursivity Detected.'))

                # shift the gap open, then slide the subtree into it
                if pleft < position:
                    cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
                else:
                    cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                    cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))

            recs.invalidate_cache(['parent_left', 'parent_right'])

    # store triggers to run after the write (post-write values)
    result += self._store_get_values(cr, user, ids, vals.keys(), context)

    done = {}
    recs.env.recompute_old.extend(result)
    for order, model_name, ids_to_update, fields_to_recompute in sorted(recs.env.recompute_old):
        key = (model_name, tuple(fields_to_recompute))
        done.setdefault(key, {})
        # avoid to do several times the same computation
        todo = []
        for id in ids_to_update:
            if id not in done[key]:
                done[key][id] = True
                if id not in deleted_related[model_name]:
                    todo.append(id)
        self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)

    recs.env.clear_recompute_old()

    # recompute new-style fields
    if recs.env.recompute and context.get('recompute', True):
        recs.recompute()

    self.step_workflow(cr, user, ids, context=context)
    return True
#
# TODO: Should set perm to user.xxx
#
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
    """ create(vals) -> record

    Create a new record for the model and return it.

    The record is initialized with the values in ``vals``, completed
    with defaults from :meth:`~.default_get` where needed.

    :param dict vals:
        values for the model's fields, as a dictionary::

            {'field_name': field_value, ...}

        see :meth:`~.write` for details
    :return: new record created
    :raise AccessError: * if user has no create rights on the requested object
                        * if user tries to bypass access rules for create on the requested object
    :raise ValidateError: if user tries to enter invalid value for a field that is not in selection
    :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
    """
    self.check_access_rights('create')

    # complete with defaults, and strip system-managed columns
    vals = self._add_missing_default_values(vals)
    for reserved in itertools.chain(MAGIC_COLUMNS, ('parent_left', 'parent_right')):
        vals.pop(reserved, None)

    # dispatch each value: column-backed fields go through _create(),
    # pure new-style fields are applied through their inverse afterwards
    column_vals = {}
    inverse_vals = {}
    bad_names = []
    for name, value in vals.items():
        fdef = self._fields.get(name)
        if fdef is None:
            bad_names.append(name)
            continue
        if fdef.column or fdef.inherited:
            column_vals[name] = value
        if fdef.inverse and not fdef.inherited:
            inverse_vals[name] = value

    if bad_names:
        _logger.warning("%s.create() with unknown fields: %s", self._name, ', '.join(sorted(bad_names)))

    # create record with old-style (column) fields
    record = self.browse(self._create(column_vals))

    # feed pure new-style values through the cache and their inverses
    record._cache.update(record._convert_to_cache(inverse_vals))
    for name in inverse_vals:
        self._fields[name].determine_inverse(record)
    return record
def _create(self, cr, user, vals, context=None):
    # Low-level implementation of create(): creates/links the _inherits
    # parent records, builds the INSERT statement column by column,
    # maintains the parent_left/right nested-set tree, runs the custom
    # 'set' handlers and triggers stored-field recomputation + workflow.
    if not context:
        context = {}

    if self.is_transient():
        # opportunistically garbage-collect expired transient records
        self._transient_vacuum(cr, user)

    # values to create (or update) on each _inherits parent model
    tocreate = {}
    for v in self._inherits:
        if self._inherits[v] not in vals:
            tocreate[v] = {}
        else:
            tocreate[v] = {'id': vals[self._inherits[v]]}

    updates = [
        # list of column assignments defined as tuples like:
        #   (column_name, format_string, column_value)
        #   (column_name, sql_formula)
        # Those tuples will be used by the string formatting for the INSERT
        # statement below.
        ('id', "nextval('%s')" % self._sequence),
    ]

    upd_todo = []
    unknown_fields = []
    for v in vals.keys():
        if v in self._inherit_fields and v not in self._columns:
            # purely inherited field: route the value to the parent model
            (table, col, col_detail, original_parent) = self._inherit_fields[v]
            tocreate[table][v] = vals[v]
            del vals[v]
        else:
            if (v not in self._inherit_fields) and (v not in self._columns):
                del vals[v]
                unknown_fields.append(v)
    if unknown_fields:
        _logger.warning(
            'No such field(s) in model %s: %s.',
            self._name, ', '.join(unknown_fields))

    for table in tocreate:
        if self._inherits[table] in vals:
            del vals[self._inherits[table]]

        record_id = tocreate[table].pop('id', None)

        if record_id is None or not record_id:
            # no parent given: create it
            record_id = self.pool[table].create(cr, user, tocreate[table], context=context)
        else:
            # parent given: update it with the inherited values
            self.pool[table].write(cr, user, [record_id], tocreate[table], context=context)

        updates.append((self._inherits[table], '%s', record_id))

    #Start : Set bool fields to be False if they are not touched(to make search more powerful)
    bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']

    for bool_field in bool_fields:
        if bool_field not in vals:
            vals[bool_field] = False
    #End

    for field in vals.keys():
        fobj = None
        if field in self._columns:
            fobj = self._columns[field]
        else:
            fobj = self._inherit_fields[field][2]
        if not fobj:
            continue
        groups = fobj.write
        if groups:
            # field restricted by groups: drop the value unless the user
            # belongs to at least one of the allowed groups
            edit = False
            for group in groups:
                module = group.split(".")[0]
                grp = group.split(".")[1]
                # parameterized query (same as in _write): safe w.r.t.
                # quoting/injection, unlike the previous %-interpolation
                cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s",
                           (grp, module, 'res.groups', user))
                readonly = cr.fetchall()
                if readonly[0][0] >= 1:
                    edit = True
                    break

            if not edit:
                vals.pop(field)
    for field in vals:
        current_field = self._columns[field]
        if current_field._classic_write:
            updates.append((field, '%s', current_field._symbol_set[1](vals[field])))

            #for the function fields that receive a value, we set them directly in the database
            #(they may be required), but we also need to trigger the _fct_inv()
            if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
                #TODO: this way to special case the related fields is really creepy but it shouldn't be changed at
                #one week of the release candidate. It seems the only good way to handle correctly this is to add an
                #attribute to make a field `really readonly´ and thus totally ignored by the create()... otherwise
                #if, for example, the related has a default value (for usability) then the fct_inv is called and it
                #may raise some access rights error. Changing this is a too big change for now, and is thus postponed
                #after the release but, definitively, the behavior shouldn't be different for related and function
                #fields.
                upd_todo.append(field)
        else:
            #TODO: this `if´ statement should be removed because there is no good reason to special case the fields
            #related. See the above TODO comment for further explanations.
            if not isinstance(current_field, fields.related):
                upd_todo.append(field)
        if field in self._columns \
                and hasattr(current_field, 'selection') \
                and vals[field]:
            self._check_selection_field_value(cr, user, field, vals[field], context=context)
    if self._log_access:
        updates.append(('create_uid', '%s', user))
        updates.append(('write_uid', '%s', user))
        updates.append(('create_date', "(now() at time zone 'UTC')"))
        updates.append(('write_date', "(now() at time zone 'UTC')"))

    # the list of tuples used in this formatting corresponds to
    # tuple(field_name, format, value)
    # In some case, for example (id, create_date, write_date) we does not
    # need to read the third value of the tuple, because the real value is
    # encoded in the second value (the format).
    cr.execute(
        """INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
            self._table,
            ', '.join('"%s"' % u[0] for u in updates),
            ', '.join(u[1] for u in updates)
        ),
        tuple([u[2] for u in updates if len(u) > 2])
    )

    id_new, = cr.fetchone()
    recs = self.browse(cr, user, id_new, context)

    if self._parent_store and not context.get('defer_parent_store_computation'):
        if self.pool._init:
            # during registry initialization, defer the whole tree rebuild
            self.pool._init_parent[self._name] = True
        else:
            parent = vals.get(self._parent_name, False)
            if parent:
                # insert after the last existing child of the parent,
                # falling back to just inside the parent when it has none
                cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
                pleft_old = None
                result_p = cr.fetchall()
                for (pleft,) in result_p:
                    if not pleft:
                        break
                    pleft_old = pleft
                if not pleft_old:
                    cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
                    pleft_old = cr.fetchone()[0]
                pleft = pleft_old
            else:
                # no parent: append at the very end of the tree
                cr.execute('select max(parent_right) from '+self._table)
                pleft = cr.fetchone()[0] or 0
            cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
            cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
            cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
            recs.invalidate_cache(['parent_left', 'parent_right'])

    # invalidate and mark new-style fields to recompute; do this before
    # setting other fields, because it can require the value of computed
    # fields, e.g., a one2many checking constraints on records
    recs.modified(self._fields)

    # call the 'set' method of fields which are not classic_write
    upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)

    # default element in context must be remove when call a one2many or many2many
    rel_context = context.copy()
    for c in context.items():
        if c[0].startswith('default_'):
            del rel_context[c[0]]

    result = []
    for field in upd_todo:
        result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []

    # for recomputing new-style fields
    recs.modified(upd_todo)

    # check Python constraints
    recs._validate_fields(vals)

    result += self._store_get_values(cr, user, [id_new],
                                     list(set(vals.keys() + self._inherits.values())),
                                     context)
    recs.env.recompute_old.extend(result)

    if recs.env.recompute and context.get('recompute', True):
        done = []
        for order, model_name, ids, fields2 in sorted(recs.env.recompute_old):
            if not (model_name, ids, fields2) in done:
                self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
                done.append((model_name, ids, fields2))
        recs.env.clear_recompute_old()
        # recompute new-style fields
        recs.recompute()

    if self._log_create and recs.env.recompute and context.get('recompute', True):
        message = self._description + \
            " '" + \
            self.name_get(cr, user, [id_new], context=context)[0][1] + \
            "' " + _("created.")
        self.log(cr, user, id_new, message, True, context=context)

    self.check_access_rule(cr, user, [id_new], 'create', context=context)
    self.create_workflow(cr, user, [id_new], context=context)
    return id_new
def _store_get_values(self, cr, uid, ids, fields, context):
    """Returns an ordered list of fields.function to call due to
       an update operation on ``fields`` of records with ``ids``,
       obtained by calling the 'store' triggers of these fields,
       as setup by their 'store' attribute.

       :return: [(priority, model_name, [record_ids,], [function_fields,])]
    """
    if fields is None: fields = []
    stored_functions = self.pool._store_function.get(self._name, [])

    # use indexed names for the details of the stored_functions:
    model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)

    # only keep store triggers that should be triggered for the ``fields``
    # being written to.
    triggers_to_compute = (
        f for f in stored_functions
        if not f[trigger_fields_] or set(fields).intersection(f[trigger_fields_])
    )

    to_compute_map = {}
    target_id_results = {}
    for store_trigger in triggers_to_compute:
        # memoize each target-ids function by identity so it runs only once
        target_func_id_ = id(store_trigger[target_ids_func_])
        if target_func_id_ not in target_id_results:
            # use admin user for accessing objects having rules defined on store fields
            target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
        target_ids = target_id_results[target_func_id_]

        # the compound key must consider the priority and model name
        key = (store_trigger[priority_], store_trigger[model_name_])
        for target_id in target_ids:
            to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))

    # Here to_compute_map looks like:
    # { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
    #   (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
    #   (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
    # }

    # Now we need to generate the batch function calls list
    # call_map =
    #   { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
    call_map = {}
    for ((priority,model), id_map) in to_compute_map.iteritems():
        trigger_ids_maps = {}
        # function_ids_maps =
        #   { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
        for target_id, triggers in id_map.iteritems():
            trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
        for triggers, target_ids in trigger_ids_maps.iteritems():
            call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
                                                             [t[func_field_to_compute_] for t in triggers]))
    result = []
    if call_map:
        # flatten batches in ascending (priority, model) order so that
        # lower-priority computations run first
        result = reduce(operator.add, (call_map[k] for k in sorted(call_map)))
    return result
def _store_set_values(self, cr, uid, ids, fields, context):
"""Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
respecting ``multi`` attributes), and stores the resulting values in the database directly."""
if not ids:
return True
field_flag = False
field_dict = {}
if self._log_access:
cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[1]:
field_dict.setdefault(r[0], [])
res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
for i in self.pool._store_function.get(self._name, []):
if i[5]:
up_write_date = write_date + datetime.timedelta(hours=i[5])
if datetime.datetime.now() < up_write_date:
if i[1] in fields:
field_dict[r[0]].append(i[1])
if not field_flag:
field_flag = True
todo = {}
keys = []
for f in fields:
if self._columns[f]._multi not in keys:
keys.append(self._columns[f]._multi)
todo.setdefault(self._columns[f]._multi, [])
todo[self._columns[f]._multi].append(f)
for key in keys:
val = todo[key]
if key:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
for id, value in result.items():
if field_flag:
for f in value.keys():
if f in field_dict[id]:
value.pop(f)
updates = [] # list of (column, pattern, value)
for v in value:
if v not in val:
continue
column = self._columns[v]
if column._type == 'many2one':
try:
value[v] = value[v][0]
except:
pass
updates.append((v, '%s', column._symbol_set[1](value[v])))
if updates:
query = 'UPDATE "%s" SET %s WHERE id = %%s' % (
self._table, ','.join('"%s"=%s' % u[:2] for u in updates),
)
params = tuple(u[2] for u in updates)
cr.execute(query, params + (id,))
else:
for f in val:
column = self._columns[f]
# use admin user for accessing objects having rules defined on store fields
result = column.get(cr, self, ids, f, SUPERUSER_ID, context=context)
for r in result.keys():
if field_flag:
if r in field_dict.keys():
if f in field_dict[r]:
result.pop(r)
for id, value in result.items():
if column._type == 'many2one':
try:
value = value[0]
except:
pass
query = 'UPDATE "%s" SET "%s"=%%s WHERE id = %%s' % (
self._table, f,
)
cr.execute(query, (column._symbol_set[1](value), id))
# invalidate and mark new-style fields to recompute
self.browse(cr, uid, ids, context).modified(fields)
return True
# TODO: improve handling of NULL values here
def _where_calc(self, cr, user, domain, active_test=True, context=None):
    """Compute the WHERE clause needed to implement an OpenERP domain.

    :param domain: the domain to compute
    :type domain: list
    :param active_test: whether the default filtering of records with
                        ``active`` field set to ``False`` should be applied.
    :return: the query expressing the given domain as provided in domain
    :rtype: osv.query.Query
    """
    context = context or {}

    # work on a copy so the caller's domain list is left untouched
    domain = list(domain)
    # if the object has a field named 'active', filter out all inactive
    # records unless they were explicitly asked for
    if active_test and context.get('active_test', True) and 'active' in self._fields:
        # the item[0] trick below works for domain items and '&'/'|'/'!'
        # operators too
        mentions_active = any(item[0] == 'active' for item in domain)
        if not domain:
            domain = [('active', '=', 1)]
        elif not mentions_active:
            domain.insert(0, ('active', '=', 1))

    if not domain:
        # no filtering at all: plain query on the model's table
        return Query(['"%s"' % self._table], [], [])

    e = expression.expression(cr, user, domain, self, context)
    where_clause, where_params = e.to_sql()
    clauses = [where_clause] if where_clause else []
    return Query(e.get_tables(), clauses, where_params)
def _check_qorder(self, word):
    # Reject order specifications that do not match the expected
    # "field [asc|desc], ..." grammar (also guards the ORDER BY clause
    # against arbitrary SQL).
    if regex_order.match(word):
        return True
    raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
    """Add what's missing in ``query`` to implement all appropriate ir.rules
      (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)

    :param query: the current query object
    """
    if uid == SUPERUSER_ID:
        # the superuser bypasses all record rules
        return

    def apply_rule(added_clause, added_params, added_tables, parent_model=None):
        """ Mutate ``query`` in place with the given rule clause/params/tables.

            :param parent_model: name of the parent model, if the added
                clause comes from a parent model
            :return: True if a clause was actually added, False otherwise
        """
        if added_clause:
            if parent_model:
                # as inherited rules are being applied, we need to add the missing JOIN
                # to reach the parent table (if it was not JOINed yet in the query)
                parent_alias = self._inherits_join_add(self, parent_model, query)
                # inherited rules are applied on the external table -> need to get the alias and replace
                parent_table = self.pool[parent_model]._table
                added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
                # change references to parent_table to parent_alias, because we now use the alias to refer to the table
                new_tables = []
                for table in added_tables:
                    # table is just a table name -> switch to the full alias
                    if table == '"%s"' % parent_table:
                        new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
                    # table is already a full statement -> replace reference to the table to its alias, is correct with the way aliases are generated
                    else:
                        new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
                added_tables = new_tables
            query.where_clause += added_clause
            query.where_clause_params += added_params
            for table in added_tables:
                if table not in query.tables:
                    query.tables.append(table)
            return True
        return False

    # apply main rules on the object
    rule_obj = self.pool.get('ir.rule')
    rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
    apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)

    # apply ir.rules from the parents (through _inherits)
    for inherited_model in self._inherits:
        rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
        apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
                   parent_model=inherited_model)
def _generate_m2o_order_by(self, alias, order_field, query, reverse_direction, seen):
    """
    Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
    either native m2o fields or function/related fields that are stored, including
    intermediate JOINs for inheritance if required.

    :return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
    """
    if order_field not in self._columns and order_field in self._inherit_fields:
        # also add missing joins for reaching the table containing the m2o field
        order_field_column = self._inherit_fields[order_field][2]
        qualified_field = self._inherits_join_calc(alias, order_field, query)
        # split the qualified name back into (alias, column) for the join below
        alias, order_field = qualified_field.replace('"', '').split('.', 1)
    else:
        order_field_column = self._columns[order_field]

    assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
    if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
        # non-stored function/related m2o has no SQL column to sort on
        _logger.debug("Many2one function/related fields must be stored "
                      "to be used as ordering fields! Ignoring sorting for %s.%s",
                      self._name, order_field)
        return []

    # figure out the applicable order_by for the m2o
    dest_model = self.pool[order_field_column._obj]
    m2o_order = dest_model._order
    if not regex_order.match(m2o_order):
        # _order is complex, can't use it here, so we default to _rec_name
        m2o_order = dest_model._rec_name

    # Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
    # as we don't want to exclude results that have NULL values for the m2o
    join = (alias, dest_model._table, order_field, 'id', order_field)
    dst_alias, dst_alias_statement = query.add_join(join, implicit=False, outer=True)
    # recurse into the comodel to expand its own _order specification
    return dest_model._generate_order_by_inner(dst_alias, m2o_order, query,
                                               reverse_direction=reverse_direction, seen=seen)
def _generate_order_by_inner(self, alias, order_spec, query, reverse_direction=False, seen=None):
    # Translate a validated "field [dir], field [dir]" specification into a
    # list of SQL ORDER BY elements qualified with ``alias``, recursing into
    # many2one comodels via _generate_m2o_order_by(). ``seen`` is the set of
    # (model, comodel, field) keys already expanded, guarding against cycles.
    if seen is None:
        seen = set()
    order_by_elements = []
    self._check_qorder(order_spec)
    for order_part in order_spec.split(','):
        order_split = order_part.strip().split(' ')
        order_field = order_split[0].strip()
        order_direction = order_split[1].strip().upper() if len(order_split) == 2 else ''
        if reverse_direction:
            # the caller (an m2o expansion) asked for the opposite direction
            order_direction = 'ASC' if order_direction == 'DESC' else 'DESC'
        do_reverse = order_direction == 'DESC'
        order_column = None
        inner_clauses = []
        add_dir = False
        if order_field == 'id':
            order_by_elements.append('"%s"."%s" %s' % (alias, order_field, order_direction))
        elif order_field in self._columns:
            order_column = self._columns[order_field]
            if order_column._classic_read:
                # plain stored column: sort on it directly
                inner_clauses = ['"%s"."%s"' % (alias, order_field)]
                add_dir = True
            elif order_column._type == 'many2one':
                key = (self._name, order_column._obj, order_field)
                if key not in seen:
                    seen.add(key)
                    inner_clauses = self._generate_m2o_order_by(alias, order_field, query, do_reverse, seen)
            else:
                continue  # ignore non-readable or "non-joinable" fields
        elif order_field in self._inherit_fields:
            # field lives on an _inherits parent: qualify through the join
            parent_obj = self.pool[self._inherit_fields[order_field][3]]
            order_column = parent_obj._columns[order_field]
            if order_column._classic_read:
                inner_clauses = [self._inherits_join_calc(alias, order_field, query)]
                add_dir = True
            elif order_column._type == 'many2one':
                key = (parent_obj._name, order_column._obj, order_field)
                if key not in seen:
                    seen.add(key)
                    inner_clauses = self._generate_m2o_order_by(alias, order_field, query, do_reverse, seen)
            else:
                continue  # ignore non-readable or "non-joinable" fields
        else:
            raise ValueError(_("Sorting field %s not found on model %s") % (order_field, self._name))
        if order_column and order_column._type == 'boolean':
            # make NULL booleans sort like false, for stable ordering
            inner_clauses = ["COALESCE(%s, false)" % inner_clauses[0]]
        for clause in inner_clauses:
            if add_dir:
                order_by_elements.append("%s %s" % (clause, order_direction))
            else:
                order_by_elements.append(clause)
    return order_by_elements
def _generate_order_by(self, order_spec, query):
"""
Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
:raise" except_orm in case order_spec is malformed
"""
order_by_clause = ''
order_spec = order_spec or self._order
if order_spec:
order_by_elements = self._generate_order_by_inner(self._table, order_spec, query)
if order_by_elements:
order_by_clause = ",".join(order_by_elements)
return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
    """Private implementation of search(), allowing an explicit uid for the
    access-right check (ir.model.access only -- ir.rules always use ``user``).

    Useful e.g. when filling the selection list for a drop-down without
    triggering access-right errors, by passing ``access_rights_uid=1``.
    This is acceptable security-wise because this method is private and not
    callable through XML-RPC.

    :param access_rights_uid: optional user ID to use when checking access
        rights (not for ir.rules, this is only for ir.model.access)
    :return: list of matching record ids (duplicates removed, order kept),
        or the number of matches if ``count`` is true
    """
    if context is None:
        context = {}
    self.check_access_rights(cr, access_rights_uid or user, 'read')
    # For transient models, restrict access to the current user, except for the super-user
    if self.is_transient() and self._log_access and user != SUPERUSER_ID:
        args = expression.AND(([('create_uid', '=', user)], args or []))
    # translate the domain into SQL, then stack record rules on top of it
    query = self._where_calc(cr, user, args, context=context)
    self._apply_ir_rules(cr, user, query, 'read', context=context)
    order_by = self._generate_order_by(order, query)
    from_clause, where_clause, where_clause_params = query.get_sql()
    where_str = where_clause and (" WHERE %s" % where_clause) or ''
    if count:
        # Ignore order, limit and offset when just counting, they don't make sense and could
        # hurt performance
        query_str = 'SELECT count(1) FROM ' + from_clause + where_str
        cr.execute(query_str, where_clause_params)
        res = cr.fetchone()
        return res[0]
    limit_str = limit and ' limit %d' % limit or ''
    offset_str = offset and ' offset %d' % offset or ''
    query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
    cr.execute(query_str, where_clause_params)
    res = cr.fetchall()
    # TDE note: with auto_join, we could have several lines about the same result
    # i.e. a lead with several unread messages; we uniquify the result using
    # a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
    def _uniquify_list(seq):
        seen = set()
        return [x for x in seq if x not in seen and not seen.add(x)]
    return _uniquify_list([x[0] for x in res])
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
# a char field
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
    """Return the distinct values ever entered for ``field`` (used e.g. by
    the client when the user hits enter on a char field).

    Inherited fields delegate to the parent model that actually owns the
    column; otherwise the column object performs the lookup itself.
    """
    domain = args or []
    if field in self._inherit_fields:
        parent_model = self.pool[self._inherit_fields[field][0]]
        return parent_model.distinct_field_get(cr, uid, field, value, domain, offset, limit)
    return self._columns[field].search(cr, self, domain, field, value, offset, limit, uid)
def copy_data(self, cr, uid, id, default=None, context=None):
    """Copy the given record's data with all its field values.

    :param cr: database cursor
    :param uid: current user id
    :param id: id of the record to copy
    :param default: field values to override in the original values of the copied record
    :type default: dictionary
    :param context: context arguments, like lang, time zone
    :type context: dictionary
    :return: dictionary containing the field values to create the copy, or
        ``None`` if this record was already copied during the current call
        chain (circular relationship guard)
    """
    if context is None:
        context = {}
    # avoid recursion through already copied records in case of circular relationship
    seen_map = context.setdefault('__copy_data_seen', {})
    if id in seen_map.setdefault(self._name, []):
        return
    seen_map[self._name].append(id)
    if default is None:
        default = {}
    # reset 'state' to its default unless explicitly overridden by the caller
    if 'state' not in default:
        if 'state' in self._defaults:
            if callable(self._defaults['state']):
                default['state'] = self._defaults['state'](self, cr, uid, context)
            else:
                default['state'] = self._defaults['state']
    # build a black list of fields that should not be copied
    blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
    whitelist = set(name for name, field in self._fields.iteritems() if not field.inherited)
    def blacklist_given_fields(obj):
        # blacklist the fields that are given by inheritance
        for other, field_to_other in obj._inherits.items():
            blacklist.add(field_to_other)
            if field_to_other in default:
                # all the fields of 'other' are given by the record: default[field_to_other],
                # except the ones redefined in self
                blacklist.update(set(self.pool[other]._fields) - whitelist)
            else:
                blacklist_given_fields(self.pool[other])
        # blacklist deprecated fields
        for name, field in obj._fields.iteritems():
            if field.deprecated:
                blacklist.add(name)
    blacklist_given_fields(self)
    fields_to_copy = dict((f,fi) for f, fi in self._fields.iteritems()
                         if fi.copy
                         if f not in default
                         if f not in blacklist)
    data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
    if data:
        data = data[0]
    else:
        raise IndexError( _("Record #%d of %s not found, cannot copy!") %( id, self._name))
    res = dict(default)
    for f, field in fields_to_copy.iteritems():
        if field.type == 'many2one':
            # read() returns (id, display_name) pairs; keep only the id
            res[f] = data[f] and data[f][0]
        elif field.type == 'one2many':
            other = self.pool[field.comodel_name]
            # duplicate following the order of the ids because we'll rely on
            # it later for copying translations in copy_translation()!
            lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
            # the lines are duplicated using the wrong (old) parent, but then
            # are reassigned to the correct one thanks to the (0, 0, ...)
            res[f] = [(0, 0, line) for line in lines if line]
        elif field.type == 'many2many':
            res[f] = [(6, 0, data[f])]
        else:
            res[f] = data[f]
    return res
def copy_translations(self, cr, uid, old_id, new_id, context=None):
    """Duplicate the ir.translation entries of record ``old_id`` onto
    ``new_id``, recursing into one2many lines (relying on the id ordering
    established by :meth:`copy_data`).
    """
    if context is None:
        context = {}
    # avoid recursion through already copied records in case of circular relationship
    seen_map = context.setdefault('__copy_translations_seen',{})
    if old_id in seen_map.setdefault(self._name,[]):
        return
    seen_map[self._name].append(old_id)
    trans_obj = self.pool.get('ir.translation')
    for field_name, field in self._fields.iteritems():
        if not field.copy:
            continue
        # removing the lang to compare untranslated values
        context_wo_lang = dict(context, lang=None)
        old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
        # we must recursively copy the translations for o2o and o2m
        if field.type == 'one2many':
            target_obj = self.pool[field.comodel_name]
            # here we rely on the order of the ids to match the translations
            # as foreseen in copy_data()
            old_children = sorted(r.id for r in old_record[field_name])
            new_children = sorted(r.id for r in new_record[field_name])
            for (old_child, new_child) in zip(old_children, new_children):
                target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
        # and for translatable fields we keep them for copy
        elif getattr(field, 'translate', False):
            if field_name in self._columns:
                # field owned by this model: translate under our own name
                trans_name = self._name + "," + field_name
                target_id = new_id
                source_id = old_id
            elif field_name in self._inherit_fields:
                trans_name = self._inherit_fields[field_name][0] + "," + field_name
                # get the id of the parent record to set the translation
                inherit_field_name = self._inherit_fields[field_name][1]
                target_id = new_record[inherit_field_name].id
                source_id = old_record[inherit_field_name].id
            else:
                continue
            trans_ids = trans_obj.search(cr, uid, [
                    ('name', '=', trans_name),
                    ('res_id', '=', source_id)
            ])
            user_lang = context.get('lang')
            for record in trans_obj.read(cr, uid, trans_ids, context=context):
                del record['id']
                # remove source to avoid triggering _set_src
                del record['source']
                record.update({'res_id': target_id})
                if user_lang and user_lang == record['lang']:
                    # 'source' to force the call to _set_src
                    # 'value' needed if value is changed in copy(), want to see the new_value
                    record['source'] = old_record[field_name]
                    record['value'] = new_record[field_name]
                trans_obj.create(cr, uid, record, context=context)
@api.returns('self', lambda value: value.id)
def copy(self, cr, uid, id, default=None, context=None):
    """ copy(default=None)

    Duplicate the record ``id``, overriding the original values with
    ``default``, then duplicate its translations onto the new record.

    :param dict default: dictionary of field values to override in the
        original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
    :returns: new record
    """
    # work on a private copy of the context: copy_data/copy_translations
    # store recursion-guard state inside it
    context = dict(context or {})
    values = self.copy_data(cr, uid, id, default, context)
    new_id = self.create(cr, uid, values, context)
    self.copy_translations(cr, uid, id, new_id, context)
    return new_id
@api.multi
@api.returns('self')
def exists(self):
    """ exists() -> records

    Return the subset of records in ``self`` that exist in the database,
    and mark deleted records as such in cache. It can be used as a test
    on records::

        if record.exists():
            ...

    By convention, new records (placeholder ids) are returned as existing.
    """
    # split real database ids from placeholder ids of new records
    ids, new_ids = [], []
    for i in self._ids:
        (ids if isinstance(i, (int, long)) else new_ids).append(i)
    if not ids:
        return self
    query = """SELECT id FROM "%s" WHERE id IN %%s""" % self._table
    self._cr.execute(query, [tuple(ids)])
    ids = [r[0] for r in self._cr.fetchall()]
    existing = self.browse(ids + new_ids)
    if len(existing) < len(self):
        # mark missing records in cache with a failed value
        exc = MissingError(_("Record does not exist or has been deleted."))
        (self - existing)._cache.update(FailedValue(exc))
    return existing
def check_recursion(self, cr, uid, ids, context=None, parent=None):
    """Deprecated public wrapper; logs a warning and delegates unchanged to
    :meth:`_check_recursion`.
    """
    _logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
        self._name)
    assert parent is None or parent in self._columns or parent in self._inherit_fields,\
        "The 'parent' parameter passed to check_recursion() must be None or a valid field name"
    return self._check_recursion(cr, uid, ids, context, parent)
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
    """
    Verify that there is no loop in a hierarchical structure of records,
    by following the parent relationship using the **parent** field until
    a loop is detected or until a top-level record is found.

    :param cr: database cursor
    :param uid: current user id
    :param ids: list of ids of records to check
    :param parent: optional parent field name (default: ``self._parent_name = parent_id``)
    :return: **True** if the operation can proceed safely, or **False** if
        an infinite loop is detected.
    """
    if not parent:
        parent = self._parent_name
    # must ignore 'active' flag, ir.rules, etc. => direct SQL query
    query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
    for id in ids:
        current_id = id
        # Track every id seen while climbing, so that a cycle that does not
        # pass through the starting id (e.g. id -> a -> b -> a) is detected
        # instead of looping forever (the previous implementation only
        # compared against the starting id and could hang on such data).
        visited = set([id])
        while current_id is not None:
            cr.execute(query, (current_id,))
            result = cr.fetchone()
            current_id = result[0] if result else None
            if current_id in visited:
                return False
            # adding None is harmless: the loop exits on the next test
            visited.add(current_id)
    return True
def _check_m2m_recursion(self, cr, uid, ids, field_name):
    """Verify that there is no loop through the self-referencing many2many
    ``field_name``, by breadth-first expansion of the relation from ``ids``
    until either a starting record is reached again (loop) or the frontier
    becomes empty.

    :param cr: database cursor
    :param uid: current user id
    :param ids: list of ids of records to check
    :param field_name: many2many field to check (must relate the model to itself and be stored)
    :return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
    :raises ValueError: if ``field_name`` is not a stored self-referencing many2many
    """
    field = self._fields.get(field_name)
    if not (field and field.type == 'many2many' and
            field.comodel_name == self._name and field.store):
        # field must be a many2many on itself
        raise ValueError('invalid field_name: %r' % (field_name,))
    query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % \
                (field.column2, field.relation, field.column1)
    ids_parent = ids[:]
    # NOTE(review): a cycle that never passes through ``ids`` keeps the
    # frontier non-empty forever and this loop would not terminate — same
    # caveat as _check_recursion; confirm data constraints before relying on it.
    while ids_parent:
        ids_parent2 = []
        # chunk the IN clause so no query exceeds cr.IN_MAX parameters
        for i in range(0, len(ids_parent), cr.IN_MAX):
            j = i + cr.IN_MAX
            sub_ids_parent = ids_parent[i:j]
            cr.execute(query, (tuple(sub_ids_parent),))
            ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
        ids_parent = ids_parent2
        for i in ids_parent:
            if i in ids:
                return False
    return True
def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
    """Retrieve the External ID(s) of any database record.

    **Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``

    :return: map of ids to the list of their fully qualified External IDs
        in the form ``module.key``, or an empty list when there's no External
        ID for a record, e.g.::

            { 'id': ['module.ext_id', 'module.ext_id_bis'],
              'id2': [] }
    """
    ir_model_data = self.pool.get('ir.model.data')
    data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
    data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
    result = {}
    for id in ids:
        # can't use dict.fromkeys() as the list would be shared!
        result[id] = []
    for record in data_results:
        result[record['res_id']].append('%(module)s.%(name)s' % record)
    return result
def get_external_id(self, cr, uid, ids, *args, **kwargs):
    """Retrieve the External ID of any database record, if there is one.

    This method works as a possible implementation for a function field, to
    be able to add it to any model object easily, referencing it as
    ``Model.get_external_id``. When multiple External IDs exist for a
    record, only one of them is returned (randomly).

    :return: map of ids to their fully qualified XML ID, defaulting to an
        empty string when there's none (to be usable as a function field),
        e.g.::

            { 'id': 'module.ext_id',
              'id2': '' }
    """
    results = self._get_xml_ids(cr, uid, ids)
    # collapse each list of xml-ids to its first element, in place
    for key, xml_ids in results.items():
        results[key] = xml_ids[0] if xml_ids else ''
    return results
# backwards compatibility: deprecated aliases kept for old callers
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
def print_report(self, cr, uid, ids, name, data, context=None):
    """Render the report ``name`` for the given IDs.

    The report must be registered for this very model; asking for a report
    bound to another model is a programming error.
    """
    registry = self.pool['ir.actions.report.xml']
    report = registry._lookup_report(cr, name)
    assert self._name == report.table
    return report.create(cr, uid, ids, data, context)
# Transience
@classmethod
def is_transient(cls):
    """ Return whether the model is transient.

    See :class:`TransientModel`.
    """
    return cls._transient
def _transient_clean_rows_older_than(self, cr, seconds):
    """Unlink transient records whose last activity (write_date, falling
    back to create_date) is older than ``seconds`` seconds.
    """
    assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
    # Never delete rows used in last 5 minutes
    seconds = max(seconds, 300)
    query = ("SELECT id FROM " + self._table + " WHERE"
        " COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
        " < ((now() at time zone 'UTC') - interval %s)")
    cr.execute(query, ("%s seconds" % seconds,))
    ids = [x[0] for x in cr.fetchall()]
    # deletion runs as superuser: transient rows belong to various users
    self.unlink(cr, SUPERUSER_ID, ids)
def _transient_clean_old_rows(self, cr, max_count):
    """If the table holds more than ``max_count`` rows, vacuum all rows
    older than 5 minutes (see :meth:`_transient_vacuum` for the rationale
    of wiping more than just the excess).
    """
    # Check how many rows we have in the table
    cr.execute("SELECT count(*) AS row_count FROM " + self._table)
    res = cr.fetchall()
    if res[0][0] <= max_count:
        return  # max not reached, nothing to do
    self._transient_clean_rows_older_than(cr, 300)
def _transient_vacuum(self, cr, uid, force=False):
    """Clean the transient records.

    This unlinks old records from the transient model tables whenever the
    "_transient_max_count" or "_max_age" conditions (if any) are reached.
    Actual cleaning will happen only once every "_transient_check_time" calls.
    This means this method can be called frequently (e.g. whenever
    a new record is created).

    Example with both max_hours and max_count active:

    Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
    table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
    5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.

    - age based vacuum will leave the 22 rows created/changed in the last 12 minutes
    - count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
      would immediately cause the maximum to be reached again.
    - the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
    """
    assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
    _transient_check_time = 20          # arbitrary limit on vacuum executions
    self._transient_check_count += 1
    if not force and (self._transient_check_count < _transient_check_time):
        return True  # no vacuum cleaning this time
    self._transient_check_count = 0
    # Age-based expiration
    if self._transient_max_hours:
        self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
    # Count-based expiration
    if self._transient_max_count:
        self._transient_clean_old_rows(cr, self._transient_max_count)
    return True
def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
    """Serialize one2many and many2many commands into record dictionaries
    (as if all the records came from the database via a read()). This
    method is aimed at onchange methods on one2many and many2many fields.

    Because commands might be creation commands, not all record dicts
    will contain an ``id`` field. Commands matching an existing record
    will have an ``id``.

    :param field_name: name of the one2many or many2many field matching the commands
    :type field_name: str
    :param commands: one2many or many2many commands to execute on ``field_name``
    :type commands: list((int|False, int|False, dict|False))
    :param fields: list of fields to read from the database, when applicable
    :type fields: list(str)
    :returns: records in a shape similar to that returned by ``read()``
        (except records may be missing the ``id`` field if they don't exist in db)
    :rtype: list(dict)
    """
    result = []        # values of new records first, then records read from db
    to_read = []       # ids of existing records to read
    updates = {}       # {id: vals} pending updates on particular records
    for command in commands or []:
        if not isinstance(command, (list, tuple)):
            # a bare id links an existing record
            to_read.append(command)
            continue
        code = command[0]
        if code == 0:           # create with the given values
            result.append(command[2])
        elif code == 1:         # update an existing record
            to_read.append(command[1])
            updates.setdefault(command[1], {}).update(command[2])
        elif code in (2, 3):    # delete / unlink: drop the id
            to_read = [rec_id for rec_id in to_read if rec_id != command[1]]
        elif code == 4:         # link an existing record
            to_read.append(command[1])
        elif code == 5:         # unlink all
            result, to_read = [], []
        elif code == 6:         # replace with the given id list
            result, to_read = [], list(command[2])
    # read the records and apply the pending updates
    comodel = self.pool[self._fields[field_name].comodel_name]
    for record in comodel.read(cr, uid, to_read, fields=fields, context=context):
        record.update(updates.get(record['id'], {}))
        result.append(record)
    return result
# for backward compatibility: deprecated alias of resolve_2many_commands
resolve_o2m_commands_to_record_dicts = resolve_2many_commands
def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
    """Perform a ``search()`` followed by a ``read()``.

    :param cr: database cursor
    :param uid: current user id
    :param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
    :param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
    :param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
    :param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
    :param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
    :param context: context arguments.
    :return: List of dictionaries containing the asked fields, in search order.
    :rtype: List of dictionaries.
    """
    matched_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
    if not matched_ids:
        return []
    if fields and fields == ['id']:
        # shortcut read if we only want the ids
        return [{'id': rec_id} for rec_id in matched_ids]
    # read() ignores active_test, but it would forward it to any downstream search call
    # (e.g. for x2m or function fields), and this is not the desired behavior, the flag
    # was presumably only meant for the main search().
    # TODO: Move this to read() directly?
    read_ctx = dict(context or {})
    read_ctx.pop('active_test', None)
    records = self.read(cr, uid, matched_ids, fields, context=read_ctx)
    if len(records) <= 1:
        return records
    # restore the search order, dropping records that could not be read
    by_id = dict((rec['id'], rec) for rec in records)
    return [by_id[rec_id] for rec_id in matched_ids if rec_id in by_id]
def _register_hook(self, cr):
    """Hook called right after the registry is built; no-op by default,
    meant to be overridden by models needing post-registry setup.
    """
    pass
@classmethod
def _patch_method(cls, name, method):
    """ Monkey-patch a method for all instances of this model. This replaces
    the method called ``name`` by ``method`` in the given class.
    The original method is then accessible via ``method.origin``, and it
    can be restored with :meth:`~._revert_method`.

    Example::

        @api.multi
        def do_write(self, values):
            # do stuff, and call the original method
            return do_write.origin(self, values)

        # patch method write of model
        model._patch_method('write', do_write)

        # this will call do_write
        records = model.search([...])
        records.write(...)

        # restore the original method
        model._revert_method('write')
    """
    origin = getattr(cls, name)
    method.origin = origin
    # propagate decorators from origin to method, and apply api decorator
    wrapped = api.guess(api.propagate(origin, method))
    # keep the origin reachable on the wrapper too, for _revert_method
    wrapped.origin = origin
    setattr(cls, name, wrapped)
@classmethod
def _revert_method(cls, name):
    """ Revert the original method called ``name`` in the given class.

    See :meth:`~._patch_method`.
    """
    method = getattr(cls, name)
    setattr(cls, name, method.origin)
#
# Instance creation
#
# An instance represents an ordered collection of records in a given
# execution environment. The instance object refers to the environment, and
# the records themselves are represented by their cache dictionary. The 'id'
# of each record is found in its corresponding cache dictionary.
#
# This design has the following advantages:
# - cache access is direct and thus fast;
# - one can consider records without an 'id' (see new records);
# - the global cache is only an index to "resolve" a record 'id'.
#
@classmethod
def _browse(cls, env, ids):
    """ Create an instance attached to ``env``; ``ids`` is a tuple of record
    ids.
    """
    # bypass __init__: a recordset is just (env, ids)
    records = object.__new__(cls)
    records.env = env
    records._ids = ids
    # schedule these ids for prefetching on the next field access
    env.prefetch[cls._name].update(ids)
    return records
@api.v7
def browse(self, cr, uid, arg=None, context=None):
    """Old-API variant of :meth:`browse`: builds a fresh Environment from
    the given cursor, uid and context.
    """
    ids = _normalize_ids(arg)
    #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
    return self._browse(Environment(cr, uid, context or {}), ids)
@api.v8
def browse(self, arg=None):
    """ browse([ids]) -> records

    Returns a recordset for the ids provided as parameter in the current
    environment.

    Can take no ids, a single id or a sequence of ids.
    """
    ids = _normalize_ids(arg)
    #assert all(isinstance(id, IdType) for id in ids), "Browsing invalid ids: %s" % ids
    return self._browse(self.env, ids)
#
# Internal properties, for manipulating the instance's implementation
#
@property
def ids(self):
    """ List of actual record ids in this recordset (ignores placeholder
    ids for records to create)
    """
    # filter(None, ...) drops falsy ids — presumably the NewId
    # placeholders of not-yet-created records; TODO confirm
    return filter(None, list(self._ids))
# backward-compatibility with former browse records: expose the
# environment's cursor, user id and context as read-only properties
_cr = property(lambda self: self.env.cr)
_uid = property(lambda self: self.env.uid)
_context = property(lambda self: self.env.context)
#
# Conversion methods
#
def ensure_one(self):
    """Verify that the current recordset holds a single record and return
    it; raise an exception otherwise.
    """
    if len(self) != 1:
        raise except_orm("ValueError", "Expected singleton: %s" % self)
    return self
def with_env(self, env):
    """ Returns a new version of this recordset attached to the provided
    environment

    :type env: :class:`~openerp.api.Environment`
    """
    return self._browse(env, self._ids)
def sudo(self, user=SUPERUSER_ID):
    """ sudo([user=SUPERUSER])

    Returns a new version of this recordset attached to the provided
    user (superuser by default).
    """
    return self.with_env(self.env(user=user))
def with_context(self, *args, **kwargs):
    """ with_context([context][, **overrides]) -> records

    Returns a new version of this recordset attached to an extended
    context.

    The extended context is either the provided ``context`` in which
    ``overrides`` are merged or the *current* context in which
    ``overrides`` are merged e.g.::

        # current context is {'key1': True}
        r2 = records.with_context({}, key2=True)
        # -> r2._context is {'key2': True}
        r2 = records.with_context(key2=True)
        # -> r2._context is {'key1': True, 'key2': True}
    """
    base = args[0] if args else self._context
    new_context = dict(base)
    new_context.update(kwargs)
    return self.with_env(self.env(context=new_context))
def _convert_to_cache(self, values, update=False, validate=True):
    """ Convert the ``values`` dictionary into cached values.

    :param update: whether the conversion is made for updating ``self``;
        this is necessary for interpreting the commands of *2many fields
    :param validate: whether values must be checked
    :return: dict mapping known field names to cache-format values;
        unknown field names are silently dropped
    """
    fields = self._fields
    # x2many commands are interpreted relative to the target recordset
    target = self if update else self.browse()
    return {
        name: fields[name].convert_to_cache(value, target, validate=validate)
        for name, value in values.iteritems()
        if name in fields
    }
def _convert_to_write(self, values):
    """ Convert the ``values`` dictionary into the format of :meth:`write`.

    Unknown field names are dropped; so are values converting to a NewId
    (records that do not exist in database yet).
    """
    fields = self._fields
    result = {}
    for name, value in values.iteritems():
        if name in fields:
            value = fields[name].convert_to_write(value)
            if not isinstance(value, NewId):
                result[name] = value
    return result
#
# Record traversal and update
#
def _mapped_func(self, func):
    """ Apply function ``func`` on all records in ``self``, and return the
    result as a list or a recordset (if ``func`` returns recordsets).
    """
    if self:
        vals = [func(rec) for rec in self]
        # union the recordsets with |, otherwise return the plain list
        return reduce(operator.or_, vals) if isinstance(vals[0], BaseModel) else vals
    else:
        # empty recordset: func(self) decides the type of the empty result
        vals = func(self)
        return vals if isinstance(vals, BaseModel) else []
def mapped(self, func):
    """ Apply ``func`` on all records in ``self``, and return the result as a
    list or a recordset (if ``func`` return recordsets). In the latter
    case, the order of the returned recordset is arbitrary.

    :param func: a function or a dot-separated sequence of field names
    """
    if isinstance(func, basestring):
        # field-path form: traverse one field name at a time
        recs = self
        for name in func.split('.'):
            recs = recs._mapped_func(operator.itemgetter(name))
        return recs
    else:
        return self._mapped_func(func)
def _mapped_cache(self, name_seq):
    """ Same as `~.mapped`, but ``name_seq`` is a dot-separated sequence of
    field names, and only cached values are used (no database access).
    """
    recs = self
    for name in name_seq.split('.'):
        field = recs._fields[name]
        # records with no cached value contribute the field's null value
        null = field.null(self.env)
        recs = recs.mapped(lambda rec: rec._cache.get(field, null))
    return recs
def filtered(self, func):
    """ Select the records in ``self`` such that ``func(rec)`` is true, and
    return them as a recordset.

    :param func: a function or a dot-separated sequence of field names
        (in the latter case the truthiness of the mapped value is the predicate)
    """
    if isinstance(func, basestring):
        name = func
        func = lambda rec: filter(None, rec.mapped(name))
    return self.browse([rec.id for rec in self if func(rec)])
def sorted(self, key=None, reverse=False):
    """ Return the recordset ``self`` ordered by ``key``.

    :param key: either a function of one argument that returns a
        comparison key for each record, or ``None``, in which case
        records are ordered according the default model's order
    :param reverse: if ``True``, return the result in reverse order
    """
    if key is None:
        # delegate to search(), which applies the model's default _order
        recs = self.search([('id', 'in', self.ids)])
        return self.browse(reversed(recs._ids)) if reverse else recs
    else:
        return self.browse(map(itemgetter('id'), sorted(self, key=key, reverse=reverse)))
def update(self, values):
    """ Update record `self[0]` with ``values``; assignments go through the
    field descriptors (``__setitem__``), hence into the cache. """
    for name, value in values.iteritems():
        self[name] = value
#
# New records - represent records that do not exist in the database yet;
# they are used to perform onchanges.
#
@api.model
def new(self, values=None):
    """ new([values]) -> record

    Return a new record instance attached to the current environment and
    initialized with the provided ``values``. The record is *not* created
    in database, it only exists in memory.
    """
    # NOTE: the former signature used a mutable default (``values={}``);
    # a None sentinel avoids sharing one dict across calls while keeping
    # the call sites backward-compatible.
    if values is None:
        values = {}
    record = self.browse([NewId()])
    record._cache.update(record._convert_to_cache(values, update=True))
    if record.env.in_onchange:
        # The cache update does not set inverse fields, so do it manually.
        # This is useful for computing a function field on secondary
        # records, if that field depends on the main record.
        for name in values:
            field = self._fields.get(name)
            if field:
                for invf in field.inverse_fields:
                    invf._update(record[name], record)
    return record
#
# Dirty flags, to mark record fields modified (in draft mode)
#
def _is_dirty(self):
    """ Return whether any record in ``self`` is dirty (modified in draft
    mode and not yet written to database). """
    dirty = self.env.dirty
    return any(record in dirty for record in self)
def _get_dirty(self):
    """ Return the list of field names for which ``self`` is dirty. """
    dirty = self.env.dirty
    return list(dirty.get(self, ()))
def _set_dirty(self, field_name):
    """ Mark the records in ``self`` as dirty for the given ``field_name``. """
    dirty = self.env.dirty
    for record in self:
        dirty[record].add(field_name)
#
# "Dunder" methods
#
def __nonzero__(self):
    """ Test whether ``self`` is nonempty (Python 2 truth protocol);
    instances without ``_ids`` yet are considered truthy. """
    return bool(getattr(self, '_ids', True))
def __len__(self):
    """ Return the number of records in ``self``. """
    return len(self._ids)
def __iter__(self):
    """ Return an iterator over ``self``, yielding one singleton recordset
    per record. """
    for id in self._ids:
        yield self._browse(self.env, (id,))
def __contains__(self, item):
    """ Test whether ``item`` (record or field name) is an element of ``self``.
    In the first case, the test is fully equivalent to::

        any(item == record for record in self)
    """
    if isinstance(item, BaseModel) and self._name == item._name:
        return len(item) == 1 and item.id in self._ids
    elif isinstance(item, basestring):
        # a string tests for the presence of a field on the model
        return item in self._fields
    else:
        raise except_orm("ValueError", "Mixing apples and oranges: %s in %s" % (item, self))
def __add__(self, other):
    """Return the concatenation of two recordsets of the same model."""
    if isinstance(other, BaseModel) and self._name == other._name:
        return self.browse(self._ids + other._ids)
    raise except_orm("ValueError", "Mixing apples and oranges: %s + %s" % (self, other))
def __sub__(self, other):
    """Return the records of ``self`` absent from ``other``, keeping the
    order of ``self``."""
    if not isinstance(other, BaseModel) or self._name != other._name:
        raise except_orm("ValueError", "Mixing apples and oranges: %s - %s" % (self, other))
    excluded = set(other._ids)
    return self.browse([rec_id for rec_id in self._ids if rec_id not in excluded])
def __and__(self, other):
    """Return the intersection of two recordsets.

    Note that recordset order is not preserved.
    """
    if isinstance(other, BaseModel) and self._name == other._name:
        return self.browse(set(self._ids) & set(other._ids))
    raise except_orm("ValueError", "Mixing apples and oranges: %s & %s" % (self, other))
def __or__(self, other):
    """Return the union of two recordsets.

    Note that recordset order is not preserved.
    """
    if isinstance(other, BaseModel) and self._name == other._name:
        return self.browse(set(self._ids) | set(other._ids))
    raise except_orm("ValueError", "Mixing apples and oranges: %s | %s" % (self, other))
def __eq__(self, other):
    """ Test whether two recordsets are equivalent (up to reordering). """
    if not isinstance(other, BaseModel):
        if other:
            # comparing to a truthy non-recordset is likely a bug:
            # log the caller's location to help track it down
            filename, lineno = frame_codeinfo(currentframe(), 1)
            _logger.warning("Comparing apples and oranges: %r == %r (%s:%s)",
                            self, other, filename, lineno)
        return False
    return self._name == other._name and set(self._ids) == set(other._ids)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
    """Strict subset test on record ids (same model required)."""
    if isinstance(other, BaseModel) and self._name == other._name:
        return set(self._ids) < set(other._ids)
    raise except_orm("ValueError", "Mixing apples and oranges: %s < %s" % (self, other))
def __le__(self, other):
    """Subset-or-equal test on record ids (same model required)."""
    if isinstance(other, BaseModel) and self._name == other._name:
        return set(self._ids) <= set(other._ids)
    raise except_orm("ValueError", "Mixing apples and oranges: %s <= %s" % (self, other))
def __gt__(self, other):
    """Strict superset test on record ids (same model required)."""
    if isinstance(other, BaseModel) and self._name == other._name:
        return set(self._ids) > set(other._ids)
    raise except_orm("ValueError", "Mixing apples and oranges: %s > %s" % (self, other))
def __ge__(self, other):
    """Superset-or-equal test on record ids (same model required)."""
    if isinstance(other, BaseModel) and self._name == other._name:
        return set(self._ids) >= set(other._ids)
    raise except_orm("ValueError", "Mixing apples and oranges: %s >= %s" % (self, other))
def __int__(self):
    """ Return the id of the (single) record in ``self``. """
    return self.id
def __str__(self):
    """ e.g. ``res.partner(1, 2)``; falls back to the bare model name when
    ``_ids`` is not set yet. """
    return "%s%s" % (self._name, getattr(self, '_ids', ""))
def __unicode__(self):
    # Python 2 unicode protocol: same text as __str__
    return unicode(str(self))
# debugging representation is identical to the display form
__repr__ = __str__
def __hash__(self):
    """Hash on the model name plus the unordered set of record ids, so
    that reordered but equal recordsets hash alike (consistent with __eq__)."""
    if not hasattr(self, '_ids'):
        return hash(self._name)
    return hash((self._name, frozenset(self._ids)))
def __getitem__(self, key):
    """ If ``key`` is an integer or a slice, return the corresponding record
    selection as an instance (attached to ``self.env``).

    Otherwise read the field ``key`` of the first record in ``self``.

    Examples::

        inst = model.search(dom)    # inst is a recordset
        r4 = inst[3]                # fourth record in inst
        rs = inst[10:20]            # subset of inst
        nm = rs['name']             # name of first record in inst
    """
    if isinstance(key, basestring):
        # important: one must call the field's getter
        return self._fields[key].__get__(self, type(self))
    elif isinstance(key, slice):
        return self._browse(self.env, self._ids[key])
    else:
        # integer index: wrap the single id in a singleton recordset
        return self._browse(self.env, (self._ids[key],))
def __setitem__(self, key, value):
    """ Assign the field ``key`` to ``value`` in record ``self``. """
    # important: one must call the field's setter
    return self._fields[key].__set__(self, value)
#
# Cache and recomputation management
#
@lazy_property
def _cache(self):
    """ Return the cache of ``self``, mapping field names to values
    (computed once per instance thanks to lazy_property). """
    return RecordCache(self)
@api.model
def _in_cache_without(self, field):
    """ Make sure ``self`` is present in cache (for prefetching), and return
    the records of model ``self`` in cache that have no value for ``field``
    (:class:`Field` instance).
    """
    env = self.env
    prefetch_ids = env.prefetch[self._name]
    prefetch_ids.update(self._ids)
    # drop falsy ids — presumably placeholders of new records; TODO confirm
    ids = filter(None, prefetch_ids - set(env.cache[field]))
    return self.browse(ids)
@api.model
def refresh(self):
    """ Clear the records cache.

    .. deprecated:: 8.0
        The record cache is automatically invalidated.
    """
    self.invalidate_cache()
@api.model
def invalidate_cache(self, fnames=None, ids=None):
    """ Invalidate the record caches after some records have been modified.
    If both ``fnames`` and ``ids`` are ``None``, the whole cache is cleared.

    :param fnames: the list of modified fields, or ``None`` for all fields
    :param ids: the list of modified record ids, or ``None`` for all
    """
    if fnames is None:
        if ids is None:
            return self.env.invalidate_all()
        fields = self._fields.values()
    else:
        fields = map(self._fields.__getitem__, fnames)
    # invalidate fields and inverse fields, too
    spec = [(f, ids) for f in fields] + \
           [(invf, None) for f in fields for invf in f.inverse_fields]
    self.env.invalidate(spec)
@api.multi
def modified(self, fnames):
    """ Notify that fields have been modified on ``self``. This invalidates
        the cache, and prepares the recomputation of stored function fields
        (new-style fields only).

        :param fnames: iterable of field names that have been modified on
            records ``self``
    """
    # each field knows what to invalidate and recompute
    spec = []
    for fname in fnames:
        spec += self._fields[fname].modified(self)

    # collect every field currently present in any environment's cache
    cached_fields = {
        field
        for env in self.env.all
        for field in env.cache
    }
    # invalidate non-stored fields.function which are currently cached
    spec += [(f, None) for f in self.pool.pure_function_fields
             if f in cached_fields]

    self.env.invalidate(spec)
# The recomputation "todo list" lives on the environment; these three
# helpers are thin wrappers around it.
def _recompute_check(self, field):
    """ If ``field`` must be recomputed on some record in ``self``, return the
        corresponding records that must be recomputed.
    """
    return self.env.check_todo(field, self)

def _recompute_todo(self, field):
    """ Mark ``field`` to be recomputed. """
    self.env.add_todo(field, self)

def _recompute_done(self, field):
    """ Mark ``field`` as recomputed. """
    self.env.remove_todo(field, self)
@api.model
def recompute(self):
    """ Recompute stored function fields. The fields and records to
        recompute have been determined by method :meth:`modified`.
    """
    # drain the environment's todo list until nothing is left
    while self.env.has_todo():
        field, recs = self.env.get_todo()
        # evaluate the fields to recompute, and save them to database
        names = [
            f.name
            for f in field.computed_fields
            if f.store and self.env.field_todo(f)
        ]
        for rec in recs:
            try:
                values = rec._convert_to_write({
                    name: rec[name] for name in names
                })
                # disable recomputation while writing, so the write does
                # not re-trigger the fields being computed right now
                with rec.env.norecompute():
                    rec._write(values)
            except MissingError:
                # record was deleted in the meantime: skip it
                pass
        # mark the computed fields as done
        # NOTE: relies on Python 2 map() being eager (it performs the calls)
        map(recs._recompute_done, field.computed_fields)
#
# Generic onchange method
#
def _has_onchange(self, field, other_fields):
    """ Return whether ``field`` should trigger an onchange event in the
        presence of ``other_fields``.
    """
    # an onchange is needed when the field has an explicit onchange
    # method on this model...
    if field.name in self._onchange_methods:
        return True
    # ... or when some field in ``other_fields`` depends on it
    return any(dep in other_fields for dep in field.dependents)
@api.model
def _onchange_spec(self, view_info=None):
    """ Return the onchange spec from a view description; if not given, the
        result of ``self.fields_view_get()`` is used.

        The result maps dotted field paths (e.g. ``'line_ids.product_id'``)
        to the value of their ``on_change`` attribute in the view arch.
    """
    result = {}

    # for traversing the XML arch and populating result
    def process(node, info, prefix):
        if node.tag == 'field':
            name = node.attrib['name']
            names = "%s.%s" % (prefix, name) if prefix else name
            # first occurrence of a field in the arch wins
            if not result.get(names):
                result[names] = node.attrib.get('on_change')
            # traverse the subviews included in relational fields
            for subinfo in info['fields'][name].get('views', {}).itervalues():
                process(etree.fromstring(subinfo['arch']), subinfo, names)
        else:
            # non-field node: recurse into children with the same prefix
            for child in node:
                process(child, info, prefix)

    if view_info is None:
        view_info = self.fields_view_get()
    process(etree.fromstring(view_info['arch']), view_info, '')
    return result
def _onchange_eval(self, field_name, onchange, result):
    """ Apply onchange method(s) for field ``field_name`` with spec ``onchange``
        on record ``self``. Value assignments are applied on ``self``, while
        domain and warning messages are put in dictionary ``result``.
    """
    onchange = onchange.strip()

    # onchange V8: spec is just a flag; run the registered python methods
    if onchange in ("1", "true"):
        for method in self._onchange_methods.get(field_name, ()):
            method_res = method(self)
            if not method_res:
                continue
            if 'domain' in method_res:
                result.setdefault('domain', {}).update(method_res['domain'])
            if 'warning' in method_res:
                result['warning'] = method_res['warning']
        return

    # onchange V7: spec looks like "method(arg1, arg2, ...)"
    match = onchange_v7.match(onchange)
    if match:
        method, params = match.groups()

        # evaluate params -> tuple
        global_vars = {'context': self._context, 'uid': self._uid}
        if self._context.get('field_parent'):
            # expose the parent record as ``parent`` with values converted
            # to their onchange (client-side) representation
            class RawRecord(object):
                def __init__(self, record):
                    self._record = record
                def __getattr__(self, name):
                    field = self._record._fields[name]
                    value = self._record[name]
                    return field.convert_to_onchange(value)
            record = self[self._context['field_parent']]
            global_vars['parent'] = RawRecord(record)
        field_vars = {
            key: self._fields[key].convert_to_onchange(val)
            for key, val in self._cache.iteritems()
        }
        # NOTE(review): the spec string comes from the view arch and is
        # eval'd here -- trusted as view-definition data, not user input
        params = eval("[%s]" % params, global_vars, field_vars)

        # call onchange method with context when possible
        args = (self._cr, self._uid, self._origin.ids) + tuple(params)
        try:
            method_res = getattr(self._model, method)(*args, context=self._context)
        except TypeError:
            # old-style method without a ``context`` keyword
            method_res = getattr(self._model, method)(*args)

        if not isinstance(method_res, dict):
            return
        if 'value' in method_res:
            # never let an onchange overwrite the record id
            method_res['value'].pop('id', None)
            self.update(self._convert_to_cache(method_res['value'], validate=False))
        if 'domain' in method_res:
            result.setdefault('domain', {}).update(method_res['domain'])
        if 'warning' in method_res:
            result['warning'] = method_res['warning']
@api.multi
def onchange(self, values, field_name, field_onchange):
    """ Perform an onchange on the given field.

        :param values: dictionary mapping field names to values, giving the
            current state of modification
        :param field_name: name of the modified field, or list of field
            names (in view order), or False
        :param field_onchange: dictionary mapping field names to their
            on_change attribute
        :return: dict with a 'value' key mapping changed field names to
            their new (client-format) value; may also carry 'domain' and
            'warning' entries set by the onchange methods
    """
    env = self.env

    # normalize field_name into a (possibly empty) list of names
    if isinstance(field_name, list):
        names = field_name
    elif field_name:
        names = [field_name]
    else:
        names = []

    # unknown field name: nothing to do
    if not all(name in self._fields for name in names):
        return {}

    # determine subfields for field.convert_to_write() below
    secondary = []
    subfields = defaultdict(set)
    for dotname in field_onchange:
        if '.' in dotname:
            secondary.append(dotname)
            name, subname = dotname.split('.')
            subfields[name].add(subname)

    # create a new record with values, and attach ``self`` to it
    with env.do_in_onchange():
        record = self.new(values)
        values = dict(record._cache)
        # attach ``self`` with a different context (for cache consistency)
        record._origin = self.with_context(__onchange=True)

    # load fields on secondary records, to avoid false changes
    with env.do_in_onchange():
        for field_seq in secondary:
            record.mapped(field_seq)

    # determine which field(s) should be triggered an onchange
    todo = list(names) or list(values)
    done = set()

    # dummy assignment: trigger invalidations on the record
    for name in todo:
        if name == 'id':
            continue
        value = record[name]
        field = self._fields[name]
        if field.type == 'many2one' and field.delegate and not value:
            # do not nullify all fields of parent record for new records
            continue
        record[name] = value

    result = {'value': {}}

    # process names in order (or the keys of values if no name given);
    # fields modified by an onchange are appended to ``todo`` so their
    # own onchanges cascade, ``done`` prevents processing twice
    while todo:
        name = todo.pop(0)
        if name in done:
            continue
        done.add(name)
        with env.do_in_onchange():
            # apply field-specific onchange methods
            if field_onchange.get(name):
                record._onchange_eval(name, field_onchange[name], result)

            # force re-evaluation of function fields on secondary records
            for field_seq in secondary:
                record.mapped(field_seq)

            # determine which fields have been modified
            for name, oldval in values.iteritems():
                field = self._fields[name]
                newval = record[name]
                if field.type in ('one2many', 'many2many'):
                    if newval != oldval or newval._is_dirty():
                        # put new value in result
                        result['value'][name] = field.convert_to_write(
                            newval, record._origin, subfields.get(name),
                        )
                        todo.append(name)
                    else:
                        # keep result: newval may have been dirty before
                        pass
                else:
                    if newval != oldval:
                        # put new value in result
                        result['value'][name] = field.convert_to_write(
                            newval, record._origin, subfields.get(name),
                        )
                        todo.append(name)
                    else:
                        # clean up result to not return another value
                        result['value'].pop(name, None)

    # At the moment, the client does not support updates on a *2many field
    # while this one is modified by the user.
    if field_name and not isinstance(field_name, list) and \
            self._fields[field_name].type in ('one2many', 'many2many'):
        result['value'].pop(field_name, None)

    return result
class RecordCache(MutableMapping):
    """ Implements a proxy dictionary to read/update the cache of a record.
        Upon iteration, it looks like a dictionary mapping field names to
        values. However, fields may be used as keys as well.
    """
    def __init__(self, records):
        # the recordset whose first record's cache is proxied (reads use
        # records.id; writes apply to all records in the set)
        self._recs = records

    def contains(self, field):
        """ Return whether `records[0]` has a value for ``field`` in cache. """
        # unlike __contains__, a SpecialValue entry also counts here
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        return self._recs.id in self._recs.env.cache[field]

    def __contains__(self, field):
        """ Return whether `records[0]` has a regular value for ``field`` in cache. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        # SpecialValue placeholders are not considered regular values
        dummy = SpecialValue(None)
        value = self._recs.env.cache[field].get(self._recs.id, dummy)
        return not isinstance(value, SpecialValue)

    def __getitem__(self, field):
        """ Return the cached value of ``field`` for `records[0]`. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        value = self._recs.env.cache[field][self._recs.id]
        # unwrap SpecialValue entries via their get()
        return value.get() if isinstance(value, SpecialValue) else value

    def __setitem__(self, field, value):
        """ Assign the cached value of ``field`` for all records in ``records``. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        values = dict.fromkeys(self._recs._ids, value)
        self._recs.env.cache[field].update(values)

    def update(self, *args, **kwargs):
        """ Update the cache of all records in ``records``. If the argument is a
            ``SpecialValue``, update all fields (except "magic" columns).
        """
        if args and isinstance(args[0], SpecialValue):
            # broadcast the special value to every field but 'id'
            values = dict.fromkeys(self._recs._ids, args[0])
            for name, field in self._recs._fields.iteritems():
                if name != 'id':
                    self._recs.env.cache[field].update(values)
        else:
            # regular dict-like update, handled by MutableMapping
            return super(RecordCache, self).update(*args, **kwargs)

    def __delitem__(self, field):
        """ Remove the cached value of ``field`` for all ``records``. """
        if isinstance(field, basestring):
            field = self._recs._fields[field]
        field_cache = self._recs.env.cache[field]
        for id in self._recs._ids:
            field_cache.pop(id, None)

    def __iter__(self):
        """ Iterate over the field names with a regular value in cache. """
        cache, id = self._recs.env.cache, self._recs.id
        dummy = SpecialValue(None)
        for name, field in self._recs._fields.iteritems():
            if name != 'id' and not isinstance(cache[field].get(id, dummy), SpecialValue):
                yield name

    def __len__(self):
        """ Return the number of fields with a regular value in cache. """
        # delegates to __iter__, so SpecialValue entries are not counted
        return sum(1 for name in self)
class Model(BaseModel):
    """Main super-class for regular database-persisted OpenERP models.

    OpenERP models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    _auto = True                # automatically create a database backend
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _transient = False          # True in a TransientModel

class TransientModel(BaseModel):
    """Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management,
    all users can create new records, and may only access the
    records they created. The super-user has unrestricted access
    to all TransientModel records.
    """
    _auto = True                # automatically create a database backend
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _transient = True           # enables transient behavior (vacuum, ACL rules)

class AbstractModel(BaseModel):
    """Abstract Model super-class for creating an abstract class meant to be
    inherited by regular models (Models or TransientModels) but not meant to
    be usable on its own, or persisted.

    Technical note: we don't want to make AbstractModel the super-class of
    Model or BaseModel because it would not make sense to put the main
    definition of persistence methods such as create() in it, and still we
    should be able to override them within an AbstractModel.
    """
    _auto = False               # don't create any database backend for AbstractModels
    _register = False           # not visible in ORM registry, meant to be python-inherited only
    _transient = False
def itemgetter_tuple(items):
    """ Fixes itemgetter inconsistency (useful in some cases) of not returning
    a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
    """
    # operator.itemgetter already yields tuples for 2+ keys; only the
    # empty and single-key cases need special handling
    count = len(items)
    if count == 0:
        return lambda _gettable: ()
    if count == 1:
        key = items[0]
        return lambda gettable: (gettable[key],)
    return operator.itemgetter(*items)
def convert_pgerror_23502(model, fields, info, e):
    """ Convert a PostgreSQL not-null violation (pgcode 23502) into a
        user-readable error dict ({'message': ..., 'field': ...}).
    """
    m = re.match(r'^null value in column "(?P<field>\w+)" violates '
                 r'not-null constraint\n',
                 tools.ustr(e))
    field_name = m and m.group('field')
    if not m or field_name not in fields:
        # unrecognized message or unknown column: return the raw error
        return {'message': tools.ustr(e)}

    field = fields.get(field_name)
    if field:
        # include the field label when the view description provides it
        message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
    else:
        message = _(u"Missing required value for the field '%s'.") % field_name
    return {
        'message': message,
        'field': field_name,
    }
def convert_pgerror_23505(model, fields, info, e):
    """ Convert a PostgreSQL unique-constraint violation (pgcode 23505) into
        a user-readable error dict ({'message': ..., 'field': ...}).
    """
    m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
                 tools.ustr(e))
    field_name = m and m.group('field')
    if not m or field_name not in fields:
        # unrecognized message or unknown column: return the raw error
        return {'message': tools.ustr(e)}

    message = _(u"The value for the field '%s' already exists.") % field_name
    field = fields.get(field_name)
    if field:
        # add a hint with the field label when available
        message = _(u"%s This might be '%s' in the current model, or a field "
                    u"of the same name in an o2m.") % (message, field['string'])
    return {
        'message': message,
        'field': field_name,
    }
# Map PostgreSQL error codes (pgcode) to converter callables producing a
# user-friendly error dict; unknown codes fall back to the raw (ustr'd)
# error message via the defaultdict factory.
PGERROR_TO_OE = defaultdict(
    # shape of mapped converters
    lambda: (lambda model, fvg, info, pgerror: {'message': tools.ustr(pgerror)}), {
    # not_null_violation
    '23502': convert_pgerror_23502,
    # unique constraint error
    '23505': convert_pgerror_23505,
})
def _normalize_ids(arg, atoms={int, long, str, unicode, NewId}):
    """ Normalizes the ids argument for ``browse`` (v7 and v8) to a tuple.

    Various implementations were tested on the corpus of all browse() calls
    performed during a full crawler run (after having installed all website_*
    modules) and this one was the most efficient overall.

    A possible bit of correctness was sacrificed by not doing any test on
    Iterable and just assuming that any non-atomic type was an iterable of
    some kind.

    :rtype: tuple

    .. note:: the mutable default ``atoms`` is intentional: it is a
       read-only set built once at definition time (Python 2 builtins
       ``long``/``unicode`` plus NewId).
    """
    # much of the corpus is falsy objects (empty list, tuple or set, None)
    if not arg:
        return ()

    # `type in set` is significantly faster (because more restrictive) than
    # isinstance(arg, set) or issubclass(type, set); and for new-style classes
    # obj.__class__ is equivalent to but faster than type(obj). Not relevant
    # (and looks much worse) in most cases, but over millions of calls it
    # does have a very minor effect.
    if arg.__class__ in atoms:
        return arg,

    # anything else is assumed iterable
    return tuple(arg)
# keep those imports here to avoid dependency cycle errors
from .osv import expression
from .fields import Field, SpecialValue, FailedValue
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
felipenaselva/felipe.repository | refs/heads/master | plugin.video.atto.filmes/tv5.py | 67 |
import base64
import zlib, urllib,urllib2,re
# XOR key for (de)obfuscating the playlist API payloads;
# base64 of the literal string "et ouai mec!"
key=base64.b64decode("ZXQgb3VhaSBtZWMh")
def getUrl(url, cookieJar=None, post=None, timeout=20, headers=None):
    """ Fetch ``url`` and return the raw response body.

    :param url: URL to fetch
    :param cookieJar: optional cookielib jar shared across calls
    :param post: optional POST payload; a GET is performed when None
    :param timeout: socket timeout in seconds
    :param headers: optional iterable of (name, value) extra headers
    :return: response body as a string
    """
    cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
    opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
    req = urllib2.Request(url)
    # spoof a desktop browser UA: some hosts reject urllib2's default one
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for h, hv in headers:
            req.add_header(h, hv)
    response = opener.open(req, post, timeout=timeout)
    try:
        link = response.read()
    finally:
        # BUGFIX: close the response even when read() raises, so the
        # connection is not leaked on network errors
        response.close()
    return link
def decode_base64_and_inflate(b64string):
    """ Base64-decode ``b64string`` and zlib-decompress the result. """
    raw = base64.b64decode(b64string)
    # wbits=15: expect a complete zlib stream with header and checksum
    return zlib.decompress(raw, 15)
def deflate_and_base64_encode(string_val):
    """ zlib-compress ``string_val`` and return it base64-encoded.

    Keeps the full zlib stream (header + checksum), matching the wbits=15
    used by decode_base64_and_inflate.
    """
    return base64.b64encode(zlib.compress(string_val))
def decode(param1, param2):
    """ Decode an obfuscated server payload.

    ``param1`` is base64+zlib packed; the inflated bytes are XORed with the
    repeating key ``param2``. Returns a bytearray.
    """
    data = bytearray(decode_base64_and_inflate(param1))
    keylen = len(param2)
    # XOR each byte with the key, cycling through the key with modulo
    for i in range(len(data)):
        data[i] ^= ord(param2[i % keylen])
    return data
def encode(param1, param2):
    """ Encode a payload for the server: XOR with the repeating key
    ``param2``, then zlib-compress and base64-encode the result.

    Inverse of decode().
    """
    data = bytearray(param1)
    keylen = len(param2)
    # XOR each byte with the key, cycling through the key with modulo
    for i in range(len(data)):
        data[i] ^= ord(param2[i % keylen])
    # NOTE(review): decoding the XORed bytes as UTF-8 can fail for keys /
    # payloads producing invalid sequences -- kept for compatibility with
    # the original behaviour
    return deflate_and_base64_encode(data.decode("utf-8"))
    # BUGFIX: removed unreachable "return _loc3_" that followed the return
def extractUrl(uid):
#enc="eNrjYnGVFRFl8GeOYHERtPTnZuDlYZPgYZdhkfXlCgjR9+XhZAlmCBTVlBRhYI1QFhAMAbIFBKMkPAJURcOcxWNcwwEd4gnn"
# eJzjYnGVFRFl8GeOYHERtPTnZuDlYZPgYZdhkfXlCgjR9+XhZAlmCBTVlBRhYI1QFhAMAbIFBKMkPAJURcOcxWNcwwEd4gnn
str="operation=getPlaylist&uid=%s"%urllib.quote_plus(uid)
str=encode(str,key)
s=getUrl("http://www.tv5mondeplusafrique.com/html/servicesV2/getPlaylist.xml?BulkLoaderNoCache=2_2&",post=str)
s=decode(s,key)
print "returned", repr(s.decode("unicode-escape"))
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
xmlobj= BeautifulSOAP(s.decode("unicode-escape"), convertEntities=BeautifulStoneSoup.XML_ENTITIES)
vurl=xmlobj("video")[0];
# print vurl
su=vurl("secureurl")[0].string
su=re.sub('[\[CDATA\]]', '', su)
#print su
# print 'yyyyyyyyyyyy',vurl
if 'manifest.f4m?' in su:
su='plugin://plugin.video.f4mTester/?url='+urllib.quote_plus(su)
return su
#print extractUrl("aime_malgre_lui_s01_ep42_4377314")
#d1= decode(enc,key)
#print d1
#d2=encode(d1,key)
#print d2
#print d1==d2
#print 1/0
#print decode(enc, key)
#print decode("eNrjYnGVFRFl8GeOYHERVPHh4+FlCJZ0FvNg9HRkk2DxEdXmYPJlYnP1clBnDWVxCEsXcxEEsnkC4mLdRJQdhCOcgiLFAFb/C4g=",key)
#print decode("eJytV2l328iVLSwsgmABRBEEQWIhZVuW1ZKiSHa3bKslmSBRJCFiITaS4CK2J1YnfWKPM3b6W4711wOJcttJus9JZuZrnaq33Hffe7ey0YxuWm3SrvN+2aGvFuGoHb9fxwAEWtEynNLM6qAmOWqAwqANzjzJvDHI+ISYk8WncKDO+uGa9njZVpUEapWE5oH6PyUlY4yqSceZMqRwEDqcScBBsR1azVl/1XeaQPwTdWnohqKTeHvv7Vby7vDlzvfX3jcnfyJPXu/sHD4By8xNEYj40UpLWIztxXc/PTyytl8/eHj8hrz4eWf2NMOdrWfV1bBiUJwZcYt1IDVJ26hCm1uE5qWW0hIGMU3BdiYRoy6DcROhYHE8hlAUiDdZ7L+/IJTOVUibSDaRCF2r5Od1ngINl08rPmnhXsgWAa/TyGEwpCgMHtdoWvSe8HY5CbWohNI2qEcgIVRKw9QsQYFgl5EsjXglelgvkqCGmxENy7aajMWi9ZDoSKiQbYcvygiUgNgpSyQAFSfh6uqt/XZNQRGD1GIfXU98K2CUrVgjL7KywQ7AhUAHZd0PIOv06HJhCBLoCtuZ7n1TJzztoMWf//Bh1NffLAMrx59LWF6g1skbiDkUeftFuyxbwerZrc25plBdxpAM4kEYYGiVihxvsWVIXSY/mrUKTywS4qRfuj6rmKSt4Nt4pr6CYm/d+4mtO8ED0Yg2vsDgJVe0W6xGPYQOXasJJYscd4zkA6+Khut533RgGzySKtBpuwH1XPEMqepw5wjGKv5y/ihi1KNZ5LKQT3vT2Z47d/5Mk9HPwub8ztcqiPl0R3cAWyfuDOw1uALxtim/XgwYzdru5vc/8jIx0z1ujiiec+5zaUGaelgPWK3mcI97nWzL05m4i6/TK8EmLwilcXoe8/MhrYDBK7boswyMc/sHJtBdzuh9gIYHseJwh0LF38TWQd6jiq9iX0+/o4gOBB8n3LakAt97RC8Qjvo3O8hvg55O1IC+9ZVcFfN62WYN5XWp1e33w0pZqvIRq6hDXmvFuKZbd/y0pXQwjfEic4kELd0Qi60QiR0atxb9yZW8ir2RIhsdA3GDnPNdFduQqk6hbz7R3j8P2kgGgwLuleCquiaZ8xadoC7aYJhs6vUXUysYvabKd0F+fjz/Aye6IU49dFvT2KOwj0AcFAeXs93lLAJw4kyaB81JvfeOfRIgY3N+X6O9ew5Yes4fEl6CGEiODtdpUlNgW5D8I0YUjC7WQnBpGBJtRNwR9hZD6tuTx7t/Pbgop5jVOnOlrZilesRWI8jWkKALXkdUPNEoCSIDVLG96DO1KlXRAJXK+7dcbaTcpSJfduRZN8rqT58cC8ms2ut7K9TjZGWkxrQh9D1U5gegmFhyyrYLFMjrZS4Xk2BYKsPOrJhEdrqUF+FcXllXvIATnI3H6rhbm3baXI+auWXd8MT6ctxdQFBdSl2hnqKGGE9zlpBZLcz6pXDRWdnNqxrD4+UqxWPVWkrjcRUlE3mikCVh+T5KL/62yGfUl9xTiNecA8CZyzYGpnRRSNlTCSf7pN3PTmpXUVJNu0YHcEIrrPNpHr9Irat0U564vGbzM4HzeQky60n/ltsWW7eEAZjMG2LR5oaCz2UxpNiRFmClxul+WXK5fhtVEzBBHqUuPbpctIcga0su8nWE/bJGaKli0tOBNMSY7k3zWgBeICBLKUNFUWuQMVXc1Zlfs1MMNOgaPivDuD7RvKk3SvSxmDslXaqEPIT5sYTpdNojX+KvGmOTx+lQckpyTXPosqUPsN80ZOgzQLTLQNCZhA0YCjLOJGl2EPRLwHf53GaNzns5j4fO+wJwJm0N1Om4GVCYtX2a8+0y7ZhapUgAENKBTxuGgoTPfguCESMGD2sU8bKeOZlxKH8oNCX5NoZ/eduQGHPSYQsxKqT8TAVE4wUkD77GZw501h5wPBuz7PCWtzmG9EA1Q036h3xNqympCOo+JTVBwOTx5
zO8WxeLPgdVezCjP+eLsDDmZzjv6/yOogz4r/HURVbgOD6qmwE1XYwsloOIU1yuyNpuA/kRa8b5tJrRUuZGCBCDAmbOHzUGt/O2opG4ZQX1/Vjfw/ptnEMOC3RNECNuWElQFov53vQ7Sc4lLT6CysmYymNgTY+fIc7lVYhKlNOFIN+nZV/KY6bghZY8BXLOQws2WNtDJTYGmasZriYthzCcuDlWFe2mhc+6aMMNv3Uqpacg84JeOygNAqZcvDGoC3YcrK1BSYmLv9yRkvzOQlrGsTTtfI15r1jj8qW6tlLZ8oBeQF9zwCkNJVs1Sf52hL7wH9gdIA/rus3nuN3hgyERG1Xkt8DKVMwZ3/z2+7fx4G4WPZuf9/vt0W0vxHpNGfRo7pXI4v6z+S8x3M+fl2401yxaWmqkjWDlt7AaKQ4tuaxF0dgeNlI4q+a6BcjV1cToFDhMqKPjd4uhzUu44DDiysFgnM791lQbfc6r6Tbq97wK+ABc3mmqoxrHkqcc7dGQHDVr4KaVRp8AK0/30rk6n8TqyEjo5qpvc0D2kDYKHVywaiU+wkk+f5ajCVuqdE6bVp4jd5OFCznsYC/lWIsHV0zYaaysdCYWFZYhkDSKnOkw9ElccuimIRSs3G/KAuvISsNqFkbMYphASkCt7w9+/24713IPts5fh5fqEDdI6FRNp1mC7Wfnbx99m+u3l9vPf8gWca79CiTOgMOaPj96CPzNHvFEx+eHCAl2jolWBGbemyuc+PIsSNZGl6rHLHCAvk4rZomelcxYH62aLjIqwzoAmlFI6JZU/AT1hN/L9Vgbn9lKV17vIXhDizDktRtmj82lTFn9VNcjZLD6TTZZNMOO9CX3QWNqpdmv5W6AwkmPxjC3n5rUBRovJvEnTr8BLbYZMKeVT0Dri7pyEtMqGAJ4s+hnYS1LrXaept5kpx9fPOj9197WdXByePbh5a2+siorMs7zwnxawq0YKiChwOrD0RE4OP3uABzunXw8zq5f9aJM2Nqz//vw+C9b31uPi7v7j3/Y3wJnp8/2ftp9vhbrG/uGKNDDMm4dGJWNxuMUo1MvcQ6T85yiIpHioB+nsT4l2mqQc6Oh0inS/dDxP2PYigsCl1FISgnllIe9Ammd8i5n5nuqutH8k74874Rz2rvTJKbXj+fKLB1XKuzAS2U0bSN1WJZ/NpvLkcvVz4Zc08lnlHbDKePm6aC67CSZHCzWubZ3+dHvDj56B4dvD5592NuxTl9vnxy8J8To5HrezbVNhM2EkdaPry/I3tvd8z+eHz21r88nC/OyuOFbCMsdpJFwZ6NJYoR/I19LW3v/Vr6s9ZShXZTr/71qVs3nm5LuJ2atdNvLZyELEyhdzOSnYT6XurVsagEP0dAYdKSrwXRV71Xk/iCJeFoGCVtm/6o2TK8BTvqI470Szv9NcY5hGH/K9Xbxdt4+VegYm+TIjk5qqyRUwy4dOhsOvN1Ofjx8sfv47e7dP8i9/wdV7MXL18Gzre2fHnyzg07savfoeCV1jo9/+N35x5wbL1+/Ot57Q4gcz+/7LlOHolFBrYuN7jUNNq3c8+SfNWd1/bXmvJQ9VnAhVKbG515La8Ae3+2Om83uOM13x368tvNeyJIzZdJTl3Y8AixH94FD3+nV5fRWr9JBkW1FLOYD+kHEDHu/rn+s8Ub/JP1fcPjh4OV18GLr8R9fbfXeHZ2Ds293T98/IpF5ec/51qZ3Ds8ODsDW9u9/fH4MHm3t2h9Xif/muw7+z3FIYpz+H3C4ijY4kPQfcViF/z4O/cm9Duz8P+EwUP9XOCx+A4euzBOgt+K5jJL8k91+/3etec8W",key)
|
liberorbis/libernext | refs/heads/master | apps/erpnext/erpnext/accounts/report/customer_account_head/__init__.py | 12133432 | |
shurihell/testasia | refs/heads/test1 | cms/lib/__init__.py | 12133432 | |
ain7/www.ain7.org | refs/heads/master | ain7/voyages/migrations/__init__.py | 12133432 | |
youprofit/django-cms | refs/heads/develop | cms/south_migrations/0073_auto__chg_field_cmsplugin_path__chg_field_page_path.py | 49 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """ Tighten the ``path`` columns: set default '' and drop nullability
        (see backwards(), which restores null=True) while keeping the
        unique constraint. """
    # Changing field 'CMSPlugin.path'
    db.alter_column(u'cms_cmsplugin', 'path', self.gf('django.db.models.fields.CharField')(default='', unique=True, max_length=255))

    # Changing field 'Page.path'
    db.alter_column(u'cms_page', 'path', self.gf('django.db.models.fields.CharField')(default='', unique=True, max_length=255))
def backwards(self, orm):
    """ Revert the ``path`` columns to nullable (null=True, no default). """
    # Changing field 'CMSPlugin.path'
    db.alter_column(u'cms_cmsplugin', 'path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, null=True))

    # Changing field 'Page.path'
    db.alter_column(u'cms_page', 'path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, null=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.aliaspluginmodel': {
'Meta': {'object_name': 'AliasPluginModel', '_ormbases': ['cms.CMSPlugin']},
'alias_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_placeholder'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_reference'", 'null': 'True', 'to': "orm['cms.CMSPlugin']"})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('path',)", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] |
hyOzd/kicad-python | refs/heads/master | kicad/__init__.py | 1 | # Copyright 2015 Miguel Angel Ajo Pelayo <miguelangel@ajo.es>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from .units import *
from .point import *
from .size import *
# if `enum` cannot be imported (windoze!) we provide our own copy
try:
    import enum
except ImportError:
    # Old Pythons without the stdlib ``enum`` module (and without the
    # ``enum34`` backport installed): make the copy bundled under
    # kicad/3rdparty importable instead.
    import sys, os
    module_dir = os.path.abspath(os.path.dirname(__file__))
    sys.path.append(os.path.join(module_dir,'3rdparty'))
class BareClass(object):
    # Minimal placeholder type; instances get their real class assigned
    # afterwards by new().
    pass


def new(class_type, instance):
    """Instantiate *class_type* without running its ``__init__``.

    The returned object may be inconsistent until it is fully populated,
    so use this only when you know what you are doing.  kicad-python uses
    it to construct wrapper classes before injecting the native object,
    which ends up stored on the ``_obj`` attribute.
    """
    wrapper = BareClass()
    wrapper.__class__ = class_type
    wrapper._obj = instance
    return wrapper
|
fiji-flo/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/third_party/pytest/doc/en/example/nonpython/conftest.py | 24 | # content of conftest.py
import pytest
def pytest_collect_file(parent, path):
    # Collection hook: any file named ``test*.yml`` becomes a YAML-driven
    # test file; everything else is left to pytest's default collection.
    if path.ext == ".yml" and path.basename.startswith("test"):
        return YamlFile(path, parent)
class YamlFile(pytest.File):
    # A collected YAML file; each top-level mapping entry becomes one
    # YamlItem test.
    def collect(self):
        import yaml # we need a yaml parser, e.g. PyYAML
        raw = yaml.safe_load(self.fspath.open())
        # sort by name for a deterministic collection order
        for name, spec in sorted(raw.items()):
            yield YamlItem(name, self, spec)
class YamlItem(pytest.Item):
    # One usecase taken from a YAML file; ``spec`` is the raw mapping for
    # this entry.
    def __init__(self, name, parent, spec):
        super(YamlItem, self).__init__(name, parent)
        self.spec = spec
    def runtest(self):
        for name, value in sorted(self.spec.items()):
            # some custom test execution (dumb example follows)
            if name != value:
                raise YamlException(self, name, value)
    def repr_failure(self, excinfo):
        """ called when self.runtest() raises an exception. """
        # only YamlException gets a custom report; any other exception
        # falls through (implicit None) to pytest's default formatting
        if isinstance(excinfo.value, YamlException):
            return "\n".join([
                "usecase execution failed",
                "   spec failed: %r: %r" % excinfo.value.args[1:3],
                "   no further details known at this point."
            ])
    def reportinfo(self):
        # location shown in test reports: (path, line number, description)
        return self.fspath, 0, "usecase: %s" % self.name
class YamlException(Exception):
    """Raised when a YAML-described usecase fails; carries (item, name, value) for reporting."""
|
jinshana/otp | refs/heads/maint | lib/asn1/test/asn1_SUITE_data/P-Record.py | 97 | P-Record DEFINITIONS ::=
BEGIN
PersonnelRecord ::= [APPLICATION 0] SET
{ name Name,
title VisibleString,
number EmployeeNumber,
dateOfHire Date,
nameOfSpouse [1] Name,
children SEQUENCE OF ChildInformation DEFAULT {}
}
ChildInformation ::= SET
{ name Name,
dateOfBirth Date
}
Name ::= [APPLICATION 1] SEQUENCE
{ givenName VisibleString,
initial VisibleString,
familyName VisibleString
}
EmployeeNumber ::= [APPLICATION 2] INTEGER
Date ::= [APPLICATION 3] VisibleString -- YYYY MMDD
v PersonnelRecord ::=
{
name {
givenName "John",
initial "P",
familyName "Smith"
},
title "Director",
number 51,
dateOfHire "19710917",
nameOfSpouse {
givenName "Mary",
initial "T",
familyName "Smith"
},
children {
{name {
givenName "Ralph",
initial "T",
familyName "Smith"
} ,
dateOfBirth "19571111"},
{name {
givenName "Susan",
initial "B",
familyName "Jones"
} ,
dateOfBirth "19590717" }
}
}
END
|
tbeadle/django | refs/heads/master | tests/forms_tests/field_tests/test_choicefield.py | 18 | from __future__ import unicode_literals
from django.forms import ChoiceField, Form, ValidationError
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class ChoiceFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
    """Tests for ChoiceField cleaning, grouped/callable choices and rendering."""

    def test_choicefield_1(self):
        field = ChoiceField(choices=[('1', 'One'), ('2', 'Two')])
        # a required field rejects both the empty string and None
        for empty in ('', None):
            with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
                field.clean(empty)
        self.assertEqual('1', field.clean(1))
        self.assertEqual('1', field.clean('1'))
        with self.assertRaisesMessage(
            ValidationError,
            "'Select a valid choice. 3 is not one of the available choices.'",
        ):
            field.clean('3')

    def test_choicefield_2(self):
        field = ChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
        # an optional field normalizes empty values to ''
        self.assertEqual('', field.clean(''))
        self.assertEqual('', field.clean(None))
        self.assertEqual('1', field.clean(1))
        self.assertEqual('1', field.clean('1'))
        with self.assertRaisesMessage(
            ValidationError,
            "'Select a valid choice. 3 is not one of the available choices.'",
        ):
            field.clean('3')

    def test_choicefield_3(self):
        field = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')])
        self.assertEqual('J', field.clean('J'))
        # display labels are not accepted in place of values
        with self.assertRaisesMessage(
            ValidationError,
            "'Select a valid choice. John is not one of the available choices.'",
        ):
            field.clean('John')

    def test_choicefield_4(self):
        # grouped choices plus a plain top-level option
        field = ChoiceField(choices=[
            ('Numbers', (('1', 'One'), ('2', 'Two'))),
            ('Letters', (('3', 'A'), ('4', 'B'))),
            ('5', 'Other'),
        ])
        for raw, cleaned in ((1, '1'), ('1', '1'), (3, '3'), ('3', '3'), (5, '5'), ('5', '5')):
            self.assertEqual(cleaned, field.clean(raw))
        with self.assertRaisesMessage(
            ValidationError,
            "'Select a valid choice. 6 is not one of the available choices.'",
        ):
            field.clean('6')

    def test_choicefield_callable(self):
        field = ChoiceField(choices=lambda: [('J', 'John'), ('P', 'Paul')])
        self.assertEqual('J', field.clean('J'))

    def test_choicefield_callable_may_evaluate_to_different_values(self):
        choices = []

        def choices_as_callable():
            # late-bound closure: re-reads `choices` every time the form
            # is instantiated
            return choices

        class ChoiceFieldForm(Form):
            choicefield = ChoiceField(choices=choices_as_callable)

        choices = [('J', 'John')]
        self.assertEqual([('J', 'John')], list(ChoiceFieldForm().fields['choicefield'].choices))
        choices = [('P', 'Paul')]
        self.assertEqual([('P', 'Paul')], list(ChoiceFieldForm().fields['choicefield'].choices))

    def test_choicefield_disabled(self):
        field = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')], disabled=True)
        self.assertWidgetRendersTo(
            field,
            '<select id="id_f" name="f" disabled required><option value="J">John</option>'
            '<option value="P">Paul</option></select>'
        )
|
hubsaysnuaa/odoo | refs/heads/8.0 | addons/base_iban/__init__.py | 447 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_iban
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
OddEssay/ansible | refs/heads/devel | lib/ansible/playbook/base.py | 17 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import operator
import uuid
from functools import partial
from inspect import getmembers
from io import FileIO
from six import iteritems, string_types, text_type
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.utils.debug import debug
from ansible.utils.vars import combine_vars, isidentifier
from ansible.template import template
class Base:
    """Common base for all playbook objects (Play, Block, Task, ...).

    Declares shared FieldAttributes and implements the generic
    load/validate/template/serialize machinery built on top of them.
    """

    # connection/transport
    _connection = FieldAttribute(isa='string')
    _port = FieldAttribute(isa='int')
    _remote_user = FieldAttribute(isa='string')

    # variables
    _vars = FieldAttribute(isa='dict', default=dict(), priority=100)

    # flags and misc. settings
    _environment = FieldAttribute(isa='list')
    _no_log = FieldAttribute(isa='bool')

    # param names which have been deprecated/removed
    DEPRECATED_ATTRIBUTES = [
        'sudo', 'sudo_user', 'sudo_pass', 'sudo_exe', 'sudo_flags',
        'su', 'su_user', 'su_pass', 'su_exe', 'su_flags',
    ]

    def __init__(self):

        # initialize the data loader and variable manager, which will be provided
        # later when the object is actually loaded
        self._loader = None
        self._variable_manager = None

        # every object gets a random uuid:
        self._uuid = uuid.uuid4()

        # and initialize the base attributes
        self._initialize_base_attributes()

        try:
            from __main__ import display
            self._display = display
        except ImportError:
            from ansible.utils.display import Display
            self._display = Display()

    # The following three functions are used to programatically define data
    # descriptors (aka properties) for the Attributes of all of the playbook
    # objects (tasks, blocks, plays, etc).
    #
    # The function signature is a little strange because of how we define
    # them.  We use partial to give each method the name of the Attribute that
    # it is for.  Since partial prefills the positional arguments at the
    # beginning of the function we end up with the first positional argument
    # being allocated to the name instead of to the class instance (self) as
    # normal.  To deal with that we make the property name field the first
    # positional argument and self the second arg.
    #
    # Because these methods are defined inside of the class, they get bound to
    # the instance when the object is created.  After we run partial on them
    # and put the result back into the class as a property, they get bound
    # a second time.  This leads to self being placed in the arguments twice.
    # To work around that, we mark the functions as @staticmethod so that the
    # first binding to the instance doesn't happen.

    @staticmethod
    def _generic_g(prop_name, self):
        # generic getter: defers to a _get_attr_<name> hook when one exists
        method = "_get_attr_%s" % prop_name
        if hasattr(self, method):
            return getattr(self, method)()

        return self._attributes[prop_name]

    @staticmethod
    def _generic_s(prop_name, self, value):
        # generic setter: stores the value in the attributes dict
        self._attributes[prop_name] = value

    @staticmethod
    def _generic_d(prop_name, self):
        # generic deleter
        del self._attributes[prop_name]

    def _get_base_attributes(self):
        '''
        Returns the list of attributes for this class (or any subclass thereof).
        If the attribute name starts with an underscore, it is removed
        '''
        base_attributes = dict()
        for (name, value) in getmembers(self.__class__):
            if isinstance(value, Attribute):
                if name.startswith('_'):
                    name = name[1:]
                base_attributes[name] = value
        return base_attributes

    def _initialize_base_attributes(self):
        # each class knows attributes set upon it, see Task.py for example
        self._attributes = dict()

        for (name, value) in self._get_base_attributes().items():
            getter = partial(self._generic_g, name)
            setter = partial(self._generic_s, name)
            deleter = partial(self._generic_d, name)

            # Place the property into the class so that cls.name is the
            # property functions.
            setattr(Base, name, property(getter, setter, deleter))

            # Place the value into the instance so that the property can
            # process and hold that value/
            setattr(self, name, value.default)

    def preprocess_data(self, ds):
        ''' infrequently used method to do some pre-processing of legacy terms '''

        for base_class in self.__class__.mro():
            method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None)
            if method:
                return method(ds)
        return ds

    def load_data(self, ds, variable_manager=None, loader=None):
        ''' walk the input datastructure and assign any values '''

        assert ds is not None

        # the variable manager class is used to manage and merge variables
        # down to a single dictionary for reference in templating, etc.
        self._variable_manager = variable_manager

        # the data loader class is used to parse data from strings and files
        if loader is not None:
            self._loader = loader
        else:
            self._loader = DataLoader()

        # call the preprocess_data() function to massage the data into
        # something we can more easily parse, and then call the validation
        # function on it to ensure there are no incorrect key values
        ds = self.preprocess_data(ds)
        self._validate_attributes(ds)

        # Walk all attributes in the class. We sort them based on their priority
        # so that certain fields can be loaded before others, if they are dependent.
        # FIXME: we currently don't do anything with private attributes but
        #        may later decide to filter them out of 'ds' here.
        base_attributes = self._get_base_attributes()
        for name, attr in sorted(base_attributes.items(), key=operator.itemgetter(1)):
            # copy the value over unless a _load_field method is defined
            if name in ds:
                method = getattr(self, '_load_%s' % name, None)
                if method:
                    self._attributes[name] = method(name, ds[name])
                else:
                    self._attributes[name] = ds[name]

        # run early, non-critical validation
        self.validate()

        # cache the datastructure internally
        setattr(self, '_ds', ds)

        # return the constructed object
        return self

    def get_ds(self):
        '''Return the raw datastructure this object was loaded from (or None).'''
        try:
            return getattr(self, '_ds')
        except AttributeError:
            return None

    def get_loader(self):
        '''Return the DataLoader associated with this object.'''
        return self._loader

    def get_variable_manager(self):
        '''Return the VariableManager associated with this object.'''
        return self._variable_manager

    def _validate_attributes(self, ds):
        '''
        Ensures that there are no keys in the datastructure which do
        not map to attributes for this object.
        '''

        valid_attrs = frozenset(name for name in self._get_base_attributes())
        for key in ds:
            if key not in valid_attrs:
                raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)

    def validate(self, all_vars=None):
        ''' validation that is done at parse time, not load time '''

        # NOTE: previously defaulted to a shared mutable dict(); use None
        # as the default to avoid cross-call state leaking.
        if all_vars is None:
            all_vars = dict()

        # walk all fields in the object
        for (name, attribute) in iteritems(self._get_base_attributes()):

            # run validator only if present
            method = getattr(self, '_validate_%s' % name, None)
            if method:
                method(attribute, name, getattr(self, name))
            else:
                # and make sure the attribute is of the type it should be
                value = getattr(self, name)
                if value is not None:
                    if attribute.isa == 'string' and isinstance(value, (list, dict)):
                        raise AnsibleParserError("The field '%s' is supposed to be a string type, however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds())

    def copy(self):
        '''
        Create a copy of this object and return it.
        '''

        new_me = self.__class__()

        for name in self._get_base_attributes():
            setattr(new_me, name, getattr(self, name))

        new_me._loader = self._loader
        new_me._variable_manager = self._variable_manager

        # if the ds value was set on the object, copy it to the new copy too
        if hasattr(self, '_ds'):
            new_me._ds = self._ds

        return new_me

    def post_validate(self, templar):
        '''
        we can't tell that everything is of the right type until we have
        all the variables.  Run basic types (from isa) as well as
        any _post_validate_<foo> functions.
        '''

        basedir = None
        if self._loader is not None:
            basedir = self._loader.get_basedir()

        # save the omit value for later checking
        omit_value = templar._available_variables.get('omit')

        for (name, attribute) in iteritems(self._get_base_attributes()):

            if getattr(self, name) is None:
                if not attribute.required:
                    continue
                else:
                    raise AnsibleParserError("the field '%s' is required but was not set" % name)
            elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
                # Intermediate objects like Play() won't have their fields validated by
                # default, as their values are often inherited by other objects and validated
                # later, so we don't want them to fail out early
                continue

            try:
                # Run the post-validator if present. These methods are responsible for
                # using the given templar to template the values, if required.
                method = getattr(self, '_post_validate_%s' % name, None)
                if method:
                    value = method(attribute, getattr(self, name), templar)
                else:
                    # if the attribute contains a variable, template it now
                    value = templar.template(getattr(self, name))

                # if this evaluated to the omit value, set the value back to
                # the default specified in the FieldAttribute and move on
                if omit_value is not None and value == omit_value:
                    # BUGFIX: the default must actually be stored; previously it
                    # was assigned to the local `value` and then discarded by
                    # the `continue`
                    setattr(self, name, attribute.default)
                    continue

                # and make sure the attribute is of the type it should be
                if value is not None:
                    if attribute.isa == 'string':
                        value = text_type(value)
                    elif attribute.isa == 'int':
                        value = int(value)
                    elif attribute.isa == 'float':
                        value = float(value)
                    elif attribute.isa == 'bool':
                        value = boolean(value)
                    elif attribute.isa == 'percent':
                        # special value, which may be an integer or float
                        # with an optional '%' at the end
                        if isinstance(value, string_types) and '%' in value:
                            value = value.replace('%', '')
                        value = float(value)
                    elif attribute.isa == 'list':
                        if value is None:
                            value = []
                        elif not isinstance(value, list):
                            value = [ value ]
                        if attribute.listof is not None:
                            for item in value:
                                if not isinstance(item, attribute.listof):
                                    raise AnsibleParserError("the field '%s' should be a list of %s, but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
                                elif attribute.required and attribute.listof == string_types:
                                    if item is None or item.strip() == "":
                                        raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
                    elif attribute.isa == 'set':
                        if value is None:
                            value = set()
                        else:
                            if not isinstance(value, (list, set)):
                                value = [ value ]
                            if not isinstance(value, set):
                                value = set(value)
                    elif attribute.isa == 'dict':
                        if value is None:
                            value = dict()
                        elif not isinstance(value, dict):
                            raise TypeError("%s is not a dictionary" % value)

                # and assign the massaged value back to the attribute field
                setattr(self, name, value)

            except (TypeError, ValueError) as e:
                raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
            except UndefinedError as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds())

    def serialize(self):
        '''
        Serializes the object derived from the base object into
        a dictionary of values. This only serializes the field
        attributes for the object, so this may need to be overridden
        for any classes which wish to add additional items not stored
        as field attributes.
        '''

        repr = dict()

        for name in self._get_base_attributes():
            repr[name] = getattr(self, name)

        # serialize the uuid field
        repr['uuid'] = getattr(self, '_uuid')

        return repr

    def deserialize(self, data):
        '''
        Given a dictionary of values, load up the field attributes for
        this object. As with serialize(), if there are any non-field
        attribute data members, this method will need to be overridden
        and extended.
        '''

        assert isinstance(data, dict)

        for (name, attribute) in iteritems(self._get_base_attributes()):
            if name in data:
                setattr(self, name, data[name])
            else:
                setattr(self, name, attribute.default)

        # restore the UUID field
        setattr(self, '_uuid', data.get('uuid'))

    def _load_vars(self, attr, ds):
        '''
        Vars in a play can be specified either as a dictionary directly, or
        as a list of dictionaries. If the latter, this method will turn the
        list into a single dictionary.
        '''

        def _validate_variable_keys(ds):
            for key in ds:
                if not isidentifier(key):
                    raise TypeError("%s is not a valid variable name" % key)

        try:
            if isinstance(ds, dict):
                _validate_variable_keys(ds)
                return ds
            elif isinstance(ds, list):
                all_vars = dict()
                for item in ds:
                    if not isinstance(item, dict):
                        raise ValueError
                    _validate_variable_keys(item)
                    all_vars = combine_vars(all_vars, item)
                return all_vars
            elif ds is None:
                return {}
            else:
                raise ValueError
        except ValueError:
            raise AnsibleParserError("Vars in a %s must be specified as a dictionary, or a list of dictionaries" % self.__class__.__name__, obj=ds)
        except TypeError as e:
            raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds)

    def _extend_value(self, value, new_value):
        '''
        Will extend the value given with new_value (and will turn both
        into lists if they are not so already).
        '''

        if not isinstance(value, list):
            value = [ value ]
        if not isinstance(new_value, list):
            new_value = [ new_value ]

        # NOTE: order must be preserved, so a set() cannot be used here;
        # groupby only collapses *adjacent* duplicate entries.
        return [i for i, _ in itertools.groupby(value + new_value)]

    def __getstate__(self):
        return self.serialize()

    def __setstate__(self, data):
        self.__init__()
        self.deserialize(data)
|
AudriusButkevicius/TurboVNC | refs/heads/master | unix/Xvnc/extras/freetype2/src/tools/docmaker/tohtml.py | 4 | from sources import *
from content import *
from formatter import *
import time
# The following defines the HTML header used by all generated pages.
#
html_header_1 = """\
<html>
<header>
<title>"""

html_header_2= """ API Reference</title>
<basefont face="Verdana,Geneva,Arial,Helvetica">
<style content="text/css">
  P { text-align=justify }
  H1 { text-align=center }
  LI { text-align=justify }
</style>
</header>
<body text=#000000
      bgcolor=#FFFFFF
      link=#0000EF
      vlink=#51188E
      alink=#FF0000>
<center><h1>"""

html_header_3=""" API Reference</h1></center>
"""

# The HTML footer used by all generated pages.
#
html_footer = """\
</body>
</html>"""

# The header and footer used for each section.
#
section_title_header = "<center><h1>"
section_title_footer = "</h1></center>"

# The header and footer used for code segments.
#
code_header = "<font color=blue><pre>"
code_footer = "</pre></font>"

# Paragraph header and footer.
#
para_header = "<p>"
para_footer = "</p>"

# Block header and footer.
#
block_header = "<center><table width=75%><tr><td>"
block_footer = "</td></tr></table><hr width=75%></center>"

# Description header/footer.
#
description_header = "<center><table width=87%><tr><td>"
description_footer = "</td></tr></table></center><br>"

# Marker header/inter/footer combination.
#
marker_header = "<center><table width=87% cellpadding=5><tr bgcolor=#EEEEFF><td><em><b>"
marker_inter  = "</b></em></td></tr><tr><td>"
marker_footer = "</td></tr></table></center>"

# Source code extracts header/footer.
#
source_header = "<center><table width=87%><tr bgcolor=#D6E8FF width=100%><td><pre>\n"
source_footer = "\n</pre></table></center><br>"

# Chapter header/inter/footer.
#
chapter_header = "<br><center><table width=75%><tr><td><h2>"
chapter_inter  = "</h2><ul>"
chapter_footer = "</ul></td></tr></table></center>"

# source language keyword coloration/styling
#
keyword_prefix = '<font color="darkblue">'
keyword_suffix = '</font>'

# fix: heading previously misspelled as "Synopsys"
section_synopsis_header = '<h2>Synopsis</h2><font color="cyan">'
section_synopsis_footer = '</font>'
# Translate a single line of source to HTML.  This will convert
# a "&" into "&amp;", a "<" into "&lt;", and a ">" into "&gt;", etc.
#
def html_quote( line ):
    """Escape the HTML metacharacters '&', '<' and '>' in `line`."""
    # '&' must be replaced first so the other entities are not re-escaped.
    # Use str methods instead of the `string` module functions, which were
    # removed in Python 3.
    result = line.replace( "&", "&amp;" )
    result = result.replace( "<", "&lt;" )
    result = result.replace( ">", "&gt;" )
    return result
# same as 'html_quote', but ignores left and right brackets
#
def html_quote0( line ):
    """Escape only '&'; left and right angle brackets are kept as-is."""
    # str method instead of `string.replace`, removed in Python 3
    return line.replace( "&", "&amp;" )
def dump_html_code( lines, prefix = "" ):
# clean the last empty lines
#
l = len( self.lines )
while l > 0 and string.strip( self.lines[l - 1] ) == "":
l = l - 1
# The code footer should be directly appended to the last code
# line to avoid an additional blank line.
#
print prefix + code_header,
for line in self.lines[0 : l+1]:
print '\n' + prefix + html_quote(line),
print prefix + code_footer,
class HtmlFormatter(Formatter):
    def __init__( self, processor, project_title, file_prefix ):
        """Build an HTML formatter.

        `project_title` appears in page titles/headers; `file_prefix`,
        if non-empty, is prepended (with a '-') to every generated
        HTML file name.
        """
        Formatter.__init__( self, processor )

        global html_header_1, html_header_2, html_header_3, html_footer

        if file_prefix:
            file_prefix = file_prefix + "-"
        else:
            file_prefix = ""

        self.project_title = project_title
        self.file_prefix = file_prefix
        self.html_header = html_header_1 + project_title + html_header_2 + \
                           project_title + html_header_3
        # footer carries a generation timestamp
        self.html_footer = "<p><center><font size=""-2"">generated on " + \
                           time.asctime( time.localtime( time.time() ) ) + \
                           "</font></p></center>" + html_footer
        # number of columns used by index/TOC tables
        self.columns = 3
def make_section_url( self, section ):
return self.file_prefix + section.name + ".html"
def make_block_url( self, block ):
return self.make_section_url( block.section ) + "#" + block.name
def make_html_words( self, words ):
""" convert a series of simple words into some HTML text """
line = ""
if words:
line = html_quote( words[0] )
for w in words[1:]:
line = line + " " + html_quote( w )
return line
def make_html_word( self, word ):
"""analyze a simple word to detect cross-references and styling"""
# look for cross-references
#
m = re_crossref.match( word )
if m:
try:
name = m.group(1)
block = self.identifiers[ name ]
url = self.make_block_url( block )
return '<a href="' + url + '">' + name + '</a>'
except:
return '?' + name + '?'
# look for italics and bolds
m = re_italic.match( word )
if m:
name = m.group(1)
return '<i>'+name+'</i>'
m = re_bold.match( word )
if m:
name = m.group(1)
return '<b>'+name+'</b>'
return html_quote(word)
def make_html_para( self, words ):
""" convert a paragraph's words into tagged HTML text, handle xrefs """
line = ""
if words:
line = self.make_html_word( words[0] )
for word in words[1:]:
line = line + " " + self.make_html_word( word )
return "<p>" + line + "</p>"
def make_html_code( self, lines ):
""" convert a code sequence to HTML """
line = code_header + '\n'
for l in lines:
line = line + html_quote( l ) + '\n'
return line + code_footer
def make_html_items( self, items ):
""" convert a field's content into some valid HTML """
lines = []
for item in items:
if item.lines:
lines.append( self.make_html_code( item.lines ) )
else:
lines.append( self.make_html_para( item.words ) )
return string.join( lines, '\n' )
def print_html_items( self, items ):
print self.make_html_items( items )
def print_html_field( self, field ):
if field.name:
print "<table valign=top><tr><td><b>"+field.name+"</b></td><td>"
print self.make_html_items( field.items )
if field.name:
print "</td></tr></table>"
def html_source_quote( self, line, block_name = None ):
result = ""
while line:
m = re_source_crossref.match( line )
if m:
name = m.group(2)
prefix = html_quote( m.group(1) )
length = len( m.group(0) )
if name == block_name:
# this is the current block name, if any
result = result + prefix + '<b>' + name + '</b>'
elif re_source_keywords.match(name):
# this is a C keyword
result = result + prefix + keyword_prefix + name + keyword_suffix
elif self.identifiers.has_key(name):
# this is a known identifier
block = self.identifiers[name]
result = result + prefix + '<a href="' + \
self.make_block_url(block) + '">' + name + '</a>'
else:
result = result + html_quote(line[ : length ])
line = line[ length : ]
else:
result = result + html_quote(line)
line = []
return result
def print_html_field_list( self, fields ):
print "<table valign=top cellpadding=3>"
for field in fields:
print "<tr valign=top><td><b>" + field.name + "</b></td><td>"
self.print_html_items( field.items )
print "</td></tr>"
print "</table>"
def print_html_markup( self, markup ):
table_fields = []
for field in markup.fields:
if field.name:
# we begin a new series of field or value definitions, we
# will record them in the 'table_fields' list before outputting
# all of them as a single table
#
table_fields.append( field )
else:
if table_fields:
self.print_html_field_list( table_fields )
table_fields = []
self.print_html_items( field.items )
if table_fields:
self.print_html_field_list( table_fields )
#
# Formatting the index
#
def index_enter( self ):
print self.html_header
self.index_items = {}
def index_name_enter( self, name ):
block = self.identifiers[ name ]
url = self.make_block_url( block )
self.index_items[ name ] = url
def index_exit( self ):
# block_index already contains the sorted list of index names
count = len( self.block_index )
rows = (count + self.columns - 1)/self.columns
print "<center><table border=0 cellpadding=0 cellspacing=0>"
for r in range(rows):
line = "<tr>"
for c in range(self.columns):
i = r + c*rows
if i < count:
bname = self.block_index[ r + c*rows ]
url = self.index_items[ bname ]
line = line + '<td><a href="' + url + '">' + bname + '</a></td>'
else:
line = line + '<td></td>'
line = line + "</tr>"
print line
print "</table></center>"
print self.html_footer
self.index_items = {}
def index_dump( self, index_filename = None ):
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.index_dump( self, index_filename )
#
# Formatting the table of content
#
def toc_enter( self ):
print self.html_header
print "<center><h1>Table of Contents</h1></center>"
def toc_chapter_enter( self, chapter ):
print chapter_header + string.join(chapter.title) + chapter_inter
print "<table cellpadding=5>"
def toc_section_enter( self, section ):
print "<tr valign=top><td>"
print '<a href="' + self.make_section_url( section ) + '">' + \
section.title + '</a></td><td>'
print self.make_html_para( section.abstract )
def toc_section_exit( self, section ):
print "</td></tr>"
def toc_chapter_exit( self, chapter ):
print "</table>"
print chapter_footer
def toc_index( self, index_filename ):
print chapter_header + '<a href="' + index_filename + '">Global Index</a>' + chapter_inter + chapter_footer
def toc_exit( self ):
print "</table></center>"
print self.html_footer
def toc_dump( self, toc_filename = None, index_filename = None ):
if toc_filename == None:
toc_filename = self.file_prefix + "toc.html"
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.toc_dump( self, toc_filename, index_filename )
#
# Formatting sections
#
def section_enter( self, section ):
print self.html_header
print section_title_header
print section.title
print section_title_footer
# print section synopsys
print section_synopsis_header
print "<center><table cellspacing=5 cellpadding=0 border=0>"
maxwidth = 0
for b in section.blocks.values():
if len(b.name) > maxwidth:
maxwidth = len(b.name)
width = 70 # XXX magic number
columns = width / maxwidth
if columns < 1:
columns = 1
count = len(section.block_names)
rows = (count + columns-1)/columns
for r in range(rows):
line = "<tr>"
for c in range(columns):
i = r + c*rows
line = line + '<td></td><td>'
if i < count:
name = section.block_names[i]
line = line + '<a href="#' + name + '">' + name + '</a>'
line = line + '</td>'
line = line + "</tr>"
print line
print "</table></center><br><br>"
print section_synopsis_footer
print description_header
print self.make_html_items( section.description )
print description_footer
def block_enter( self, block ):
print block_header
# place html anchor if needed
if block.name:
print '<a name="' + block.name + '">'
print "<h4>" + block.name + "</h4>"
print "</a>"
# dump the block C source lines now
if block.code:
print source_header
for l in block.code:
print self.html_source_quote( l, block.name )
print source_footer
def markup_enter( self, markup, block ):
if markup.tag == "description":
print description_header
else:
print marker_header + markup.tag + marker_inter
self.print_html_markup( markup )
def markup_exit( self, markup, block ):
if markup.tag == "description":
print description_footer
else:
print marker_footer
def block_exit( self, block ):
print block_footer
def section_exit( self, section ):
print html_footer
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section, self.file_prefix + section.name + '.html' )
|
gylian/sickrage | refs/heads/master | autoProcessTV/lib/requests/packages/chardet/chardistribution.py | 2754 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
# Analysis may stop once this many multi-byte characters have been seen
# (see CharDistributionAnalysis.got_enough_data()).
ENOUGH_DATA_THRESHOLD = 1024
# Confidence is kept strictly inside (SURE_NO, SURE_YES): never 0% or 100%.
SURE_YES = 0.99
SURE_NO = 0.01
# Below this many "frequent" characters no judgement is attempted.
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
    """Base class for character-distribution analysers of 2-byte encodings.

    Subclasses install a language-specific frequency table and implement
    get_order() to map a raw 2-byte sequence onto that table.
    """
    def __init__(self):
        # Frequency-order lookup table, installed by the subclass
        # (indexed by the char order returned from get_order()).
        self._mCharToFreqOrder = None
        self._mTableSize = None  # number of entries in the table above
        # Language-dependent scaling constant used by get_confidence(); see
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        self._mTypicalDistributionRatio = None
        self.reset()

    def reset(self):
        """Reset the analyser, clearing all accumulated state."""
        self._mDone = False    # True once a conclusion has been reached
        self._mTotalChars = 0  # multi-byte characters encountered so far
        self._mFreqChars = 0   # characters whose frequency order is < 512

    def feed(self, aBuf, aCharLen):
        """Feed one character (raw bytes in aBuf) of known byte length."""
        # Only 2-byte characters take part in the distribution analysis.
        order = self.get_order(aBuf) if aCharLen == 2 else -1
        if order < 0:
            return
        self._mTotalChars += 1
        # Count it as "frequent" when its frequency order is below 512.
        if order < self._mTableSize and self._mCharToFreqOrder[order] < 512:
            self._mFreqChars += 1

    def get_confidence(self):
        """Return a confidence value derived from the data seen so far."""
        # Too few characters in our consideration range: no opinion yet.
        if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
            return SURE_NO
        if self._mTotalChars != self._mFreqChars:
            r = (self._mFreqChars /
                 ((self._mTotalChars - self._mFreqChars) *
                  self._mTypicalDistributionRatio))
            if r < SURE_YES:
                return r
        # Normalize the answer: we never want to report 100% certainty.
        return SURE_YES

    def got_enough_data(self):
        """True once enough characters were seen to draw a conclusion."""
        # It is not necessary to receive all data before concluding.
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD

    def get_order(self, aBuf):
        """Map raw bytes to a frequency-table order; -1 means "not tracked".

        Converting the encoded bytes to a neutral order number lets several
        encodings of one language share a single frequency table.  The base
        class tracks nothing; subclasses override this.
        """
        return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analyser specialised for the EUC-TW encoding."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        # Wire in the EUC-TW specific frequency table and ratio.
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        """Order of a 2-byte EUC-TW char: lead 0xC4-0xFE, trail 0xA1-0xFE.

        Byte validity has already been checked by the state machine.
        """
        lead = wrap_ord(aBuf[0])
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analyser specialised for the EUC-KR encoding."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        # Wire in the EUC-KR specific frequency table and ratio.
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        """Order of a 2-byte EUC-KR char: lead 0xB0-0xFE, trail 0xA1-0xFE.

        Byte validity has already been checked by the state machine.
        """
        lead = wrap_ord(aBuf[0])
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analyser specialised for the GB2312 encoding."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        # Wire in the GB2312 specific frequency table and ratio.
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        """Order of a 2-byte GB2312 char: lead 0xB0-0xFE, trail 0xA1-0xFE.

        Byte validity has already been checked by the state machine.
        """
        lead = wrap_ord(aBuf[0])
        trail = wrap_ord(aBuf[1])
        if lead >= 0xB0 and trail >= 0xA1:
            return 94 * (lead - 0xB0) + trail - 0xA1
        return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Distribution analyser specialised for the Big5 encoding."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        # Wire in the Big5 specific frequency table and ratio.
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        """Order of a 2-byte Big5 char.

        Lead byte range 0xA4-0xFE; trail is either 0x40-0x7E or 0xA1-0xFE
        (each row holds 157 cells, the high trail range starting at 63).
        Byte validity has already been checked by the state machine.
        """
        lead = wrap_ord(aBuf[0])
        trail = wrap_ord(aBuf[1])
        if lead < 0xA4:
            return -1
        if trail >= 0xA1:
            return 157 * (lead - 0xA4) + trail - 0xA1 + 63
        return 157 * (lead - 0xA4) + trail - 0x40
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analyser specialised for the Shift-JIS encoding."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        # Shift-JIS shares the JIS frequency table with EUC-JP.
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        """Order of a 2-byte Shift-JIS char.

        Lead byte ranges 0x81-0x9F and 0xE0-0xEF (188 cells per row);
        trail byte ranges 0x40-0x7F and 0x81-0xFE.  Byte validity has
        already been checked by the state machine.
        """
        lead = wrap_ord(aBuf[0])
        trail = wrap_ord(aBuf[1])
        if 0x81 <= lead <= 0x9F:
            row = lead - 0x81
        elif 0xE0 <= lead <= 0xEF:
            row = lead - 0xE0 + 31
        else:
            return -1
        # Trail bytes above 0x7F are rejected (matches upstream behaviour).
        if trail > 0x7F:
            return -1
        return 188 * row + trail - 0x40
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Distribution analyser specialised for the EUC-JP encoding."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        # EUC-JP shares the JIS frequency table with Shift-JIS.
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        """Order of a 2-byte EUC-JP char: lead 0xA0-0xFE, trail 0xA1-0xFE.

        Byte validity has already been checked by the state machine.
        NOTE(review): a lead byte of exactly 0xA0 yields a negative order
        (formula subtracts 0xA1); upstream behaviour is preserved as-is.
        """
        lead = wrap_ord(aBuf[0])
        if lead < 0xA0:
            return -1
        return 94 * (lead - 0xA1) + wrap_ord(aBuf[1]) - 0xA1
|
manuzhang/beam | refs/heads/master | sdks/python/apache_beam/examples/cookbook/bigquery_tornadoes_test.py | 16 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the BigQuery tornadoes example."""
import logging
import unittest
import apache_beam as beam
from apache_beam.examples.cookbook import bigquery_tornadoes
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class BigQueryTornadoesTest(unittest.TestCase):
  """Unit test for the tornado-counting transform."""

  def test_basics(self):
    """count_tornadoes tallies tornado days per month."""
    with TestPipeline() as p:
      input_rows = [
          {'month': 1, 'day': 1, 'tornado': False},
          {'month': 1, 'day': 2, 'tornado': True},
          {'month': 1, 'day': 3, 'tornado': True},
          {'month': 2, 'day': 1, 'tornado': True},
      ]
      rows = p | 'create' >> beam.Create(input_rows)
      results = bigquery_tornadoes.count_tornadoes(rows)
      assert_that(
          results,
          equal_to([{'month': 1, 'tornado_count': 2},
                    {'month': 2, 'tornado_count': 1}]))
if __name__ == '__main__':
  # Show INFO-level logs while the tests run, then hand off to unittest.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
|
reinhrst/ycmd | refs/heads/master | cpp/ycm/tests/gmock/gtest/xcode/Scripts/versiongenerate.py | 3088 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re

# Read the command line arguments (input and output directories).
# BUG FIXES in this script: Python-2-only `print` statements replaced with
# print() calls (valid on both Python 2 and 3), and file handles are now
# managed with `with` so they are closed even on error paths.
if len(sys.argv) < 3:
  print("Usage: versiongenerate.py input_dir output_dir")
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file; per the module
# docstring, AC_INIT is assumed to appear within them.
buffer_size = 1024
with open("%s/configure.ac" % input_dir, 'r') as config_file:
  opening_string = config_file.read(buffer_size)

# Extract the version string from the AC_INIT macro
# The following init_expression means:
#   Extract three integers separated by periods and surrounded by squre
#   brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
#   (*? is the non-greedy flag) since that would pull in everything between
#   the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//

#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s

""" % (major_version, minor_version, major_version, minor_version, fix_version)

with open("%s/Version.h" % output_dir, 'w') as version_file:
  version_file.write(file_data)
|
stbka/ansible | refs/heads/devel | lib/ansible/plugins/inventory/ini.py | 81 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from . import InventoryParser
class InventoryIniParser(InventoryAggregateParser):
    """Aggregate parser over the plain files of an inventory directory."""

    CONDITION = "is_file(%s)"

    def __init__(self, inven_directory):
        """Collect parseable inventory file paths from `inven_directory`.

        Skips ignored extensions, hidden files, and the special
        host_vars/group_vars/vars_plugins directories, then hands the
        surviving absolute paths to the aggregate base class.
        """
        directory = inven_directory
        names = os.listdir(inven_directory)
        filtered_names = []

        # Clean up the list of filenames
        for filename in names:
            # Skip files that end with certain extensions or characters
            if any(filename.endswith(ext) for ext in C.DEFAULT_INVENTORY_IGNORE):
                continue
            # Skip hidden files
            if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)):
                continue
            # These are things inside of an inventory basedir
            if filename in ("host_vars", "group_vars", "vars_plugins"):
                continue
            fullpath = os.path.join(directory, filename)
            # BUG FIX: previously appended to the undefined name 'new_names'
            # (NameError at runtime) while 'filtered_names' stayed empty.
            filtered_names.append(fullpath)

        # BUG FIX: super() previously referenced the nonexistent class
        # 'InventoryDirectoryParser'; it must name this class.
        super(InventoryIniParser, self).__init__(filtered_names)

    def parse(self):
        """Delegate parsing of the collected files to the base class."""
        return super(InventoryIniParser, self).parse()
|
evansde77/cirrus | refs/heads/develop | tests/__init__.py | 12133432 | |
sjkingo/fantail | refs/heads/master | fantail/plugins/__init__.py | 12133432 | |
phenoxim/cinder | refs/heads/master | cinder/tests/unit/brick/__init__.py | 12133432 | |
Lujeni/ansible | refs/heads/devel | lib/ansible/module_utils/network/ordnance/__init__.py | 12133432 | |
freezmeinster/avagata-site | refs/heads/dev | django/templatetags/__init__.py | 12133432 | |
nesdis/djongo | refs/heads/master | tests/django_tests/tests/v22/tests/gis_tests/gdal_tests/__init__.py | 12133432 | |
Petr-Kovalev/nupic-win32 | refs/heads/master | external/linux32/lib/python2.6/site-packages/matplotlib/numerix/_sp_imports.py | 70 | try:
from numpy.oldnumeric import Int8, UInt8, \
Int16, UInt16, \
Int32, UInt32, \
Float32, Float64, \
Complex32, Complex64, \
Float, Int, Complex
except ImportError:
from numpy import Int8, UInt8, \
Int16, UInt16, \
Int32, UInt32, \
Float32, Float64, \
Complex32, Complex64, \
Float, Int, Complex
class _TypeNamespace:
    """Numeric compatible type aliases for use with extension functions."""
    # Each class attribute re-exports the module-level numpy alias of the
    # same name (the RHS resolves in module scope), giving extension code a
    # single namespace object to query for dtypes.
    Int8 = Int8
    UInt8 = UInt8
    Int16 = Int16
    UInt16 = UInt16
    Int32 = Int32
    UInt32 = UInt32
    Float32 = Float32
    Float64 = Float64
    Complex32 = Complex32
    Complex64 = Complex64
# Shared singleton namespace handed to extension functions.
nx = _TypeNamespace()
from numpy import inf, infty, Infinity
from numpy.random import rand, randn
# Lower-case alias kept for backward compatibility with old Numeric code.
infinity = Infinity
# NOTE: re-exports numpy's all/isnan/any, shadowing the builtins of the
# same name for modules that do a star-import of this module.
from numpy import all, isnan, any
yanlend/scikit-learn | refs/heads/master | examples/cluster/plot_lena_compress.py | 271 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================

The classic image processing example, Lena, an 8-bit grayscale
bit-depth, 512 x 512 sized image, is used here to illustrate
how `k`-means is used for vector quantization.

"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import scipy as sp
import matplotlib.pyplot as plt

from sklearn import cluster

n_clusters = 5
np.random.seed(0)

try:
    lena = sp.lena()
except AttributeError:
    # Newer versions of scipy have lena in misc
    from scipy import misc
    lena = misc.lena()
X = lena.reshape((-1, 1))  # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_

# create an array from labels and values
# (each pixel is replaced by its cluster-center gray level)
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape

vmin = lena.min()
vmax = lena.max()

# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)

# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)

# equal bins lena -- baseline quantization with evenly spaced thresholds,
# for comparison against the k-means code book
regular_values = np.linspace(0, 256, n_clusters + 1)
# searchsorted returns 1..n_clusters, so subtract 1 for 0-based bin labels
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1])  # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)

# histogram of gray levels with both sets of quantization boundaries overlaid
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
# solid lines: midpoints between adjacent k-means centers
for center_1, center_2 in zip(values[:-1], values[1:]):
    plt.axvline(.5 * (center_1 + center_2), color='b')
# dashed lines: midpoints of the equal-width bins
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
    plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')

plt.show()
|
jdmcbr/Shapely | refs/heads/master | tests/test_coords.py | 8 | from . import unittest, numpy
from shapely import geometry
class CoordsTestCase(unittest.TestCase):
    """
    Shapely assumes contiguous C-order float64 data for internal ops.
    Data should be converted to contiguous float64 if numpy exists.
    c9a0707 broke this a little bit.
    """

    @unittest.skipIf(not numpy, 'Numpy required')
    def test_data_promotion(self):
        """float32 coords survive a round-trip through LineString."""
        original = numpy.array([[12, 34], [56, 78]], dtype=numpy.float32)
        round_tripped = numpy.array(geometry.LineString(original).coords)
        self.assertEqual(original.tolist(), round_tripped.tolist())

    @unittest.skipIf(not numpy, 'Numpy required')
    def test_data_destriding(self):
        """Strided views are handled like contiguous data."""
        original = numpy.array([[12, 34], [56, 78]], dtype=numpy.float32)
        # Reversing the row order is an easy way to introduce striding.
        strided = original[::-1]
        round_tripped = numpy.array(geometry.LineString(strided).coords)
        self.assertEqual(strided.tolist(), round_tripped.tolist())
def test_suite():
    """Return a unittest suite containing all CoordsTestCase tests."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(CoordsTestCase)
|
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/draft_schematic/instrument/shared_instrument_nalargon.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Intangible draft-schematic object for the nalargon instrument."""
	schematic = Intangible()
	schematic.template = "object/draft_schematic/instrument/shared_instrument_nalargon.iff"
	schematic.attribute_template_id = -1
	schematic.stfName("string_id_table","")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return schematic
liamgh/liamgreenhughes-sl4a-tf101 | refs/heads/master | python/src/Lib/distutils/unixccompiler.py | 33 | """distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
* libraries specified with -lllib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
__revision__ = "$Id: unixccompiler.py 65012 2008-07-16 13:24:06Z jesse.noller $"
import os, sys
from types import StringType, NoneType
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
# SunCCompiler, and I suspect down that road lies madness.
# * even if we don't know a warning flag from an optimization flag,
# we need some way for outsiders to feed preprocessor/compiler/linker
# flags in to us -- eg. a sysadmin might want to mandate certain flags
# via a site config file, or a user might want to set something for
# compiling this module distribution only via the setup.py command
# line, whatever. As long as these options come from something on the
# current system, they can be as system-dependent as they like, and we
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
def _darwin_compiler_fixup(compiler_so, cc_args):
    """
    This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one them in extra_compile_flags.

    This is needed because '-arch ARCH' adds another architecture to the
    build, without a way to remove an architecture. Furthermore GCC will
    barf if multiple '-isysroot' arguments are present.

    Returns a new (possibly modified) copy of compiler_so; cc_args is
    only inspected, never changed.
    """
    stripArch = stripSysroot = 0
    # work on a copy so the caller's list is never mutated
    compiler_so = list(compiler_so)
    kernel_version = os.uname()[2]  # 8.4.3
    major_version = int(kernel_version.split('.')[0])
    if major_version < 8:
        # OSX before 10.4.0, these don't support -arch and -isysroot at
        # all.
        stripArch = stripSysroot = True
    else:
        # strip only when the user already supplied their own flag
        stripArch = '-arch' in cc_args
        stripSysroot = '-isysroot' in cc_args
    if stripArch or 'ARCHFLAGS' in os.environ:
        # remove every '-arch ARCH' pair, not just the first one
        while 1:
            try:
                index = compiler_so.index('-arch')
                # Strip this argument and the next one:
                del compiler_so[index:index+2]
            except ValueError:
                break
    if 'ARCHFLAGS' in os.environ and not stripArch:
        # User specified different -arch flags in the environ,
        # see also distutils.sysconfig
        compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
    if stripSysroot:
        try:
            index = compiler_so.index('-isysroot')
            # Strip this argument and the next one:
            del compiler_so[index:index+2]
        except ValueError:
            pass
    # Check if the SDK that is used during compilation actually exists,
    # the universal build requires the usage of a universal SDK and not all
    # users have that installed by default.
    sysroot = None
    if '-isysroot' in cc_args:
        idx = cc_args.index('-isysroot')
        sysroot = cc_args[idx+1]
    elif '-isysroot' in compiler_so:
        idx = compiler_so.index('-isysroot')
        sysroot = compiler_so[idx+1]
    if sysroot and not os.path.isdir(sysroot):
        # warn rather than fail -- the compiler will produce its own error
        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
                 sysroot)
        log.warn("Please check your Xcode installation")
    return compiler_so
class UnixCCompiler(CCompiler):
    """CCompiler implementation for typical Unix cc/ar/ranlib toolchains.

    Exception statements use the Python-2.6+/3-compatible spellings
    (``except Exc as e`` / ``raise Exc(arg)``); the previous
    Python-2-only forms are a SyntaxError on Python 3.
    """

    compiler_type = 'unix'

    # These are used by CCompiler in two places: the constructor sets
    # instance attributes 'preprocessor', 'compiler', etc. from them, and
    # 'set_executable()' allows any of these to be set. The defaults here
    # are pretty generic; they will probably have to be set by an outsider
    # (eg. using information discovered by the sysconfig about building
    # Python extensions).
    executables = {'preprocessor' : None,
                   'compiler'     : ["cc"],
                   'compiler_so'  : ["cc"],
                   'compiler_cxx' : ["cc"],
                   'linker_so'    : ["cc", "-shared"],
                   'linker_exe'   : ["cc"],
                   'archiver'     : ["ar", "-cr"],
                   'ranlib'       : None,
                  }

    if sys.platform[:6] == "darwin":
        executables['ranlib'] = ["ranlib"]

    # Needed for the filename generation methods provided by the base
    # class, CCompiler. NB. whoever instantiates/uses a particular
    # UnixCCompiler instance should set 'shared_lib_ext' -- we set a
    # reasonable common default here, but it's not necessarily used on all
    # Unices!
    src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".so"
    dylib_lib_extension = ".dylib"
    static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
    if sys.platform == "cygwin":
        exe_extension = ".exe"

    def preprocess(self, source,
                   output_file=None, macros=None, include_dirs=None,
                   extra_preargs=None, extra_postargs=None):
        """Run the preprocessor over `source`, writing to `output_file`
        (or stdout when it is None).  Raises CompileError on failure."""
        ignore, macros, include_dirs = \
            self._fix_compile_args(None, macros, include_dirs)
        pp_opts = gen_preprocess_options(macros, include_dirs)
        pp_args = self.preprocessor + pp_opts
        if output_file:
            pp_args.extend(['-o', output_file])
        if extra_preargs:
            pp_args[:0] = extra_preargs
        if extra_postargs:
            pp_args.extend(extra_postargs)
        pp_args.append(source)

        # We need to preprocess: either we're being forced to, or we're
        # generating output to stdout, or there's a target output file and
        # the source file is newer than the target (or the target doesn't
        # exist).
        if self.force or output_file is None or newer(source, output_file):
            if output_file:
                self.mkpath(os.path.dirname(output_file))
            try:
                self.spawn(pp_args)
            except DistutilsExecError as msg:
                raise CompileError(msg)

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile a single source file `src` into object file `obj`."""
        compiler_so = self.compiler_so
        if sys.platform == 'darwin':
            # Strip conflicting -arch/-isysroot flags on OS X.
            compiler_so = _darwin_compiler_fixup(compiler_so,
                                                 cc_args + extra_postargs)
        try:
            self.spawn(compiler_so + cc_args + [src, '-o', obj] +
                       extra_postargs)
        except DistutilsExecError as msg:
            raise CompileError(msg)

    def create_static_lib(self, objects, output_libname,
                          output_dir=None, debug=0, target_lang=None):
        """Archive `objects` into a static library, running ranlib when
        configured.  Raises LibError on archiver/ranlib failure."""
        objects, output_dir = self._fix_object_args(objects, output_dir)

        output_filename = \
            self.library_filename(output_libname, output_dir=output_dir)

        if self._need_link(objects, output_filename):
            self.mkpath(os.path.dirname(output_filename))
            self.spawn(self.archiver +
                       [output_filename] +
                       objects + self.objects)

            # Not many Unices required ranlib anymore -- SunOS 4.x is, I
            # think the only major Unix that does. Maybe we need some
            # platform intelligence here to skip ranlib if it's not
            # needed -- or maybe Python's configure script took care of
            # it for us, hence the check for leading colon.
            if self.ranlib:
                try:
                    self.spawn(self.ranlib + [output_filename])
                except DistutilsExecError as msg:
                    raise LibError(msg)
        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    def link(self, target_desc, objects,
             output_filename, output_dir=None, libraries=None,
             library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link `objects` into an executable or shared object.

        Raises LinkError when the linker fails and TypeError when
        `output_dir` is not a string or None.
        """
        objects, output_dir = self._fix_object_args(objects, output_dir)
        libraries, library_dirs, runtime_library_dirs = \
            self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)

        lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
                                   libraries)
        if type(output_dir) not in (StringType, NoneType):
            raise TypeError("'output_dir' must be a string or None")
        if output_dir is not None:
            output_filename = os.path.join(output_dir, output_filename)

        if self._need_link(objects, output_filename):
            ld_args = (objects + self.objects +
                       lib_opts + ['-o', output_filename])
            if debug:
                ld_args[:0] = ['-g']
            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)
            self.mkpath(os.path.dirname(output_filename))
            try:
                if target_desc == CCompiler.EXECUTABLE:
                    linker = self.linker_exe[:]
                else:
                    linker = self.linker_so[:]
                if target_lang == "c++" and self.compiler_cxx:
                    # skip over environment variable settings if /usr/bin/env
                    # is used to set up the linker's environment.
                    # This is needed on OSX. Note: this assumes that the
                    # normal and C++ compiler have the same environment
                    # settings.
                    i = 0
                    if os.path.basename(linker[0]) == "env":
                        i = 1
                        while '=' in linker[i]:
                            i = i + 1
                    linker[i] = self.compiler_cxx[i]

                if sys.platform == 'darwin':
                    linker = _darwin_compiler_fixup(linker, ld_args)

                self.spawn(linker + ld_args)
            except DistutilsExecError as msg:
                raise LinkError(msg)
        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    # -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options() function, in
    # ccompiler.py.

    def library_dir_option(self, dir):
        """Return the flag that adds `dir` to the library search path."""
        return "-L" + dir

    def runtime_library_dir_option(self, dir):
        """Return the flag(s) that add `dir` to the runtime (rpath) path."""
        # XXX Hackish, at the very least. See Python bug #445902:
        # http://sourceforge.net/tracker/index.php
        # ?func=detail&aid=445902&group_id=5470&atid=105470
        # Linkers on different platforms need different options to
        # specify that directories need to be added to the list of
        # directories searched for dependencies when a dynamic library
        # is sought. GCC has to be told to pass the -R option through
        # to the linker, whereas other compilers just know this.
        # Other compilers may need something slightly different. At
        # this time, there's no way to determine this information from
        # the configuration data stored in the Python installation, so
        # we use this hack.
        compiler = os.path.basename(sysconfig.get_config_var("CC"))
        if sys.platform[:6] == "darwin":
            # MacOSX's linker doesn't understand the -R flag at all
            return "-L" + dir
        elif sys.platform[:5] == "hp-ux":
            return "+s -L" + dir
        elif sys.platform[:7] == "irix646" or sys.platform[:6] == "osf1V5":
            return ["-rpath", dir]
        elif compiler[:3] == "gcc" or compiler[:3] == "g++":
            return "-Wl,-R" + dir
        else:
            return "-R" + dir

    def library_option(self, lib):
        """Return the flag to link against library `lib`."""
        return "-l" + lib

    def find_library_file(self, dirs, lib, debug=0):
        """Search `dirs` for library `lib`.

        Prefers dylib over shared over static (mirroring what GCC's
        linker appears to do); returns the first existing path or None.
        """
        shared_f = self.library_filename(lib, lib_type='shared')
        dylib_f = self.library_filename(lib, lib_type='dylib')
        static_f = self.library_filename(lib, lib_type='static')

        for dir in dirs:
            shared = os.path.join(dir, shared_f)
            dylib = os.path.join(dir, dylib_f)
            static = os.path.join(dir, static_f)
            # We're second-guessing the linker here, with not much hard
            # data to go on: GCC seems to prefer the shared library, so I'm
            # assuming that *all* Unix C compilers do. And of course I'm
            # ignoring even GCC's "-static" option. So sue me.
            if os.path.exists(dylib):
                return dylib
            elif os.path.exists(shared):
                return shared
            elif os.path.exists(static):
                return static

        # Oops, didn't find it in *any* of 'dirs'
        return None
|
team-xue/xue | refs/heads/master | xue/accounts/migrations/0008_auto__add_field_dmuserprofile_gender__add_field_dmuserprofile_phone.py | 1 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration.

    Adds the 'gender' and 'phone' columns to accounts_dmuserprofile.
    The 'models' dict below is South's frozen ORM snapshot; do not
    hand-edit it.
    """

    def forwards(self, orm):
        """Apply the migration: add the two new columns."""
        # Adding field 'DMUserProfile.gender'
        db.add_column('accounts_dmuserprofile', 'gender', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)

        # Adding field 'DMUserProfile.phone'
        db.add_column('accounts_dmuserprofile', 'phone', self.gf('django.db.models.fields.CharField')(default='', max_length=24, blank=True), keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the columns added in forwards()."""
        # Deleting field 'DMUserProfile.gender'
        db.delete_column('accounts_dmuserprofile', 'gender')

        # Deleting field 'DMUserProfile.phone'
        db.delete_column('accounts_dmuserprofile', 'phone')

    # Frozen ORM state used by South when running this migration.
    models = {
        'accounts.dmuserprofile': {
            'Meta': {'object_name': 'DMUserProfile'},
            'ethnic': ('django.db.models.fields.IntegerField', [], {}),
            'gender': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'join_date': ('django.db.models.fields.DateField', [], {}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'zh'", 'max_length': '5'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'major': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
            'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
            'realname': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'role': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    # South processes only the app(s) listed here for this migration.
    complete_apps = ['accounts']
|
BaichuanWu/Blog_on_django | refs/heads/master | site-packages/django/contrib/gis/tests/geogapp/__init__.py | 12133432 | |
runekaagaard/django-contrib-locking | refs/heads/master | django/contrib/admin/views/__init__.py | 12133432 | |
bjwbell/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi.py | 506 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides the opening handshake processor for the WebSocket
protocol (RFC 6455).
Specification:
http://tools.ietf.org/html/rfc6455
"""
# Note: request.connection.write is used in this module, even though mod_python
# document says that it should be used only in connection handlers.
# Unfortunately, we have no other options. For example, request.write is not
# suitable because it doesn't allow direct raw bytes writing.
import base64
import logging
import os
import re
from mod_pywebsocket import common
from mod_pywebsocket.extensions import get_extension_processor
from mod_pywebsocket.extensions import is_compression_extension
from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
from mod_pywebsocket.handshake._base import get_mandatory_header
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import parse_token_list
from mod_pywebsocket.handshake._base import validate_mandatory_header
from mod_pywebsocket.handshake._base import validate_subprotocol
from mod_pywebsocket.handshake._base import VersionException
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648
# disallows non-zero padding, so the character right before == must be any of
# A, Q, g and w.
# (A valid key is 16 random bytes base64-encoded: 22 chars + '==' padding.)
_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')

# Defining aliases for values used frequently.
_VERSION_LATEST = common.VERSION_HYBI_LATEST
_VERSION_LATEST_STRING = str(_VERSION_LATEST)
# Only the latest (RFC 6455) protocol version is accepted by this handshaker.
_SUPPORTED_VERSIONS = [
    _VERSION_LATEST,
]
def compute_accept(key):
    """Computes value for the Sec-WebSocket-Accept header from value of the
    Sec-WebSocket-Key header.

    Returns a (base64_encoded_accept, raw_sha1_digest) tuple.
    """

    digest = util.sha1_hash(key + common.WEBSOCKET_ACCEPT_UUID).digest()
    encoded = base64.b64encode(digest)
    return (encoded, digest)
class Handshaker(object):
    """Opening handshake processor for the WebSocket protocol (RFC 6455)."""

    def __init__(self, request, dispatcher):
        """Construct an instance.

        Args:
            request: mod_python request.
            dispatcher: Dispatcher (dispatch.Dispatcher).

        Handshaker will add attributes such as ws_resource during handshake.
        """
        self._logger = util.get_class_logger(self)

        self._request = request
        self._dispatcher = dispatcher

    def _validate_connection_header(self):
        """Check that the Connection header contains the 'Upgrade' token."""
        connection = get_mandatory_header(
            self._request, common.CONNECTION_HEADER)

        try:
            connection_tokens = parse_token_list(connection)
        except HandshakeException, e:
            raise HandshakeException(
                'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))

        connection_is_valid = False
        for token in connection_tokens:
            if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
                connection_is_valid = True
                break
        if not connection_is_valid:
            raise HandshakeException(
                '%s header doesn\'t contain "%s"' %
                (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))

    def do_handshake(self):
        """Perform the opening handshake.

        Validates the client's request line and headers, negotiates the
        subprotocol and extensions (with special handling for the mux
        and compression extensions), creates the frame stream, and sends
        the '101 Switching Protocols' response.  Raises HandshakeException
        (with a fallback HTTP 400 status) or VersionException on failure.
        """
        self._request.ws_close_code = None
        self._request.ws_close_reason = None

        # Parsing.
        check_request_line(self._request)

        validate_mandatory_header(
            self._request,
            common.UPGRADE_HEADER,
            common.WEBSOCKET_UPGRADE_TYPE)

        self._validate_connection_header()

        self._request.ws_resource = self._request.uri

        unused_host = get_mandatory_header(self._request, common.HOST_HEADER)

        self._request.ws_version = self._check_version()

        try:
            self._get_origin()
            self._set_protocol()
            self._parse_extensions()

            # Key validation, response generation.

            key = self._get_key()
            (accept, accept_binary) = compute_accept(key)
            self._logger.debug(
                '%s: %r (%s)',
                common.SEC_WEBSOCKET_ACCEPT_HEADER,
                accept,
                util.hexify(accept_binary))

            self._logger.debug('Protocol version is RFC 6455')

            # Setup extension processors.

            processors = []
            if self._request.ws_requested_extensions is not None:
                for extension_request in self._request.ws_requested_extensions:
                    processor = get_extension_processor(extension_request)
                    # Unknown extension requests are just ignored.
                    if processor is not None:
                        processors.append(processor)
            self._request.ws_extension_processors = processors

            # List of extra headers. The extra handshake handler may add header
            # data as name/value pairs to this list and pywebsocket appends
            # them to the WebSocket handshake.
            self._request.extra_headers = []

            # Extra handshake handler may modify/remove processors.
            self._dispatcher.do_extra_handshake(self._request)
            processors = filter(lambda processor: processor is not None,
                                self._request.ws_extension_processors)

            # Ask each processor if there are extensions on the request which
            # cannot co-exist. When processor decided other processors cannot
            # co-exist with it, the processor marks them (or itself) as
            # "inactive". The first extension processor has the right to
            # make the final call.
            for processor in reversed(processors):
                if processor.is_active():
                    processor.check_consistency_with_other_processors(
                        processors)
            processors = filter(lambda processor: processor.is_active(),
                                processors)

            accepted_extensions = []

            # We need to take into account of mux extension here.
            # If mux extension exists:
            # - Remove processors of extensions for logical channel,
            #   which are processors located before the mux processor
            # - Pass extension requests for logical channel to mux processor
            # - Attach the mux processor to the request. It will be referred
            #   by dispatcher to see whether the dispatcher should use mux
            #   handler or not.
            mux_index = -1
            for i, processor in enumerate(processors):
                if processor.name() == common.MUX_EXTENSION:
                    mux_index = i
                    break
            if mux_index >= 0:
                logical_channel_extensions = []
                for processor in processors[:mux_index]:
                    logical_channel_extensions.append(processor.request())
                    processor.set_active(False)
                self._request.mux_processor = processors[mux_index]
                self._request.mux_processor.set_extensions(
                    logical_channel_extensions)
                processors = filter(lambda processor: processor.is_active(),
                                    processors)

            stream_options = StreamOptions()

            for index, processor in enumerate(processors):
                if not processor.is_active():
                    continue

                extension_response = processor.get_extension_response()
                if extension_response is None:
                    # Rejected.
                    continue

                accepted_extensions.append(extension_response)

                processor.setup_stream_options(stream_options)

                if not is_compression_extension(processor.name()):
                    continue

                # Inactivate all of the following compression extensions.
                for j in xrange(index + 1, len(processors)):
                    if is_compression_extension(processors[j].name()):
                        processors[j].set_active(False)

            if len(accepted_extensions) > 0:
                self._request.ws_extensions = accepted_extensions
                self._logger.debug(
                    'Extensions accepted: %r',
                    map(common.ExtensionParameter.name, accepted_extensions))
            else:
                self._request.ws_extensions = None

            self._request.ws_stream = self._create_stream(stream_options)

            if self._request.ws_requested_protocols is not None:
                if self._request.ws_protocol is None:
                    raise HandshakeException(
                        'do_extra_handshake must choose one subprotocol from '
                        'ws_requested_protocols and set it to ws_protocol')
                validate_subprotocol(self._request.ws_protocol)

                self._logger.debug(
                    'Subprotocol accepted: %r',
                    self._request.ws_protocol)
            else:
                if self._request.ws_protocol is not None:
                    raise HandshakeException(
                        'ws_protocol must be None when the client didn\'t '
                        'request any subprotocol')

            self._send_handshake(accept)
        except HandshakeException, e:
            if not e.status:
                # Fallback to 400 bad request by default.
                e.status = common.HTTP_STATUS_BAD_REQUEST
            raise e

    def _get_origin(self):
        """Record the request's Origin header (may be None) on the request."""
        origin_header = common.ORIGIN_HEADER
        origin = self._request.headers_in.get(origin_header)
        if origin is None:
            self._logger.debug('Client request does not have origin header')
        self._request.ws_origin = origin

    def _check_version(self):
        """Validate Sec-WebSocket-Version and return the accepted version.

        Raises HandshakeException for malformed values and VersionException
        for well-formed but unsupported versions.
        """
        version = get_mandatory_header(self._request,
                                       common.SEC_WEBSOCKET_VERSION_HEADER)
        if version == _VERSION_LATEST_STRING:
            return _VERSION_LATEST

        if version.find(',') >= 0:
            raise HandshakeException(
                'Multiple versions (%r) are not allowed for header %s' %
                (version, common.SEC_WEBSOCKET_VERSION_HEADER),
                status=common.HTTP_STATUS_BAD_REQUEST)
        raise VersionException(
            'Unsupported version %r for header %s' %
            (version, common.SEC_WEBSOCKET_VERSION_HEADER),
            supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))

    def _set_protocol(self):
        """Parse Sec-WebSocket-Protocol into ws_requested_protocols."""
        self._request.ws_protocol = None

        protocol_header = self._request.headers_in.get(
            common.SEC_WEBSOCKET_PROTOCOL_HEADER)

        if protocol_header is None:
            self._request.ws_requested_protocols = None
            return

        self._request.ws_requested_protocols = parse_token_list(
            protocol_header)
        self._logger.debug('Subprotocols requested: %r',
                           self._request.ws_requested_protocols)

    def _parse_extensions(self):
        """Parse Sec-WebSocket-Extensions into ws_requested_extensions."""
        extensions_header = self._request.headers_in.get(
            common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
        if not extensions_header:
            self._request.ws_requested_extensions = None
            return

        try:
            self._request.ws_requested_extensions = common.parse_extensions(
                extensions_header)
        except common.ExtensionParsingException, e:
            raise HandshakeException(
                'Failed to parse Sec-WebSocket-Extensions header: %r' % e)

        self._logger.debug(
            'Extensions requested: %r',
            map(common.ExtensionParameter.name,
                self._request.ws_requested_extensions))

    def _validate_key(self, key):
        """Strictly validate a Sec-WebSocket-Key value.

        Returns the 16-byte base64-decoded key, or raises
        HandshakeException for any malformed value.
        """
        if key.find(',') >= 0:
            raise HandshakeException('Request has multiple %s header lines or '
                                     'contains illegal character \',\': %r' %
                                     (common.SEC_WEBSOCKET_KEY_HEADER, key))

        # Validate
        key_is_valid = False
        try:
            # Validate key by quick regex match before parsing by base64
            # module. Because base64 module skips invalid characters, we have
            # to do this in advance to make this server strictly reject illegal
            # keys.
            if _SEC_WEBSOCKET_KEY_REGEX.match(key):
                decoded_key = base64.b64decode(key)
                if len(decoded_key) == 16:
                    key_is_valid = True
        except TypeError, e:
            pass

        if not key_is_valid:
            raise HandshakeException(
                'Illegal value for header %s: %r' %
                (common.SEC_WEBSOCKET_KEY_HEADER, key))

        return decoded_key

    def _get_key(self):
        """Fetch and validate Sec-WebSocket-Key; return the raw header value."""
        key = get_mandatory_header(
            self._request, common.SEC_WEBSOCKET_KEY_HEADER)

        decoded_key = self._validate_key(key)

        self._logger.debug(
            '%s: %r (%s)',
            common.SEC_WEBSOCKET_KEY_HEADER,
            key,
            util.hexify(decoded_key))

        return key

    def _create_stream(self, stream_options):
        """Create the frame stream used once the handshake completes."""
        return Stream(self._request, stream_options)

    def _create_handshake_response(self, accept):
        """Build the full '101 Switching Protocols' response as one string."""
        response = []

        response.append('HTTP/1.1 101 Switching Protocols\r\n')

        # WebSocket headers
        response.append(format_header(
            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
        response.append(format_header(
            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
        response.append(format_header(
            common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
        if self._request.ws_protocol is not None:
            response.append(format_header(
                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
                self._request.ws_protocol))
        if (self._request.ws_extensions is not None and
            len(self._request.ws_extensions) != 0):
            response.append(format_header(
                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
                common.format_extensions(self._request.ws_extensions)))

        # Headers not specific for WebSocket
        for name, value in self._request.extra_headers:
            response.append(format_header(name, value))

        response.append('\r\n')

        return ''.join(response)

    def _send_handshake(self, accept):
        """Write the handshake response directly to the raw connection."""
        raw_response = self._create_handshake_response(accept)
        self._request.connection.write(raw_response)
        self._logger.debug('Sent server\'s opening handshake: %r',
                           raw_response)
# vi:sts=4 sw=4 et
|
Changaco/oh-mainline | refs/heads/master | vendor/packages/gdata/tests/gdata_tests/contacts/profiles/live_client_test.py | 39 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import atom.core
import atom.data
import atom.http_core
import gdata.contacts.client
import gdata.data
import gdata.test_config as conf
import unittest
# Register the command-line/config options these live tests consume.
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
conf.options.register_option(conf.TARGET_USERNAME_OPTION)
class ProfileTest(unittest.TestCase):
  """Live tests for the Google Contacts profiles feed.

  These tests contact real Google servers (or replay recordings) and only
  run when the 'runlive' config option is 'true'; otherwise each test
  returns immediately.
  """

  def setUp(self):
    """Build a ContactsClient, configured for live runs when enabled."""
    self.client = gdata.contacts.client.ContactsClient(domain='example.com')
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.contacts.client.ContactsClient(
          domain=conf.options.get_value('appsdomain'))
      if conf.options.get_value('ssl') == 'true':
        self.client.ssl = True
      conf.configure_client(self.client, 'ProfileTest',
                            self.client.auth_service, True)
    # Username portion only; the domain comes from the client config.
    self.client.username = conf.options.get_value('appsusername').split('@')[0]

  def tearDown(self):
    """Release recording/cache resources held by the client."""
    conf.close_client(self.client)

  def test_profiles_feed(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_profiles_feed')
    feed = self.client.get_profiles_feed()
    self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))

  def test_profiles_query(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_profiles_feed')
    query = gdata.contacts.client.ProfilesQuery(max_results=1)
    feed = self.client.get_profiles_feed(q=query)
    self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))
    self.assert_(len(feed.entry) == 1)
    # Needs at least 2 profiles in the feed to test the start-key
    # query param.
    next = feed.GetNextLink()
    feed = None
    if next:
      # Retrieve the start-key query param from the next link.
      uri = atom.http_core.Uri.parse_uri(next.href)
      if 'start-key' in uri.query:
        query.start_key = uri.query['start-key']
        feed = self.client.get_profiles_feed(q=query)
        self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))
        self.assert_(len(feed.entry) == 1)
        self.assert_(feed.GetSelfLink().href == next.href)
        # Compare with a feed retrieved with the next link.
        next_feed = self.client.get_profiles_feed(uri=next.href)
        self.assert_(len(next_feed.entry) == 1)
        self.assert_(next_feed.entry[0].id.text == feed.entry[0].id.text)
def suite():
  """Return the unittest suite containing the live ProfileTest cases."""
  return conf.build_suite([ProfileTest])
# Allow running these live tests directly from the command line.
if __name__ == '__main__':
  unittest.TextTestRunner().run(suite())
|
jamesmcm/luigi | refs/heads/master | test/mock_test.py | 4 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
from luigi.mock import MockTarget, MockFileSystem
from luigi.format import Nop
class MockFileTest(unittest.TestCase):
    """Unit tests for MockTarget read/write round-trips."""

    def test_1(self):
        # Explicit open/close round-trip through the mock target.
        t = MockTarget('test')
        p = t.open('w')
        print('test', file=p)
        p.close()
        q = t.open('r')
        self.assertEqual(list(q), ['test\n'])
        q.close()

    def test_with(self):
        # Context-manager form of the same round-trip.
        t = MockTarget("foo")
        with t.open('w') as b:
            b.write("bar")
        with t.open('r') as b:
            self.assertEqual(list(b), ['bar'])

    def test_bytes(self):
        # Nop format gives raw-bytes access ('wb'/'rb' modes).
        t = MockTarget("foo", format=Nop)
        with t.open('wb') as b:
            b.write(b"bar")
        with t.open('rb') as b:
            self.assertEqual(list(b), [b'bar'])

    def test_default_mode_value(self):
        # open() with no argument defaults to read mode.
        t = MockTarget("foo")
        with t.open('w') as b:
            b.write("bar")
        with t.open() as b:
            self.assertEqual(list(b), ['bar'])

    def test_mode_none_error(self):
        # Passing None explicitly as the mode must raise, not default.
        t = MockTarget("foo")
        with self.assertRaises(TypeError):
            with t.open(None) as b:
                b.write("bar")

    # That should work in python2 because of the autocast
    # That should work in python3 because the default format is Text
    def test_unicode(self):
        t = MockTarget("foo")
        with t.open('w') as b:
            b.write(u"bar")
        with t.open('r') as b:
            self.assertEqual(b.read(), u'bar')
class MockFileSystemTest(unittest.TestCase):
    """Unit tests for MockFileSystem filesystem-style operations."""

    # Shared in-memory filesystem; wiped in setUp() before every test.
    fs = MockFileSystem()

    def _touch(self, path):
        """Create an empty mock file at `path`."""
        t = MockTarget(path)
        with t.open('w'):
            pass

    def setUp(self):
        # Start each test from a clean filesystem with two known files.
        self.fs.clear()
        self.path = "/tmp/foo"
        self.path2 = "/tmp/bar"
        self.path3 = "/tmp/foobar"
        self._touch(self.path)
        self._touch(self.path2)

    def test_copy(self):
        # Copy keeps the source and creates the destination.
        self.fs.copy(self.path, self.path3)
        self.assertTrue(self.fs.exists(self.path))
        self.assertTrue(self.fs.exists(self.path3))

    def test_exists(self):
        self.assertTrue(self.fs.exists(self.path))

    def test_remove(self):
        self.fs.remove(self.path)
        self.assertFalse(self.fs.exists(self.path))

    def test_remove_recursive(self):
        # Recursive remove of the parent directory drops both files.
        self.fs.remove("/tmp", recursive=True)
        self.assertFalse(self.fs.exists(self.path))
        self.assertFalse(self.fs.exists(self.path2))

    def test_rename(self):
        # Rename moves the file: source gone, destination present.
        self.fs.rename(self.path, self.path3)
        self.assertFalse(self.fs.exists(self.path))
        self.assertTrue(self.fs.exists(self.path3))

    def test_listdir(self):
        self.assertEqual(sorted([self.path, self.path2]), sorted(self.fs.listdir("/tmp")))
|
tboyce021/home-assistant | refs/heads/dev | tests/components/tellduslive/test_config_flow.py | 9 | # flake8: noqa pylint: skip-file
"""Tests for the TelldusLive config flow."""
import asyncio
from unittest.mock import Mock, patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.tellduslive import (
APPLICATION_NAME,
DOMAIN,
KEY_SCAN_INTERVAL,
SCAN_INTERVAL,
config_flow,
)
from homeassistant.config_entries import SOURCE_DISCOVERY
from homeassistant.const import CONF_HOST
from tests.common import MockConfigEntry, mock_coro
def init_config_flow(hass, side_effect=None):
    """Init a configuration flow, optionally stubbing the auth-url step.

    When *side_effect* is given, flow._get_auth_url is replaced by a Mock
    raising that effect, so error paths can be exercised.
    """
    flow = config_flow.FlowHandler()
    flow.hass = hass
    if not side_effect:
        return flow
    flow._get_auth_url = Mock(side_effect=side_effect)
    return flow
@pytest.fixture
def supports_local_api():
    """Set TelldusLive supports_local_api.

    Overridden per-test via indirect @pytest.mark.parametrize.
    """
    return True
@pytest.fixture
def authorize():
    """Set TelldusLive authorize.

    Overridden per-test via indirect @pytest.mark.parametrize.
    """
    return True
@pytest.fixture
def mock_tellduslive(supports_local_api, authorize):
    """Mock tellduslive.

    Yields (Session patch, supports_local_api patch) with canned token and
    authorize-url values.
    """
    with patch(
        "homeassistant.components.tellduslive.config_flow.Session"
    ) as Session, patch(
        "homeassistant.components.tellduslive.config_flow.supports_local_api"
    ) as tellduslive_supports_local_api:
        tellduslive_supports_local_api.return_value = supports_local_api
        # Session() returns the same MagicMock on every call, so configuring
        # it here configures every Session instance the flow creates.
        Session().authorize.return_value = authorize
        Session().access_token = "token"
        Session().access_token_secret = "token_secret"
        Session().authorize_url = "https://example.com"
        yield Session, tellduslive_supports_local_api
async def test_abort_if_already_setup(hass):
    """Test we abort if TelldusLive is already setup."""
    flow = init_config_flow(hass)

    # Any existing config entry (here a dummy dict) aborts the user step...
    with patch.object(hass.config_entries, "async_entries", return_value=[{}]):
        result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_setup"

    # ...and the import step alike.
    with patch.object(hass.config_entries, "async_entries", return_value=[{}]):
        result = await flow.async_step_import(None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_setup"
async def test_full_flow_implementation(hass, mock_tellduslive):
    """Test registering an implementation and finishing flow works."""
    flow = init_config_flow(hass)
    flow.context = {"source": SOURCE_DISCOVERY}
    # Discovery reports two hosts -> the user must pick one.
    result = await flow.async_step_discovery(["localhost", "tellstick"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert len(flow._hosts) == 2

    result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"

    # Picking a host moves to the auth step with the mocked authorize URL.
    result = await flow.async_step_user({"host": "localhost"})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "auth"
    assert result["description_placeholders"] == {
        "auth_url": "https://example.com",
        "app_name": APPLICATION_NAME,
    }

    # Successful auth creates the entry with the mocked session token.
    result = await flow.async_step_auth("")
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "localhost"
    assert result["data"]["host"] == "localhost"
    assert result["data"]["scan_interval"] == 60
    assert result["data"]["session"] == {"token": "token", "host": "localhost"}
async def test_step_import(hass, mock_tellduslive):
    """Test that we trigger auth when configuring from import."""
    flow = init_config_flow(hass)
    # CONF_HOST == DOMAIN means the Cloud API host, so auth is required.
    result = await flow.async_step_import({CONF_HOST: DOMAIN, KEY_SCAN_INTERVAL: 0})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "auth"
async def test_step_import_add_host(hass, mock_tellduslive):
    """Test that we add host and trigger user when configuring from import."""
    flow = init_config_flow(hass)
    # A concrete host (not the Cloud API) sends the user back to host selection.
    result = await flow.async_step_import(
        {CONF_HOST: "localhost", KEY_SCAN_INTERVAL: 0}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_step_import_no_config_file(hass, mock_tellduslive):
    """Test that we trigger user with no config_file configuring from import."""
    # NOTE(review): body is identical to test_step_import_add_host; the
    # "no config file" aspect is not actually simulated (no os.path.isfile
    # patch here) -- confirm whether a mock is missing.
    flow = init_config_flow(hass)
    result = await flow.async_step_import(
        {CONF_HOST: "localhost", KEY_SCAN_INTERVAL: 0}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_step_import_load_json_matching_host(hass, mock_tellduslive):
    """Test that we add host and trigger user when configuring from import."""
    flow = init_config_flow(hass)
    # Stored config contains "tellduslive" but the import asks for
    # "Cloud API", so no session matches and the user step is shown.
    with patch(
        "homeassistant.components.tellduslive.config_flow.load_json",
        return_value={"tellduslive": {}},
    ), patch("os.path.isfile"):
        result = await flow.async_step_import(
            {CONF_HOST: "Cloud API", KEY_SCAN_INTERVAL: 0}
        )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_step_import_load_json(hass, mock_tellduslive):
    """Test that we create entry when configuring from import."""
    flow = init_config_flow(hass)
    # A stored session for the requested host lets import finish directly.
    with patch(
        "homeassistant.components.tellduslive.config_flow.load_json",
        return_value={"localhost": {}},
    ), patch("os.path.isfile"):
        result = await flow.async_step_import(
            {CONF_HOST: "localhost", KEY_SCAN_INTERVAL: SCAN_INTERVAL}
        )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "localhost"
    assert result["data"]["host"] == "localhost"
    assert result["data"]["scan_interval"] == 60
    assert result["data"]["session"] == {}
@pytest.mark.parametrize("supports_local_api", [False])
async def test_step_disco_no_local_api(hass, mock_tellduslive):
    """Test that we trigger when configuring from discovery, not supporting local api."""
    flow = init_config_flow(hass)
    flow.context = {"source": SOURCE_DISCOVERY}
    # Without local API support only the Cloud API host remains, so the
    # flow skips host selection and goes straight to auth.
    result = await flow.async_step_discovery(["localhost", "tellstick"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "auth"
    assert len(flow._hosts) == 1
async def test_step_auth(hass, mock_tellduslive):
    """Test that create cloud entity from auth."""
    flow = init_config_flow(hass)
    # First call renders the auth form; second simulates user confirmation.
    await flow.async_step_auth()
    result = await flow.async_step_auth(["localhost", "tellstick"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "Cloud API"
    assert result["data"]["host"] == "Cloud API"
    assert result["data"]["scan_interval"] == 60
    assert result["data"]["session"] == {
        "token": "token",
        "token_secret": "token_secret",
    }
@pytest.mark.parametrize("authorize", [False])
async def test_wrong_auth_flow_implementation(hass, mock_tellduslive):
    """Test wrong auth."""
    flow = init_config_flow(hass)
    await flow.async_step_auth()
    # Session.authorize is mocked to return False -> form redisplayed with error.
    result = await flow.async_step_auth("")
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "auth"
    assert result["errors"]["base"] == "invalid_auth"
async def test_not_pick_host_if_only_one(hass, mock_tellduslive):
    """Test not picking host if we have just one."""
    flow = init_config_flow(hass)
    # No discovery ran, so only the Cloud API host exists -> straight to auth.
    result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "auth"
async def test_abort_if_timeout_generating_auth_url(hass, mock_tellduslive):
    """Test abort if generating authorize url timeout."""
    # _get_auth_url is mocked to raise asyncio.TimeoutError.
    flow = init_config_flow(hass, side_effect=asyncio.TimeoutError)
    result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "authorize_url_timeout"
async def test_abort_no_auth_url(hass, mock_tellduslive):
    """Test abort if generating authorize url returns none."""
    flow = init_config_flow(hass)
    # A falsy auth URL must abort instead of showing an empty auth form.
    flow._get_auth_url = Mock(return_value=False)
    result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "unknown_authorize_url_generation"
async def test_abort_if_exception_generating_auth_url(hass, mock_tellduslive):
    """Test we abort if generating authorize url blows up."""
    # Any unexpected exception maps to the same abort reason as a None URL.
    flow = init_config_flow(hass, side_effect=ValueError)
    result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "unknown_authorize_url_generation"
async def test_discovery_already_configured(hass, mock_tellduslive):
    """Test abort if already configured fires from discovery."""
    MockConfigEntry(domain="tellduslive", data={"host": "some-host"}).add_to_hass(hass)
    flow = init_config_flow(hass)
    flow.context = {"source": SOURCE_DISCOVERY}

    # Discovery of an already-configured host raises AbortFlow internally.
    with pytest.raises(data_entry_flow.AbortFlow):
        result = await flow.async_step_discovery(["some-host", ""])
|
OpenData-NC/open-data-nc | refs/heads/master | opendata/catalog/forms.py | 2 | from django import forms
from django.contrib.admin import site
from django.contrib.admin import widgets
from django.template.defaultfilters import slugify
from secure_input.fields import MiniWYSIWYGField
from .models import (Category, DataType, Department, Division, Resource,
UpdateFrequency)
from opendata.fields_info import FIELDS
class ResourceAdminForm(forms.ModelForm):
    """Admin form for Resource: raw-id widgets for FKs, filtered multi-selects
    for M2M fields, and a rich-text description editor."""

    description = MiniWYSIWYGField()
    division = forms.ModelChoiceField(label=FIELDS['agency_division'],
                                      widget=widgets.ForeignKeyRawIdWidget(
                                          Resource._meta.get_field('division').rel,
                                          site
                                      ),
                                      queryset=Division.objects, required=False)
    categories = forms.ModelMultipleChoiceField(queryset=Category.objects,
                                                widget=widgets.FilteredSelectMultiple(
                                                    "categories", False)
                                                )
    data_types = forms.ModelMultipleChoiceField(queryset=DataType.objects,
                                                widget=widgets.FilteredSelectMultiple(
                                                    "Data types", False)
                                                )
    department = forms.ModelChoiceField(label=FIELDS['agency_name'],
                                        widget=widgets.ForeignKeyRawIdWidget(
                                            Resource._meta.get_field('department').rel,
                                            site
                                        ),
                                        queryset=Department.objects)
    agency_type = forms.ChoiceField(choices=Resource.AGENCY_TYPES,
                                    label=FIELDS['agency_type'])
    newest_record = forms.DateField(label=FIELDS['newest_updated'], required=False,
                                    widget=widgets.AdminDateWidget)
    oldest_record = forms.DateField(label=FIELDS['oldest_record'], required=False,
                                    widget=widgets.AdminDateWidget)
    updates = forms.ModelChoiceField(label=FIELDS['update_frequency'],
                                     queryset=UpdateFrequency.objects,
                                     required=False)

    def clean_name(self):
        """Reject new resources whose slugified name collides with an
        existing resource's slug (only checked on creation, not edits)."""
        name = self.cleaned_data['name']
        if not self.instance.pk:
            slug = slugify(name)
            resources = Resource.objects.filter(slug=slug)
            if resources:
                raise forms.ValidationError("Resource with this name already exists.")
        return name

    class Meta:
        # NOTE(review): no `fields`/`exclude` declared -- all model fields
        # are exposed; confirm that is intended for this admin form.
        model = Resource

    class Media:
        js = ("js/wysiwyg.js", )
|
simongoffin/my_odoo_tutorial | refs/heads/master | addons/l10n_multilang/__init__.py | 438 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
import l10n_multilang
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kovidgoyal/html5-parser | refs/heads/master | genattrs.py | 1 | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: Apache 2.0 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
import subprocess
from lxml import html
# Absolute path of this script; main() chdirs to its directory so the
# generated files land in the repo's src/ folder.
self_path = os.path.abspath(__file__)

# Banner prepended to every generated header file.
HEADER = '''\
// Do not edit
// Generated by genattrs.py
'''
def generate_attr_headers(attrs, header=None):
    """Write src/attr_strings.h, src/attr_enum.h and src/attr_sizes.h.

    attrs: iterable of HTML/SVG attribute names (e.g. "data-x", "xml:lang").
    header: optional banner string written at the top of each file;
        defaults to the module-level HEADER comment. The parameter is a
        backward-compatible generalization that also makes the function
        testable without the module constant.
    """
    if header is None:
        header = HEADER
    with open("src/attr_strings.h", "wb") as attr_strings, \
            open("src/attr_enum.h", "wb") as attr_enum, \
            open("src/attr_sizes.h", "wb") as attr_sizes:
        for f in (attr_strings, attr_enum, attr_sizes):
            f.write(header.encode('utf-8'))
        for attr in attrs:
            # '-' and ':' are not valid in C identifiers; map them to '_'.
            attr_upper = attr.upper().replace('-', '_').replace(':', '_')
            attr_strings.write(('"%s",\n' % attr).encode('utf-8'))
            attr_enum.write(('HTML_ATTR_%s,\n' % attr_upper).encode('utf-8'))
            attr_sizes.write(('%d, ' % len(attr)).encode('utf-8'))
        attr_sizes.write(b'\n')
def generate_attr_perfect_hash(attrs, repetitions=400):
    """Run gperf over *attrs* and write a perfect-hash lookup to src/attr_perf.h.

    attrs: attribute names fed to gperf on stdin, one per line.
    repetitions: value for gperf's -m flag (iterations spent searching
        for a smaller hash function).
    Raises SystemExit if gperf fails or its output cannot be parsed.
    """
    p = subprocess.Popen(
        'gperf -LANSI-C -H attr_hash -m{} /dev/stdin'.format(repetitions).split(),
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE)
    stdout = p.communicate('\n'.join(attrs).encode('utf-8'))[0]
    if p.wait() != 0:
        raise SystemExit(p.returncode)
    raw = stdout.decode('utf-8').splitlines()
    for i, line in enumerate(raw):
        if line.startswith('in_word_set'):
            break
    else:
        raise SystemExit('Failed to find in_word_set()')
    # Keep everything before in_word_set() (hash function + tables),
    # discard the lookup function itself.
    lines = raw[:i - 1]
    del raw[:i - 1]
    raw = '\n'.join(raw)
    # BUG FIX: the pattern is now a raw string -- '\[' and '\s' are
    # invalid/deprecated escape sequences in a plain string literal.
    wordlist = re.search(r"wordlist\[\]\s+=\s+{(.*?)}", raw, re.DOTALL)
    if wordlist is None:
        raise SystemExit('Failed to find wordlist')
    wordlist = [w.strip().replace('"', '') for w in wordlist.group(1).split(',')]
    # Map each gperf slot to an enum constant; empty slots map to the
    # HTML_ATTR_LAST sentinel.
    attrlist = ["\tHTML_ATTR_" + (w.upper().replace('-', '_').replace(':', '_') if w else 'LAST')
                for w in wordlist]
    processed = '\n'.join(lines) + '\n\n'
    processed += 'static const HTMLAttr HTML_ATTR_MAP[] = {\n%s\n};' % '\n,'.join(attrlist)
    # Replace everything up to the hash function with the generated banner.
    processed = re.sub(
        r'.+^attr_hash',
        HEADER + 'static inline unsigned int\nattr_hash',
        processed,
        flags=re.DOTALL | re.MULTILINE)
    with open('src/attr_perf.h', 'wb') as f:
        f.write(processed.encode('utf-8'))
        f.write(b'\n')
def get_attr_names():
    """Yield HTML and SVG attribute names scraped from saved MDN pages.

    Reads the pre-downloaded pages /t/Attributes (HTML) and /t/Attribute
    (SVG). Wildcard entries such as "data-*" are skipped.
    """
    # HTML Attributes from
    # https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes
    raw = open('/t/Attributes', 'rb').read()
    root = html.fromstring(raw)
    table = root.xpath('//table[@class="standard-table"]/tbody')[0]
    for tr in table.findall('tr'):
        td = tr.find('td')
        code = td.find('code')
        attr = code.text
        # '*' marks wildcard families (e.g. data-*), not real attributes.
        if attr and '*' not in attr:
            yield attr.strip()
    # SVG Attributes from
    # https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute
    raw = open('/t/Attribute', 'rb').read()
    root = html.fromstring(raw)
    h2 = root.xpath('//h2[@id="SVG_Attributes"]')[0]
    for ul in h2.xpath('following-sibling::div[1]/ul'):
        for attr in ul.xpath('./li/code/a/text()'):
            yield attr.strip()
def main():
    """Regenerate all attr_*.h headers from the scraped attribute names."""
    os.chdir(os.path.dirname(self_path))
    # data-reactid is not on MDN but appears in React-generated markup.
    attrs = sorted(set(get_attr_names()) | {'data-reactid'})
    generate_attr_headers(attrs)
    generate_attr_perfect_hash(attrs)


if __name__ == '__main__':
    main()
|
equalitie/highpass | refs/heads/master | src/utils.py | 1 | import re, sys, math, random, csv, types, networkx as nx
from collections import defaultdict
def parse(filename, isDirected):
    """Read a CSV file and dispatch to the directed or undirected parser.

    Returns whatever the selected parser returns: a Graph for undirected
    input, or the (DiGraph, init_ranks, content) tuple from parse_josh.
    Python 2 module (print statement).
    """
    reader = csv.reader(open(filename, 'r'), delimiter=',')
    data = [row for row in reader]
    print "Reading and parsing the data into memory..."
    if isDirected:
        return parse_josh(data)
        #return parse_directed(data)
    else:
        return parse_undirected(data)
def parse_undirected(data):
    """Build an undirected nx.Graph from parsed CSV rows.

    Node names come from column 0, edges connect columns 0 and 2, and
    every node gets an initial 'rank' attribute of 1/#nodes.
    """
    graph = nx.Graph()
    node_names = set(row[0] for row in data)
    initial_rank = 1 / float(len(node_names))
    graph.add_nodes_from(node_names, rank=initial_rank)
    graph.add_edges_from((row[0], row[2]) for row in data)
    return graph
def parse_directed(data):
    """Build a nx.DiGraph whose path direction follows the larger value.

    Nodes come from columns 0 and 2, their numeric values from columns
    1 and 3; the path runs from the higher-valued node to the lower one.
    """
    DG = nx.DiGraph()
    for i, row in enumerate(data):
        node_a = format_key(row[0])
        node_b = format_key(row[2])
        val_a = digits(row[1])
        val_b = digits(row[3])
        # NOTE(review): add_edge always inserts a->b regardless of the
        # values, so the a->b edge exists even when the conditional below
        # adds the reversed path -- confirm intended semantics.
        DG.add_edge(node_a, node_b)
        if val_a >= val_b:
            DG.add_path([node_a, node_b])
        else:
            DG.add_path([node_b, node_a])
    return DG
def parse_josh(data):
    """Build a directed graph plus per-node ranks and content from report rows.

    Returns (DG, init_ranks, content):
      DG         -- nx.DiGraph with an edge row[0] -> row[1] per data row
      init_ranks -- accumulated distance-weighted value (col 5) per target node
      content    -- list of UTF-8 report texts (col 15) per target node
    The first row of *data* is skipped as a header.
    """
    # Hoisted out of the loop: these were re-imported on every row, and the
    # unused `import pdb` has been removed.
    from math import sqrt, cos

    violation_latt = 47.991266  # 47.140580
    violation_long = 37.792420  # 37.564110
    distance_value_coeff = 0  # 100
    DG = nx.DiGraph()
    content = {}
    init_ranks = {}
    for i, row in enumerate(data[1:]):  # skipping header
        node_a = format_key(row[0])
        node_b = format_key(row[1])
        val_a = digits(row[5])
        val_b = 0  # digits(row[3])
        distance_score = 1
        try:
            int_latt = float(row[9])
            int_long = float(row[10])
            cos2_theta = cos((int_latt + violation_latt)/2)**2
            # NOTE(review): sqrt(x)**2 is just x (and raises ValueError for
            # negative x); the intent was probably
            # sqrt((dlat)**2 + cos2_theta*(dlong)**2). Left as-is to
            # preserve existing ranking behaviour -- confirm with author.
            distance_score = 1/(sqrt(int_latt - violation_latt)**2 + cos2_theta*(int_long - violation_long)**2)
            distance_score *= distance_value_coeff
            distance_score += 1
            val_a *= distance_score
        except ValueError:
            # Rows without parseable coordinates keep distance_score == 1.
            pass
        DG.add_edge(node_a, node_b)
        DG.add_path([node_a, node_b])
        unicoded_content = unicode(row[15], 'utf-8').encode('utf8')
        if node_b in content:
            content[node_b].append(unicoded_content)
            init_ranks[node_b] += val_a
        else:
            content[node_b] = [unicoded_content]
            init_ranks[node_b] = val_a
    return (DG, init_ranks, content)
def digits(val):
    """Return the integer formed by the decimal digits of *val*.

    Non-digit characters are stripped, e.g. "$1,234" -> 1234.
    Raises ValueError if *val* contains no digits at all.
    """
    # r-prefix fix: "\D" in a plain literal is a deprecated escape sequence.
    return int(re.sub(r"\D", "", val))
def format_key(key):
    """Normalise a CSV key: trim whitespace, drop surrounding double quotes."""
    key = key.strip()
    is_quoted = key.startswith('"') and key.endswith('"')
    return key[1:-1] if is_quoted else key
def print_results(f, method, results):
    """Print the method name; *f* and *results* are currently unused.

    NOTE(review): looks unfinished -- presumably *results* was meant to be
    written to *f*. Confirm before relying on this helper.
    """
    print method
|
Darkmer/masterchief | refs/heads/master | CourseBuilderenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/__init__.py | 12133432 | |
mbauskar/tele-frappe | refs/heads/develop | frappe/core/doctype/block_module/__init__.py | 12133432 | |
vaughamhong/gmock | refs/heads/master | scripts/generator/cpp/__init__.py | 12133432 | |
350dotorg/Django | refs/heads/master | tests/regressiontests/null_fk_ordering/models.py | 92 | """
Regression tests for proper working of ForeignKey(null=True). Tests these bugs:
* #7512: including a nullable foreign key reference in Meta ordering has un
xpected results
"""
from django.db import models
# The first two models represent a very simple null FK ordering case.
class Author(models.Model):
    """FK target for Article; Article orders on author__name with author
    nullable, which is the #7512 regression under test."""
    name = models.CharField(max_length=150)
class Article(models.Model):
    """Article with a *nullable* author FK whose Meta.ordering crosses
    that FK -- exactly the case #7512 exercises."""
    title = models.CharField(max_length=150)
    author = models.ForeignKey(Author, null=True)

    def __unicode__(self):
        return u'Article titled: %s' % (self.title, )

    class Meta:
        ordering = ['author__name', ]
# These following 4 models represent a far more complex ordering case.
class SystemInfo(models.Model):
    """Top of the SystemInfo -> Forum -> Post -> Comment ordering chain."""
    system_name = models.CharField(max_length=32)
class Forum(models.Model):
    """Middle link of the deep-ordering chain (non-nullable FK)."""
    system_info = models.ForeignKey(SystemInfo)
    forum_name = models.CharField(max_length=32)
class Post(models.Model):
    """Post with a *nullable* forum FK, crossed by Comment's ordering."""
    forum = models.ForeignKey(Forum, null=True)
    title = models.CharField(max_length=32)

    def __unicode__(self):
        return self.title
class Comment(models.Model):
    """Comment ordered through three FK hops, two of them nullable."""
    post = models.ForeignKey(Post, null=True)
    comment_text = models.CharField(max_length=250)

    class Meta:
        ordering = ['post__forum__system_info__system_name', 'comment_text']

    def __unicode__(self):
        return self.comment_text
|
andela-engmkwalusimbi/Picha | refs/heads/master | api/views.py | 1 | import urllib
import cStringIO
import os
from django.contrib.auth.models import User
from django.contrib.auth import login, logout
from django.views.decorators.csrf import csrf_exempt
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.views import APIView
from social.apps.django_app.utils import load_strategy, load_backend
from social.exceptions import AuthAlreadyAssociated
from api.serializers import PhotoSerializer, ImageSerializer, EffectSerializer
from api.models import UserPhoto, EffectsModel
from api.permissions import IsAuthenticatedOrCreate
from api import utilities
from api import image_effects
from PIL import Image, ImageEnhance, ImageDraw, ImageFont
@csrf_exempt
@api_view(['POST'])
@permission_classes((permissions.AllowAny,))
def social_register(request):
    """Handle social (e.g. facebook) authentication.

    Expects POST data with 'access_token' and 'backend'; logs the user in
    on success. Responses: 200 with username, 400 on missing input or an
    already-associated account, 403 on bad credentials.
    """
    if request.method == 'POST':
        # BUG FIX: use .get() -- a missing key must yield the 400 "Bad
        # request" response below, not an unhandled KeyError (HTTP 500).
        auth_token = request.data.get('access_token')
        backend = request.data.get('backend')
        if auth_token and backend:
            strategy = load_strategy(request)
            backend = load_backend(
                strategy=strategy, name=backend, redirect_uri=None)
            # do backend authentication and check if account is already associated
            try:
                user = backend.do_auth(auth_token)
            except AuthAlreadyAssociated:
                return Response({"errors": "That social media account is already in use"},
                                status=status.HTTP_400_BAD_REQUEST)
            # if a user has been created log the user in
            if user:
                login(request, user)
                return Response({'user': user.username}, status=status.HTTP_200_OK)
            else:
                return Response("Bad Credentials", status=403)
        else:
            return Response("Bad request", status=400)
class PhotoListView(generics.ListCreateAPIView):
    """List the current user's photos and handle photo upload."""

    # Setting permission classes
    permission_classes = (IsAuthenticatedOrCreate, permissions.IsAuthenticated)
    queryset = UserPhoto.objects.all()
    serializer_class = PhotoSerializer

    def perform_create(self, serializer):
        """Method for handling the actual creation and upload.

        NOTE(review): the incoming *serializer* is discarded and rebuilt
        from request data; invalid data is silently ignored (no exception
        raised, and DRF never uses perform_create's return value) --
        confirm this best-effort behaviour is intended.
        """
        serializer = PhotoSerializer(data=self.request.data, context={'request': self.request})
        if serializer.is_valid():
            serializer.save(created_by=self.request.user)
            return Response(serializer.data, status=status.HTTP_201_CREATED)

    def get_queryset(self):
        """Modify query to display photos for logged in user"""
        user = self.request.user
        return UserPhoto.objects.all().filter(created_by=user)
class PhotoDetailView(APIView):
    """Retrieve or delete a single photo identified by the ?id= parameter."""

    def get(self, request):
        """Return serialized photo data for the requested id."""
        image = UserPhoto.objects.get(id=request.query_params['id'])
        serializer = PhotoSerializer(image, context={'request': request})
        # return serialized data
        return Response(serializer.data)

    def delete(self, request):
        """Delete the photo record and its file under static/media/."""
        media_route = 'static/media/'
        image = UserPhoto.objects.get(id=request.query_params['id'])
        image.delete()
        try:
            media_route += str(image.image)
            os.remove(media_route)
        except OSError as e:
            # BUG FIX: was a bare `except:` that printed the undefined name
            # `e`, raising NameError inside the handler. Catch the actual
            # file-removal failure and bind it.
            print("Error: file not found: {0}".format(e))
        # NOTE(review): the DB row is deleted before the file removal is
        # attempted, despite the comment in the original saying the
        # opposite -- confirm intended ordering.
        return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET'])
def get_uploaded_image(request):
    """retrieve an uploaded image basing on its name"""
    if request.method == 'GET':
        # Lookup by unique name rather than primary key.
        image_object = UserPhoto.objects.get(name=request.query_params['name'])
        serializer = ImageSerializer(image_object, context={'request': request})
        return Response(serializer.data)
@api_view(['GET'])
def reset_effects(request):
    """Delete every generated preview image from the temp media folder."""
    if request.method == 'GET':
        temp_dir = 'static/media/temp/'
        # Remove each file currently present in the preview folder.
        for entry in os.listdir(temp_dir):
            os.remove(temp_dir + "/" + entry)
        return Response(status=status.HTTP_200_OK)
@api_view(['GET'])
def apply_filters(request):
    """Generate one preview per PIL filter for the image at ?image_url=.

    Returns a mapping of filter name -> preview URL produced by the
    image_effects helpers.
    """
    if request.method == 'GET':
        # NOTE(review): temp_url is unused here; the image_effects helpers
        # decide where previews are written -- confirm it can be removed.
        temp_url = 'static/media/temp/'
        image_url = request.query_params['image_url']
        file_name = image_url.rsplit('/', 1)[-1]
        # Download the source image into an in-memory buffer (Python 2 urllib).
        file_ = cStringIO.StringIO(urllib.urlopen(image_url).read())
        data = {
            'BLUR': image_effects.blur_filter(file_, file_name),
            'CONTOUR': image_effects.contour_filter(file_, file_name),
            'DETAIL': image_effects.detail_filter(file_, file_name),
            'EDGE_ENHANCE': image_effects.edge_enhance_filter(file_, file_name),
            'EMBOS': image_effects.embos_filter(file_, file_name),
            'FIND_EDGES': image_effects.find_edges_filter(file_, file_name),
            'SMOOTH': image_effects.smooth_filter(file_, file_name),
            'BAW': image_effects.black_n_white_filter(file_, file_name),
            'SHARPEN': image_effects.sharpen_filter(file_, file_name)
        }
        return Response(data, status=status.HTTP_200_OK)
@api_view(['GET'])
def apply_enhancement(request):
    """Apply PIL color/contrast/sharpness/brightness enhancement.

    Query params: image (URL), x=color, y=contrast, w=sharpness,
    z=brightness; each integer is halved to get the PIL enhance factor.
    """
    if request.method == 'GET':
        temp_url = 'static/media/temp/'
        image_url = request.query_params['image']
        color = int(request.query_params['x'])
        contrast = int(request.query_params['y'])
        sharpness = int(request.query_params['w'])
        brightness = int(request.query_params['z'])
        file_name = image_url.rsplit('/', 1)[-1]
        file_ = cStringIO.StringIO(urllib.urlopen(image_url).read())
        new_image_url = temp_url + "Enhance" + file_name
        image = Image.open(file_)
        if color != 0:
            # when color is not zero enhance the image
            enhancer = ImageEnhance.Color(image.convert('RGB'))
            image = enhancer.enhance(color / 2.0)
        if contrast != 0:
            # when contrast is not zero enhance the image
            enhancer = ImageEnhance.Contrast(image.convert('RGB'))
            image = enhancer.enhance(contrast / 2.0)
        if brightness > 0:
            # NOTE(review): this guard is `> 0` while the other three are
            # `!= 0`, and the original comment claimed "not zero" --
            # confirm whether negative brightness should be rejected.
            enhancer = ImageEnhance.Brightness(image.convert('RGB'))
            image = enhancer.enhance(brightness / 2.0)
        if sharpness != 0:
            # when sharpness is not zero enhance the image
            enhancer = ImageEnhance.Sharpness(image.convert('RGB'))
            image = enhancer.enhance(sharpness / 2.0)
        image.save(new_image_url)
        return Response({"enhance": new_image_url}, status=status.HTTP_200_OK)
@api_view(['GET'])
def apply_rotations(request):
    """Rotate or flip the image; ?x= selects the operation (0-3)."""
    if request.method == 'GET':
        temp_url = 'static/media/temp/'
        image_url = request.query_params['image']
        degree = int(request.query_params['x'])
        file_name = image_url.rsplit('/', 1)[-1]
        file_ = cStringIO.StringIO(urllib.urlopen(image_url).read())
        new_image_url = temp_url + "Degree" + file_name
        image = Image.open(file_)
        if degree == 0:
            # flip verticle
            image = image.transpose(Image.FLIP_TOP_BOTTOM)
        elif degree == 1:
            # rotate right (clockwise 90)
            image = image.rotate(-90)
        elif degree == 2:
            # rotate left (counter-clockwise 90) -- original comment wrongly
            # said "rotate right" for this branch
            image = image.rotate(90)
        else:
            # flip horizontal
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
        image.save(new_image_url)
        return Response({"degree": new_image_url}, status=status.HTTP_200_OK)
@api_view(['GET'])
def draw_text(request):
    """Draw text onto the image at one of nine named anchor positions.

    Query params: image (URL), text, position (topleft/left/bottomleft/
    top/center/bottom/topright/right/bottomright). An unknown position
    draws nothing, matching the original if/elif chain.
    """
    if request.method == 'GET':
        temp_url = 'static/media/temp/'
        image_url = request.query_params['image']
        text = request.query_params['text']
        position = request.query_params['position']
        file_name = image_url.rsplit('/', 1)[-1]
        file_ = cStringIO.StringIO(urllib.urlopen(image_url).read())
        new_image_url = temp_url + "text" + file_name
        # get an image
        base = Image.open(file_).convert('RGBA')
        # make a blank image for the text, initialized to transparent text color
        txt = Image.new('RGBA', base.size, (255,255,255,0))
        width, height = base.size[0], base.size[1]
        # get a font
        fnt = ImageFont.truetype('static/nimbus.otf', 40)
        # get a drawing context
        drawing_context = ImageDraw.Draw(txt)
        # Deduplicated coordinate table; the values are identical to the
        # nine branches of the original if/elif chain.
        left_x = 20
        center_x = (width / 2) - (len(text) * len(text))
        right_x = width - (len(text) * len(text)) - 200
        top_y = 90
        middle_y = height / 2
        bottom_y = height - 100
        coords = {
            "topleft": (left_x, top_y),
            "left": (left_x, middle_y),
            "bottomleft": (left_x, bottom_y),
            "top": (center_x, top_y),
            "center": (center_x, middle_y),
            "bottom": (center_x, bottom_y),
            "topright": (right_x, top_y),
            "right": (right_x, middle_y),
            "bottomright": (right_x, bottom_y),
        }
        if position in coords:
            # draw text, full opacity
            drawing_context.text(coords[position], text, font=fnt, fill=(255,255,255,255))
        output_image = Image.alpha_composite(base, txt)
        output_image.save(new_image_url)
        return Response({"image_text": new_image_url}, status=status.HTTP_200_OK)
#class EffectListView(APIView):
# """The view set for photoeffect creation and upload"""
# # Setting permission classes
# permission_classes = (IsAuthenticatedOrCreate, permissions.IsAuthenticated)
# def post(self, request):
# """Method for handling the actual creation and upload"""
# url = request.data['effect']
# photo_id = request.data['photo_id']
# # get the domain and the path to the image
# domain, path = utilities.split_url(url)
# # get file name of the image
# filename = utilities.get_url_tail(path)
# # get cString object
# fobject = utilities.retrieve_image(url)
# # Open the image using pill
# pil_image = Image.open(fobject)
# # get file name extension
# ext = utilities.get_extension(filename)
# # convert image to what django understands
# image = utilities.pil_to_django(pil_image, ext)
# user_photo = UserPhoto.objects.get(id=photo_id)
# image_model = EffectsModel()
# image_model.photo_id = user_photo.id
# image_model.effect.save(filename, image)
# # Save the image and return a response of 201
# image_model.save()
# return Response(status=status.HTTP_201_CREATED)
# def get(self, request):
# """Modify query to display photos with efects for a particular id"""
# _id = request.query_params['id']
# effectsobject = EffectsModel.objects.all()
# if _id is not None:
# effectsobject = effectsobject.filter(photo=_id)
# # serialize returned data
# serializer = EffectSerializer(effectsobject, context={'request': request}, many=True)
# return Response(serializer.data)
|
xq262144/hue | refs/heads/master | desktop/libs/libsentry/src/libsentry/tests.py | 2 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lxml.etree
import os
import shutil
import tempfile
from nose.plugins.skip import SkipTest
from nose.tools import assert_equal, assert_true, assert_false, assert_not_equal, assert_raises
from django.contrib.auth.models import User
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.exceptions import StructuredThriftTransportException
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.test_utils import add_to_group, grant_access
from hadoop.pseudo_hdfs4 import is_live_cluster
from libsentry import sentry_site
from libsentry.api import get_api, clear_api_cache
from libsentry.api2 import get_api as get_api2, clear_api_cache as clear_api2_cache
from libsentry.conf import is_enabled, HOSTNAME, PORT, SENTRY_CONF_DIR
from libsentry.client import SentryClient
from libsentry.sentry_ha import get_next_available_server
from libsentry.sentry_site import get_sentry_server
def create_mock_client_fn(client_class, username, server, component=None):
    """Return a mock Sentry client bound to *server*.

    client_class, username and component are accepted to match the real
    factory's signature but are unused by the mock. Hosts whose name
    starts with "bad" simulate an unreachable Sentry server.
    Raises PopupException when *server* is None, like the real factory.
    """
    class MockSentryClient(object):
        def __init__(self, host):
            self.host = host

        def list_sentry_roles_by_group(self, groupName='*'):
            if self.host.startswith('bad'):
                raise StructuredThriftTransportException(ex=None)
            else:
                return []

    if server is not None:
        return MockSentryClient(server['hostname'])
    else:
        # BUG FIX: the message was wrapped in _() but gettext is never
        # imported in this module, so this branch raised NameError instead
        # of the intended PopupException.
        raise PopupException('Cannot create a Sentry client without server hostname and port.')
class TestWithSentry(object):
    @classmethod
    def setup_class(cls):
        """Skip the suite without a sentry-site.xml; otherwise create and
        authorize the shared 'test' user."""
        if not os.path.exists(os.path.join(SENTRY_CONF_DIR.get(), 'sentry-site.xml')):
            raise SkipTest('Could not find sentry-site.xml, skipping sentry tests')

        cls.client = make_logged_in_client(username='test', is_superuser=False)
        cls.user = User.objects.get(username='test')
        add_to_group('test')
        grant_access("test", "test", "libsentry")
        cls.config_path = os.path.join(SENTRY_CONF_DIR.get(), 'sentry-site.xml')
    def setUp(self):
        """Capture the real RPC config, then point SENTRY_CONF_DIR at a
        fresh temp dir so each test can write its own sentry-site.xml."""
        self.rpc_addresses = ''
        if sentry_site.get_sentry_server_rpc_addresses() is not None:
            self.rpc_addresses = ','.join(sentry_site.get_sentry_server_rpc_addresses())
        self.rpc_port = sentry_site.get_sentry_server_rpc_port() or '8038'

        self.tmpdir = tempfile.mkdtemp()
        self.resets = [
            SENTRY_CONF_DIR.set_for_testing(self.tmpdir),
        ]
        # Drop any cached API clients so each test starts clean.
        clear_api_cache()
        clear_api2_cache()
    def tearDown(self):
        """Restore the original conf overrides and remove the temp dir."""
        sentry_site.reset()
        for reset in self.resets:
            reset()
        shutil.rmtree(self.tmpdir)
def test_get_random_sentry_server(self):
# Test that with no current_host, a server for a random host is returned
xml = self._sentry_site_xml(rpc_addresses='%s,host-1,host-2' % self.rpc_addresses, rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
server = get_sentry_server()
assert_true(server is not None)
assert_true(server['hostname'] in '%s,host-1,host-2' % self.rpc_addresses)
def test_get_single_sentry_server(self):
# Test that with a current host and single server, the single server is returned
xml = self._sentry_site_xml(rpc_addresses='host-1', rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
server = get_sentry_server(current_host='host-1')
assert_true(server is not None)
assert_equal(server['hostname'], 'host-1')
def test_get_next_sentry_server(self):
# Test that with a current host and multiple servers, the next server is returned
xml = self._sentry_site_xml(rpc_addresses='%s,host-1,host-2' % self.rpc_addresses, rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
server = get_sentry_server(current_host='host-1')
assert_true(server is not None)
assert_equal(server['hostname'], 'host-2')
def test_get_first_sentry_server(self):
# Test that if the current host is the last host of multiple servers, the first server is returned
xml = self._sentry_site_xml(rpc_addresses='host-1,%s,host-2' % self.rpc_addresses, rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
server = get_sentry_server(current_host='host-2')
assert_true(server is not None)
assert_equal(server['hostname'], 'host-1')
def test_round_robin(self):
# Test that get_next_available_client will check each server once and only once then exit
xml = self._sentry_site_xml(rpc_addresses='host-1,host-2,host-3,host-4,host-5', rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
server, attempts = get_next_available_server(SentryClient, self.user.username, failed_host='host-1')
assert_equal(None, server)
assert_equal(['host-2','host-3','host-4','host-5'], attempts)
def test_get_next_good_host(self):
# Test that get_next_available_client will return the next good/successful server
xml = self._sentry_site_xml(rpc_addresses='bad-host-1,good-host-1,bad-host-2,good-host-2,good-host-3', rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
server, attempts = get_next_available_server(SentryClient, self.user.username, failed_host='bad-host-2',
create_client_fn=create_mock_client_fn)
assert_equal('good-host-2', server['hostname'])
assert_equal([], attempts)
def test_single_good_host(self):
# Test that get_next_available_client will return the single good host on first try
xml = self._sentry_site_xml(rpc_addresses='good-host-1', rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
server, attempts = get_next_available_server(SentryClient, self.user.username, failed_host=None,
create_client_fn=create_mock_client_fn)
assert_equal('good-host-1', server['hostname'])
assert_equal([], attempts)
def test_single_bad_host(self):
# Test that get_next_available_client will raise an exception on single bad host
xml = self._sentry_site_xml(rpc_addresses='bad-host-1', rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
assert_raises(PopupException, get_next_available_server, SentryClient, self.user.username, failed_host=None,
create_client_fn=create_mock_client_fn)
def test_bad_good_host(self):
# Test that get_next_available_client will return the good host
xml = self._sentry_site_xml(rpc_addresses='bad-host-1,good-host-1', rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
server, attempts = get_next_available_server(SentryClient, self.user.username, failed_host='bad-host-1',
create_client_fn=create_mock_client_fn)
assert_equal('good-host-1', server['hostname'])
assert_equal([], attempts)
def test_good_bad_host(self):
# Test that get_next_available_client will return the good host
xml = self._sentry_site_xml(rpc_addresses='good-host-1,bad-host-1', rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
server, attempts = get_next_available_server(SentryClient, self.user.username, failed_host='bad-host-1',
create_client_fn=create_mock_client_fn)
assert_equal('good-host-1', server['hostname'])
assert_equal([], attempts)
def test_ha_failover_all_bad(self):
# Test with all bad hosts
xml = self._sentry_site_xml(rpc_addresses='bad-host-1:8039,bad-host-2', rpc_port=self.rpc_port)
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
api = get_api(self.user)
assert_equal('bad-host-1:8039,bad-host-2', ','.join(sentry_site.get_sentry_server_rpc_addresses()))
assert_raises(PopupException, api.list_sentry_roles_by_group, '*')
api2 = get_api2(self.user, 'solr')
assert_raises(PopupException, api2.list_sentry_roles_by_group, '*')
def test_no_rpc_hosts(self):
# Test with no rpc hosts and fallback to hostname and port
xml = self._sentry_site_xml(rpc_addresses='')
file(os.path.join(self.tmpdir, 'sentry-site.xml'), 'w').write(xml)
sentry_site.reset()
api = get_api(self.user)
assert_false(sentry_site.is_ha_enabled(), sentry_site.get_sentry_server_rpc_addresses())
assert_true(is_enabled() and HOSTNAME.get() and HOSTNAME.get() != 'localhost')
resp = api.list_sentry_roles_by_group(groupName='*')
assert_true(isinstance(resp, list))
api2 = get_api2(self.user, 'solr')
resp = api2.list_sentry_roles_by_group(groupName='*')
assert_true(isinstance(resp, list))
def _sentry_site_xml(self, rpc_addresses, rpc_port='8038'):
config = lxml.etree.parse(self.config_path)
root = config.getroot()
properties = config.findall('property')
for prop in properties:
name = prop.find('name')
if name.text == 'sentry.service.client.server.rpc-address':
value = prop.find('value')
value.text = rpc_addresses
elif name.text == 'sentry.service.client.server.rpc-port':
value = prop.find('value')
value.text = rpc_port
return lxml.etree.tostring(root)
class TestSentryWithHadoop(object):
  """Sentry client tests that talk to a real Sentry service on a live cluster.

  Skipped unless a live cluster is detected and a sentry-site.xml is present.
  """

  requires_hadoop = True

  @classmethod
  def setup_class(cls):
    if not is_live_cluster():
      raise SkipTest('TestSentryWithHadoop requires a live cluster.')

    sentry_conf = os.path.join(SENTRY_CONF_DIR.get(), 'sentry-site.xml')
    if not os.path.exists(sentry_conf):
      raise SkipTest('Could not find sentry-site.xml, skipping sentry tests')

    cls.client = make_logged_in_client(username='test', is_superuser=False)
    cls.user = User.objects.get(username='test')
    add_to_group('test')
    grant_access("test", "test", "libsentry")
    cls.config_path = sentry_conf

  def setUp(self):
    # Remember the cluster's real RPC settings before overriding the conf dir.
    addresses = sentry_site.get_sentry_server_rpc_addresses()
    self.rpc_addresses = ','.join(addresses) if addresses is not None else ''
    self.rpc_port = sentry_site.get_sentry_server_rpc_port() or '8038'

    self.tmpdir = tempfile.mkdtemp()
    self.resets = [SENTRY_CONF_DIR.set_for_testing(self.tmpdir)]
    clear_api_cache()
    clear_api2_cache()

  def tearDown(self):
    sentry_site.reset()
    for undo in self.resets:
      undo()
    shutil.rmtree(self.tmpdir)

  def test_get_collections(self):
    client = SentryClient(HOSTNAME.get(), PORT.get(), 'test')

    resp = client.list_sentry_roles_by_group()  # Non Sentry Admin can do that
    assert_not_equal(0, resp.status.value, resp)
    assert_true('denied' in resp.status.message, resp)

    resp = client.list_sentry_roles_by_group(groupName='*')
    assert_equal(0, resp.status.value, resp)
|
ajgallegog/gem5_arm | refs/heads/master | src/arch/x86/isa/insts/simd128/integer/data_reordering/shuffle.py | 91 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PSHUFD_XMM_XMM_I {
shuffle ufp1, xmmlm, xmmhm, size=4, ext="IMMEDIATE"
shuffle xmmh, xmmlm, xmmhm, size=4, ext="IMMEDIATE >> 4"
movfp xmml, ufp1, dataSize=8
};
def macroop PSHUFD_XMM_M_I {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
shuffle xmml, ufp1, ufp2, size=4, ext="IMMEDIATE"
shuffle xmmh, ufp1, ufp2, size=4, ext="IMMEDIATE >> 4"
};
def macroop PSHUFD_XMM_P_I {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
shuffle xmml, ufp1, ufp2, size=4, ext="IMMEDIATE"
shuffle xmmh, ufp1, ufp2, size=4, ext="IMMEDIATE >> 4"
};
def macroop PSHUFHW_XMM_XMM_I {
shuffle xmmh, xmmhm, xmmhm, size=2, ext=imm
};
def macroop PSHUFHW_XMM_M_I {
ldfp ufp1, seg, sib, "DISPLACEMENT + 8", dataSize=8
shuffle xmmh, ufp1, ufp1, size=2, ext=imm
};
def macroop PSHUFHW_XMM_P_I {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT + 8", dataSize=8
shuffle xmmh, ufp1, ufp1, size=2, ext=imm
};
def macroop PSHUFLW_XMM_XMM_I {
shuffle xmml, xmmlm, xmmlm, size=2, ext=imm
};
def macroop PSHUFLW_XMM_M_I {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
shuffle xmml, ufp1, ufp1, size=2, ext=imm
};
def macroop PSHUFLW_XMM_P_I {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
shuffle xmml, ufp1, ufp1, size=2, ext=imm
};
'''
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.