repo_name
stringlengths
5
92
path
stringlengths
4
221
copies
stringclasses
19 values
size
stringlengths
4
6
content
stringlengths
766
896k
license
stringclasses
15 values
hash
int64
-9,223,277,421,539,062,000
9,223,102,107B
line_mean
float64
6.51
99.9
line_max
int64
32
997
alpha_frac
float64
0.25
0.96
autogenerated
bool
1 class
ratio
float64
1.5
13.6
config_test
bool
2 classes
has_no_keywords
bool
2 classes
few_assignments
bool
1 class
tensorflow/tpu
models/official/detection/utils/config_utils.py
1
2218
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Config utils.""" import os import tensorflow.compat.v1 as tf from hyperparameters import params_dict _PARSERS = [ 'classification_parser', 'retinanet_parser', 'maskrcnn_parser', 'segmentation_parser', 'shapemask_parser', ] _BACKBONES = [ 'resnet', 'spinenet', 'spinenet_mbconv', ] _MULTILEVEL_FEATURES = [ 'fpn', 'nasfpn', ] def filter_unused_blocks(params): """Filters unused architecture params blocks.""" filtered_params = params_dict.ParamsDict(params) if 'parser' in params.architecture.as_dict().keys(): for parser in _PARSERS: if (parser in params.as_dict().keys() and parser != params.architecture.parser): delattr(filtered_params, parser) if 'backbone' in params.architecture.as_dict().keys(): for backbone in _BACKBONES: if (backbone in params.as_dict().keys() and backbone != params.architecture.backbone): delattr(filtered_params, backbone) if 'multilevel_features' in params.architecture.as_dict().keys(): for features in _MULTILEVEL_FEATURES: if (features in params.as_dict().keys() and features != params.architecture.multilevel_features): delattr(filtered_params, features) return filtered_params def save_config(params, model_dir): if model_dir: params = filter_unused_blocks(params) if not tf.gfile.Exists(model_dir): tf.gfile.MakeDirs(model_dir) params_dict.save_params_dict_to_yaml( params, 
os.path.join(model_dir, 'params.yaml'))
apache-2.0
-6,733,611,204,582,500,000
30.239437
80
0.670875
false
3.797945
false
false
false
WillsB3/glue
glue/formats/jsonformat.py
1
2704
import os import json import codecs from base import BaseJSONFormat class JSONFormat(BaseJSONFormat): extension = 'json' build_per_ratio = True @classmethod def populate_argument_parser(cls, parser): group = parser.add_argument_group("JSON format options") group.add_argument("--json", dest="json_dir", nargs='?', const=True, default=os.environ.get('GLUE_JSON', False), metavar='DIR', help="Generate JSON files and optionally where") group.add_argument("--json-format", dest="json_format", metavar='NAME', type=unicode, default=os.environ.get('GLUE_JSON_FORMAT', 'array'), choices=['array', 'hash'], help=("JSON structure format (array, hash)")) def get_context(self, *args, **kwargs): context = super(JSONFormat, self).get_context(*args, **kwargs) frames = dict([[i['filename'], {'filename': i['filename'], 'frame': {'x': i['x'], 'y': i['y'], 'w': i['width'], 'h': i['height']}, 'rotated': False, 'trimmed': False, 'spriteSourceSize': {'x': i['x'], 'y': i['y'], 'w': i['width'], 'h': i['height']}, 'sourceSize': {'w': i['original_width'], 'h': i['original_height']}}] for i in context['images']]) data = dict(frames=None, meta={'version': context['version'], 'hash': context['hash'], 'name': context['name'], 'sprite_path': context['sprite_path'], 'sprite_filename': context['sprite_filename'], 'width': context['width'], 'height': context['height']}) if self.sprite.config['json_format'] == 'array': data['frames'] = frames.values() else: data['frames'] = frames return data
bsd-3-clause
3,899,344,654,583,642,000
41.920635
112
0.366864
false
5.716702
false
false
false
LennonChin/Django-Practices
MxShop/apps/utils/alipay.py
1
6122
# _*_ coding: utf-8 _*_ __author__ = 'LennonChin' __date__ = '2017/10/23 21:37' # pip install pycryptodome __author__ = 'bobby' from datetime import datetime from Crypto.PublicKey import RSA from Crypto.Signature import PKCS1_v1_5 from Crypto.Hash import SHA256 from base64 import b64encode, b64decode from urllib.parse import quote_plus from urllib.parse import urlparse, parse_qs from urllib.request import urlopen from base64 import decodebytes, encodebytes import json class AliPay(object): """ 支付宝支付接口 """ def __init__(self, appid, app_notify_url, app_private_key_path, alipay_public_key_path, return_url, debug=False): self.appid = appid self.app_notify_url = app_notify_url self.app_private_key_path = app_private_key_path self.app_private_key = None self.return_url = return_url with open(self.app_private_key_path) as fp: self.app_private_key = RSA.importKey(fp.read()) self.alipay_public_key_path = alipay_public_key_path with open(self.alipay_public_key_path) as fp: self.alipay_public_key = RSA.import_key(fp.read()) if debug is True: self.__gateway = "https://openapi.alipaydev.com/gateway.do" else: self.__gateway = "https://openapi.alipay.com/gateway.do" def direct_pay(self, subject, out_trade_no, total_amount, return_url=None, **kwargs): biz_content = { "subject": subject, "out_trade_no": out_trade_no, "total_amount": total_amount, "product_code": "FAST_INSTANT_TRADE_PAY", # "qr_pay_mode":4 } biz_content.update(kwargs) data = self.build_body("alipay.trade.page.pay", biz_content, self.return_url) return self.sign_data(data) def build_body(self, method, biz_content, return_url=None): data = { "app_id": self.appid, "method": method, "charset": "utf-8", "sign_type": "RSA2", "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "version": "1.0", "biz_content": biz_content } if return_url is not None: data["notify_url"] = self.app_notify_url data["return_url"] = self.return_url return data def sign_data(self, data): data.pop("sign", None) # 排序后的字符串 unsigned_items = 
self.ordered_data(data) unsigned_string = "&".join("{0}={1}".format(k, v) for k, v in unsigned_items) sign = self.sign(unsigned_string.encode("utf-8")) ordered_items = self.ordered_data(data) quoted_string = "&".join("{0}={1}".format(k, quote_plus(v)) for k, v in ordered_items) # 获得最终的订单信息字符串 signed_string = quoted_string + "&sign=" + quote_plus(sign) return signed_string def ordered_data(self, data): complex_keys = [] for key, value in data.items(): if isinstance(value, dict): complex_keys.append(key) # 将字典类型的数据dump出来 for key in complex_keys: data[key] = json.dumps(data[key], separators=(',', ':')) return sorted([(k, v) for k, v in data.items()]) def sign(self, unsigned_string): # 开始计算签名 key = self.app_private_key signer = PKCS1_v1_5.new(key) signature = signer.sign(SHA256.new(unsigned_string)) # base64 编码,转换为unicode表示并移除回车 sign = encodebytes(signature).decode("utf8").replace("\n", "") return sign def _verify(self, raw_content, signature): # 开始计算签名 key = self.alipay_public_key signer = PKCS1_v1_5.new(key) digest = SHA256.new() digest.update(raw_content.encode("utf8")) if signer.verify(digest, decodebytes(signature.encode("utf8"))): return True return False def verify(self, data, signature): if "sign_type" in data: sign_type = data.pop("sign_type") # 排序后的字符串 unsigned_items = self.ordered_data(data) message = "&".join(u"{}={}".format(k, v) for k, v in unsigned_items) return self._verify(message, signature) if __name__ == "__main__": return_url = 
'http://47.92.87.172:8000/?total_amount=0.01&timestamp=2017-08-15+17%3A15%3A13&sign=jnnA1dGO2iu2ltMpxrF4MBKE20Akyn%2FLdYrFDkQ6ckY3Qz24P3DTxIvt%2BBTnR6nRk%2BPAiLjdS4sa%2BC9JomsdNGlrc2Flg6v6qtNzTWI%2FEM5WL0Ver9OqIJSTwamxT6dW9uYF5sc2Ivk1fHYvPuMfysd90lOAP%2FdwnCA12VoiHnflsLBAsdhJazbvquFP%2Bs1QWts29C2%2BXEtIlHxNgIgt3gHXpnYgsidHqfUYwZkasiDGAJt0EgkJ17Dzcljhzccb1oYPSbt%2FS5lnf9IMi%2BN0ZYo9%2FDa2HfvR6HG3WW1K%2FlJfdbLMBk4owomyu0sMY1l%2Fj0iTJniW%2BH4ftIfMOtADHA%3D%3D&trade_no=2017081521001004340200204114&sign_type=RSA2&auth_app_id=2016080600180695&charset=utf-8&seller_id=2088102170208070&method=alipay.trade.page.pay.return&app_id=2016080600180695&out_trade_no=201702021222&version=1.0' alipay = AliPay( appid="2016080600180695", app_notify_url="http://projectsedus.com/", app_private_key_path=u"../trade/keys/private_2048.txt", alipay_public_key_path="../trade/keys/alipay_pub_key.txt", # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥, debug=True, # 默认False, return_url="http://47.92.87.172:8000/" ) o = urlparse(return_url) query = parse_qs(o.query) processed_query = {} ali_sign = query.pop("sign")[0] for key, value in query.items(): processed_query[key] = value[0] print (alipay.verify(processed_query, ali_sign)) url = alipay.direct_pay( subject="测试订单", out_trade_no="201702021222", total_amount=0.01 ) re_url = "https://openapi.alipaydev.com/gateway.do?{data}".format(data=url) print(re_url)
apache-2.0
472,593,492,612,720,300
36.454545
699
0.607264
false
2.880779
false
false
false
edermartioli/ExoplanetLight
src/spectrum.py
1
2235
# -*- coding: utf-8 -*- """ Created on Nov 25 2016 @author: Eder Martioli Laboratorio Nacional de Astrofisica, Brazil spectrum.py is a library of classes and functions to handle spectral data. """ import numpy as np from scipy import constants ########## SPECTRUM CLASS ############ class Spectrum : 'Common base class for a spectrum' def __init__(self, Filename): """ Create a Spectrum object. Parameters ---------- filename : string File to read the spectrum from. Examples -------- >>> sp = Spectrum("spectrumfile.1d.spc") """ self.filename = Filename self.load_spectrum(self.filename) def load_spectrum(self,Filename): try: self.wl,self.flux,self.var = np.loadtxt(Filename, unpack=True, comments='#', usecols=(0,1,2), delimiter=' ') except: print "Error: could not open file:",Filename exit() def getdata(self, wl0=0., wlf=0.) : """ Retrieve data for a given wavelength range Parameters ---------- wl0 : initial wavelength [nm] wlf : final wavelength [nm] Return : wl[], flux[], variance[] """ if (wl0 == 0.) : wl0 = self.wl[0] if (wlf == 0.) : wlf = self.wl[-1] mask = np.where((self.wl > wl0) & (self.wl < wlf)) return self.wl[mask],self.flux[mask],self.var[mask] def applyRVShift(self, RVshift, interp=False) : """ Apply radial velocity shift to the wavelength data. Parameters ---------- RVshift : radial velocity shift [m/s] interp : interpolate shifted data to keep original wavelength sampling? [boolean] """ self.rvshit = RVshift if interp == True : wl_tmp = self.wl*(1.0 + self.rvshit/constants.c) flux_tmp = np.interp(self.wl, wl_tmp, self.flux) self.flux = flux_tmp else : self.wl *= (1.0 + self.rvshit/constants.c)
mit
5,748,290,067,744,352,000
26.256098
120
0.499329
false
4.019784
false
false
false
dpgaspar/Flask-AppBuilder
examples/quickactions/config.py
1
1945
import os from flask_appbuilder.security.manager import ( AUTH_OID, AUTH_REMOTE_USER, AUTH_DB, AUTH_LDAP, AUTH_OAUTH, ) basedir = os.path.abspath(os.path.dirname(__file__)) CSRF_ENABLED = True SECRET_KEY = "\2\1thisismyscretkey\1\2\e\y\y\h" OPENID_PROVIDERS = [ {"name": "Yahoo", "url": "https://me.yahoo.com"}, {"name": "AOL", "url": "http://openid.aol.com/<username>"}, {"name": "Flickr", "url": "http://www.flickr.com/<username>"}, {"name": "MyOpenID", "url": "https://www.myopenid.com"}, ] SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, "app.db") # SQLALCHEMY_DATABASE_URI = 'mysql://root:password@localhost/quickhowto' # SQLALCHEMY_DATABASE_URI = 'postgresql://fab:password@localhost:5432/quickhowto2' # SQLALCHEMY_ECHO = True BABEL_DEFAULT_LOCALE = "en" BABEL_DEFAULT_FOLDER = "translations" LANGUAGES = { "en": {"flag": "gb", "name": "English"}, "pt": {"flag": "pt", "name": "Portuguese"}, "es": {"flag": "es", "name": "Spanish"}, "de": {"flag": "de", "name": "German"}, "zh": {"flag": "cn", "name": "Chinese"}, "ru": {"flag": "ru", "name": "Russian"}, } # ------------------------------ # GLOBALS FOR GENERAL APP's # ------------------------------ UPLOAD_FOLDER = basedir + "/app/static/uploads/" IMG_UPLOAD_FOLDER = basedir + "/app/static/uploads/" IMG_UPLOAD_URL = "/static/uploads/" AUTH_TYPE = AUTH_DB AUTH_ROLE_ADMIN = "Admin" AUTH_ROLE_PUBLIC = "Public" APP_NAME = "F.A.B. Example" APP_ICON = "/static/img/brand.jpg" # APP_THEME = "bootstrap-theme.css" # default # APP_THEME = "cerulean.css" # COOL # APP_THEME = "amelia.css" # APP_THEME = "cosmo.css" # APP_THEME = "cyborg.css" # COOL # APP_THEME = "flatly.css" # APP_THEME = "journal.css" # APP_THEME = "readable.css" # APP_THEME = "simplex.css" # APP_THEME = "slate.css" # COOL # APP_THEME = "spacelab.css" # NICE # APP_THEME = "united.css" # APP_THEME = "yeti.css"
bsd-3-clause
-8,113,568,028,232,835,000
28.923077
82
0.594859
false
2.649864
false
false
false
chipaca/snapcraft
snapcraft/project/_project_options.py
1
12984
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright (C) 2016-2019 Canonical Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging import multiprocessing import os import platform import sys from typing import Set from snapcraft import file_utils from snapcraft.internal import common, errors, os_release logger = logging.getLogger(__name__) _ARCH_TRANSLATIONS = { "aarch64": { "kernel": "arm64", "deb": "arm64", "uts_machine": "aarch64", "cross-compiler-prefix": "aarch64-linux-gnu-", "cross-build-packages": ["gcc-aarch64-linux-gnu", "libc6-dev-arm64-cross"], "triplet": "aarch64-linux-gnu", "core-dynamic-linker": "lib/ld-linux-aarch64.so.1", }, "armv7l": { "kernel": "arm", "deb": "armhf", "uts_machine": "arm", "cross-compiler-prefix": "arm-linux-gnueabihf-", "cross-build-packages": ["gcc-arm-linux-gnueabihf", "libc6-dev-armhf-cross"], "triplet": "arm-linux-gnueabihf", "core-dynamic-linker": "lib/ld-linux-armhf.so.3", }, "i686": { "kernel": "x86", "deb": "i386", "uts_machine": "i686", "triplet": "i386-linux-gnu", }, "ppc": { "kernel": "powerpc", "deb": "powerpc", "uts_machine": "powerpc", "cross-compiler-prefix": "powerpc-linux-gnu-", "cross-build-packages": ["gcc-powerpc-linux-gnu", "libc6-dev-powerpc-cross"], "triplet": "powerpc-linux-gnu", }, "ppc64le": { "kernel": "powerpc", "deb": "ppc64el", "uts_machine": "ppc64el", "cross-compiler-prefix": "powerpc64le-linux-gnu-", "cross-build-packages": [ "gcc-powerpc64le-linux-gnu", 
"libc6-dev-ppc64el-cross", ], "triplet": "powerpc64le-linux-gnu", "core-dynamic-linker": "lib64/ld64.so.2", }, "riscv64": { "kernel": "riscv64", "deb": "riscv64", "uts_machine": "riscv64", "cross-compiler-prefix": "riscv64-linux-gnu-", "cross-build-packages": ["gcc-riscv64-linux-gnu", "libc6-dev-riscv64-cross"], "triplet": "riscv64-linux-gnu", "core-dynamic-linker": "lib/ld-linux-riscv64-lp64d.so.1", }, "s390x": { "kernel": "s390", "deb": "s390x", "uts_machine": "s390x", "cross-compiler-prefix": "s390x-linux-gnu-", "cross-build-packages": ["gcc-s390x-linux-gnu", "libc6-dev-s390x-cross"], "triplet": "s390x-linux-gnu", "core-dynamic-linker": "lib/ld64.so.1", }, "x86_64": { "kernel": "x86", "deb": "amd64", "uts_machine": "x86_64", "triplet": "x86_64-linux-gnu", "core-dynamic-linker": "lib64/ld-linux-x86-64.so.2", }, } _32BIT_USERSPACE_ARCHITECTURE = { "aarch64": "armv7l", "armv8l": "armv7l", "ppc64le": "ppc", "x86_64": "i686", } _WINDOWS_TRANSLATIONS = {"AMD64": "x86_64"} _HOST_CODENAME_FOR_BASE = {"core18": "bionic", "core": "xenial"} _HOST_COMPATIBILITY = { "xenial": ["trusty", "xenial"], "bionic": ["trusty", "xenial", "bionic"], } _STATIC_BASES = ["bare"] # TODO: just check the base. _LINKER_VERSION_FOR_BASE = {"core20": "2.31", "core18": "2.27", "core": "2.23"} def _get_platform_architecture(): architecture = platform.machine() # Translate the windows architectures we know of to architectures # we can work with. if sys.platform == "win32": architecture = _WINDOWS_TRANSLATIONS.get(architecture) if platform.architecture()[0] == "32bit": userspace = _32BIT_USERSPACE_ARCHITECTURE.get(architecture) if userspace: architecture = userspace return architecture class ProjectOptions: @property def parallel_build_count(self) -> int: try: build_count = len(os.sched_getaffinity(0)) except AttributeError: # Fall back to multiprocessing.cpu_count()... 
try: build_count = multiprocessing.cpu_count() except NotImplementedError: logger.warning( "Unable to determine CPU count; disabling parallel builds" ) build_count = 1 return build_count @property def is_cross_compiling(self): return self.__target_machine != self.__platform_arch @property def target_arch(self): return self.__target_arch @property def cross_compiler_prefix(self): try: # cross-compilation of x86 32bit binaries on a x86_64 host is # possible by reusing the native toolchain - let Kbuild figure # it out by itself and pass down an empty cross-compiler-prefix # to start the build if self.__platform_arch == "x86_64" and self.__target_machine == "i686": return "" return self.__machine_info["cross-compiler-prefix"] except KeyError: raise errors.SnapcraftEnvironmentError( "Cross compilation not supported for target arch {!r}".format( self.__target_machine ) ) @property def additional_build_packages(self): packages = [] if self.is_cross_compiling: packages.extend(self.__machine_info.get("cross-build-packages", [])) return packages @property def arch_triplet(self): return self.__machine_info["triplet"] @property def deb_arch(self): return self.__machine_info["deb"] @property def kernel_arch(self): return self.__machine_info["kernel"] @property def parts_dir(self) -> str: return self._parts_dir @property def stage_dir(self) -> str: return self._stage_dir @property def prime_dir(self) -> str: return self._prime_dir @property def debug(self): return self._debug def __init__( self, target_deb_arch=None, debug=False, *, work_dir: str = None ) -> None: # Here for backwards compatibility. 
project_dir = os.getcwd() if work_dir is None: work_dir = project_dir self._debug = debug self._parts_dir = os.path.join(work_dir, "parts") self._stage_dir = os.path.join(work_dir, "stage") self._prime_dir = os.path.join(work_dir, "prime") logger.debug("Parts dir {}".format(self._parts_dir)) logger.debug("Stage dir {}".format(self._stage_dir)) logger.debug("Prime dir {}".format(self._prime_dir)) self._set_machine(target_deb_arch) def _get_content_snaps(self) -> Set[str]: """Temporary shim for unit tests using ProjectOptions where Project is really required. Will be removed in future convergence work. """ return set() def _get_provider_content_dirs(self) -> Set[str]: """Temporary shim for unit tests using ProjectOptions where Project is really required. Will be removed in future convergence work. """ return set() def _get_stage_packages_target_arch(self) -> str: """Stub for 'Project' interface for tests using ProjectOptions().""" return self.deb_arch def is_static_base(self, base: str) -> bool: """Return True if a base that is intended to be static is used. Static bases require all their necessary components to live within the snap. """ return base in _STATIC_BASES def is_host_compatible_with_base(self, base: str) -> bool: """Determines if the host is compatible with the GLIBC of the base. The system should warn early on when building using a host that does not match the intended base, this mechanism here enables additional logic when that is ignored to determine built projects will actually run. :param str base: the base core snap to search for linker. :returns: True if there are no GLIBC incompatibilities with the chosen build host, else it returns False. 
:rtype: bool """ try: codename = os_release.OsRelease().version_codename() except errors.OsReleaseCodenameError: return False logger.debug("Running on {!r}".format(codename)) build_host_for_base = _HOST_CODENAME_FOR_BASE.get(base) if build_host_for_base is None: return False compatible_hosts = _HOST_COMPATIBILITY.get(build_host_for_base, []) return codename in compatible_hosts # This is private to not make the API public given that base # will be part of the new Project. def _get_linker_version_for_base(self, base: str) -> str: """Returns the linker version for base.""" try: return _LINKER_VERSION_FOR_BASE[base] except KeyError: linker_file = os.path.basename(self.get_core_dynamic_linker(base)) return file_utils.get_linker_version_from_file(linker_file) def get_core_dynamic_linker(self, base: str, expand: bool = True) -> str: """Returns the dynamic linker used for the targeted core. :param str base: the base core snap to search for linker. :param bool expand: expand the linker to the actual linker if True, else the main entry point to the linker for the projects architecture. :return: the absolute path to the linker :rtype: str :raises snapcraft.internal.errors.SnapcraftMissingLinkerInBaseError: if the linker cannot be found in the base. :raises snapcraft.internal.errors.SnapcraftEnvironmentError: if a loop is found while resolving the real path to the linker. """ core_path = common.get_installed_snap_path(base) dynamic_linker_path = os.path.join( core_path, self.__machine_info.get("core-dynamic-linker", "lib/ld-linux.so.2"), ) # return immediately if we do not need to expand if not expand: return dynamic_linker_path # We can't use os.path.realpath because any absolute symlinks # have to be interpreted relative to core_path, not the real # root. 
seen_paths = set() # type: Set[str] while True: if dynamic_linker_path in seen_paths: raise errors.SnapcraftEnvironmentError( "found symlink loop resolving dynamic linker path" ) seen_paths.add(dynamic_linker_path) if not os.path.lexists(dynamic_linker_path): raise errors.SnapcraftMissingLinkerInBaseError( base=base, linker_path=dynamic_linker_path ) if not os.path.islink(dynamic_linker_path): return dynamic_linker_path link_contents = os.readlink(dynamic_linker_path) if os.path.isabs(link_contents): dynamic_linker_path = os.path.join(core_path, link_contents.lstrip("/")) else: dynamic_linker_path = os.path.join( os.path.dirname(dynamic_linker_path), link_contents ) def _set_machine(self, target_deb_arch): self.__platform_arch = _get_platform_architecture() if not target_deb_arch: self.__target_machine = self.__platform_arch else: self.__target_machine = _find_machine(target_deb_arch) logger.info("Setting target machine to {!r}".format(target_deb_arch)) self.__machine_info = _ARCH_TRANSLATIONS[self.__target_machine] # Set target arch to match the host if unspecified. if target_deb_arch is None: self.__target_arch = self.__machine_info.get("deb") else: self.__target_arch = target_deb_arch def _get_deb_arch(machine): return _ARCH_TRANSLATIONS[machine].get("deb", None) def _find_machine(deb_arch): for machine in _ARCH_TRANSLATIONS: if _ARCH_TRANSLATIONS[machine].get("deb", "") == deb_arch: return machine elif _ARCH_TRANSLATIONS[machine].get("uts_machine", "") == deb_arch: return machine raise errors.SnapcraftEnvironmentError( "Cannot set machine from deb_arch {!r}".format(deb_arch) )
gpl-3.0
-6,421,764,754,426,104,000
32.900783
88
0.59658
false
3.788737
false
false
false
eroicaleo/LearningPython
interview/leet/146_LRU_Cache.py
1
2568
#!/usr/bin/env python class LRUCache: class Node: def __init__(self, key, val): self.val, self.key = val, key self.prev = None self.next = None def __init__(self, capacity): """ :type capacity: int """ self.capacity = capacity self.dict = dict() self.head, self.tail = None, None def get(self, key): """ :type key: int :rtype: int """ if key in self.dict: node = self.dict[key] if node == self.head: return node.val node.prev.next = node.next if node == self.tail: self.tail = node.prev else: node.next.prev = node.prev print('In get node: %d' % node.key) print('In get self.head: %d' % self.head.key) self.head.prev = node node.next, self.head, = self.head, node print('In get after swapping node: %d' % node.key) print('In get after swapping self.head: %d' % self.head.key) print('In get after swapping self.head.next.prev: %d' % self.head.next.prev.key) return node.val return -1 def put(self, key, value): """ :type key: int :type value: int :rtype: void """ if self.get(key) != -1: self.head.val = value elif len(self.dict) < self.capacity: print("I am inserting new node: %d" % (key)) node = self.Node(key, value) if len(self.dict) == 0: self.tail = node else: self.head.prev = node node.next, self.head = self.head, node print("new head: %d" % self.head.key) self.dict[key] = node else: self.get(self.tail.key) node = self.head node.val = value print('Prepare to delete key %d' % node.key) del self.dict[node.key] node.key = key self.dict[key] = node cache = LRUCache(2) print(cache.get(1)) cache.put(2, 6) print(cache.get(1)) cache.put(1, 5) cache.put(1, 2) print(cache.get(1)) print(cache.get(2)) quit() cache = LRUCache(2) cache.put(1, 1) print(cache.get(1)) print("now head: ", cache.head.key) print(cache.get(2)) cache.put(2, 2) print("now head: ", cache.head.key) print(cache.get(1)) print("now head: ", cache.head.key) print(cache.get(2)) cache.put(3, 3) print(cache.get(2)) print(cache.get(1))
mit
2,415,132,396,107,739,600
27.21978
92
0.503505
false
3.396825
false
false
false
martinsch/vigra
vigranumpy/lib/pyqt/imagewindow.py
1
23939
####################################################################### # # Copyright 2009-2010 by Ullrich Koethe # # This file is part of the VIGRA computer vision library. # The VIGRA Website is # http://hci.iwr.uni-heidelberg.de/vigra/ # Please direct questions, bug reports, and contributions to # ullrich.koethe@iwr.uni-heidelberg.de or # vigra@informatik.uni-hamburg.de # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
# ####################################################################### import math, os, numpy, PyQt4 import PyQt4.QtCore as qcore import PyQt4.QtGui as qt from PyQt4.QtCore import SIGNAL import vigra import vigra.ufunc try: from VigraQt import OverlayViewer, ImageCursor except Exception, e: vigra._fallbackModule('VigraQt', ''' %s If VigraQt is missing on your system, you can download it from http://kogs-www.informatik.uni-hamburg.de/~meine/software/vigraqt/.''' % str(e)) from VigraQt import OverlayViewer, ImageCursor import quickdialog import weakref import viewer2svg class Crosshair(ImageCursor): def __init__(self, *args): ImageCursor.__init__(self, *args) self.visible = False self.position = qcore.QPoint(-1, -1) def setVisible(self, what=True): self.visible = what if what: ImageCursor.setPosition(self, self.position) else: ImageCursor.setPosition(self, qcore.QPoint(-1, -1)) def setPosition(self, pos): self.position = pos if self.visible: ImageCursor.setPosition(self, self.position) class ImageViewer(OverlayViewer): activeViewers = weakref.WeakValueDictionary() def __init__(self, image, normalize=True, title=None, parent=None): OverlayViewer.__init__(self, parent) self.setImage(image, normalize) self._savedExpression = "x" self._lastSaveType = 2 self.overlays = [] if title is not None: self.setWindowTitle(title) elif hasattr(image, "name"): self.setWindowTitle(image.name) else: for k in xrange(1, 10000): if not ImageViewer.activeViewers.has_key(k): break ImageViewer.activeViewers[k] = self self.setWindowTitle("Image %d" % k) #self.imageCursor = ImageCursor(self) # doesn't work anymore - setVisible() is gone self.imageCursor = Crosshair(self) self.imageCursor.setVisible(False) self.imageCursor.setPosition(qcore.QPoint(self.image.width // 2, self.image.height // 2)) OverlayViewer.addOverlay(self, self.imageCursor) self.zoomInAction = qt.QAction("Zoom in", self) self.zoomInAction.setShortcut("+") self.connect(self.zoomInAction, SIGNAL("triggered()"), 
self.zoomInPopup) self.zoomOutAction = qt.QAction("Zoom out", self) self.zoomOutAction.setShortcut("-") self.connect(self.zoomOutAction, SIGNAL("triggered()"), self.zoomOutPopup) self.saveAction = qt.QAction("Save image...", self) self.saveAction.setShortcut("S") self.connect(self.saveAction, SIGNAL("triggered()"), self.writeImage) self.svgAction = qt.QAction("Save as SVG...", self) self.svgAction.setShortcut("V") self.connect(self.svgAction, SIGNAL("triggered()"), self.writeSVG) self.expressionAction = qt.QAction("Apply expression...", self) self.expressionAction.setShortcut("E") self.connect(self.expressionAction, SIGNAL("triggered()"), self.applyExpression) self.cursorAction = qt.QAction("Line cursor", self) self.cursorAction.setShortcut("L") self.cursorAction.setCheckable(True) self.cursorAction.setChecked(False) self.connect(self.cursorAction, SIGNAL("triggered()"), self._toggleImageCursor) self.popup = qt.QMenu(self) self.popup.addAction(self.zoomInAction) self.popup.addAction(self.zoomOutAction) self.popup.addAction(self.saveAction) self.popup.addAction(self.svgAction) self.popup.addAction(self.expressionAction) self.popup.addAction(self.cursorAction) self.overlayMenu = self.popup.addMenu("Overlays") self.connect(self.overlayMenu, SIGNAL("aboutToShow()"), self.overlayPopup) def setImage(self, image, normalize=True): if not hasattr(image, "qimage"): image = image.view(vigra.Image) self.image = image self._normalized = normalize OverlayViewer.setImage(self, image.qimage(normalize)) def showImageCursor(self, yesOrNo=True): if yesOrNo != self.cursorAction.isChecked(): self.cursorAction.trigger() def _toggleImageCursor(self): self.imageCursor.activateTool(self.cursorAction.isChecked()) self.imageCursor.setVisible(self.cursorAction.isChecked()) def addOverlay(self, overlay): if not hasattr(overlay, "draw"): raise TypeError("addOverlay: " + str(overlay) + "is no valid overlay with 'draw' method!") if overlay.parent() is None: overlay.setParent(self) overlay.visible 
= True if not hasattr(overlay, "name") or not overlay.name: overlay.name = self._defaultOverlayName(overlay) self.overlays.append(overlay) OverlayViewer.addOverlay(self, overlay) self.update() return len(self.overlays) - 1 def removeOverlay(self, overlay): if type(overlay) == int: try: OverlayViewer.removeOverlay(self, self.overlays[overlay]) self.overlays.pop(overlay) self.update() except IndexError, e: print "No such overlay." else: try: self.overlays.remove(overlay) OverlayViewer.removeOverlay(self, overlay) self.update() except ValueError, e: print "No such overlay." def _slideAfterZoom(self, shift): if self.zoomLevel() > 0: shift *= 1 + self.zoomLevel() elif self.zoomLevel() < 0: shift /= 1 - self.zoomLevel() self.slideBy(shift) def zoomInPopup(self): beforePos = self.imageCoordinate(self.mousepos) self.zoomUp() afterPos = self.imageCoordinate(self.mousepos) self._slideAfterZoom(afterPos - beforePos) def zoomOutPopup(self): beforePos = self.imageCoordinate(self.mousepos) self.zoomDown() afterPos = self.imageCoordinate(self.mousepos) self._slideAfterZoom(afterPos - beforePos) def _defaultOverlayName(self, o): name = str(o.__class__) if name[:8] == "<class '": name = name[8:-2] try: name = name[name.rindex(".") + 1:] except ValueError: pass return name def overlayPopup(self): self.overlayMenu.clear() index = 0 hideable = False showable = False for o in self.overlays: overlayName = o.name text = "[%d] %s" % (index, overlayName) color = None if hasattr(o, "color") and isinstance(o.color, qt.QColor): color = o.color pmHeight = 5 elif hasattr(o, "fillColor") and isinstance(o.fillColor, qt.QColor): color = o.fillColor pmHeight = 16 if color: colorPM = qt.QPixmap(16, pmHeight) colorPM.fill(color) icon = qt.QIcon(colorPM) id = qt.QAction(icon, text, self) else: id = qt.QAction(text, self) self.overlayMenu.addAction(id) id.setCheckable(True) self.connect(id, SIGNAL('triggered()'), self.toggleOverlayVisibilityWithParam(o)) id.setChecked(o.isVisible()) if o.isVisible(): 
hideable = True else: showable = True index += 1 id = qt.QAction("&Hide all", self) self.overlayMenu.addAction(id) self.connect(id, SIGNAL('triggered()'), self.toggleOverlayVisibilityWithParam(False)) id.setEnabled(hideable) id = qt.QAction("&Show all", self) self.overlayMenu.addAction(id) self.connect(id, SIGNAL('triggered()'), self.toggleOverlayVisibilityWithParam(True)) id.setEnabled(showable) def toggleOverlayVisibilityWithParam(self, o): return lambda: self.toggleOverlayVisibility(o) def toggleOverlayVisibility(self, o=None): '''Toggle or set visibility of given overlay and update view. The parameter can be a boolean - which sets the visibility of all overlays accordingly - an overlay object or the index of the overlay to be hidden/re-shown. If it is omitted, all overlays will be toggled. ''' if o is None: for k in self.overlays: k.setVisible(not k.isVisible()) elif type(o) is bool: for k in self.overlays: k.setVisible(o) else: if type(o) is int: o = self.overlays[o] o.setVisible(not o.isVisible()) self.update() def applyExpression(self, expr=None, normalized=None): if expr is not None: self._savedExpression = expr else: d = quickdialog.QuickDialog(self, "Enter Expression") d.expression = quickdialog.OptionalStringInput(d, "Execute 'lambda x: ") d.expression.setText(self._savedExpression) d.expression.setFocus() d.addSpacing(10) d.norm = quickdialog.CheckBox(d, "Normalize intensity to range 0...255") d.norm.setChecked(self._normalized) if d.exec_() == 0: return self._savedExpression = d.expression.text() self._normalized = True if d.norm.selection() else False if normalized is not None: self._normalized = normalized try: image, normalized = self.getDisplayedImage() except Exception, e: qt.QMessageBox.critical(self, "Error Applying Expression", str(e)) return OverlayViewer.setImage(self, image.qimage(normalized)) def getDisplayedImage(self): """Returns the displayed image and the normalize flag (BYTE or NBYTE) as tuple/pair. 
Note that the returned image is the original image if no expression is applied, i.e. you should not change the returned object. If active, the expression is applied via eval() on every call of getDisplayedImage().""" if not self._savedExpression or self._savedExpression == "x": self._savedExpression = "x" image = self.image else: for f in vigra.ufunc.__all__: exec 'from vigra.ufunc import %s' % f for f in dir(vigra.colors): if not f.startswith('__'): exec 'from vigra.colors import %s' % f x = self.image image = eval(self._savedExpression) return image, self._normalized def writeImage(self): d = quickdialog.QuickDialog(self, "Write Image") imageFileExtensions = '*.' + ' *.'.join(vigra.impex.listExtensions().split(' ')) d.filedialog = quickdialog.OutputFile( d, "Output filename:", "Image Files (" + imageFileExtensions + ")") d.filedialog.setFocus() d.choices = quickdialog.HDialogGroup(d) d.type = quickdialog.VChoice(d.choices, "Output Pixel Type") d.type.addButton("Byte", "UINT8") d.type.addButton("Normalized to byte", "NBYTE") d.type.addButton("Keep type", "NATIVE") d.type.selectButton(1 if self._normalized else 0) d.type.buttonBox.setEnabled(self._lastSaveType) d.choices.addStretch(1) d.which = quickdialog.VChoice(d.choices, "Save ...") d.which.addButton("displayed image (zoomed, overlays)", 0) d.which.addButton("displayed image (1:1)", 1) d.which.addButton("original image", 2) d.connect(d.which.buttonBox, SIGNAL("clicked(int)"), \ d.type.buttonBox.setEnabled) d.which.selectButton(self._lastSaveType) while True: if d.exec_() == 0: return filename = d.filedialog.text() pixelType = d.type.selection() self._lastSaveType = d.which.selection() if d.which.selection(): if d.which.selection() == 2: image = self.image else: image = self.getDisplay()[0] try: image.writeImage(filename, pixelType) except RuntimeError, e: qt.QMessageBox.critical(self, "Error", str(e)) else: return else: formats = {"png": "PNG", \ "bmp": "BMP", \ "xbm": "XBM", \ "xpm": "XPM", \ "pnm": "PPM", \ 
"ppm": "PPM", \ "png": "PNG", \ "jpg": "JPEG", \ "jpeg": "JPEG", \ "tif": "TIF"} _, ext = os.path.splitext(filename) if not formats.has_key(ext[1:]): f = " ".join(formats.keys()) qt.QMessageBox.critical(self, "Error", \ "Displayed image with overlays can only be stored as\n" + f) else: pixmap = self.getContentsPixmap() pixmap.save(filename, formats[ext[1:]]) return def writeSVG(self): d = quickdialog.QuickDialog(self, "Write Viewer Contents to SVG") d.filedialog = quickdialog.OutputFile( d, "Output filename:", "SVG Files (*.svg)") d.filedialog.setFocus() d.choices = quickdialog.HDialogGroup(d) d.which = quickdialog.VChoice(d.choices, "Save ...") d.which.addButton("all overlays", 0) d.which.addButton("only displayed overlays", 1) d.which.selectButton(self._lastSaveType) while True: if d.exec_() == 0: return self._lastSaveType = d.which.selection() allOVs = (d.which.selection() == 0) filename = d.filedialog.text() basename, ext = os.path.splitext(filename) try: if ext == ".SVG" or ext == ".svg": viewer2svg.viewer2svg(self, basename, not allOVs) else: viewer2svg.viewer2svg(self, filename, not allOVs) except RuntimeError, e: qt.QMessageBox.critical(self, "Error", str(e)) return def contextMenuEvent(self, e): "handles pop-up menu" self.overlayMenu.setEnabled(len(self.overlays) > 0) self.mousepos = e.pos() self.popup.exec_(e.globalPos()) def keyPressEvent(self, e): "handles keys [S], [E], and possibly [Q] (for toplevel-windows)" if e.key() == qcore.Qt.Key_Q and not self.parent(): self.close() elif e.key() == qcore.Qt.Key_S: self.writeImage() elif e.key() == qcore.Qt.Key_E: self.applyExpression() elif e.key() == qcore.Qt.Key_L: self.cursorAction.trigger() elif e.key() == qcore.Qt.Key_Right or e.key() == qcore.Qt.Key_Left or \ e.key() == qcore.Qt.Key_Up or e.key() == qcore.Qt.Key_Down: OverlayViewer.keyPressEvent(self, e) elif e.key() == qcore.Qt.Key_Plus or e.key() == qcore.Qt.Key_Greater: OverlayViewer.zoomUp(self) elif e.key() == qcore.Qt.Key_Minus or e.key() == 
qcore.Qt.Key_Less: OverlayViewer.zoomDown(self) else: self.emit(qcore.SIGNAL("keyPressed"), (e.key())) e.ignore() def keyReleaseEvent(self, e): self.emit(qcore.SIGNAL("keyReleased"), (e.key())) e.ignore() def mousePressEvent(self, e): imagePos = OverlayViewer.imageCoordinateF(self, qcore.QPoint(e.x(), e.y())) self.emit(qcore.SIGNAL("mousePressed"), (imagePos.x(), imagePos.y(), e.button())) OverlayViewer.mousePressEvent(self, e) e.ignore() class CaptionImageViewer(qt.QFrame): def __init__(self, image, normalize=True, title=None, parent=None): qt.QFrame.__init__(self, parent) self.viewer = ImageViewer(image, normalize, title, parent=self) self.setWindowTitle(self.viewer.windowTitle()) self._captionCoords = 0, 0 self._xplaces = int(math.log10(self.viewer.image.width) + 1.0) self._yplaces = int(math.log10(self.viewer.image.height) + 1.0) self._valueplaces = self.viewer.image.channels * 5 self.label = qt.QLabel(self) font = qt.QFont() font.setPointSize(10) font.setStyleHint(qt.QFont.TypeWriter) self.label.setFont(font) self._layout = qt.QVBoxLayout(self) self._layout.setSpacing(5) self._layout.addWidget(self.viewer, 1) self._layout.addWidget(self.label) self.connect(self.viewer, SIGNAL('mouseOver(int, int)'), self.updateCaption) self.connect(self.viewer.cursorAction, SIGNAL('triggered()'), self._toggleCaptionSignals) self.updateCaption() def updateCaption(self, x=None, y=None): x = int(round(x)) if x is not None else self._captionCoords[0] y = int(round(y)) if y is not None else self._captionCoords[1] if x < 0 or x >= self.viewer.image.width or \ y < 0 or y >= self.viewer.image.height: return self._captionCoords = x, y label = str(x).rjust(self._xplaces) + " x " + str(y).rjust(self._yplaces) +\ " = " + str(self.viewer.image[x, y]).ljust(self._valueplaces) self.label.setText(label) self.emit(SIGNAL('captionChanged'), self.label.text()) def updateCaptionP(self, point): self.updateCaption(point.x(), point.y()) def _toggleCaptionSignals(self): if 
self.viewer.cursorAction.isChecked(): self.disconnect(self.viewer, SIGNAL('mouseOver(int, int)'), self.updateCaption) self.connect(self.viewer.imageCursor, SIGNAL('positionChanged(QPoint)'), self.updateCaptionP) else: self.connect(self.viewer, SIGNAL('mouseOver(int, int)'), self.updateCaption) self.disconnect(self.viewer.imageCursor, SIGNAL('positionChanged(QPoint)'), self.updateCaptionP) def setImage(self, image, normalize=None): """imageWindow.setImage(image, normalize = None) Replace the current image with the given one. If normalized is not given (or None), the normalized state is not changed.""" self.viewer.setImage(image, normalize) self.updateCaption() class CursorAction(qt.QAction): def __init__(self, name, parent): qt.QAction.__init__(self, name, parent) self.x, self.y = -1, -1 self.zoomLevel = 0 def trigger(self): qt.QAction.trigger(self) for v in self.viewers: v.viewer.cursorAction.setChecked(self.isChecked()) v.viewer._toggleImageCursor() v._toggleCaptionSignals() def broadcastPosition(self, pos): if self.x == pos.x() and self.y == pos.y(): return self.x, self.y = pos.x(), pos.y() for v in self.viewers: v.viewer.imageCursor.setPosition(pos) def broadcastZoom(self, level): if self.zoomLevel == level: return self.zoomLevel = level for v in self.viewers: v.viewer.setZoomLevel(level) class ImageWindow(qt.QFrame): '''Display one or more images in a grid-like layout. ''' def __init__(self, parent=None): qt.QFrame.__init__(self, parent) self.cursorAction = CursorAction("Connected line cursors", self) self.cursorAction.setCheckable(True) self.cursorAction.setChecked(False) self.addAction(self.cursorAction) self.cursorAction.viewers = [] self.layout = qt.QGridLayout(self) def setImage(self, image, x=0, y=0, normalize=True, title=None): """Place the given image at the given position of this window's grid layout. If an image already exists at this position, it is replaced. 
""" if self.layout.itemAtPosition(y, x): self.layout.itemAtPosition(y, x).widget().setImage(image, normalize) else: CIviewer = CaptionImageViewer(image, normalize, title, parent=self) self.layout.addWidget(CIviewer, y, x) self.cursorAction.viewers.append(CIviewer) if len(self.cursorAction.viewers) == 1: self.setWindowTitle(CIviewer.windowTitle()) if self.cursorAction.x != -1: CIviewer.viewer.imageCursor.setPosition( qcore.QPoint(self.cursorAction.x, self.cursorAction.y)) CIviewer.viewer.setZoomLevel(self.cursorAction.zoomLevel) if self.cursorAction.isChecked(): CIviewer.viewer.cursorAction.trigger() self.disconnect(CIviewer.viewer.cursorAction, SIGNAL("triggered()"), CIviewer.viewer._toggleImageCursor) self.connect(CIviewer.viewer.cursorAction, SIGNAL("triggered()"), self.cursorAction.trigger) self.connect(CIviewer.viewer.imageCursor, SIGNAL("positionChanged(QPoint)"), self.cursorAction.broadcastPosition) self.connect(CIviewer.viewer, SIGNAL("zoomLevelChanged(int)"), self.cursorAction.broadcastZoom) self.updateGeometry() # this call is necessary to update the sizeHint() before adjustSize() is called qcore.QCoreApplication.processEvents() self.adjustSize() def viewer(self, x=0, y=0): if self.layout.itemAtPosition(y, x): return self.layout.itemAtPosition(y, x).widget().viewer raise ValueError("ImageWindow.viewer(): viewer at (%d, %d) is undefined." % (x, y)) def showImage(image, normalize=True, title=None): if isinstance(image, str): image = vigra.impex.readImage(image) v = ImageWindow() v.setImage(image, normalize=normalize, title=title) v.show() return v
mit
4,147,204,187,256,811,500
37.799028
97
0.588329
false
3.981207
false
false
false
GSA/PricesPaidAPI
SolrLodr.py
1
4723
#!/usr/local/bin/python import solr import sys, traceback # This file is for (for example) Apache with mod_wsgi. import sys, os # import sys # sys.path.insert(0, '../configuration/') # The purpose of this file is to take the standard # datafiles and load them into SOLR in such a way that they # will be searchable. # This is meant to be run from a command line because # I assume it is to be invoked when you change the # source data directory, which implies you are changing # files and it will be easy to run it from a command line. # Later, we can wrap this into something that allows # a file to be uploaded through the API. # We may someday need to manage the SOLR index with # an administrative interface, but for now the goal is # just to make it reflect the directory. I'm assuming # those are the simplest way to do these things. import Transaction import time from configs.ppApiConfig import PathToDataFiles, MAXIMUM_NUMBER_TO_LOAD, SolrDeleteExistingData, PathToActualInputFiles # Note: For now, these are explict imports. # Evntually, we want to make this automatic, and essentially # create a dynamic array of adapters and loaders based on # what we find in some directory so that it is easily # extendable. But that would be over-engineering if we did it now. 
from RevAucAdapter import getDictionaryFromRevAuc,loadRevAucFromCSVFile from OS2Adapter import getDictionaryFromOS2,loadOS2FromCSVFile from GSAAdvAdapter import getDictionaryFromGSAAdv,loadGSAAdvFromCSVFile from LabEquipAdapter import getDictionaryFromLabEquipment,loadLabequipmentFromCSVFile from USASpendingAdapter import getDictionaryFromUSASpending,loadUSASpendingFromCSVFile from EDWGSAAdvAdapter import getDictionaryFromEDWGSAAdv,loadEDWGSAAdvFromCSVFile from csv_rename import splitfiles from os import listdir from os.path import isfile, join import re import logging import SearchApi logger = logging.getLogger('PPSolrLodr') hdlr = logging.FileHandler('../logs/PPSolrLodr.log') formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.ERROR) LIMIT_NUM_MATCHING_TRANSACTIONS = 5000*1000*100; # create a connection to a solr server # This needs to come from ppconfig solrCon = solr.SolrConnection('http://localhost:8983/solr') def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in xrange(0, len(l), n): yield l[i:i+n] idcnt = 0; def loadChunk(filename,chunk): global idcnt l = [] for t in chunk: d = {} # we need to look at the dictionary and map # non standard fields to those matching our "dynamic field" name # in the schema. for key, value in t.dict.items(): v = unicode(value, errors='ignore') # This unicode stuff needs to be changed at the source.. # We should not carry around bad data and then cover it up like this! if (key in Transaction.STANDARD_FIELDS): d[unicode(key,errors='ignore')] = v; else: # I think _txt might be clearer! d[key+"_t"] = v; # possibly the addtion of this id field should actually be done # when we create the objects! That would make the class useful! 
d['id'] = filename+"_"+str(idcnt); idcnt = idcnt+1; l.append(d); try: print "about to add "+str(len(l)) solrCon.add_many(l) solrCon.commit() print "success" except: print "failure" exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stderr) logger.error("don't know what went wrong here") def loadSolr(filename,transactions): global idcnt chunkedTransactions = list(chunks(transactions, 1000)) for chunk in chunkedTransactions: loadChunk(filename,chunk) # Before we load, we need to delete! # This seems a little dangerous, but there is not much we can do. # We really want to make this a command-line argument so # that we can load one data file at a time. # Default param for SolrDeleteExistingData in ppGuiConfig is F if SolrDeleteExistingData=='T': response = solrCon.delete_query('*:*') solrCon.commit() print "Solr Loader Starts" onlyfiles = [ f for f in listdir(PathToActualInputFiles) if isfile(join(PathToActualInputFiles,f)) ] onlycsvfiles = [ f for f in onlyfiles if re.search(".csv$",f)] for filename in onlycsvfiles: splitfiles(filename) SearchApi.applyToLoadedFiles(filename,PathToDataFiles,None,loadSolr,MAXIMUM_NUMBER_TO_LOAD) print "Solr Loader Ends"
unlicense
4,258,783,536,388,830,700
34.246269
119
0.712683
false
3.652746
false
false
false
ntoll/code-dojo
adventure/week3/team3/adventure.py
1
6088
from cmd import Cmd import re DIRECTIONS = 'N', 'E', 'S', 'W' NORTH, EAST, SOUTH, WEST = DIRECTIONS class Player(object): def __init__(self, location, name='Player'): assert isinstance(location, Location) self.location = location self.name = name class Location(object): def __init__(self, name, description=""): self.name = name self.description = description self.exits = dict() self.props = [] def __str__(self): return self.name def add_direction(self, direction, other_location): assert direction in DIRECTIONS self.exits[direction] = other_location def describe(self): out = '' out += "Current location: %s\n%s\n\n" % (self.name, self.description) for direction, location in self.exits.items(): out += "\t%s (%s)\n" % (location, direction) if self.props: plural = len(self.props) > 1 out += "\n%s item%s may come in handy (hint hint):\n\t%s" \ % (['This', 'These'][plural], ['', 's'][plural], '\n\t'.join(prop.aliases[0] for prop in self.props)) return out class Prop(object): def __init__(self, name): self.description = None self.location = None self.aliases = [name] def test_location(): startroot = Location('Start room') kitchen = Location('Kitchen') startroot.add_direction(NORTH, kitchen) def test_player(): lobby = Location('Lobby') john = Player(lobby, 'John') def load_universe(content): location = first_location = None locations = {} props = {} #parts = re.split(r"(?:\n|\r\n|\r){2,}", content.read()) parts = content.read().split('\r\n\r\n') import pdb for part in parts: location = None prop = None for line in part.splitlines(): line = line.strip() if not line or line.startswith('#'): continue #if line == 'N:Hall': # pdb.set_trace() if not location and not prop: # first line if line.startswith(':'): location = Location(line[1:]) locations[line[1:]] = location if not first_location: first_location = location if line.startswith('*'): prop = Prop(line[1:]) props[line[1:]] = prop else: if location: #print 'line', line if not location.description or line[1] != ':': 
location.description+= line else: direction, destination = line.split(':', 1) #print 'direction, destination', direction, destination location.add_direction(direction, destination) else: if not prop.location: items_location = locations[line] prop.location = items_location items_location.props.append(prop) elif not prop.description: prop.description = line elif line.startswith("A:"): # aliases #A:flashlight prop.aliases = [x.strip() for x in line[2:].split(',')] for location in locations.values(): for direction, destination in location.exits.items(): try: location.add_direction(direction, locations[destination]) except KeyError: raise SystemError("Your universe file sucks! %s" % destination) return locations, first_location class Game(Cmd): def __init__(self, gamefile, player_name): Cmd.__init__(self) self.locations, self.start_room = load_universe(file(gamefile)) self.player = Player(self.start_room, player_name) print self.player.location.describe() def do_move(self, direction): direction = direction.upper() newroom = self.player.location.exits.get(direction,None) if newroom == None: print "No pass around!" return self.player.location = self.player.location.exits[direction] def do_look(self, where): if where == "": self.player.location.describe() else: # TODO validate where newroom = self.player.location.exits.get(where,None) print newroom.describe() pass def do_joke(self, ok): print "that is not funny. What don't you try a pun?" if hasattr(self, 'joke'): print 'this is funny:%s' % self.joke self.joke = ok def postcmd(self, stop, x): #pass if not hasattr(self, 'joke'): print self.player.location.describe() #print self.player.location.describe() def play(gamefile): #start_room = _create_universe() player_name = raw_input('Player name?: ') or 'No name' g = Game(gamefile, player_name) g.cmdloop() ''' while True: if not player.location.exits: print "No more exits! GAME OVER!" break next_direction = raw_input('Where to next? 
').upper() while next_direction not in player.location.exits.keys(): next_direction = raw_input('Where to next? (%s) ' %\ ', '.join(player.location.exits.keys())).upper() player.location = player.location.exits[next_direction] ''' if __name__ == '__main__': import sys if sys.argv[1] == 'test': test_location() test_player() sys.exit(0) try: play(sys.argv[1]) except KeyboardInterrupt: pass
mit
1,014,150,448,326,991,400
29.813472
113
0.508377
false
4.272281
false
false
false
amanzi/ats-dev
tools/meshing_ats/meshing_ats/meshing_ats.py
1
34933
"""Extrudes a 2D mesh to generate an ExodusII 3D mesh. Works with and assumes all polyhedra cells (and polygon faces). To see usage, run: ------------------------------------------------------------ python meshing_ats.py -h Example distributed with this source, to run: ------------------------------------------------------------ $> cd four-polygon-test $> python ../meshing_ats.py -n 10 -d 1 ./four_polygon.vtk $> mkdir run0 $> cd run0 $> ats --xml_file=../test1-fv-four-polygon.xml Requires building the latest version of Exodus ------------------------------------------------------------ Note that this is typically done in your standard ATS installation, assuming you have built your Amanzi TPLs with shared libraries (the default through bootstrap). In that case, simply ensure that ${AMANZI_TPLS_DIR}/SEACAS/lib is in your PYTHONPATH. """ from __future__ import print_function import sys,os import numpy as np import collections import argparse try: import exodus except ImportError: sys.path.append(os.path.join(os.environ["SEACAS_DIR"],"lib")) import exodus class SideSet(object): def __init__(self, name, setid, elem_list, side_list): assert(type(setid) == int) assert(type(elem_list) == list or type(elem_list) == np.ndarray) assert(type(side_list) == list or type(side_list) == np.ndarray) self.name = name self.setid = setid self.elem_list = elem_list self.side_list = side_list class LabeledSet(object): def __init__(self, name, setid, entity, ent_ids): assert entity in ['CELL', 'FACE', 'NODE'] assert(type(setid) == int) assert(type(ent_ids) == list or type(ent_ids) == np.ndarray) self.name = name self.setid = setid self.entity = entity self.ent_ids = np.array(ent_ids) class Mesh2D(object): def __init__(self, coords, connectivity, labeled_sets=None, check_handedness=True): """ Creates a 2D mesh from coordinates and a list cell-to-node connectivity lists. 
coords : numpy array of shape (NCOORDS, NDIMS) connectivity : list of lists of integer indices into coords specifying a (clockwise OR counterclockwise) ordering of the nodes around the 2D cell labeled_sets : list of LabeledSet objects """ assert type(coords) == np.ndarray assert len(coords.shape) == 2 self.dim = coords.shape[1] self.coords = coords self.conn = connectivity if labeled_sets is not None: self.labeled_sets = labeled_sets else: self.labeled_sets = [] self.validate() self.edge_counts() if check_handedness: self.check_handedness() def validate(self): assert self.coords.shape[1] == 2 or self.coords.shape[1] == 3 assert type(self.conn) is list for f in self.conn: assert type(f) is list assert len(set(f)) == len(f) for i in f: assert i < self.coords.shape[0] for ls in self.labeled_sets: if ls.entity == "NODE": size = len(self.coords) elif ls.entity == "CELL": size = len(self.conn) for i in ls.ent_ids: assert i < size return True def num_cells(self): return len(self.conn) def num_nodes(self): return self.coords.shape[0] def num_edges(self): return len(self.edges()) @staticmethod def edge_hash(i,j): return tuple(sorted((i,j))) def edges(self): return self.edge_counts().keys() def edge_counts(self): try: return self._edges except AttributeError: self._edges = collections.Counter(self.edge_hash(f[i], f[(i+1)%len(f)]) for f in self.conn for i in range(len(f))) return self._edges def check_handedness(self): for conn in self.conn: points = np.array([self.coords[c] for c in conn]) cross = 0 for i in range(len(points)): im = i - 1 ip = i + 1 if ip == len(points): ip = 0 p = points[ip] - points[i] m = points[i] - points[im] cross = cross + p[1] * m[0] - p[0] * m[1] if cross < 0: conn.reverse() def plot(self, color=None, ax=None): if color is None: import colors cm = colors.cm_mapper(0,self.num_cells()-1) colors = [cm(i) for i in range(self.num_cells())] else: colors = color verts = [[self.coords[i,0:2] for i in f] for f in self.conn] from matplotlib import collections 
gons = collections.PolyCollection(verts, facecolors=colors) from matplotlib import pyplot as plt if ax is None: fig,ax = plt.subplots(1,1) ax.add_collection(gons) ax.autoscale_view() @classmethod def read_VTK(cls, filename): try: return cls.read_VTK_Simplices(filename) except AssertionError: return cls.read_VTK_Unstructured(filename) @classmethod def read_VTK_Unstructured(cls, filename): with open(filename,'r') as fid: points_found = False polygons_found = False while True: line = fid.readline().decode('utf-8') if not line: # EOF break line = line.strip() if len(line) == 0: continue split = line.split() section = split[0] if section == 'POINTS': ncoords = int(split[1]) points = np.fromfile(fid, count=ncoords*3, sep=' ', dtype='d') points = points.reshape(ncoords, 3) points_found = True elif section == 'POLYGONS': ncells = int(split[1]) n_to_read = int(split[2]) gons = [] data = np.fromfile(fid, count=n_to_read, sep=' ', dtype='i') idx = 0 for i in range(ncells): n_in_gon = data[idx] gon = list(data[idx+1:idx+1+n_in_gon]) # check handedness -- need normals to point up! 
cross = [] for i in range(len(gon)): if i == len(gon)-1: ip = 0 ipp = 1 elif i == len(gon)-2: ip = i+1 ipp = 0 else: ip = i+1 ipp = i+2 d2 = points[gon[ipp]] - points[gon[ip]] d1 = points[gon[i]] - points[gon[ip]] cross.append(np.cross(d2, d1)) if (np.array([c[2] for c in cross]).mean() < 0): gon.reverse() gons.append(gon) idx += n_in_gon + 1 assert(idx == n_to_read) polygons_found = True if not points_found: raise RuntimeError("Unstructured VTK must contain sections 'POINTS'") if not polygons_found: raise RuntimeError("Unstructured VTK must contain sections 'POLYGONS'") return cls(points, gons) @classmethod def read_VTK_Simplices(cls, filename): """Stolen from meshio, https://github.com/nschloe/meshio/blob/master/meshio/vtk_io.py""" import vtk_io with open(filename,'r') as fid: data = vtk_io.read_buffer(fid) points = data[0] if len(data[1]) != 1: raise RuntimeError("Simplex VTK file is readable by vtk_io but not by meshing_ats. Includes: %r"%data[1].keys()) gons = [v for v in data[1].itervalues()][0] gons = gons.tolist() # check handedness for gon in gons: cross = [] for i in range(len(gon)): if i == len(gon)-1: ip = 0 ipp = 1 elif i == len(gon)-2: ip = i+1 ipp = 0 else: ip = i+1 ipp = i+2 d2 = points[gon[ipp]] - points[gon[ip]] d1 = points[gon[i]] - points[gon[ip]] cross.append(np.cross(d2, d1)) if (np.array([c[2] for c in cross]).mean() < 0): gon.reverse() return cls(points, gons) @classmethod def from_Transect(cls, x, z, width=1): """Creates a 2D surface strip mesh from transect data""" # coordinates if (type(width) is list or type(width) is np.ndarray): variable_width = True y = np.array([0,1]) else: variable_width = False y = np.array([0,width]) Xc, Yc = np.meshgrid(x, y) if variable_width: assert(Yc.shape[1] == 2) assert(len(width) == Yc.shape[0]) assert(min(width) > 0.) Yc[:,0] = -width/2. Yc[:,1] = width/2. 
Xc = Xc.flatten() Yc = Yc.flatten() Zc = np.concatenate([z,z]) # connectivity nsurf_cells = len(x)-1 conn = [] for i in range(nsurf_cells): conn.append([i, i+1, nsurf_cells + i + 2, nsurf_cells + i + 1]) coords = np.array([Xc, Yc, Zc]) return cls(coords.transpose(), conn) @classmethod def from_Transect_Guide(cls, x, z, guide): """Creates a 2D surface strip mesh from transect data""" assert type(guide) == np.ndarray assert guide.shape[1] == 3 # coordinates Xc = x Yc = np.zeros_like(x) Zc = z nsteps = guide.shape[0] xnew = Xc ynew = Yc znew = Zc for i in range(nsteps): xnew = xnew + guide[i][0] ynew = ynew + guide[i][1] znew = znew + guide[i][2] Xc = np.concatenate([Xc, xnew]) Yc = np.concatenate([Yc, ynew]) Zc = np.concatenate([Zc, znew]) # y = np.array([0,1,2]) # Xc, Yc = np.meshgrid(x, y) # Xc = Xc.flatten() # Yc = Yc.flatten() # Zc = np.concatenate([z,z,z]) # connectivity ns = len(x) conn = [] for j in range(nsteps): for i in range(ns-1): conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ]) coords = np.array([Xc, Yc, Zc]) return cls(coords.transpose(), conn) @classmethod def from_Transect_GuideX(cls, x, z, guide, nsteps): """Creates a 2D surface strip mesh from transect data""" assert type(guide) == np.ndarray assert guide.shape[1] == 3 # coordinates Xc = x Yc = np.zeros_like(x) Zc = z nsteps = guide.shape[0] xnew = np.zeros_like(x) ynew = np.zeros(len(x)) znew = np.zeros_like(x) xnew[:] = Xc[:] ynew[:] = Yc[:] znew[:] = Zc[:] for i in range(nsteps): print(Yc) for j in range(len(x)): xnew[j] = xnew[j] + guide[j][0] ynew[j] = ynew[j] + guide[j][1] znew[j] = znew[j] + guide[j][2] Xc = np.concatenate([Xc, xnew]) Yc = np.concatenate([Yc, ynew]) Zc = np.concatenate([Zc, znew]) # y = np.array([0,1,2]) # Xc, Yc = np.meshgrid(x, y) # Xc = Xc.flatten() # Yc = Yc.flatten() # Zc = np.concatenate([z,z,z]) # connectivity ns = len(x) conn = [] for j in range(nsteps): for i in range(ns-1): conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ]) 
coords = np.array([Xc, Yc, Zc]) return cls(coords.transpose(), conn) class Mesh3D(object): def __init__(self, coords, face_to_node_conn, elem_to_face_conn, side_sets=None, labeled_sets=None, material_ids=None): """ Creates a 3D mesh from coordinates and connectivity lists. coords : numpy array of shape (NCOORDS, 3) face_to_node_conn : list of lists of integer indices into coords specifying an (clockwise OR counterclockwise) ordering of the nodes around the face elem_to_face_conn : list of lists of integer indices into face_to_node_conn specifying a list of faces that make up the elem """ assert type(coords) == np.ndarray assert len(coords.shape) == 2 assert coords.shape[1] == 3 self.dim = coords.shape[1] self.coords = coords self.face_to_node_conn = face_to_node_conn self.elem_to_face_conn = elem_to_face_conn if labeled_sets is not None: self.labeled_sets = labeled_sets else: self.labeled_sets = [] if side_sets is not None: self.side_sets = side_sets else: self.side_sets = [] if material_ids is not None: self.material_id_list = collections.Counter(material_ids).keys() self.material_ids = material_ids else: self.material_id_list = [10000,] self.material_ids = [10000,]*len(self.elem_to_face_conn) self.validate() def validate(self): assert self.coords.shape[1] == 3 assert type(self.face_to_node_conn) is list for f in self.face_to_node_conn: assert type(f) is list assert len(set(f)) == len(f) for i in f: assert i < self.coords.shape[0] assert type(self.elem_to_face_conn) is list for e in self.elem_to_face_conn: assert type(e) is list assert len(set(e)) == len(e) for i in e: assert i < len(self.face_to_node_conn) for ls in self.labeled_sets: if ls.entity == "NODE": size = self.num_nodes() if ls.entity == "FACE": size = self.num_faces() elif ls.entity == "CELL": size = self.num_cells() for i in ls.ent_ids: assert i < size for ss in self.side_sets: for j,i in zip(ss.elem_list, ss.side_list): assert j < self.num_cells() assert i < len(self.elem_to_face_conn[j]) def 
num_cells(self): return len(self.elem_to_face_conn) def num_faces(self): return len(self.face_to_node_conn) def num_nodes(self): return self.coords.shape[0] def write_exodus(self, filename, face_block_mode="one block"): """Write the 3D mesh to ExodusII using arbitrary polyhedra spec""" # put cells in with blocks, which renumbers the cells, so we have to track sidesets. # Therefore we keep a map of old cell to new cell ordering # # also, though not required by the spec, paraview and visit # seem to crash if num_face_blocks != num_elem_blocks. So # make face blocks here too, which requires renumbering the faces. # -- first pass, form all elem blocks and make the map from old-to-new new_to_old_elems = [] elem_blks = [] for i_m,m_id in enumerate(self.material_id_list): # split out elems of this material, save new_to_old map elems_tuple = [(i,c) for (i,c) in enumerate(self.elem_to_face_conn) if self.material_ids[i] == m_id] new_to_old_elems.extend([i for (i,c) in elems_tuple]) elems = [c for (i,c) in elems_tuple] elem_blks.append(elems) old_to_new_elems = sorted([(old,i) for (i,old) in enumerate(new_to_old_elems)], lambda a,b: int.__cmp__(a[0],b[0])) # -- deal with faces, form all face blocks and make the map from old-to-new face_blks = [] if face_block_mode == "one block": # no reordering of faces needed face_blks.append(self.face_to_node_conn) elif face_block_mode == "n blocks, not duplicated": used_faces = np.zeros((len(self.face_to_node_conn),),'bool') new_to_old_faces = [] for i_m,m_id in enumerate(self.material_id_list): # split out faces of this material, save new_to_old map def used(f): result = used_faces[f] used_faces[f] = True return result elem_blk = elem_blks[i_m] faces_tuple = [(f,self.face_to_node_conn[f]) for c in elem_blk for (j,f) in enumerate(c) if not used(f)] new_to_old_faces.extend([j for (j,f) in faces_tuple]) faces = [f for (j,f) in faces_tuple] face_blks.append(faces) # get the renumbering in the elems old_to_new_faces = sorted([(old,j) for 
(j,old) in enumerate(new_to_old_faces)], lambda a,b: int.__cmp__(a[0],b[0])) elem_blks = [[[old_to_new_faces[f][1] for f in c] for c in elem_blk] for elem_blk in elem_blks] elif face_block_mode == "n blocks, duplicated": elem_blks_new = [] offset = 0 for i_m, m_id in enumerate(self.material_id_list): used_faces = np.zeros((len(self.face_to_node_conn),),'bool') def used(f): result = used_faces[f] used_faces[f] = True return result elem_blk = elem_blks[i_m] tuple_old_f = [(f,self.face_to_node_conn[f]) for c in elem_blk for f in c if not used(f)] tuple_new_old_f = [(new,old,f) for (new,(old,f)) in enumerate(tuple_old_f)] old_to_new_blk = np.zeros((len(self.face_to_node_conn),),'i')-1 for new,old,f in tuple_new_old_f: old_to_new_blk[old] = new + offset elem_blk_new = [[old_to_new_blk[f] for f in c] for c in elem_blk] #offset = offset + len(ftuple_new) elem_blks_new.append(elem_blk_new) face_blks.append([f for i,j,f in tuple_new_old_f]) elem_blks = elem_blks_new elif face_block_mode == "one block, repeated": # no reordering of faces needed, just repeat for eblock in elem_blks: face_blks.append(self.face_to_node_conn) else: raise RuntimeError("Invalid face_block_mode: '%s', valid='one block', 'n blocks, duplicated', 'n blocks, not duplicated'"%face_block_mode) # open the mesh file num_elems = sum(len(elem_blk) for elem_blk in elem_blks) num_faces = sum(len(face_blk) for face_blk in face_blks) ep = exodus.ex_init_params(title=filename, num_dim=3, num_nodes=self.num_nodes(), num_face=num_faces, num_face_blk=len(face_blks), num_elem=num_elems, num_elem_blk=len(elem_blks), num_side_sets=len(self.side_sets)) e = exodus.exodus(filename, mode='w', array_type='numpy', init_params=ep) # put the coordinates e.put_coord_names(['coordX', 'coordY', 'coordZ']) e.put_coords(self.coords[:,0], self.coords[:,1], self.coords[:,2]) # put the face blocks for i_blk, face_blk in enumerate(face_blks): face_raveled = [n for f in face_blk for n in f] e.put_polyhedra_face_blk(i_blk+1, 
len(face_blk), len(face_raveled), 0) e.put_node_count_per_face(i_blk+1, np.array([len(f) for f in face_blk])) e.put_face_node_conn(i_blk+1, np.array(face_raveled)+1) # put the elem blocks assert len(elem_blks) == len(self.material_id_list) for i_blk, (m_id, elem_blk) in enumerate(zip(self.material_id_list, elem_blks)): elems_raveled = [f for c in elem_blk for f in c] e.put_polyhedra_elem_blk(m_id, len(elem_blk), len(elems_raveled), 0) e.put_elem_blk_name(m_id, "MATERIAL_ID_%d"%m_id) e.put_face_count_per_polyhedra(m_id, np.array([len(c) for c in elem_blk])) e.put_elem_face_conn(m_id, np.array(elems_raveled)+1) # add sidesets e.put_side_set_names([ss.name for ss in self.side_sets]) for ss in self.side_sets: for elem in ss.elem_list: assert old_to_new_elems[elem][0] == elem new_elem_list = [old_to_new_elems[elem][1] for elem in ss.elem_list] e.put_side_set_params(ss.setid, len(ss.elem_list), 0) e.put_side_set(ss.setid, np.array(new_elem_list)+1, np.array(ss.side_list)+1) # finish and close e.close() @classmethod def extruded_Mesh2D(cls, mesh2D, layer_types, layer_data, ncells_per_layer, mat_ids): """ Regularly extrude a 2D mesh to make a 3D mesh. 
mesh2D : a Mesh2D object layer_types : either a string (type) or list of strings (types) layer_data : array of data needed (specific to the type) ncells_per_layer : either a single integer (same number of cells in all : layers) or a list of number of cells in the layer mat_ids : either a single integer (one mat_id for all layers) : or a list of integers (mat_id for each layer) : or a 2D numpy array of integers (mat_id for each layer and each column: [layer_id, surface_cell_id]) types: - 'constant' : (data=float thickness) uniform thickness - 'function' : (data=function or functor) thickness as a function : of (x,y) - 'snapped' : (data=float z coordinate) snap the layer to : provided z coordinate, telescoping as needed - 'node' : thickness provided on each node of the surface domain - 'cell' : thickness provided on each cell of the surface domain, : interpolate to nodes NOTE: dz is uniform through the layer in all but the 'snapped' case NOTE: 2D mesh is always labeled 'surface', extrusion is always downwards """ # make the data all lists # --------------------------------- def is_list(data): if type(data) is str: return False try: len(data) except TypeError: return False else: return True if is_list(layer_types): if not is_list(layer_data): layer_data = [layer_data,]*len(layer_types) else: assert len(layer_data) == len(layer_types) if not is_list(ncells_per_layer): ncells_per_layer = [ncells_per_layer,]*len(layer_types) else: assert len(ncells_per_layer) == len(layer_types) elif is_list(layer_data): layer_types = [layer_types,]*len(layer_data) if not is_list(ncells_per_layer): ncells_per_layer = [ncells_per_layer,]*len(layer_data) else: assert len(ncells_per_layer) == len(layer_data) elif is_list(ncells_per_layer): layer_type = [layer_type,]*len(ncells_per_layer) layer_data = [layer_data,]*len(ncells_per_layer) else: layer_type = [layer_type,] layer_data = [layer_data,] ncells_per_layer = [ncells_per_layer,] # helper data and functions for mapping indices from 2D to 
3D # ------------------------------------------------------------------ if min(ncells_per_layer) < 0: raise RuntimeError("Invalid number of cells, negative value provided.") ncells_tall = sum(ncells_per_layer) ncells_total = ncells_tall * mesh2D.num_cells() nfaces_total = (ncells_tall+1) * mesh2D.num_cells() + ncells_tall * mesh2D.num_edges() nnodes_total = (ncells_tall+1) * mesh2D.num_nodes() np_mat_ids = np.array(mat_ids, dtype=int) if np_mat_ids.size == np.size(np_mat_ids, 0): if np_mat_ids.size == 1: np_mat_ids = np.full((len(ncells_per_layer), mesh2D.num_cells()), mat_ids[0], dtype=int) else: np_mat_ids = np.empty((len(ncells_per_layer), mesh2D.num_cells()), dtype=int) for ilay in range(len(ncells_per_layer)): np_mat_ids[ilay, :] = np.full(mesh2D.num_cells(), mat_ids[ilay], dtype=int) def col_to_id(column, z_cell): """Maps 2D cell ID and index in the vertical to a 3D cell ID""" return z_cell + column * ncells_tall def node_to_id(node, z_node): """Maps 2D node ID and index in the vertical to a 3D node ID""" return z_node + node * (ncells_tall+1) def edge_to_id(edge, z_cell): """Maps 2D edge hash and index in the vertical to a 3D face ID of a vertical face""" return (ncells_tall + 1) * mesh2D.num_cells() + z_cell + edge * ncells_tall # create coordinates # --------------------------------- coords = np.zeros((mesh2D.coords.shape[0],ncells_tall+1, 3),'d') coords[:,:,0:2] = np.expand_dims(mesh2D.coords[:,0:2],1) if mesh2D.dim == 3: coords[:,0,2] = mesh2D.coords[:,2] # else the surface is at 0 depth cell_layer_start = 0 for layer_type, layer_datum, ncells in zip(layer_types, layer_data, ncells_per_layer): if layer_type.lower() == 'constant': dz = float(layer_datum) / ncells for i in range(1,ncells+1): coords[:,cell_layer_start+i,2] = coords[:,cell_layer_start,2] - i * dz else: # allocate an array of coordinates for the bottom of the layer layer_bottom = np.zeros((mesh2D.coords.shape[0],),'d') if layer_type.lower() == 'snapped': # layer bottom is uniform 
layer_bottom[:] = layer_datum elif layer_type.lower() == 'function': # layer thickness is given by a function evaluation of x,y for node_col in range(mesh2D.coords.shape[0]): layer_bottom[node_col] = coords[node_col,cell_layer_start,2] - layer_datum(coords[node_col,0,0], coords[node_col,0,1]) elif layer_type.lower() == 'node': # layer bottom specifically provided through thickness layer_bottom[:] = coords[:,cell_layer_start,2] - layer_datum elif layer_type.lower() == 'cell': # interpolate cell thicknesses to node thicknesses import scipy.interpolate centroids = mesh2D.cell_centroids() interp = scipy.interpolate.interp2d(centroids[:,0], centroids[:,1], layer_datum, kind='linear') layer_bottom[:] = coords[:,cell_layer_start,2] - interp(mesh2D.coords[:,0], mesh2D.coords[:,1]) else: raise RuntimeError("Unrecognized layer_type '%s'"%layer_type) # linspace from bottom of previous layer to bottom of this layer for node_col in range(mesh2D.coords.shape[0]): coords[node_col,cell_layer_start:cell_layer_start+ncells+1,2] = np.linspace(coords[node_col,cell_layer_start,2], layer_bottom[node_col], ncells+1) cell_layer_start = cell_layer_start + ncells # create faces, face sets, cells bottom = [] surface = [] faces = [] cells = [list() for c in range(ncells_total)] # -- loop over the columns, adding the horizontal faces for col in range(mesh2D.num_cells()): nodes_2 = mesh2D.conn[col] surface.append(col_to_id(col,0)) for z_face in range(ncells_tall + 1): i_f = len(faces) f = [node_to_id(n, z_face) for n in nodes_2] if z_face != ncells_tall: cells[col_to_id(col, z_face)].append(i_f) if z_face != 0: cells[col_to_id(col, z_face-1)].append(i_f) faces.append(f) bottom.append(col_to_id(col,ncells_tall-1)) # -- loop over the columns, adding the vertical faces added = dict() vertical_side_cells = [] vertical_side_indices = [] for col in range(mesh2D.num_cells()): nodes_2 = mesh2D.conn[col] for i in range(len(nodes_2)): edge = mesh2D.edge_hash(nodes_2[i], nodes_2[(i+1)%len(nodes_2)]) try: 
i_e = added[edge] except KeyError: # faces not yet added to facelist i_e = len(added.keys()) added[edge] = i_e for z_face in range(ncells_tall): i_f = len(faces) assert i_f == edge_to_id(i_e, z_face) f = [node_to_id(edge[0], z_face), node_to_id(edge[1], z_face), node_to_id(edge[1], z_face+1), node_to_id(edge[0], z_face+1)] faces.append(f) face_cell = col_to_id(col, z_face) cells[face_cell].append(i_f) # check if this is an external if mesh2D._edges[edge] == 1: vertical_side_cells.append(face_cell) vertical_side_indices.append(len(cells[face_cell])-1) else: # faces already added from previous column for z_face in range(ncells_tall): i_f = edge_to_id(i_e, z_face) cells[col_to_id(col, z_face)].append(i_f) # Do some idiot checking # -- check we got the expected number of faces assert len(faces) == nfaces_total # -- check every cell is at least a tet for c in cells: assert len(c) > 4 # -- check surface sideset has the right number of entries assert len(surface) == mesh2D.num_cells() # -- check bottom sideset has the right number of entries assert len(bottom) == mesh2D.num_cells() # -- len of vertical sides sideset is number of external edges * number of cells, no pinchouts here num_sides = ncells_tall * sum(1 for e,c in mesh2D.edge_counts().iteritems() if c == 1) assert num_sides == len(vertical_side_cells) assert num_sides == len(vertical_side_indices) # make the material ids material_ids = np.zeros((len(cells),),'i') for col in range(mesh2D.num_cells()): z_cell = 0 for ilay in range(len(ncells_per_layer)): ncells = ncells_per_layer[ilay] for i in range(z_cell, z_cell+ncells): material_ids[col_to_id(col, i)] = np_mat_ids[ilay, col] z_cell = z_cell + ncells # make the side sets side_sets = [] side_sets.append(SideSet("bottom", 1, bottom, [1,]*len(bottom))) side_sets.append(SideSet("surface", 2, surface, [0,]*len(surface))) side_sets.append(SideSet("external_sides", 3, vertical_side_cells, vertical_side_indices)) # reshape coords coords = coords.reshape(nnodes_total, 3) 
for e,s in zip(side_sets[0].elem_list, side_sets[0].side_list): face = cells[e][s] fz_coords = np.array([coords[n] for n in faces[face]]) #print "bottom centroid = ", np.mean(fz_coords, axis=0) for e,s in zip(side_sets[1].elem_list, side_sets[1].side_list): face = cells[e][s] fz_coords = np.array([coords[n] for n in faces[face]]) #print "surface centroid = ", np.mean(fz_coords, axis=0) # instantiate the mesh return cls(coords, faces, cells, side_sets=side_sets, material_ids=material_ids) def commandline_options(): parser = argparse.ArgumentParser(description='Extrude a 2D mesh to make a 3D mesh') parser.add_argument("-n", "--num-cells", default=10, type=int, help="number of cells to extrude") parser.add_argument("-d", "--depth", default=40.0, type=float, help="depth to extrude") parser.add_argument("-o", "--outfile", default=None, type=str, help="output filename") parser.add_argument("-p", "--plot", default=False, action="store_true", help="plot the 2D mesh") parser.add_argument("infile",metavar="INFILE", type=str, help="input filename of surface mesh") options = parser.parse_args() if options.outfile is None: options.outfile = ".".join(options.infile.split(".")[:-1])+".exo" if os.path.isfile(options.outfile): print('Output file "%s" exists, cowardly not overwriting.'%options.outfile) sys.exit(1) if not os.path.isfile(options.infile): print('No input file provided') parser.print_usage() sys.exit(1) return options if __name__ == "__main__": options = commandline_options() m2 = Mesh2D.read_VTK(options.infile) if options.plot: m2.plot() m3 = Mesh3D.extruded_Mesh2D(m2, [options.depth,], [options.num_cells,], [10000,]) m3.write_exodus(options.outfile)
bsd-3-clause
6,506,556,784,610,115,000
36.724622
166
0.507972
false
3.681421
false
false
false
pecryptfs/pecryptfs
pecryptfs/cmd_genfile.py
1
2753
#!/usr/bin/env python3 # pecryptfs - Portable Userspace eCryptfs # Copyright (C) 2015 Ingo Ruhnke <grumbel@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from typing import List import argparse import os from pecryptfs.ecryptfs import generate_encrypted_file from pecryptfs.auth_token import AuthToken def main(): parser = argparse.ArgumentParser(description="eCryptfs Encrypted File Generator") parser.add_argument('files', metavar='FILE', type=str, nargs='+', help='Filenames to decrypt') parser.add_argument('-p', '--password', type=str, default="Test", help='Password to use for decryption, prompt when none given') parser.add_argument('-s', '--salt', type=str, default="0011223344556677", help='Salt to use for decryption') parser.add_argument('-o', '--output', type=str, help='Output directory') parser.add_argument('-c', '--cipher', type=str, help='Cipher to use', default="aes") parser.add_argument('-k', '--key-bytes', type=int, help='Key bytes to use', default=24) parser.add_argument('-v', '--verbose', action='store_true', help='Be verbose') args = parser.parse_args() output_directory = args.output if not os.path.isdir(output_directory): os.makedirs(output_directory) cipher = args.cipher key_bytes = args.key_bytes auth_token = AuthToken(args.password, args.salt) for input_filename in args.files: filenames: List[str] = [] data = generate_encrypted_file(auth_token, cipher, key_bytes) 
output_filename = "{}-{}.raw".format(cipher, key_bytes) with open(os.path.join(output_directory, output_filename), "wb") as fout: fout.write(data) if args.verbose: print("Password: {}".format(args.password)) print("Salt: {}".format(args.salt)) print("Filename: {}".format(input_filename)) print() for cipher, key_bytes, f in filenames: print("{:8} {:2} {}".format(cipher, key_bytes, f)) else: for cipher, key_bytes, f in filenames: print(f) # EOF #
gpl-3.0
4,288,613,616,355,062,000
36.712329
98
0.656375
false
3.792011
false
false
false
ptphp/PyLib
src/fangte/fetch/fetch58_bak.py
1
29601
# -*- coding: utf-8 -*- import time import datetime import random import cookielib import urllib import urllib2 from urlparse import urlparse from config import * from common import * from BeautifulSoup import BeautifulSoup class BaseCrawl(object): #房源类型 1 出售 2 出租 3 求购 4 求租 flag = None isStoped = False response = None header = None #房源信息模板 infoT = {} #传入参数 param = {} #全局队列 queue = [] pageNo = 0 isFetched = False #超过时间的条数 overTimeNum = 0 def __init__(self,param,que): self.queue = que self.param = param self.header = header cj = cookielib.MozillaCookieJar() self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler()) self.endtime=str(datetime.date.today() -datetime.timedelta(days=7)) self._initRe() def getContent(self): if self.__cInit__(self.infoT['url']) : self.response = re.sub(" |\n|\r|\t| |&nbsp;|联系我时,请说是在58同城上看到的,谢谢!","",self.response) self.response = re.sub("rlist\d\">.*?</ul>","",self.response) try: if self.param['flag'] == 1: self.sell(); if self.param['flag'] == 2: self.rent(); if self.param['flag'] == 3: self.buy(); if self.param['flag'] == 4: self.req(); except Exception,what: print what if (time.time() - int(self.infoT['posttime']))>self.param['args']["timelimit"]: self.overTimeNum +=1 if self.overTimeNum > 5: self.pageNo = 0 self.isStoped = True self.overTimeNum = 0 def getPhoneself(self): if self.__cInit__(self.infoT['url']) : sHtml = self.response self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def __getLinks(self,url): if not self.__cInit__(url): return self.response = re.sub("\n|\r|\t| |&nbsp;","",self.response) page_main = regx_data(self.page_main_regex,self.response,"",0) self.page_main_trs_regex = "<tr logr=\".*?\">(.*?)</tr>" page_main_trs = regx_lists(self.page_main_trs_regex,page_main,"",0) if page_main_trs and len(page_main_trs)>0: for tr in page_main_trs: if self.isStoped: self.pageNo = 0 break self._initTemple(self.param['flag'],self.param['city']) try: if 
self.param['flag'] == 1: self.__parseSellTrs(tr) if self.param['flag'] == 2: self.__parseRentTrs(tr) if self.param['flag'] == 3: self.__parseBuyTrs(tr) if self.param['flag'] == 4: self.__parseReqTrs(tr) except Exception,what: print what else: if not self.isFetched: self.queue.append(self.infoT) self.isFetched = False time.sleep(0.1) self.infoT = {} self.pageNo +=1 else: self.pageNo = 0 def __parseBuyTrs(self,tr): soup = BeautifulSoup(tr) at = soup.find('a',{'class':'t'}) #标题 if at: self.infoT['title'] = at.string #链接 self.infoT['url'] = at['href'] if checkPath("pagecash",self.infoT['url']): self.isFetched = True return else: return #图片 img = soup.find('td',{'class':'img'}) if img: if img.img['src'].find("noimg") == -1: self.infoT['thumb'] = img.img['src'] #信息 t = soup.find('td',{'class':'t'}) self.infoT['belong'] = regx_data(self.house_belong_dict_regex,str(t),"",False) self.infoT['houseType'] = regx_data(self.house_type_regex,str(t),"",False) self.infoT['posttime'] = self.postTime(regx_data("更新时间:(.*?)<",str(t),"",False)) #self.infoT['room'] = regx_data(self.house_room_regex,str(soup),"",False) #if self.infoT['room']: #self.infoT['room'] = re.sub("一|二|三|四|五|六|七|八|九|十","1|2|3|4|5|6|7|8|9|10",self.infoT['room']) self.infoT['hall'] = regx_data(self.house_hall_regex,str(soup),"",False) self.infoT['toilet'] = regx_data(self.house_toilet_regex,str(soup),"",False) agencyname = regx_data("(个人)",str(t),"",False) if agencyname: self.infoT['isPerson'] = 1 else: self.infoT['isPerson'] = 0 #价格 num = soup('td',{'class':'tc'}) if num and len(num) > 1: if str(num[0]).find("面议") == -1: price = num[0].b.string if price.find('-') == -1: self.infoT['price'] = price else: self.infoT['price'] = price.split("-")[0] self.infoT['price_max'] = price.split("-")[1] del price area = num[1].b.string if area.find('-') == -1: self.infoT['area'] = area else: self.infoT['area'] = area.split("-")[0] self.infoT['area_max'] = area.split("-")[1] del area self.infoT['search']= 
re.sub("<.*?>","",str(soup)) del soup del t del img del at del num del agencyname self.getContent() def __parseReqTrs(self,tr): soup = BeautifulSoup(tr) at = soup.find('a',{'class':'t'}) #标题 if at: self.infoT['title'] = at.string #链接 self.infoT['url'] = at['href'] if checkPath("pagecash",self.infoT['url']): self.isFetched = True return else: return agencyname = regx_data("(个人)",str(soup),"",False) if agencyname: self.infoT['isPerson'] = 1 else: self.infoT['isPerson'] = 0 #价格 if soup.find('b',{'class':'pri'}): self.infoT['price'] = soup.find('b',{'class':'pri'}).string if self.infoT['price']: if self.infoT['price'].find('-') != -1: self.infoT['price_max'] = self.infoT['price'].split("-")[1] self.infoT['price'] = self.infoT['price'].split("-")[0] self.infoT['room'] = soup("td")[2].string #时间 tds = soup("td")[3] if tds: self.infoT['posttime']= self.postTime(tds.string) #rint tds.string self.infoT['search']= re.sub("<.*?>","",str(soup)) del soup del at del agencyname del tds self.getContent() def __parseSellTrs(self,tr): soup = BeautifulSoup(tr) at = soup.find('a',{'class':'t'}) #标题 if at: self.infoT['title'] = at.string #链接 self.infoT['url'] = at['href'] if checkPath("pagecash",self.infoT['url']): self.isFetched = True return else: return #图片 img = soup.find('td',{'class':'img'}) if img: if img.img['src'].find("noimg") == -1: self.infoT['thumb'] = img.img['src'] #信息 t = soup.find('td',{'class':'t'}) self.infoT['topfloor'] = regx_data(self.house_topfloor_regex,str(t),"",False) self.infoT['floor'] = regx_data(self.house_floor_regex,str(t),"",False) self.infoT['belong'] = regx_data(self.house_belong_dict_regex,str(t),"",False) self.infoT['houseType'] = regx_data(self.house_type_regex,str(t),"",False) self.infoT['toward'] = regx_data(self.house_toward_regex,str(t),"",False) self.infoT['age'] = regx_data("(\d+)年",str(t),"",False) self.infoT['posttime'] = self.postTime(regx_data("更新时间:(.*?)<",str(t),"",False)) #self.infoT['room'] = 
regx_data(self.house_room_regex,str(soup),"",False) #if self.infoT['room']: #self.infoT['room'] = re.sub("一|二|三|四|五|六|七|八|九|十","1|2|3|4|5|6|7|8|9|10",self.infoT['room']) self.infoT['hall'] = regx_data(self.house_hall_regex,str(soup),"",False) self.infoT['toilet'] = regx_data(self.house_toilet_regex,str(soup),"",False) agencyname = regx_data("(个人)",str(t),"",False) if agencyname: self.infoT['isPerson'] = 1 else: self.infoT['isPerson'] = 0 #价格 num = soup('td',{'class':'tc'}) if num and len(num) > 1: if str(num[0]).find("面议") == -1: self.infoT['price'] = num[0].b.string self.infoT['area'] = num[1].b.string self.infoT['search']= re.sub("<.*?>","",str(soup)) del soup del t del img del at del agencyname self.getContent() def __parseRentTrs(self,tr): soup = BeautifulSoup(tr) at = soup.find('a',{'class':'t'}) #标题 if at: self.infoT['title'] = at.string #链接 self.infoT['url'] = at['href'] if checkPath("pagecash",self.infoT['url']): self.isFetched = True return else: return #图片 img = soup.find('td',{'class':'img'}) if img: if img.img['src'].find("noimg") == -1: self.infoT['thumb'] = img.img['src'] #信息 t = soup.find('td',{'class':'t'}) self.infoT['topfloor'] = regx_data(self.house_topfloor_regex,str(t),"",False) self.infoT['floor'] = regx_data(self.house_floor_regex,str(t),"",False) self.infoT['area'] = regx_data(self.house_totalarea_regex,str(t),"",False) self.infoT['fitment'] = regx_data(self.house_fitment_regex,str(t),"",False) self.infoT['room'] = regx_data(self.house_room_regex,str(soup),"",False) self.infoT['hall'] = regx_data(self.house_hall_regex,str(soup),"",False) self.infoT['toilet'] = regx_data(self.house_toilet_regex,str(soup),"",False) self.infoT['equ'] = regx_data("配置:(.*?)<",str(soup),"",False) agencyname = regx_data("(个人)",str(t),"",False) if agencyname: self.infoT['isPerson'] = 1 else: self.infoT['isPerson'] = 0 #价格 if soup.find('b',{'class':'pri'}): self.infoT['price'] = soup.find('b',{'class':'pri'}).string #时间 tds = soup("td")[4] if tds: 
self.infoT['posttime']= self.postTime(tds.string) #rint tds.string self.infoT['search']= re.sub("<.*?>","",str(soup)) del soup del t del img del at del agencyname del tds self.getContent() def __cInit__(self,url): try: request = urllib2.Request(url, None, self.header) self.response = urllib2.urlopen(request).read() except Exception,what: return False else: return True def req(self): sHtml = self.response self.response = None #个人 OR 经纪人 #agencyname = regx_data(self.agencyname_regex,sHtml,"个人房源",False) #if not agencyname: #agencyname = '个人房源' #联系人 self.infoT['owner'] = regx_data(self.username_regex,sHtml,"个人",False) #价格 if not self.infoT['price']: self.infoT['price'] = regx_data(self.house_price_regex,sHtml,0,False) #500以下 if not self.infoT['price'] : self.infoT['price'] = regx_data(self.house_price1_regex,sHtml,0,False) #以上 if not self.infoT['price'] : self.infoT['price'] = regx_data(self.house_price2_regex,sHtml,0,False) #标题 if not self.infoT['title']: self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False) #发布时间 if not self.infoT['posttime']: self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False)) #house_posttime = postTime(house_posttime,1) #室 if not self.infoT['room']: self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False) #区 self.infoT['region'] = regx_data(self.house_region_regex,sHtml,"",False) #地段 #print self.house_section_regex self.infoT['section'] = regx_data(self.house_section_regex,sHtml,"",False) #详细 self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>") #电话 if self.param['getPhone']: self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def rent(self): sHtml = self.response self.response = None #个人 OR 经纪人 #agencyname = regx_data(self.agencyname_regex,sHtml,"",False) #联系人 self.infoT['owner'] = regx_data(self.username_regex,sHtml,"个人",False) #楼层 if not self.infoT['floor']: self.infoT['floor'] = regx_data(self.house_floor_regex,sHtml,"",False) #顶层 if 
not self.infoT['topfloor']: self.infoT['topfloor'] = regx_data(self.house_topfloor_regex,sHtml,"",False) #面积 if not self.infoT['area']: self.infoT['area'] = regx_data(self.house_totalarea_regex,sHtml,"",False) #价格 if not self.infoT['price']: self.infoT['price'] = regx_data(self.house_price_regex,sHtml,0,False) #标题 if not self.infoT['title']: self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False) #发布时间 if not self.infoT['posttime']: self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False) ) #house_posttime = postTime(house_posttime,1) #室 if not self.infoT['room']: self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False) #厅 if not self.infoT['hall']: self.infoT['hall'] = regx_data(self.house_hall_regex,sHtml,"",False) #卫 if not self.infoT['toilet']: self.infoT['toilet'] = regx_data(self.house_toilet_regex,sHtml,"",False) #押金 if not self.infoT['deposit']: self.infoT['deposit'] = regx_data(self.house_deposit_regex,sHtml,"",False) #小区 self.infoT['borough'] = regx_data(self.borough_name_regex,sHtml,"",False) #地址 self.infoT['addr'] = regx_data(self.house_addr_regex,sHtml,"",False) #区 self.infoT['region'] = regx_data(self.house_region_regex,sHtml,"",False) #地段 self.infoT['section'] = regx_data(self.house_section_regex,sHtml,"",False) #详细 self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>") #图片 self.infoT['pics'] = regx_datas(self.house_pics_regex,sHtml,"",False ,"tiny","big") _t = regx_data(self.house_toward_t_regex,sHtml,"",False) #装修 if not self.infoT['fitment']: self.infoT['fitment'] = regx_data(self.house_fitment_regex,_t,"",False) #朝向 if not self.infoT['toward']: self.infoT['toward'] = regx_data(self.house_toward_regex,_t,"",False) #类型 if not self.infoT['houseType']: self.infoT['houseType'] = regx_data(self.house_type_regex,_t,"",False) #电话 if self.param['getPhone']: self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def buy(self): sHtml = self.response 
self.response = None #个人 OR 经纪人 #agencyname = regx_data(self.agencyname_regex,sHtml,"",False) #联系人 if not self.infoT['owner']: self.infoT['owner']= regx_data(self.username_regex,sHtml,"个人",False) #面积 if not self.infoT['area']: self.infoT['area'] = regx_data(self.house_totalarea_regex,sHtml,"",False) #价格 if not self.infoT['price']: self.infoT['price']= regx_data(self.house_price_regex,sHtml,0,False) #标题 if not self.infoT['title']: self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False) #发布时间 if not self.infoT['posttime']: self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False) ) #house_posttime = postTime(house_posttime,1) #室 if not self.infoT['room']: self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False) #地址 self.infoT['addr'] = regx_data(self.house_addr_regex,sHtml,"",False) #详细 self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>") #图片 self.infoT['pics'] = regx_datas(self.house_pics_regex,sHtml,"",False ,"tiny","big") #电话 if self.param['getPhone']: self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def sell(self): sHtml = self.response self.response = None #个人 OR 经纪人 #agencyname = regx_data(self.agencyname_regex,sHtml,"",False) #联系人 self.infoT['owner'] = regx_data(self.username_regex,sHtml,"个人",False) #楼层 if not self.infoT['floor']: self.infoT['floor'] = regx_data(self.house_floor_regex,sHtml,"",False) #顶层 if not self.infoT['topfloor']: self.infoT['topfloor']= regx_data(self.house_topfloor_regex,sHtml,"",False) #面积 if not self.infoT['area']: self.infoT['area'] = regx_data(self.house_totalarea_regex,sHtml,"",False) #价格 if not self.infoT['price']: self.infoT['price'] = regx_data(self.house_price_regex,sHtml,0,False) #标题 if not self.infoT['title']: self.infoT['title'] = regx_data(self.house_title_regex,sHtml,"",False) #发布时间 if not self.infoT['posttime']: self.infoT['posttime'] = self.postTime(regx_data(self.house_posttime_regex,sHtml,"",False) ) 
#house_posttime = postTime(house_posttime,1) #室 if not self.infoT['room']: self.infoT['room'] = regx_data(self.house_room_regex,sHtml,"",False) #厅 if not self.infoT['hall']: self.infoT['hall'] = regx_data(self.house_hall_regex,sHtml,"",False) #卫 if not self.infoT['toilet']: self.infoT['toilet'] = regx_data(self.house_toilet_regex,sHtml,"",False) #产权 if not self.infoT['belong']: self.infoT['belong'] = regx_data(self.house_belong_regex,sHtml,"",False) #房龄 99年 self.infoT['age'] = regx_data(self.house_age_regex,sHtml,"",False) #小区 self.infoT['borough'] = regx_data(self.borough_name_regex,sHtml,"",False) #地址 self.infoT['addr'] = regx_data(self.house_addr_regex,sHtml,"",False) #区 self.infoT['region'] = regx_data(self.house_region_regex,sHtml,"",False) #地段 self.infoT['section'] = regx_data(self.house_section_regex,sHtml,"",False) #详细 self.infoT['desc'] = regx_data(self.house_desc_regex,sHtml,"",False ,"<.*?>") #图片 self.infoT['pics'] = regx_datas(self.house_pics_regex,sHtml,"",False ,"tiny","big") _t = regx_data(self.house_toward_t_regex,sHtml,"",False) #装修 if not self.infoT['fitment']: self.infoT['fitment'] = regx_data(self.house_fitment_regex,_t,"",False) #朝向 if not self.infoT['toward']: self.infoT['toward'] = regx_data(self.house_toward_regex,_t,"",False) #类型 if not self.infoT['houseType']: self.infoT['houseType'] = regx_data(self.house_type_regex,_t,"",False) #电话 if self.param['getPhone']: self.infoT['phone'] = regx_data(self.house_phone_regex,sHtml,"",False) def _initRe(self): self.page_main_regex = "<div id=\"main\">(.*?)<div id=\"links\"> " self.agencyname_regex="agencyname:'(.*?)'," self.username_regex="username:'(.*?)'," self.house_title_regex="<h1>(.*)</h1>" self.house_floor_regex="第(\d+)层" self.house_topfloor_regex="共(\d+)层" self.house_room_regex="(\d+|一|二|三|四|五|六|七|八|九|十)室" self.house_hall_regex="(\d+)厅" self.house_toilet_regex="(\d+)卫" self.house_posttime_regex="发布时间:(.*?)浏览" self.house_age_regex="(\d+)年" self.house_region_regex = 
"locallist.*?listname.*?name:'(.*?)'" self.house_section_regex = "<li><i>区域:</i><a.*?<a.*?>(.*?)</a></li>" self.house_desc_regex = "class=\"maincon\">(.*?)</div>" self.house_phone_regex = "(http://image.58.com/showphone.aspx.*?)'" self.house_pics_regex = "(http://\d+.pic.58control.cn/p\d+/tiny/n_\d+.jpg)" self.house_toward_regex = "(东|南|西|北|南北|东西|东南|东北|西北)" self.house_fitment_regex = "(毛坯|简单装修|中等装修|精装修|豪华装修)" self.house_belong_dict_regex = "(商品房|经济适用房|公房|用权)" self.house_type_regex = "(平房|普通住宅|商住两用|公寓|别墅)" self.borough_name_regex = "<li><i>小区:</i><.*?>(.*?)<.*?></li>" self.borough_name1_regex = "<li><i>小区:</i>(.*?)</li>" if self.param['flag'] ==1: self.house_addr_regex = "address\">(.*?)<" self.house_totalarea_regex="(\d+)㎡" self.house_belong_regex="<li><i>产权:</i>(.*?)</li>" self.house_price_regex="(\d+)万元" self.house_toward_t_regex = "房龄:</i>(.*?)<" elif self.param['flag'] ==2: self.house_totalarea_regex="(\d+)㎡" self.house_price_regex="(\d+)元/月" self.house_equ_regex="vartmp='(.*?)';" self.house_deposit_regex="(押一付三|押一付一|押二付一|半年付|年付)" self.house_toward_t_regex = "基本情况:</i>(.*?)<" self.house_addr_regex = "address\">(.*?)<" elif self.param['flag'] ==3: self.house_belong_regex="<li><i>产权:</i>(.*?)</li>" self.house_totalarea_regex="(\d+-\d+)㎡" self.house_addr_regex="<li><i>地段:</i>(.*?)</li>" self.house_price_regex="(\d+-\d+)万元" elif self.param['flag'] ==4: self.house_price_regex="(\d+-\d+)元" self.house_price1_regex="(\d+)元以下" self.house_price2_regex="(\d+)元以上" self.house_room_regex="(一|两|三|四)居室" def _initTemple(self,flag,city): self.infoT = { 'flag':flag,#房源类型 1 出售 2 出租 3 求购 4 求租 'title':'', 'posttime':'', 'price':0, 'price_max':0, 'deposit':'', 'belong':'', 'room':0, 'hall':0, 'toilet':0, 'yt':0, 'area':0, 'area_max':0, 'houseType':'', 'fitment':'', 'floor':0, 'topfloor':0, 'toward':'', 'age':1, 'equ':'', 'city':city, 'region':'', 'borough':'', 'section':'', 'addr':'', 'phone':'', 'owner':'', 'desc':'', 'search':'', 'url':'', 'thumb':'', 'webFlag':1, 'isPerson':1, } 
def postTime(self,posttime): if posttime and posttime.find('now') != -1: posttime = int(time.time()) if not posttime: return posttime = str(posttime).replace('前','') #print posttime if posttime.find("<") != -1 or posttime.find(">") != -1: posttime = re.sub('<.*?>','' ,pottime) if posttime.find('-') !=-1: if len(posttime.split("-"))==3: s = datetime.datetime(int(posttime.split('-')[0]),int(posttime.split('-')[1],),int(posttime.split('-')[2])) else: s = datetime.datetime(2011,int(posttime.split('-')[0],),int(posttime.split('-')[1])) posttime = int(time.mktime(s.timetuple())) elif posttime.find('分钟') !=-1: n = int(posttime.replace('分钟',''))*60 posttime = int(time.time() - n) elif posttime.find('小时') !=-1: n = int(posttime.replace('小时',''))*60*60 posttime = int(time.time() - n) else: posttime = int(time.time()) return posttime if (time.time() - self.fd['posttime']) > 3600*24*7: return print "++++++++++++++++" print time.strftime('%Y %m %d', time.localtime(self.fd['posttime'])) def run(self): self.pageNo = 1 while 1: if self.isStoped == True: break if self.pageNo: url = self.baseUrl(self.param['args'],self.pageNo) self.__getLinks(url) def baseUrl(self,args,pn): if args['region'] != '': args['region'] = args['region']+"/" else: args['region'] = '' if args['option']!= '': args['option'] = args['option']+"/" else: args['option'] = '' if self.param['flag'] == 1: baseUrl = 'http://%s.58.com/%sershoufang/0/%spn%d/?final=1&searchtype=3&sourcetype=5&key=%s' % (args['city'],args['region'],args['option'],pn,args['q']) if self.param['flag'] == 2: baseUrl = 'http://%s.58.com/%szufang/0/%spn%d/?final=1&key=%s' % (args['city'],args['region'],args['option'],pn,args['q']); if self.param['flag'] == 3: args['option'] = args['option'][:-1] baseUrl = 'http://%s.58.com/%sershoufang/0/%sh2/pn%d/?final=1&key=%s&searchtype=3&sourcetype=5' % (args['city'],args['region'],args['option'],pn,args['q']) if self.param['flag'] == 4: baseUrl = 'http://%s.58.com/%sqiuzu/0/%spn%d/?final=1&key=%s' % 
(args['city'],args['region'],args['option'],pn,args['q']) return baseUrl q = [] if __name__=="__main__": url1 = 'http://sh.58.com/ershoufang/7489033818376x.shtml' url2 = 'http://sh.58.com/zufang/7468246420482x.shtml' url3 = 'http://sh.58.com/ershoufang/7544211350792x.shtml' url4 = 'http://sh.58.com/qiuzu/7543125341446x.shtml' link2 = 'http://sh.58.com/zufang/0/?selpic=2' link1 = 'http://sh.58.com/ershoufang/' link3 = 'http://sh.58.com/ershoufang/h2/' link4 = 'http://sh.58.com/qiuzu/0/' data = {} data['flag'] = 1 data['city'] = 1 data['getPhone'] = 1 cc = BaseCrawl(data,q) cc.run()
apache-2.0
-6,180,525,446,337,366,000
38.575967
172
0.475727
false
3.201453
false
false
false
AntaresConsulting/odoo-marble
product_marble/models/stock.py
1
25218
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, api, _ from openerp.osv import osv, fields # from openerp.tools.translate import _ from operator import itemgetter import inspect import _common as comm import logging _logger = logging.getLogger(__name__) class stock_picking(osv.osv): _name = "stock.picking" _inherit = "stock.picking" _description = "Picking List" _tipo_de_move = [ ('raw', 'Raw'), ('insu', 'Input'), ('bac', 'Bacha'), ] def _get_tipo_de_move(self, cr, uid, context=None): return sorted(self._tipo_de_move, key=itemgetter(1)) def _get_types(self, cr, uid, ids, field_name, arg, context=None): res = {} for pick in self.browse(cr, uid, ids): if len(pick.move_lines) > 0: res.update({pick.id : pick.move_lines[0].prod_type}) return res @api.cr_uid_ids_context def do_enter_transfer_details_marble(self, cr, uid, picking, context=None): resp = super(stock_picking, self).do_enter_transfer_details(cr, uid, picking, context=context) return resp['res_id'] _columns = { 'move_prod_type': fields.selection(_get_tipo_de_move, string='Product Type picking', select=True), 
'prod_type': fields.function(_get_types, type='char', string='Product Type', store=False), } stock_picking() class stock_pack_operation(osv.osv): _name = "stock.pack.operation" _inherit = "stock.pack.operation" _description = "Packing Operation" #dimension_id = openerp.fields.Many2one('product.marble.dimension', string='Dimension', ondelete='set null') #dimension_unit = openerp.fields.Integer(string='Units') #prod_type = openerp.fields.Char(related='product_id.prod_type', string='Product Type') _columns = { 'dimension_id': fields.many2one('product.marble.dimension', 'Dimension', domain=[('state','=','done')]), 'dimension_unit': fields.integer('Units', size=3), # units 'prod_type' : fields.related('product_id', 'prod_type', type='char', relation='product.template', string='Product Type'), } _defaults = { 'dimension_id': False, 'dimension_unit': 0, } def _before_save(self, cr, uid, vals, context): obj_pick = self.pool.get('stock.picking') pick_id = vals.get('picking_id',False) prod_id = vals.get('product_id',False) # localizo el 'stock_move' x picking + product, luego obteng su units a registrar en stock.pack.operation.- for mov in obj_pick.browse(cr, uid, pick_id, context=context).move_lines: if mov.product_id.id == prod_id and mov.product_id.prod_type == comm.RAW: vals.update(dimension_id = mov.dimension_id.id) vals.update(dimension_unit = mov.dimension_unit) break def create(self, cr, uid, vals, context=None): self._before_save(cr, uid, vals, context) #_logger.info('>> stock_pack_opetarion >> create >> 12- vals = %s', vals) return super(stock_pack_operation, self).create(cr, uid, vals, context=context) def write(self, cr, uid, ids, vals, context=None): #_logger.info('>> stock_pack_opetarion >> write >> 20- vals = %s', vals) self._before_save(cr, uid, vals, context) #_logger.info('>> stock_pack_opetarion >> write >> 21- vals = %s', vals) return super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context) stock_pack_operation() class 
stock_move(osv.osv): _inherit = "stock.move" # defino tipo de movimiento en Locacion de Stock: # return 0 = no afecta a Stock, # 1 = entra prod. en Stock (in: input), # -1 = sale prod. en Stock (out: output) def stock_move(self, cr, uid, mov=None, zeroVal=None): zeroValue = 0 if zeroVal == None else zeroVal if not mov: _logger.info(">> stock_move >> Stock.Move no definido.") return zeroValue loc_propio = [comm.get_location_stock(self, cr, uid), \ comm.get_location_recortes_stock(self, cr, uid)] loc_orig_parents = comm.get_loc_parents(self, mov.location_id, []) loc_dest_parents = comm.get_loc_parents(self, mov.location_dest_id, []) loc_orig_propio = (loc_propio[0] in loc_orig_parents) or (loc_propio[1] in loc_orig_parents) loc_dest_propio = (loc_propio[0] in loc_dest_parents) or (loc_propio[1] in loc_dest_parents) #_logger.info(">> stock_move >> 1- loc_propio = %s", loc_propio) #_logger.info(">> stock_move >> 2- loc_orig_parents = %s", loc_orig_parents) #_logger.info(">> stock_move >> 3- loc_orig_propio = %s", loc_orig_propio) #_logger.info(">> stock_move >> 4- loc_dest_parents = %s", loc_dest_parents) #_logger.info(">> stock_move >> 5- loc_dest_propio = %s", loc_dest_propio) if loc_orig_propio and loc_dest_propio: _logger.info(">> stock_move = 0 (NULO): movimiento interno en sectores propios.") return zeroValue if not loc_orig_propio and not loc_dest_propio: _logger.info(">> stock_move = 0 (NULO): movimiento interno en sectores no propios.") return zeroValue if not loc_orig_propio and loc_dest_propio: _logger.info(">> stock_move = 1 (IN): ingreso de mercaderia en almacen/sector.") return 1 if loc_orig_propio and not loc_dest_propio: _logger.info(">> stock_move = -1 (OUT): egreso de mercaderia en almacen/sector.") return -1 _logger.warning(">> ERROR >> stock_move = 0 >> ¿Entrada o Salida? 
operación no definida...") return zeroValue def _get_sign_qty(self, cr, uid, ids, field_name, arg, context=None): if not ids: return {} res = {} bal = 0.00 ids_by_date = self.search(cr, uid, [('id','in',ids)], order='date') for m in self.browse(cr, uid, ids_by_date): fields = {} # sign = self._get_sign(m) sign = self.stock_move(cr, uid, m, 1) fields['qty_dimension'] = sign * m.dimension_unit fields['qty_product'] = sign * m.product_qty bal += fields['qty_product'] fields['qty_balance'] = bal res[m.id] = fields # _logger.info(">> _get_field_with_sign >> 5 >> res = %s", res) return res def _get_types(self, cr, uid, ids, field_name, arg, context=None): #_logger.info(">> _get_types >> 1- ids = %s", ids) res = {} if not ids: return res if not isinstance(ids, (list,tuple)): ids = [ids] types = comm.get_prod_types(self, cr, uid, context) #_logger.info(">> _get_types >> 2- types = %s", types) for ms_id in self.browse(cr, uid, ids, context): cid = ms_id.product_id.categ_id.id #_logger.info(">> _get_types >> 3- cid = %s", cid) res.update({ms_id.id : types.get(cid,'*')}) #_logger.info(">> _get_types >> 4- res = %s", res) return res def _is_raw(self, cr, uid, ids, field_name, arg, context=None): #""" #Determina si [ids stock_move] tiene producto, del tipo is_raw si/no... 
#""" #res = {} #if not ids: # return res # para cada stock_move -> recupero su correspondiente prod_id #prod_ids = [sm.product_id.id for sm in self.browse(cr, uid, ids)] # recupero is_raw por cada producto: {prod_id: is_raw} #data = comm.is_raw_material_by_product_id(self, cr, uid, prod_ids) # convierto de {prod_id: is_raw} -> {stock_move_id: is_raw}: #res = {ids[k]: (data[prod_ids[k]] or False) for k in range(len(ids))} # _logger.info("10 >> _is_raw >> res = %s", res) #return res res = { sm.id : (sm.product_id.prod_type == comm.RAW) for sm in self.browse(cr, uid, ids) } #_logger.info("10 >> _is_raw >> res = %s", res) return res def _get_move_name(self, cr, uid, pro_id=False, dim_id=False): name = '' if not pro_id: return name obj_pro = self.pool.get('product.product') name = obj_pro.name_get(cr, uid, [pro_id], context=None)[0][1] if not dim_id or \ not comm.is_raw_material_by_product_id(self, cr, uid, [pro_id])[pro_id]: return name obj_dim = self.pool.get('product.marble.dimension') d = obj_dim.browse(cr, uid, [dim_id])[0] name = "%s >> %s" % (name, d.dimension) return name # ------------------------------------------------------------------------ def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False): res = super(stock_move, self).onchange_product_id(cr, uid, ids, prod_id, loc_id, loc_dest_id, partner_id) #_logger.info(">> onchange_product_id >> 1- res = %s", res) v = {} if (not res) or (not prod_id): return v no_prod_id = ('product_id' not in res['value']) if no_prod_id: res['value'].update({'product_id':prod_id}) v = self.calculate_dim(cr, uid, res['value']) if no_prod_id: del v['product_id'] res['value'].update(v) #_logger.info(">> onchange_product_id >> 2- res = %s", res) return res def onchange_calculate_dim(self, cr, uid, ids, pro_id, pro_uom, pro_qty, dim_id, dim_unit): v = { 'product_id' : pro_id, 'product_uom' : pro_uom, 'product_uom_qty' : pro_qty, 'dimension_id' : dim_id, 'dimension_unit' : 
dim_unit, 'is_raw' : False, 'prod_type' : comm.OTHER, } # _logger.info(">> onchange_calculate_dim >> 0- val = %s", val) val = self.calculate_dim(cr, uid, v) # _logger.info(">> onchange_calculate_dim >> 1- val = %s", val) return {'value': val} def calculate_dim(self, cr, uid, val): #_logger.info(" >> calculate_dim >> 100- val = %s", val) pro_id = val.get('product_id', False) pro_uom = val.get('product_uom', False) pro_uos = val.get('product_uos', False) pro_qty = val.get('product_uom_qty', 0.00) dim_id = val.get('dimension_id', False) dim_unit = val.get('dimension_unit', 0.00) is_raw = val.get('is_raw', False) prod_type = val.get('prod_type', comm.OTHER) if not pro_id: return val #pro = self.pool.get('product.product').browse(cr, uid, pro_id) #_logger.info(" >> calculate_dim >> 1- prod = %s", pro) #pro = self.pool.get('product.product').browse(cr, uid, pro_id).categ_id #_logger.info(" >> calculate_dim >> 2- prod = %s", pro) #cid = self.pool.get('product.product').browse(cr, uid, pro_id).categ_id.id #prod_type = comm.get_prod_types(self, cr, uid).get(cid, comm.OTHER) #val['prod_type'] = prod_type prod_type = self.pool.get('product.product').browse(cr, uid, pro_id).prod_type val['prod_type'] = prod_type m2 = 0.00 #is_raw = comm.is_raw_material_by_product_id(self, cr, uid, [pro_id])[pro_id] is_raw = (prod_type == comm.RAW) if prod_type not in ('raw', 'bacha'): val['description'] = self._get_move_name(cr, uid, pro_id, dim_id) return val elif prod_type == 'bacha': val['description'] = self._get_move_name(cr, uid, pro_id, dim_id) val['product_uom'] = comm.get_uom_units_id(self,cr,uid) return val m2 = 0.00 if dim_id: #obj = self.pool.get('product.marble.dimension') #data = obj.read(cr, uid, [dim_id], ['m2'], context=None) #m2 = data[0]['m2'] if (len(data) > 0 and len(data[0]) > 0) else 0.00 m2 = self.pool.get('product.marble.dimension').browse(cr, uid, dim_id).m2 pro_qty = dim_unit * m2 pro_uom = comm.get_uom_m2_id(self,cr,uid) v = {} v['product_id'] = pro_id 
v['product_uos'] = pro_uos v['product_uom'] = pro_uom v['product_uom_qty'] = pro_qty v['dimension_id'] = dim_id v['dimension_unit'] = dim_unit v['is_raw'] = is_raw v['prod_type'] = prod_type v['description'] = self._get_move_name(cr, uid, pro_id, dim_id) #_logger.info(" >> calculate_dim >> 101- v = %s", v) return v # ------------------------------------------------------------------------ def _check_data_before_save(self, cr, uid, sm_id, val): #_logger.info(">> _check_data_before_save >> 1- sm_id = %s", sm_id) #_logger.info(">> _check_data_before_save >> 2- val = %s", val) if 'product_id' not in val: return # defino campos a evaluar fields_list = ['product_id','product_uom','product_uom_qty','dimension_id','dimension_unit','is_raw','description'] # si (NO existe algun elemento de [fields_list] en [val]) >> me voy, no precesar... if not any(e in fields_list for e in val.keys()): return to_update = {} no_update = {} obj = (sm_id and self.pool.get('stock.move').browse(cr, uid, sm_id)) or False #_logger.info(">> _check_data_before_save >> 3- obj = %s", obj) # divido [info suministrada por actuatizar] e [info calculada, no para actualizar, requerida] for field in fields_list: if (field in val): to_update[field] = val[field] continue # >> si (field es 'read-only') >> la data no viaja... elif (field in ['product_uom', 'product_uom_qty', 'description']): to_update[field] = val.get(field,'') continue else: no_update[field] = (obj and (obj[0][field].id if ('class' in str(type(obj[0][field]))) else obj[0][field])) or False param = dict(to_update.items() + no_update.items()) v = self.calculate_dim(cr, uid, param) # actualizo valores de retorno for field in to_update: if (field not in val) and (not v[field]): # no copiarlo... pass else: val[field] = v[field] # ------------------------------------------------- # si 'is_raw' >> valido datos requeridos... 
valu = v mov = obj and obj[0] #_logger.info(">> _check_data_before_save >> 6- mov = %s", mov) is_raw = valu.get('is_raw',False) or (mov and mov.is_raw) dim_id = valu.get('dimension_id',0) or (mov and mov.dimension_id.id) dim_unit = valu.get('dimension_unit',0) or (mov and mov.dimension_unit) pro_qty = valu.get('product_uom_qty',0) or (mov and mov.product_uom_qty) msg = self._check_data_required(cr, uid, is_raw, dim_id, dim_unit, pro_qty) if msg: raise osv.except_osv(_('Error'), _(msg)) return def _check_data_required(self, cr, uid, is_raw, dim_id, dim_unit, prod_qty): if not is_raw: return '' if not dim_id: return 'You cannot save a Move-Stock without Dimension (id)' if not dim_unit: return 'You cannot save a Move-Stock without Quantity Dimension (qty)' if not prod_qty: return 'You cannot save a Move-Stock without Quantity Product (uom qty)' return '' # ------------------------------------------------------------------------ def create(self, cr, uid, data, context=None): #_logger.info('>> stock_move >> create >> 1- data = %s', data) self._check_data_before_save(cr, uid, [], data) #_logger.info('>> stock_move >> create >> 2- data = %s', data) return super(stock_move, self).create(cr, uid, data, context=context) def write(self, cr, uid, ids, vals, context=None): #for ms_id in ids: # self._check_data_before_save(cr, uid, ms_id, vals) #_logger.info('>> stock_move >> write >> 11- ids = %s', ids) #_logger.info('>> stock_move >> write >> 12- vals = %s', vals) #if len(ids) > 1: # raise osv.except_osv(_('Error'), 'TODO: A corregir. 
Mas de un registro a escribir....') sm_id = ids[0] if len(ids) >= 1 else False self._check_data_before_save(cr, uid, sm_id, vals) #_logger.info('>> stock_move >> write >> 13- vals = %s', vals) return super(stock_move, self).write(cr, uid, ids, vals, context=context) # --- extend: registro en balance --- def action_done(self, cr, uid, ids, context=None): if not super(stock_move, self).action_done(cr, uid, ids, context=context): return False #_logger.info(">> _action_done >> 01 >> ids = %s", ids) obj_bal = self.pool.get('product.marble.dimension.balance') #obj_mov = [move for move in self.browse(cr, uid, ids, context=context) if move.state == 'done' and move.product_id.is_raw] obj_mov = [move for move in self.browse(cr, uid, ids, context=context) if move.state == 'done' and (move.product_id.prod_type == comm.RAW)] if not obj_mov: return True #_logger.info(">> _action_done >> 02 >> obj_mov = %s", obj_mov) # obj_mov is raw -> verifico: # >> si (move.location = stock_loc or move.location_dest = stock_loc) # >> registro en Balance. # stock_loc = comm.get_location_stock(self, cr, uid) # bal_list = [mov for mov in obj_mov if stock_loc in [mov.location_id.id, mov.location_dest_id.id]] # bal_list = [mov for mov in obj_mov if self.stock_move(cr, uid, mov) != 0] bal_list = [mov for mov in obj_mov] #_logger.info(">> _action_done >> 02 >> stock_loc = %s", stock_loc) #_logger.info(">> _action_done >> 03 >> bal_list = %s", bal_list) for mov in bal_list: # valid data required #msg = self._check_data_required(cr, uid, mov.product_id.is_raw, mov.dimension_id, mov.dimension_unit, mov.product_uom_qty) is_raw = (mov.product_id.prod_type == comm.RAW) msg = self._check_data_required(cr, uid, is_raw, mov.dimension_id, mov.dimension_unit, mov.product_uom_qty) if msg: raise osv.except_osv(_('Error'), _(msg)) #_logger.info(">> _action_done >> 888- stock_move = %s", self.stock_move(cr, uid, mov)) # set data.. 
val = { 'prod_id': mov.product_id.id, 'dim_id': mov.dimension_id.id, 'dimension_unit': mov.dimension_unit, 'dimension_m2': mov.product_uom_qty, # 'typeMove': 'in' if stock_loc == mov.location_dest_id.id else 'out' 'typeMove': 'in' if self.stock_move(cr, uid, mov) > 0 else 'out' } #_logger.info(">> _action_done >> 04- val = %s", val) obj_bal.register_balance(cr, uid, val, context) #_logger.info(">> _action_done >> 05- OK >> val = %s", val) return True _columns = { 'description': fields.char('Description'), 'dimension_id': fields.many2one('product.marble.dimension', 'Dimension', select=True, states={'done': [('readonly', True)]}, domain=[('state','=','done')]), 'dimension_unit': fields.integer('Units', size=3, states={'done': [('readonly', True)]}), 'is_raw': fields.function(_is_raw, type='boolean', string='Is Marble'), 'prod_type' : fields.related('product_id', 'prod_type', type='char', relation='product.template', string='Product Type'), 'employee_id': fields.many2one('hr.employee', 'Empleado', select=True, states={'done': [('readonly', True)]}, domain=[('active','=',True)]), 'employee_image': fields.related('employee_id', 'image_small', type='binary', relation='hr.employee', string='Part Number', store=True, readonly=True), 'partner_picking_id': fields.related('picking_id', 'partner_id', type='many2one', relation='res.partner', string='Patern', store=False), 'qty_dimension': fields.function(_get_sign_qty, string='Unidades', multi="sign"), 'qty_product': fields.function(_get_sign_qty, string='Area (m2)', multi="sign"), 'qty_balance': fields.function(_get_sign_qty, string='Balance (m2)', multi="sign"), 'use_client_location': fields.boolean('Does the customer provides the products?', readonly=True), } _defaults = { 'dimension_id': False, 'dimension_unit': 0, } stock_move() class stock_inventory_line(osv.osv): _inherit = "stock.inventory.line" _name = "stock.inventory.line" _description = "Inventory Line" _columns = { 'is_raw': fields.boolean('Is Raw', readonly=True), 
'dimension_id': fields.many2one('product.marble.dimension', 'Dimension', domain=[('state','=','done')]), 'dimension_unit': fields.integer('Real Dim. [Units]', size=3), # units 'dimension_m2': fields.float('Real Dim. [M2]', digits=(5,3)), # m2 'dimension_unit_theoretical': fields.integer('Theoretical Dim. [Units]', size=3, readonly=True), # units 'dimension_m2_theoretical': fields.float('Theoretical Dim. [M2]', digits=(5,3), readonly=True), # m2 } defaults = { 'is_raw': False, 'dimension_id': False, 'dimension_unit': 0, 'dimension_m2': 0, 'dimension_unit_theoretical': 0, 'dimension_m2_theoretical': 0, } # overwrite: stock > stock_inventory_line - odoo v8.0 - line: 2727 - 27555 # sobre escribo metodo para incorporar 'dimensiones' en caso de ser materia prima def _resolve_inventory_line(self, cr, uid, inventory_line, context=None): stock_move_obj = self.pool.get('stock.move') if inventory_line.is_raw: diff_unit = inventory_line.dimension_unit_theoretical - inventory_line.dimension_unit diff = inventory_line.dimension_m2_theoretical - inventory_line.dimension_m2 else: diff = inventory_line.theoretical_qty - inventory_line.product_qty if not diff: return # each theorical_lines where difference between theoretical and checked quantities is not 0 is a line for which we need to create a stock move vals = { 'name': _('INV:') + (inventory_line.inventory_id.name or ''), 'product_id': inventory_line.product_id.id, 'product_uom': inventory_line.product_uom_id.id, 'date': inventory_line.inventory_id.date, 'company_id': inventory_line.inventory_id.company_id.id, 'inventory_id': inventory_line.inventory_id.id, 'state': 'confirmed', 'restrict_lot_id': inventory_line.prod_lot_id.id, 'restrict_partner_id': inventory_line.partner_id.id, 'dimension_id': inventory_line.dimension_id.id # dimension } inventory_location_id = inventory_line.product_id.property_stock_inventory.id if diff < 0: # found more than expected vals['location_id'] = inventory_location_id vals['location_dest_id'] = 
inventory_line.location_id.id vals['product_uom_qty'] = -diff # dim >> m2 [faltante] vals['dimension_unit'] = (inventory_line.is_raw and -diff_unit) or 0 # dim >> unidades [faltante] else: # found less than expected vals['location_id'] = inventory_line.location_id.id vals['location_dest_id'] = inventory_location_id vals['product_uom_qty'] = diff # dim >> m2 [excedente] vals['dimension_unit'] = (inventory_line.is_raw and diff_unit) or 0 # dim >> unidades [excedente] #_logger.info(">> _inv >> 01- vals = %s", vals) #_logger.info(">> _inv >> 02- uom_qty = %s", vals['product_uom_qty']) #_logger.info(">> _inv >> 03- dim_uni = %s", vals['dimension_unit']) return stock_move_obj.create(cr, uid, vals, context=context) stock_inventory_line() #
gpl-2.0
-4,009,658,643,104,912,400
41.884354
164
0.560398
false
3.363927
false
false
false
CiscoSystems/dashboard-quantum-beta
django-openstack/django_openstack/api.py
1
19242
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2011 Fourth Paradigm Development, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Methods and interface objects used to interact with external apis. API method calls return objects that are in many cases objects with attributes that are direct maps to the data returned from the API http call. Unfortunately, these objects are also often constructed dynamically, making it difficult to know what data is available from the API object. Because of this, all API calls should wrap their returned object in one defined here, using only explicitly defined atributes and/or methods. In other words, django_openstack developers not working on django_openstack.api shouldn't need to understand the finer details of APIs for Nova/Glance/Swift et al. 
""" from django.conf import settings import cloudfiles import glance.client import httplib import json import logging import openstack.compute import openstackx.admin import openstackx.api.exceptions as api_exceptions import openstackx.extras import openstackx.auth from urlparse import urlparse LOG = logging.getLogger('django_openstack.api') class APIResourceWrapper(object): """ Simple wrapper for api objects Define _attrs on the child class and pass in the api object as the only argument to the constructor """ _attrs = [] def __init__(self, apiresource): self._apiresource = apiresource def __getattr__(self, attr): if attr in self._attrs: # __getattr__ won't find properties return self._apiresource.__getattribute__(attr) else: LOG.debug('Attempted to access unknown attribute "%s" on' ' APIResource object of type "%s" wrapping resource of' ' type "%s"' % (attr, self.__class__, self._apiresource.__class__)) raise AttributeError(attr) class APIDictWrapper(object): """ Simple wrapper for api dictionaries Some api calls return dictionaries. This class provides identical behavior as APIResourceWrapper, except that it will also behave as a dictionary, in addition to attribute accesses. 
Attribute access is the preferred method of access, to be consistent with api resource objects from openstackx """ def __init__(self, apidict): self._apidict = apidict def __getattr__(self, attr): if attr in self._attrs: try: return self._apidict[attr] except KeyError, e: raise AttributeError(e) else: LOG.debug('Attempted to access unknown item "%s" on' 'APIResource object of type "%s"' % (attr, self.__class__)) raise AttributeError(attr) def __getitem__(self, item): try: return self.__getattr__(item) except AttributeError, e: # caller is expecting a KeyError raise KeyError(e) def get(self, item, default=None): try: return self.__getattr__(item) except AttributeError: return default class Container(APIResourceWrapper): """Simple wrapper around cloudfiles.container.Container""" _attrs = ['name'] class Console(APIResourceWrapper): """Simple wrapper around openstackx.extras.consoles.Console""" _attrs = ['id', 'output', 'type'] class Flavor(APIResourceWrapper): """Simple wrapper around openstackx.admin.flavors.Flavor""" _attrs = ['disk', 'id', 'links', 'name', 'ram', 'vcpus'] class Image(APIDictWrapper): """Simple wrapper around glance image dictionary""" _attrs = ['checksum', 'container_format', 'created_at', 'deleted', 'deleted_at', 'disk_format', 'id', 'is_public', 'location', 'name', 'properties', 'size', 'status', 'updated_at'] def __getattr__(self, attrname): if attrname == "properties": return ImageProperties(super(Image, self).__getattr__(attrname)) else: return super(Image, self).__getattr__(attrname) class ImageProperties(APIDictWrapper): """Simple wrapper around glance image properties dictionary""" _attrs = ['architecture', 'image_location', 'image_state', 'kernel_id', 'project_id', 'ramdisk_id'] class KeyPair(APIResourceWrapper): """Simple wrapper around openstackx.extras.keypairs.Keypair""" _attrs = ['fingerprint', 'key_name', 'private_key'] class Server(APIResourceWrapper): """Simple wrapper around openstackx.extras.server.Server Preserves the request 
info so image name can later be retrieved """ _attrs = ['addresses', 'attrs', 'hostId', 'id', 'imageRef', 'links', 'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid', 'image_name', 'virtual_interfaces'] def __init__(self, apiresource, request): super(Server, self).__init__(apiresource) self.request = request def __getattr__(self, attr): if attr == "attrs": return ServerAttributes(super(Server, self).__getattr__(attr)) else: return super(Server, self).__getattr__(attr) @property def image_name(self): image = image_get(self.request, self.imageRef) return image.name class ServerAttributes(APIDictWrapper): """Simple wrapper around openstackx.extras.server.Server attributes Preserves the request info so image name can later be retrieved """ _attrs = ['description', 'disk_gb', 'host', 'image_ref', 'kernel_id', 'key_name', 'launched_at', 'mac_address', 'memory_mb', 'name', 'os_type', 'project_id', 'ramdisk_id', 'scheduled_at', 'terminated_at', 'user_data', 'user_id', 'vcpus', 'hostname'] class Services(APIResourceWrapper): _attrs = ['disabled', 'host', 'id', 'last_update', 'stats', 'type', 'up', 'zone'] class SwiftObject(APIResourceWrapper): _attrs = ['name'] class Tenant(APIResourceWrapper): """Simple wrapper around openstackx.auth.tokens.Tenant""" _attrs = ['id', 'description', 'enabled'] class Token(APIResourceWrapper): """Simple wrapper around openstackx.auth.tokens.Token""" _attrs = ['id', 'serviceCatalog', 'tenant_id', 'username'] class Usage(APIResourceWrapper): """Simple wrapper around openstackx.extras.usage.Usage""" _attrs = ['begin', 'instances', 'stop', 'tenant_id', 'total_active_disk_size', 'total_active_instances', 'total_active_ram_size', 'total_active_vcpus', 'total_cpu_usage', 'total_disk_usage', 'total_hours', 'total_ram_usage'] class User(APIResourceWrapper): """Simple wrapper around openstackx.extras.users.User""" _attrs = ['email', 'enabled', 'id', 'tenantId'] def url_for(request, service_name, admin=False): catalog = 
request.session['serviceCatalog'] if admin: rv = catalog[service_name][0]['adminURL'] else: rv = catalog[service_name][0]['internalURL'] return rv def check_openstackx(f): """Decorator that adds extra info to api exceptions The dashboard currently depends on openstackx extensions being present in nova. Error messages depending for views depending on these extensions do not lead to the conclusion that nova is missing extensions. This decorator should be dropped and removed after keystone and dashboard more gracefully handle extensions and openstackx extensions aren't required by the dashboard in nova. """ def inner(*args, **kwargs): try: return f(*args, **kwargs) except api_exceptions.NotFound, e: e.message = e.details or '' e.message += ' This error may be caused by missing openstackx' \ ' extensions in nova. See the dashboard README.' raise return inner def compute_api(request): compute = openstack.compute.Compute( auth_token=request.session['token'], management_url=url_for(request, 'nova')) # this below hack is necessary to make the jacobian compute client work # TODO(mgius): It looks like this is unused now? 
compute.client.auth_token = request.session['token'] compute.client.management_url = url_for(request, 'nova') LOG.debug('compute_api connection created using token "%s"' ' and url "%s"' % (request.session['token'], url_for(request, 'nova'))) return compute def account_api(request): LOG.debug('account_api connection created using token "%s"' ' and url "%s"' % (request.session['token'], url_for(request, 'identity', True))) return openstackx.extras.Account( auth_token=request.session['token'], management_url=url_for(request, 'identity', True)) def glance_api(request): o = urlparse(url_for(request, 'glance')) LOG.debug('glance_api connection created for host "%s:%d"' % (o.hostname, o.port)) return glance.client.Client(o.hostname, o.port) def admin_api(request): LOG.debug('admin_api connection created using token "%s"' ' and url "%s"' % (request.session['token'], url_for(request, 'nova', True))) return openstackx.admin.Admin(auth_token=request.session['token'], management_url=url_for(request, 'nova', True)) def extras_api(request): LOG.debug('extras_api connection created using token "%s"' ' and url "%s"' % (request.session['token'], url_for(request, 'nova'))) return openstackx.extras.Extras(auth_token=request.session['token'], management_url=url_for(request, 'nova')) def auth_api(): LOG.debug('auth_api connection created using url "%s"' % settings.OPENSTACK_KEYSTONE_URL) return openstackx.auth.Auth( management_url=settings.OPENSTACK_KEYSTONE_URL) def swift_api(): return cloudfiles.get_connection( settings.SWIFT_ACCOUNT + ":" + settings.SWIFT_USER, settings.SWIFT_PASS, authurl=settings.SWIFT_AUTHURL) def console_create(request, instance_id, kind=None): return Console(extras_api(request).consoles.create(instance_id, kind)) def flavor_create(request, name, memory, vcpu, disk, flavor_id): return Flavor(admin_api(request).flavors.create( name, int(memory), int(vcpu), int(disk), flavor_id)) def flavor_delete(request, flavor_id, purge=False): 
admin_api(request).flavors.delete(flavor_id, purge) def flavor_get(request, flavor_id): return Flavor(compute_api(request).flavors.get(flavor_id)) @check_openstackx def flavor_list(request): return [Flavor(f) for f in extras_api(request).flavors.list()] def image_create(request, image_meta, image_file): return Image(glance_api(request).add_image(image_meta, image_file)) def image_delete(request, image_id): return glance_api(request).delete_image(image_id) def image_get(request, image_id): return Image(glance_api(request).get_image(image_id)[0]) def image_list_detailed(request): return [Image(i) for i in glance_api(request).get_images_detailed()] def image_update(request, image_id, image_meta=None): image_meta = image_meta and image_meta or {} return Image(glance_api(request).update_image(image_id, image_meta=image_meta)) def keypair_create(request, name): return KeyPair(extras_api(request).keypairs.create(name)) def keypair_delete(request, keypair_id): extras_api(request).keypairs.delete(keypair_id) @check_openstackx def keypair_list(request): return [KeyPair(key) for key in extras_api(request).keypairs.list()] def server_create(request, name, image, flavor, user_data, key_name): return Server(extras_api(request).servers.create( name, image, flavor, user_data=user_data, key_name=key_name), request) def server_delete(request, instance): compute_api(request).servers.delete(instance) def server_get(request, instance_id): response = compute_api(request).servers.get(instance_id), request LOG.info(response) return Server(compute_api(request).servers.get(instance_id), request) @check_openstackx def server_list(request): return [Server(s, request) for s in extras_api(request).servers.list()] def server_reboot(request, instance_id, hardness=openstack.compute.servers.REBOOT_HARD): server = server_get(request, instance_id) server.reboot(hardness) def service_get(request, name): return Services(admin_api(request).services.get(name)) @check_openstackx def service_list(request): 
return [Services(s) for s in admin_api(request).services.list()] def service_update(request, name, enabled): return Services(admin_api(request).services.update(name, enabled)) def token_get_tenant(request, tenant_id): tenants = auth_api().tenants.for_token(request.session['token']) for t in tenants: if str(t.id) == str(tenant_id): return Tenant(t) LOG.warning('Unknown tenant id "%s" requested' % tenant_id) def token_list_tenants(request, token): return [Tenant(t) for t in auth_api().tenants.for_token(token)] def tenant_create(request, tenant_id, description, enabled): return Tenant(account_api(request).tenants.create(tenant_id, description, enabled)) def tenant_get(request, tenant_id): return Tenant(account_api(request).tenants.get(tenant_id)) @check_openstackx def tenant_list(request): return [Tenant(t) for t in account_api(request).tenants.list()] def tenant_update(request, tenant_id, description, enabled): return Tenant(account_api(request).tenants.update(tenant_id, description, enabled)) def token_create(request, tenant, username, password): return Token(auth_api().tokens.create(tenant, username, password)) def token_info(request, token): # TODO(mgius): This function doesn't make a whole lot of sense to me. The # information being gathered here really aught to be attached to Token() as # part of token_create. 
May require modification of openstackx so that the # token_create call returns this information as well hdrs = {"Content-type": "application/json", "X_AUTH_TOKEN": settings.OPENSTACK_ADMIN_TOKEN, "Accept": "text/json"} o = urlparse(token.serviceCatalog['identity'][0]['adminURL']) conn = httplib.HTTPConnection(o.hostname, o.port) conn.request("GET", "/v2.0/tokens/%s" % token.id, headers=hdrs) response = conn.getresponse() data = json.loads(response.read()) admin = False LOG.info(data) for role in data['auth']['user']['roleRefs']: if role['roleId'] == 'Admin': admin = True return {'tenant': data['auth']['user']['tenantId'], 'user': data['auth']['user']['username'], 'admin': admin} @check_openstackx def usage_get(request, tenant_id, start, end): return Usage(extras_api(request).usage.get(tenant_id, start, end)) @check_openstackx def usage_list(request, start, end): return [Usage(u) for u in extras_api(request).usage.list(start, end)] def user_create(request, user_id, email, password, tenant_id): return User(account_api(request).users.create( user_id, email, password, tenant_id)) def user_delete(request, user_id): account_api(request).users.delete(user_id) def user_get(request, user_id): return User(account_api(request).users.get(user_id)) @check_openstackx def user_list(request): return [User(u) for u in account_api(request).users.list()] def user_update_email(request, user_id, email): return User(account_api(request).users.update_email(user_id, email)) def user_update_password(request, user_id, password): return User(account_api(request).users.update_password(user_id, password)) def user_update_tenant(request, user_id, tenant_id): return User(account_api(request).users.update_tenant(user_id, tenant_id)) def swift_container_exists(container_name): try: swift_api().get_container(container_name) return True except cloudfiles.errors.NoSuchContainer: return False def swift_object_exists(container_name, object_name): container = swift_api().get_container(container_name) 
try: container.get_object(object_name) return True except cloudfiles.errors.NoSuchObject: return False def swift_get_containers(): return [Container(c) for c in swift_api().get_all_containers()] def swift_create_container(name): if swift_container_exists(name): raise Exception('Container with name %s already exists.' % (name)) return Container(swift_api().create_container(name)) def swift_delete_container(name): swift_api().delete_container(name) def swift_get_objects(container_name, prefix=None): container = swift_api().get_container(container_name) return [SwiftObject(o) for o in container.get_objects(prefix=prefix)] def swift_copy_object(orig_container_name, orig_object_name, new_container_name, new_object_name): container = swift_api().get_container(orig_container_name) if swift_object_exists(new_container_name, new_object_name) == True: raise Exception('Object with name %s already exists in container %s' % (new_object_name, new_container_name)) orig_obj = container.get_object(orig_object_name) return orig_obj.copy_to(new_container_name, new_object_name) def swift_upload_object(container_name, object_name, object_data): container = swift_api().get_container(container_name) obj = container.create_object(object_name) obj.write(object_data) def swift_delete_object(container_name, object_name): container = swift_api().get_container(container_name) container.delete_object(object_name) def swift_get_object_data(container_name, object_name): container = swift_api().get_container(container_name) return container.get_object(object_name).stream()
apache-2.0
248,219,022,108,928,350
31.94863
79
0.653934
false
3.922936
false
false
false
leotrs/decu
test/notsosimple_project/src/script.py
1
1196
""" testscript.py ------------- This is a test script for decu. """ from decu import Script, experiment, figure, run_parallel import numpy as np import matplotlib.pyplot as plt class TestScript(Script): @experiment(data_param='data') def exp(self, data, param, param2): """Compute x**param for each data point.""" self.log.info('Working hard for {}..'.format(TestScript.exp.run)) return np.power(data, param) + param2 @figure() def plot_result(self, data, result): """Plot results of experiment.""" plt.plot(data, result) @figure() def plot_many_results(self, data, results): """Plot results of experiment.""" plt.figure() for res in results: plt.plot(data, res) def main(self): """Run some experiments and make some figures.""" data = np.arange(5) result1 = self.exp(data, param=4, param2=10) self.plot_result(data, result1) param_list = [(data, x, y) for x, y in zip(np.arange(5), np.arange(5, 10))] result2 = run_parallel(self.exp, param_list) self.plot_many_results(data, result2, suffix='parallel')
mit
5,614,853,097,660,750,000
26.813953
73
0.594482
false
3.624242
false
false
false
rackerlabs/qonos
qonos/openstack/common/eventlet_backdoor.py
1
4764
# Copyright (c) 2012 OpenStack Foundation. # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import errno import gc import os import pprint import socket import sys import traceback import eventlet import eventlet.backdoor import greenlet from oslo_config import cfg from qonos.openstack.common._i18n import _LI from qonos.openstack.common import log as logging help_for_backdoor_port = ( "Acceptable values are 0, <port>, and <start>:<end>, where 0 results " "in listening on a random tcp port number; <port> results in listening " "on the specified port number (and not enabling backdoor if that port " "is in use); and <start>:<end> results in listening on the smallest " "unused port number within the specified range of port numbers. The " "chosen port is displayed in the service's log file.") eventlet_backdoor_opts = [ cfg.StrOpt('backdoor_port', help="Enable eventlet backdoor. %s" % help_for_backdoor_port) ] CONF = cfg.CONF CONF.register_opts(eventlet_backdoor_opts) LOG = logging.getLogger(__name__) class EventletBackdoorConfigValueError(Exception): def __init__(self, port_range, help_msg, ex): msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. 
' '%(help)s' % {'range': port_range, 'ex': ex, 'help': help_msg}) super(EventletBackdoorConfigValueError, self).__init__(msg) self.port_range = port_range def _dont_use_this(): print("Don't use this, just disconnect instead") def _find_objects(t): return [o for o in gc.get_objects() if isinstance(o, t)] def _print_greenthreads(): for i, gt in enumerate(_find_objects(greenlet.greenlet)): print(i, gt) traceback.print_stack(gt.gr_frame) print() def _print_nativethreads(): for threadId, stack in sys._current_frames().items(): print(threadId) traceback.print_stack(stack) print() def _parse_port_range(port_range): if ':' not in port_range: start, end = port_range, port_range else: start, end = port_range.split(':', 1) try: start, end = int(start), int(end) if end < start: raise ValueError return start, end except ValueError as ex: raise EventletBackdoorConfigValueError(port_range, ex, help_for_backdoor_port) def _listen(host, start_port, end_port, listen_func): try_port = start_port while True: try: return listen_func((host, try_port)) except socket.error as exc: if (exc.errno != errno.EADDRINUSE or try_port >= end_port): raise try_port += 1 def initialize_if_enabled(): backdoor_locals = { 'exit': _dont_use_this, # So we don't exit the entire process 'quit': _dont_use_this, # So we don't exit the entire process 'fo': _find_objects, 'pgt': _print_greenthreads, 'pnt': _print_nativethreads, } if CONF.backdoor_port is None: return None start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) # NOTE(johannes): The standard sys.displayhook will print the value of # the last expression and set it to __builtin__._, which overwrites # the __builtin__._ that gettext sets. Let's switch to using pprint # since it won't interact poorly with gettext, and it's easier to # read the output too. 
def displayhook(val): if val is not None: pprint.pprint(val) sys.displayhook = displayhook sock = _listen('localhost', start_port, end_port, eventlet.listen) # In the case of backdoor port being zero, a port number is assigned by # listen(). In any case, pull the port number out here. port = sock.getsockname()[1] LOG.info( _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') % {'port': port, 'pid': os.getpid()} ) eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, locals=backdoor_locals) return port
apache-2.0
620,482,699,583,324,400
31.855172
78
0.645466
false
3.768987
false
false
false
BinMatrix/camshift_ros
scripts/camshift_node.py
1
9812
#!/usr/bin/env python ''' Camshift node ================ This is a ros node that shows mean-shift based tracking You select a color objects such as your face and it tracks it. This subscrib from "/image" topic for reading image, and publish the information of target to "/TargetPositionSize" or "/roi" topic. The position and size have been normalized in "/TargetPositionSize". http://www.robinhewitt.com/research/track/camshift.html Usage: ------ To initialize tracking, select the object with mouse Keys: ----- ESC/q - exit b - toggle back-projected probability visualization s - save roi to file l - load roi from file to calculate hist ''' # Python 2/3 compatibility from __future__ import print_function import sys PY3 = sys.version_info[0] == 3 if PY3: xrange = range import numpy as np import cv2 import time import os # debug with pudb # import pudb; pu.db import rospy from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError # from mav_msgs.msg import TargetPositionSize from sensor_msgs.msg import Image, RegionOfInterest, CameraInfo class App: def __init__(self): self.roi_file = os.path.expanduser("~/roi.jpg") cv2.namedWindow('camshift', 1) cv2.setMouseCallback('camshift', self.onmouse) self.frame = None self.vis = None self.vis_roi = None self.selection = None self.drag_start = None self.show_backproj = False self.track_window = None self.track_box = None #rotated rect self.expand_ratio = 0.2 self.hist = None self.last_track = None self.fps = 0 self.fps_values = list() self.fps_n_values = 10 self.time_star = time.time() self.bridge = CvBridge() self.image_sub = rospy.Subscriber( "/image", Image, self.callback) # self.target_pub = rospy.Publisher( # "/TargetPositionSize", TargetPositionSize) self.roi_pub = rospy.Publisher("roi", RegionOfInterest) def onmouse(self, event, x, y, flags, param): if event == cv2.EVENT_LBUTTONDOWN: self.drag_start = (x, y) self.track_window = None if event == cv2.EVENT_LBUTTONUP: self.drag_start = None 
self.track_window = self.selection if self.drag_start: xmin = min(x, self.drag_start[0]) ymin = min(y, self.drag_start[1]) xmax = max(x, self.drag_start[0]) ymax = max(y, self.drag_start[1]) self.selection = (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1) def show_hist(self): bin_count = self.hist.shape[0] bin_w = 24 img = np.zeros((256, bin_count * bin_w, 3), np.uint8) for i in xrange(bin_count): h = int(self.hist[i]) cv2.rectangle(img, (i * bin_w + 2, 255), ((i + 1) * bin_w - 2, 255 - h), (int(180.0 * i / bin_count), 255, 255), -1) img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR) cv2.imshow('hist', img) def show_hist_new(self): bin_count = self.hist.shape[0] bin_w = 1 img = np.zeros((256, bin_count * bin_w, 3), np.uint8) for i in xrange(bin_count): h = int(self.hist[i]) cv2.rectangle(img, (i * bin_w, 255), ((i + 1) * bin_w, 255 - h), (int(180.0 * i / bin_count), 255, 255), -1) img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR) cv2.imshow('hist', img) def expand_window(self, last_track): x, y, w, h = last_track row, col = self.frame.shape[:2] n_x0 = np.maximum(0, x - int(w * self.expand_ratio) - 1) n_y0 = np.maximum(0, y - int(h * self.expand_ratio) - 1) n_w = np.minimum(col, w + int(w * self.expand_ratio * 2) + 1) n_h = np.minimum(row, h + int(h * self.expand_ratio * 2) + 1) return (n_x0, n_y0, n_w, n_h) def cvBox2D_to_cvRect(self, roi): try: if len(roi) == 3: (center, size, angle) = roi pt1 = ( int(center[0] - size[0] / 2), int(center[1] - size[1] / 2)) pt2 = ( int(center[0] + size[0] / 2), int(center[1] + size[1] / 2)) rect = [pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]] else: rect = list(roi) except: return [0, 0, 0, 0] return rect def publish_target(self): target = TargetPositionSize() height, width = self.frame.shape[:2] x, y, w, h = self.track_window target.center_x = (x + w / 2.0) / width * 2 - 1 target.center_y = 1 - (y + h / 2.0) / height * 2 target.size_x = float(w) / width target.size_y = float(h) / height self.target_pub.publish(target) def publish_roi(self): 
roi_box = self.track_window # roi_box = self.track_box try: roi_box = self.cvBox2D_to_cvRect(roi_box) except: return # Watch out for negative offsets roi_box[0] = max(0, roi_box[0]) roi_box[1] = max(0, roi_box[1]) try: roi = RegionOfInterest() roi.x_offset = int(roi_box[0]) roi.y_offset = int(roi_box[1]) roi.width = int(roi_box[2]) roi.height = int(roi_box[3]) self.roi_pub.publish(roi) except: rospy.loginfo("Publishing ROI failed") def display_fps(self): time_end = time.time() img_fps = int(1 / (time_end - self.time_star)) self.time_star = time_end self.fps_values.append(img_fps) if len(self.fps_values) > self.fps_n_values: self.fps_values.pop(0) self.fps = int(sum(self.fps_values) / len(self.fps_values)) cv2.putText(self.vis, "FPS: " + str(self.fps), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0)) def callback(self, data): try: cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") except CvBridgeError as e: print(e) self.frame = np.array(cv_image, dtype=np.uint8) self.vis = self.frame.copy() hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) mask = cv2.inRange( hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))) if self.selection: x0, y0, w, h = self.selection hsv_roi = hsv[y0:y0 + h, x0:x0 + w] mask_roi = mask[y0:y0 + h, x0:x0 + w] self.hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180]) # self.hist = cv2.calcHist([hsv_roi], [0], mask_roi, [360], [0, 180]) cv2.normalize(self.hist, self.hist, 0, 255, cv2.NORM_MINMAX) self.hist = self.hist.reshape(-1) self.show_hist() # self.show_self.hist_new(self.hist) self.vis_roi = self.vis[y0:y0 + h, x0:x0 + w] cv2.bitwise_not(self.vis_roi, self.vis_roi) # highlight befitting object when selecting # self.vis[mask == 0] = 0 if self.track_window: # lost the target, expand last valid track window if self.track_window == (0, 0, 0, 0): self.track_window = self.expand_window(self.last_track) # print("Re-search at : ", self.track_window) self.last_track = self.track_window self.selection = None prob = 
cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1) prob &= mask term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1) self.track_box, self.track_window = cv2.CamShift( prob, self.track_window, term_crit) # publish position and size of target, has been normalized. # self.publish_target() self.publish_roi() if self.show_backproj: self.vis[:] = prob[..., np.newaxis] try: cv2.ellipse(self.vis, self.track_box, (0, 0, 255), 2) except: print(self.track_box) # Compute the FPS and display in image self.display_fps() cv2.imshow('camshift', self.vis) ch = 0xFF & cv2.waitKey(1) if ch == 27 or ch == ord('q'): os._exit(0) if ch == ord('b'): self.show_backproj = not self.show_backproj if ch == ord('s'): if self.track_window == None: print("There has no tracked object!") return x, y, w, h = self.track_window cv2.imwrite(self.roi_file, self.frame[y:y+h, x:x+w]) print("Saved to ", self.roi_file) if ch == ord('l'): if not os.path.isfile(self.roi_file): print(self.roi_file, " is not exist!") return roi = cv2.imread(self.roi_file) print("Loaded from ", self.roi_file) roi_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) roi_mask = cv2.inRange( roi_hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))) self.hist = cv2.calcHist([roi_hsv], [0], roi_mask, [16], [0, 180]) cv2.normalize(self.hist, self.hist, 0, 255, cv2.NORM_MINMAX) self.hist = self.hist.reshape(-1) self.show_hist() row, col = self.frame.shape[:2] self.track_window = (0, 0, col, row) if __name__ == '__main__': rospy.init_node('camshift', anonymous=True) cs = App() try: rospy.spin() except KeyboardInterrupt: print("Shutting down") cv2.destroyAllWindows()
gpl-3.0
-7,288,317,239,279,681,000
35.073529
110
0.528027
false
3.249007
false
false
false
rafallo/p2c
torrent/torrent.py
1
10334
# -*- coding: utf-8 -*- import hashlib import libtorrent as lt import logging from threading import Timer, Event import os import time from p2c.exceptions import SessionNotBindedException, TorrentHasNotMetadataYet import settings from torrent.movie import Movie SOURCE_TYPES = ("MAGNET", "TORRENT") logger = logging.getLogger(__name__) class Torrent(object): def __init__(self, source_type, source, name): """ :type source: str magnet or torrent file path :type name: str :type source_type: str """ if not source_type in SOURCE_TYPES: raise Exception( "source_type must be one of {0}".format(SOURCE_TYPES)) self.name = name self.source_type = source_type self.source = source self.torrent_handler = None self._torrent_info = None # dict where key is path and value is Movie instance # this is files which are downloading or downloaded self.files = None # piece_length in this torrent self.piece_length = None # amount of pieces which made up DOWNLOAD_PIECE_SIZE self._jump = None # if first prioritizing task was run once self._prioritized = False self.priority_interval = settings.PRIORITY_INTERVAL self._priority_thread_stop = Event() self._priority_timer = None # currently downloading Movie self._downloading = None def __del__(self): self._stop_torrent_threads() def __str__(self): return self.name def set_source(self, source, session): self.source = source if self.source: self.bind_session(session) def bind_session(self, session): """ Creates torrent handler based on source_type """ add_data = {} if self.source_type == "TORRENT": add_data['ti'] = lt.torrent_info(self.source) elif self.source_type == "MAGNET": add_data['url'] = self.source add_data['save_path'] = self._get_download_dir() add_data['storage_mode'] = lt.storage_mode_t(1) self.torrent_handler = session.add_torrent(add_data) self._prioritize_to_none() def get_filelist(self): info = self.get_torrent_info(wait=True) return [file.path for file in info.files()] def get_movies_filelist(self): if self.files is None: 
self._create_movies() return list(self.files.keys()) def get_movies(self): if self.files is None: self._create_movies() return list(self.files.values()) def download_file(self, filename:str): if not filename in self.get_movies_filelist(): raise Exception("filename not found in torrent") self._prioritize_to_none() self._downloading = self.files[filename] self._run_torrent_threads() def pause_download(self): self._stop_torrent_threads() self.torrent_handler.pause() self._downloading = None def has_torrent_info(self): """ Checks if torrent has downloaded metadata """ try: self.get_torrent_info() return True except (TorrentHasNotMetadataYet, SessionNotBindedException): return False def get_torrent_info(self, wait=False): """ Gets torrent's metadata """ if self._torrent_info != None: return self._torrent_info if self.torrent_handler is None: if wait: while not self.torrent_handler is None: time.sleep(0.1) else: raise SessionNotBindedException if not self.torrent_handler.has_metadata(): if wait: while not self.torrent_handler.has_metadata(): time.sleep(0.1) else: raise TorrentHasNotMetadataYet self._torrent_info = self.torrent_handler.get_torrent_info() return self._torrent_info def get_status(self): """ Gets torrent's status with field like download rate, peers number, state and progress level """ status = self.torrent_handler.status() state_str = ['queued', 'checking', 'downloading metadata', 'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume'] data = { 'download_rate': status.download_rate, 'download_payload_rate': status.download_payload_rate, 'num_peers': status.num_peers, 'state': state_str[status.state], 'progress': status.progress } return data def get_seconds_to_buffer(self): rate = self.get_status()['download_rate'] if(rate > 100 * 1024): # round to 100 kbs, 200 kbs, 300 kbs rate = int(rate / (100 * 1024)) * 100 * 1024 movie = self.get_downloading_movie() # minimum rate if movie and rate > 30 * 1024: return int(movie.pieces_to_play * 
movie.piece_length / rate) def get_downloading_movie(self): return self._downloading def _create_movies(self): info = self.get_torrent_info() files = info.files() self.piece_length = info.piece_length() self.priority_interval = settings.PRIORITY_INTERVAL * self.piece_length / ( 1024 ** 2) self._jump = int(settings.DOWNLOAD_PIECE_SIZE / self.piece_length) + 1 self.files = {} for file in files: ext = os.path.splitext(file.path)[1] if ext and ext[1:].lower() in settings.SUPPORTED_MOVIE_EXTENSIONS: first_piece = int(file.offset / self.piece_length) last_piece = int((file.size + file.offset) / self.piece_length) self.files[file.path] = Movie(path=file.path, size=file.size, first_piece=first_piece, last_piece=last_piece, piece_length=self.piece_length, download_dir=self._get_download_dir()) def _update_movies_progress(self): """ Updates movie progress based on number of downloaded pieces """ p_downloaded = self.torrent_handler.status().pieces movie = self.get_downloading_movie() first_piece, last_piece = movie.first_piece, movie.last_piece # logger.debug("first_piece: {}".format(first_piece)) # logger.debug("last_piece: {}".format(last_piece )) counter = 0 for item in p_downloaded[first_piece:last_piece]: if item == True: counter += 1 else: break # logger.debug("download_pieces inside thread is: {}".format(counter)) movie.downloaded_pieces = counter def _manage_pieces_priority(self): """ Sets priority blocks. First pieces should be downloaded first swo its have the highest priority. """ p_downloaded = self.torrent_handler.status().pieces movie = self.get_downloading_movie() if not movie: return first_piece, last_piece = movie.cur_first_piece, movie.cur_last_piece if not False in p_downloaded[first_piece:first_piece + self._jump + 1]: # all block downloaded first_piece += self._jump movie.cur_first_piece = first_piece # prioritezing # [7, 7, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...] 
if first_piece + self._jump + self._jump <= last_piece: for piece in range(first_piece + 4 * self._jump, last_piece + 1): # logger.debug("the lowest priority for: {}".format(piece)) self.torrent_handler.piece_priority(piece, 0) if first_piece + self._jump <= last_piece: for piece in range(first_piece + 2 * self._jump, min(last_piece + 1, first_piece + 4 * self._jump)): # logger.debug("low priority for: {}".format(piece)) self.torrent_handler.piece_priority(piece, 2) if first_piece <= last_piece: for piece in range(first_piece, min(last_piece + 1, first_piece + 2 * self._jump)): # logger.debug("the highest priority for: {}".format(piece)) self.torrent_handler.piece_priority(piece, 7) # for mp4 get 512KB end of file # TODO: bug below # for piece in range( # last_piece - int(self.piece_length / 512 * 1024) + 1, # last_piece): # logger.debug("the highest priority for (512KB end of file): {}".format(piece)) # self.torrent_handler.piece_priority(piece, 7) self._update_movies_progress() if not self._priority_thread_stop.is_set(): if self._priority_timer: self._priority_timer.cancel() self._priority_timer = None self._run_torrent_threads() def _run_torrent_threads(self): # logger.debug("run threads for {}".format(self.priority_interval)) if not self._priority_thread_stop.is_set(): if not self._priority_timer: self._priority_timer = Timer(self.priority_interval, self._manage_pieces_priority) self._priority_timer.start() def _stop_torrent_threads(self): self._priority_thread_stop.set() if self._priority_timer: self._priority_timer.cancel() def _prioritize_to_none(self): if not self._prioritized and self.has_torrent_info(): self._prioritized = True info = self.get_torrent_info() for piece in range(0, info.num_pieces()): self.torrent_handler.piece_priority(piece, 0) def _get_download_dir(self): path = os.path.join(settings.DOWNLOAD_DIR, hashlib.md5(self.name.encode()).hexdigest()) try: os.makedirs(path) except OSError: pass return path
mit
2,340,498,763,809,516,500
35.259649
111
0.561641
false
4.171982
false
false
false
dhaitz/CalibFW
plotting/modules/plot_sandbox.py
1
75936
# -*- coding: utf-8 -*- """ plotting sanbox module for merlin. This module is to be used for testing or development work. """ import plotbase import copy import plot1d import getroot import math import plotresponse import plotfractions import plot2d import plot_tagging import fit import os def recogen_alpha_ptbins(files, opt): """ recogen vs alpha as well as Z pT vs alpha in pT bins. """ zptbins = [ "1", "zpt>30 && zpt<50", "zpt>50 && zpt<70", "zpt>70 && zpt<120", "zpt>120" ] texts = [ "$\mathrm{inclusive}$", "$30 < \mathrm{Z} p_\mathrm{T} < 50\ \mathrm{GeV}$", "$50 < \mathrm{Z} p_\mathrm{T} < 70\ \mathrm{GeV}$", "$70 < \mathrm{Z} p_\mathrm{T} < 120\ \mathrm{GeV}$", "$\mathrm{Z}\ p_\mathrm{T} > 120\ \mathrm{GeV}$", ] fig, axes = plotbase.newPlot(subplots = len(zptbins * 2), subplots_X = len(zptbins)) settings = plotbase.getSettings(opt, quantity='recogen_alpha') for ax1, ax2, selection, text in zip(axes[:(len(axes)/2)], axes[(len(axes)/2):], zptbins, texts): plot1d.datamcplot("recogen_alpha", files, opt, fig_axes = [fig, ax1], changes={ 'allalpha': True, 'y': [0.99, 1.1], 'subplot': True, 'nbins': 6, 'fit': 'slope', 'x': [0, 0.3], 'text': text, 'selection': [selection], } ) plot1d.datamcplot("zpt_alpha", files, opt, fig_axes = [fig, ax2], changes={ 'allalpha': True, 'y': [0, 300], 'subplot': True, 'nbins': 6, 'x': [0, 0.3], 'text': text, 'selection': [selection], } ) plotbase.Save(fig, settings) def corrs(files, opt): fig, ax = plotbase.newPlot() settings = plotbase.getSettings(opt, quantity='recogen_genpt') for quantity, marker, color, label in zip( ['raw/recogen_genpt', 'l1/recogen_genpt', 'l1l2l3/recogen_genpt'], ['o', 'D', '-'], ['black', '#7293cb', '#e1974c'], ['raw', 'L1', 'L1L2L3'] ): plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={ 'algorithm': "", 'markers':[marker], 'colors':[color], 'labels':[label, ""], 'correction':"", 'subplot':True, 'grid': True, 'y': [0.9, 1.5], 'legloc': 'upper right', 'x': [20, 100], 'yname': 'recogen', 
'xname':'genpt' }) settings['filename'] = plotbase.getDefaultFilename('recogen', opt, settings) plotbase.Save(fig, settings) def corrbins(files, opt): fig, ax = plotbase.newPlot() settings = plotbase.getSettings(opt, quantity='recogen') for quantity, marker, color, label, n in zip( ['l1l2l3/recogen3040', 'l1l2l3/recogen5080', 'l1l2l3/recogen100'], ['o', 'f', '-'], ['black', '#7293cb', '#e1974c'], ['pT 20-40', 'pT 50-80', 'pT >100'], range(10) ): plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={ 'algorithm': "", 'markers':[marker], 'colors':[color], 'labels':[label, ""], 'correction':"", 'subplot':True, 'grid': True, 'fitlabel_offset':-0.07*n, 'legloc': 'upper right', 'x': [0, 2], 'xname':'recogen' }) settings['filename'] = plotbase.getDefaultFilename('recogen-bins', opt, settings) plotbase.Save(fig, settings) def zmassFitted(files, opt, changes=None, settings=None): """ Plots the FITTED Z mass peak position depending on pT, NPV, y.""" quantity = "zmass" # iterate over raw vs corr electrons for mode in ['raw', 'corr']: filenames = ['work/data_ee_%s.root' % mode, 'work/mc_ee_powheg_%s.root' % mode] files, opt = plotbase.openRootFiles(filenames, opt) # iterate over quantities for xq, xbins in zip( ['npv', 'zpt', 'zy'], [ [a - 0.5 for a, b in opt.npv] + [opt.npv[-1][1] - 0.5], opt.zbins, [(i/2.)-2. 
for i in range(0, 9)], ] ): # iterate over Z pt (inclusive/low,medium,high) for ptregion, ptselection, ptstring in zip(["_inclusivept", "_lowpt", "_mediumpt", "_highpt"], [ "1", "zpt<60", "zpt>60 && zpt < 120", "zpt>120", ], [ "", "Z $p_\mathrm{T}$ < 60 GeV", "60 < Z $p_\mathrm{T}$ < 120 GeV", "Z $p_\mathrm{T}$ > 120 GeV", ]): # iterate over electron eta regions for etaregion, etaselection, etastring in zip( ["_all", "_EBEB", "_EBEE", "_EEEE"], [ "1", "abs(eminuseta) < 1.5 && abs(epluseta) < 1.5", "((abs(eminuseta) < 1.5 && abs(epluseta) > 1.6) || (abs(epluseta) < 1.5 && abs(eminuseta) > 1.6))", "abs(eminuseta) > 1.6 && abs(epluseta) > 1.6", ], [ "", "EB-EB", "EB-EE & EE-EB", "EE-EE", ]): # we dont need pt-binned Z pT plots: if xq == 'zpt' and ptselection is not "1": continue rootobjects, rootobjects2 = [], [] fig = plotbase.plt.figure(figsize=[7, 10]) ax = plotbase.plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax.number = 1 ax2 = plotbase.plt.subplot2grid((3, 1), (2, 0)) ax2.number = 2 fig.add_axes(ax) fig.add_axes(ax2) # print the Z pt and electron eta region on the plot ax.text(0.98, 0.98, ptstring, va='top', ha='right', transform=ax.transAxes) ax.text(0.98, 0.9, etastring, va='top', ha='right', transform=ax.transAxes) changes = { 'y': [90.8, 94.8], 'yname': r'$m^{\mathrm{Z}}$ (peak position from Breit-Wigner fit) / GeV', 'legloc': 'upper left', 'title': mode + " electrons", 'labels': ['Data', 'Powheg'], } settings = plotbase.getSettings(opt, changes=changes, quantity=quantity + "_" + xq) # iterate over files markers = ['o', 'D'] ys, yerrs, xs = [], [], [] for i, f in enumerate(files): bins = xbins y, yerr, x = [], [], [] # iterate over bins for lower, upper in zip(bins[:-1], bins[1:]): changes = { 'selection': ['(%s > %s && %s < %s) && (%s) && (%s)' % (xq, lower, xq, upper, ptselection, etaselection)], 'nbins': 40, 'folder': 'zcuts', 'x': [71, 101], } local_settings = plotbase.getSettings(opt, changes, None, quantity) # get the zmass, fit, get the xq 
distribution; append to lists rootobjects += [getroot.histofromfile(quantity, f, local_settings, index=i)] p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1], breitwigner=True, limits=local_settings['x']) y += [p1] yerr += [p1err] changes['x'] = [lower, upper] local_settings = plotbase.getSettings(opt, changes, None, quantity) rootobjects2 += [getroot.histofromfile(xq, f, local_settings, index=i)] x += [rootobjects2[-1].GetMean()] # fine line to indicate bin borders ax.add_line(plotbase.matplotlib.lines.Line2D((lower, upper), (y[-1],y[-1]), color='black', alpha=0.05)) ys.append(y) yerrs.append(yerr) xs.append(x) #plot ax.errorbar(x, y, yerr, drawstyle='steps-mid', color=settings['colors'][i], fmt=markers[i], capsize=0, label=settings['labels'][i]) # format and save if xq == 'zpt': settings['xlog'] = True settings['x'] = [30, 1000] settings['xticks'] = [30, 50, 70, 100, 200, 400, 1000] plot1d.formatting(ax, settings, opt, [], []) # calculate ratio values ratio_y = [d/m for d, m in zip(ys[0], ys[1])] ratio_yerrs = [math.sqrt((derr/d)**2 + (merr/m)**2)for d, derr, m, merr in zip(ys[0], yerrs[0], ys[1], yerrs[1])] ratio_x = [0.5 * (d + m) for d, m in zip(xs[0], xs[1])] #format ratio plot ax2.errorbar(ratio_x, ratio_y, ratio_yerrs, drawstyle='steps-mid', color='black', fmt='o', capsize=0, label='ratio') ax.axhline(1.0) fig.subplots_adjust(hspace=0.1) ax.set_xticklabels([]) ax.set_xlabel("") settings['ratio'] = True settings['legloc'] = None settings['xynames'][1] = 'ratio' plot1d.formatting(ax2, settings, opt, [], []) ax2.set_ylim(0.99, 1.01) settings['filename'] = plotbase.getDefaultFilename(quantity + "_" + xq + "_" + mode + ptregion + etaregion, opt, settings) plotbase.Save(fig, settings) def zmassEBEE(files, opt): """ Plot the Z mass depending on where the electrons are reconstructed. 
    3 bins: EB-EB, EB-EE, EE-EE """
    # Selections for the three barrel/endcap combinations of the two electrons.
    selections = ['abs(eminuseta)<1.5 && abs(epluseta)<1.5',
        '(abs(eminuseta)>1.5 && abs(epluseta)<1.5) || abs(eminuseta)<1.5 && abs(epluseta)>1.5',
        'abs(eminuseta)>1.5 && abs(epluseta)>1.5',
    ]
    filenames = ['zmass_ebeb', 'zmass_ebee', 'zmass_eeee']
    titles = ['Barrel electrons only', 'One electron barrel, one endcap', 'Endcap electrons only']
    for selection, filename, title in zip(selections, filenames, titles):
        plot1d.plot1dratiosubplot("zmass", files, opt, changes = {
            'x': [81, 101],
            'selection': [selection, "hlt * (%s)" % selection],
            'fit': 'bw',  # Breit-Wigner fit of the Z peak
            'nbins': 40,
            'filename': filename,
            'title': title,
            'folder': 'zcuts',
        })


def eid(files, opt):
    # Electron-ID efficiency vs. the reco-gen matching distance deltaR.
    quantity = 'mvaid'
    # The block below is disabled (kept as a no-op string literal):
    """changes = {
        'x': [0, 1.0001],
        #'log': True,
        'folder': 'electron_all',
        'nbins':50,
        'subplot':True,
        'markers': ['f'],
    }
    settings = plotbase.getSettings(opt, quantity=quantity)
    fig, ax = plotbase.newPlot()
    for c, l, s in zip(['#236BB2', '#E5AD3D'], ['fake', 'true'],
            ['1', 'deltar < 0.3 && deltar>0']):
        changes.update({
            'labels': [l],
            'colors': [c],
            'selection': s,
        })
        plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes=changes)
    settings['filename'] = plotbase.getDefaultFilename(quantity, opt, settings)
    plotbase.Save(fig, settings)"""

    ## id vs deltar
    for quantity in ["mvaid", "mvatrigid", "looseid", "mediumid", "tightid"]:
        plot1d.datamcplot("%s_deltar" % quantity, files, opt, changes = {
            'folder': 'electron_all',
            'nbins': 50,
            'xynames': ['$\Delta$R(reco, gen)', quantity],
            'x': [0, 0.5],
            'legloc': None,
        })


def plots_2014_07_03(files, opt):
    """ Plots for JEC presentation 03.07.
""" #### 2D histograms for obj, x, nbins in zip(['muon', 'jet', 'electron'], [[-2.5, 2.5], [-5.3, 5.3]]*2, [400, 1000, 300]): changes = { 'out': 'out/2014_07_03', 'y': [-3.2, 3.2], } changes.update({ 'folder': obj + "_all", 'nbins': nbins, 'x':x, 'filename': obj + '_phi_eta', 'xynames': ['%s eta' % obj, '%s phi' % obj, obj + 's'], }) if obj is 'electron': filenames = ["data_ee_noc", "mc_ee_corr_test"] else: filenames = ["data_noc", "mc_rundep_noc"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] plot2d.twoD("phi_eta", files, opt, changes = changes) if obj is not 'electron': changes.update({ 'year': 2011, 'filename': obj + '_phi_eta_2011', 'lumi': 5.1, 'energy': 7, }) filenames = ["data11_noc"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] plot2d.twoD("phi_eta", files, opt, changes = changes) ##### PU Jet ID filenames = ["dataPUJETID", "data"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'normalize': False, 'ratiosubplot': 'True', 'ratiosubploty': [0.8, 1.2], 'out': 'out/2014_07_03', 'x': [30, 250], 'title': 'Data', 'labels': ['PUJetID applied', 'default'], } plot1d.datamcplot('zpt', files, opt, changes=changes) for typ in ['mpf', 'ptbalance']: plotresponse.responseratio(files, opt, over='zpt', types=[typ], changes={ 'labels': ['PUJetID applied', 'default'], 'out': 'out/2014_07_03', 'x': [30, 1000], 'xlog': True, }) ##### timedep filenames = ["data", "mc_rundep"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'out': 'out/2014_07_03', 'filename': "timedep", } timedep(files, opt, changes=changes) ###### MPF fix filenames = [ "/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-18_10-41/out.root", 
"/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root" ] files = [getroot.openfile(f) for f in filenames] plotresponse.responseratio(files, opt, over='zpt', types=['mpf'], changes={ 'labels': ['MCRD-fixed', 'MCRD'], 'xlog': True, 'filename': "mpf_zpt-fixed", 'out': 'out/2014_07_03', 'x': [30, 1000], 'xticks': [30, 50, 70, 100, 200, 400, 1000], }) # mpf slopes filenames = ["data", "mc_rundep"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'filename': "mpfslopes-fixed", 'labels': ['data', 'MCRD'], 'out': 'out/2014_07_03', 'allalpha': True, 'selection': 'alpha<0.3', } mpfslopes(files, opt, changes) changes.update({ 'filename': "mpfslopes", 'labels': ['data', 'MCRD'], }) filenames = [ '/storage/a/dhaitz/excalibur/artus/data_2014-04-10_21-21/out.root', '/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root' ] files = [getroot.openfile(f) for f in filenames] mpfslopes(files, opt, changes) # SYNC os.system("rsync ${EXCALIBUR_BASE}/out/2014_07_03 ekplx26:plots -r") def timedep(files, opt, changes = None): """ Plots for the time dependence, requested by Mikko 2014-06-25.""" settings = plotbase.getSettings(opt, quantity="response_run", changes=changes) fig, ax = plotbase.newPlot() factor = 2e4 methods = ['mpf', 'ptbalance'] labels = ['MPF', '$p_T$ balance'] for q, c, l, m, in zip(methods, settings['colors'], labels, settings['markers']): slopes, serrs, x = [], [], [] for eta1, eta2 in zip(opt.eta[:-1], opt.eta[1:]): changes = { 'alleta': True, 'allalpha': True, 'selection': 'alpha<0.3 && abs(jet1eta) > %s && abs(jet1eta) < %s' % (eta1, eta2), 'fit': 'slope', } rootobject = getroot.histofromfile("%s_run" % q, files[0], settings, changes=changes) # get fit parameters slope, serr = fit.fitline2(rootobject)[2:4] slopes += [slope*factor] serrs += [serr*factor] changes['x'] = [0, 6] x += [getroot.histofromfile("abs(jet1eta)", files[0], settings, 
changes=changes).GetMean()] ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c, fmt='o', capsize=0, label=l) #formatting stuff settings['x'] = [0, 5] plotbase.setAxisLimits(ax, settings) plotbase.labels(ax, opt, settings) plotbase.axislabels(ax, 'Leading jet $\eta$', 'Response vs run: linear fit slope (muliplied with 20 000)', settings=settings) ax.set_ylim(-0.1, 0.05) ax.set_xlim(0, 5.25) ax.grid(True) ax.set_xticks([float("%1.2f" % eta) for eta in opt.eta]) for label in ax.get_xticklabels(): label.set_rotation(45) ax.axhline(0.0, color='black', linestyle='--') settings['filename'] = quantity="response_run" plotbase.Save(fig, settings) def npuplot(files, opt): """ Plots for the JEC paper that Mikko requested 24.4.: npv and rho in bins of npu.""" settings = plotbase.getSettings(opt, quantity='npv') settings['x'] = [-0.5, 99.5] settings['nbins'] = 100 tgraphs = [] for f in files: if files.index(f) == 0: # flag bad runs in data runs = "run!=191411 && run!=198049 && run!=198050 && run!=198063 && run!=201727 && run!=203830 && run!=203832 && run!=203833 && run!=203834 && run!=203835 && run!=203987 && run!=203992 && run!=203994 && run!=204100 && run!=204101 && run!=208509" else: runs = 1 npuhisto = getroot.histofromfile('nputruth', f, settings) for i in range(100): if npuhisto.GetBinContent(i) > 0: npu = i tgraph = ROOT.TGraphErrors() for n in range(npu): changes = {'selection': 'nputruth>%s && nputruth<%s && %s' % (n-0.5, n+0.5, runs)} npv = getroot.histofromfile('npv', f, settings, changes=changes).GetMean() npverr = getroot.histofromfile('npv', f, settings, changes=changes).GetMeanError() rho = getroot.histofromfile('rho', f, settings, changes=changes).GetMean() rhoerr = getroot.histofromfile('rho', f, settings, changes=changes).GetMeanError() tgraph.SetPoint(n, npv, rho) tgraph.SetPointError(n, npverr, rhoerr) tgraphs.append(tgraph) settings['root'] = settings['root'] or settings['filename'] getroot.saveasroot(tgraphs, opt, settings) def 
electronupdate(files, opt): """Plots for the Zee update 26.06.2014.""" # Reco/gen electron pt vs eta filenames = ['mc_ee_raw', 'mc_ee_corr'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes={ 'x': [0, 2.5], 'y': [0.9, 1.1], 'nbins': 25, 'labels': ['raw', 'corrected'], 'markers': ['o', '-'], 'colors': ['maroon', 'blue'], 'folder':'zcuts', 'y': [0.94, 1.06], 'title': 'Madgraph', 'xynames': [ r"$|\eta_{e^{-}} \| $", r'$\mathrm{e}^{-} p_\mathrm{T}$ Reco/Gen' ] } plot1d.datamcplot('eminuspt/geneminuspt_abs(eminuseta)', files, opt, changes=changes) changes={ 'ratiosubplot': True, 'title': 'Madgraph', 'x': [0, 1000], 'log': True, 'labels': ['raw', 'corrected'], 'folder': 'all', 'ratiosubplotfit': 'chi2', } plot1d.datamcplot('zpt', files, opt, changes=changes) #LHE information fig, ax = plotbase.newPlot() fig2, ax2 = plotbase.newPlot() changes ={ 'folder':'all', 'x': [-4, 4], 'y': [0, 200000], 'subplot': True, 'nbins':50, 'normalize': False, 'xynames': ['Z rapidity', 'Events'], 'log':True, } for q, c, m, l in zip( ['zy', 'genzy', 'lhezy'], ['black', 'lightskyblue', 'FireBrick'], ['o', 'f', '-'], ['RecoZ', 'GenZ', 'LHE-Z'], ): changes['labels'] = [l] changes['markers'] = [m] changes['colors'] = [c] plot1d.datamcplot(q, files[1:], opt, changes=changes, fig_axes=[fig, ax]) settings = plotbase.getSettings(opt, None, None, 'rapidity') settings['filename'] = 'rapidity' plotbase.Save(fig, settings) # Data-MC comparisons ###################################################### # basic quantities filenames = ['data_ee_corr', 'mc_ee_corr'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'x': [-3, 3], 'y': [-3.2, 3.2], 'folder': 'all', 'nbins': 200, } plot2d.twoD('eminusphi_eminuseta', files, opt, changes=changes) for q, c in zip(['eminuspt', 'eminuseta', 'zy', 'zpt', 'zmass'], [ {}, {'x': [-2.5, 2.5]}, {}, {'x': [0, 
500], 'log':True}, {'x': [80, 102], 'ratiosubploty':[0.9, 1.1]}, ]): changes = { 'labels': ['Data', 'Madgraph'], 'ratiosubplot': True, 'folder':'zcuts', 'nbins': 50, } changes.update(c) plot1d.datamcplot(q, files, opt, changes=changes) # scale factors changes = { 'x': [0, 100], 'y': [0, 3], 'z': [0.8, 1.2], 'folder': 'all', 'nbins': 100, 'selection': 'sfminus>0', 'colormap': 'bwr', } plot2d.twoD('sfminus_abs(eminuseta)_eminuspt', files[1:], opt, changes=changes) # zpt in rapidities for ybin in [[i/2., (i+1)/2.] for i in range(5)]: changes = { 'x': [0, 600], 'nbins': 30, 'folder':'zcuts', 'title': "%s < $y_Z$ < %s" % tuple(ybin), 'log': 'True', 'ratiosubplot': True, 'selection': 'abs(zy)>%s && abs(zy)<%s' % (ybin[0], ybin[1]), 'filename': ('zpt_rap-%s-%s' % (ybin[0], ybin[1])).replace('.', '_'), } plot1d.datamcplot('zpt', files, opt, changes=changes) # scale factor changes = { 'labels': ['Madgraph'], 'ratiosubplot': True, 'xynames':['eminuspt', r"$|\eta_{e^{-}} \| $"], 'folder':'all', 'x': [0, 60], 'y': [0, 3], 'colormap': 'bwr', 'z': [0.5, 1], } q = 'sfminus_abs(eminuseta)_eminuspt' plot2d.twoD(q, files[1:], opt, changes=changes) ############## # Plot for ID acceptance fig, ax = plotbase.newPlot() changes ={ 'folder':'all', 'x': [0, 150], 'y': [0, 1], 'subplot': True, 'normalize': False, 'legloc': 'lower right', 'xynames': ['eminuspt', 'Acceptance'] } filenames = ['mc_ee_corr_noid'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] for q, c, m, l in zip( ['eminusidtight', 'eminusidmedium', 'eminusidloose', 'eminusidveto', 'eminusid'], ['lightskyblue', 'FireBrick', 'green', 'black', 'blue'], ['f', '_', '-', "o", "*"], ['Tight ID', 'Medium ID', 'Loose ID', "Veto ID", "MVA ID"], ): changes['labels'] = [l] changes['markers'] = [m] changes['colors'] = [c] plot1d.datamcplot("%s_eminuspt" % q, files, opt, changes=changes, fig_axes=[fig, ax]) settings = plotbase.getSettings(opt, None, None, 'id') 
settings['filename'] = 'id' settings['title'] = 'MC' plotbase.Save(fig, settings) def mpfslopes(files, opt, changes=None): """ Plot the slope of a linear fit on MPF vs NPV, in Z pT bins.""" quantity="mpf_npv" settings = plotbase.getSettings(opt, quantity=quantity, changes=changes) settings['special_binning'] = True print opt.zbins fig, ax = plotbase.newPlot() for f, c, l, m, in zip(files, settings['colors'], settings['labels'], settings['markers']): slopes, serrs, x = [], [], [] # iterate over Z pT bins for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]): changes = {'selection':'zpt>%s && zpt<%s' % (ptlow, pthigh)} rootobject = getroot.histofromfile(quantity, f, settings, changes=changes) # get fit parameters and mean Z pT; append to lists slope, serr = fit.fitline2(rootobject)[2:4] slopes += [slope] serrs += [serr] x += [getroot.histofromfile("zpt", f, settings, changes=changes).GetMean()] ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c, fmt='o', capsize=0, label=l) #formatting stuff settings['x'] = [30, 100] plotbase.setAxisLimits(ax, settings) plotbase.labels(ax, opt, settings) ax.set_xscale('log') settings['xticks'] = opt.zbins plotbase.axislabels(ax, 'zpt', 'slope from fit on MPF vs NPV', settings=settings) ax.set_ylim(-0.002, 0.002) ax.grid(True) ax.axhline(0.0, color='black', linestyle='--') plotbase.Save(fig, settings) def pileup(files, opt): for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]): plotresponse.responseratio(files, opt, over='npv', types=['mpf'], changes={ 'allalpha':True, 'selection':'alpha<0.3 && zpt>%s && zpt<%s' % (ptlow, pthigh), 'filename': "mpf_npv_%s-%s" % (ptlow, pthigh) } ) def emucomparison(files, opt): values = [] valueerrs = [] for filenames in [['data', 'mc'], ['data_ee', 'mc_ee']]: files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] for quantity in ['mpf', 'ptbalance']: settings = plotbase.getSettings(opt, None, None, quantity) 
settings['nbins'] = 40 settings['correction'] = 'L1L2L3' if 'ee' in filenames[0]: if settings['selection']: settings['selection'] = 'abs(epluseta<1.0) && abs(eminuseta)<1.0 && %s' % settings['selection'] else: settings['selection'] = 'abs(epluseta<1.0) && abs(eminuseta)<1.0' datamc = [] rootobjects = [] fitvalues = [] for f in files: rootobjects += [getroot.histofromfile(quantity, f, settings)] p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1], gauss=True, limits=[0, 2]) fitvalues += [p1, p1err] ratio = fitvalues[0] / fitvalues[2] ratioerr = math.sqrt(fitvalues[1] ** 2 + fitvalues[3] ** 2) values.append(ratio) valueerrs.append(ratioerr) fig, ax = plotbase.newPlot() ax.errorbar(range(4), values, valueerrs, drawstyle='steps-mid', color='black', fmt='o', capsize=0,) ax.set_xticks([0, 1, 2, 3]) ax.set_xticklabels(['Zmm\nMPF', 'Zmm\npT balance', 'Zee\nMPF', 'Zee\npT balance']) ax.set_xlim(-0.5, 3.5) ax.set_ylim(0.96, 1.001) ax.axhline(1.0, color='black', linestyle=':') ax.set_ylabel('Jet response Data/MC ratio', ha="right", x=1) plotbase.Save(fig, settings) def electrons(files, opt): """ Standard set of plots for the dielectron analysis. 
""" filenames = ['data_ee', 'mc_ee'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] base_changes = { 'out': 'out/ee2014', 'folder': 'zcuts', # no additional restrictions on jets 'normalize': False, # no normalizing to check if the lumi reweighting works 'factor': 1., # on the fly lumi reweighting 'efficiency': 1., # no trigger reweighting for electrons 'ratiosubplot': True, } # zmass with fit changes = { 'legloc': 'center right', 'nbins': 50, 'fit': 'gauss' } changes.update(base_changes) plot1d.datamcplot('zmass', files, opt, changes=changes) #electron quantities for charge in ['plus', 'minus']: changes = { 'x': [0, 150], 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes) changes['x'] = [-2.5, 2.5] plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes) changes['x'] = None plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes) changes['legloc'] = 'center right' changes['filename'] = 'zmass_barrel' changes['selection'] = 'abs(epluseta)<1.0 && abs(eminuseta)<1.0' changes['title'] = '|eta(e)| < 1.0' changes['fit'] = 'gauss' plot1d.datamcplot('zmass', files, opt, changes=changes) changes['filename'] = 'zmass_endcap' changes['selection'] = 'abs(epluseta)>1.0 && abs(eminuseta)>1.0' changes['title'] = '|eta(e)| > 1.0' changes['fit'] = 'gauss' plot1d.datamcplot('zmass', files, opt, changes=changes) #electron quantities for charge in ['plus', 'minus']: changes = { 'x': [0, 150], 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes) changes['x'] = [-2.5, 2.5] plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes) changes['x'] = None plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes) # Z pT in rapidity bins rapbins = ['abs(zy)<1', 'abs(zy)>1 && abs(zy)<2', 'abs(zy)>2 && abs(zy)<3'] raplabels = ['|Y(Z)|<1', '1<|Y(Z)|<2', '2<|Y(Z)|<3'] rapname = 
['0zy1', '1zy2', '2zy3'] for rbin, rlabel, rname in zip(rapbins, raplabels, rapname): changes = { 'selection': rbin, 'filename': 'zpt-%s' % rname, 'x': [30, 750], 'log': True, 'title': rlabel, 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('zpt', files, opt, changes=changes) #electron quantities for charge in ['plus', 'minus']: changes = { 'x': [0, 150], 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes) changes['x'] = [-2.5, 2.5] plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes) changes['x'] = None plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes) # npv changes = { 'folder': 'all', } changes.update(base_changes) changes['folder'] = 'all' plot1d.datamcplot('npv', files, opt, changes=changes) changes['noweighting'] = True changes['factor'] = 3503.71 / 30459503 * 1000 changes['filename'] = 'npv_noweights' plot1d.datamcplot('npv', files, opt, changes=changes) changes['noweighting'] = True changes['factor'] = 3503.71 / 30459503 * 1000 changes['filename'] = 'npv_noweights' plot1d.datamcplot('npv', files, opt, changes=changes) # z pt and rapidity changes = { 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('zy', files, opt, changes=changes) plot1d.datamcplot('zeta', files, opt, changes=changes) changes['x'] = [30, 750] changes['log'] = True plot1d.datamcplot('zpt', files, opt, changes=changes) #powheg comparison filenames = ['data_ee', 'mc_ee', 'mc_ee_powheg'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'log': True, 'x': [30, 750], 'nbins': 40, 'filename': 'zpt_mad-pow', 'labels': ['Data', 'Madgraph', 'Powheg'], } changes.update(base_changes) plot1d.datamcplot('zpt', files, opt, changes=changes) changes = { 'nbins': 40, 'filename': 'zmass_mad-pow', 'labels': ['Data', 'Madgraph', 'Powheg'], } changes.update(base_changes) plot1d.datamcplot('zmass', files, opt, 
changes=changes) files = files[::2] filenames = filenames[::2] changes = { 'log':True, 'x': [30, 750], 'nbins': 40, 'filename': 'zpt_pow', 'labels':['Data', 'Powheg'], } changes.update(base_changes) plot1d.Datamcplot('zpt', files, opt, changes=changes) #backgrounds filenames = ['Data_ee', 'mc_ee', 'background_ee'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'log': True, 'x': [30, 750], 'filename': 'zpt_backgrounds', 'labels': ['Data', 'MC', 'Backgrounds'], 'markers': ['o', 'f', 'f'], 'stacked': True, 'ratiosubplot': False, } changes.update(base_changes) changes['ratiosubplot'] = False plot1d.datamcplot('zpt', files, opt, changes=changes) changes.pop('x', None) changes['filename'] = 'zmass_backgrounds' changes['log'] = False changes['ratiosubplot'] = False plot1d.datamcplot('zmass', files, opt, changes=changes) # sync the plots import subprocess subprocess.call(['rsync out/ee2014 dhaitz@ekplx26:plots/ -u -r --progress'], shell=True) """ merlin 2D_zmass_zpt --files $DATAEE $ARGS -x 0 50 --nbins 100 -y 80 100 -o $OUT merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 120 -C lightskyblue -m f --folder all merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 15 --filename eemass_low -C lightskyblue -m f --folder all merlin 2D_zpt_zy -o $OUT --files $DATAEE $ARGS -y 0 100 --nbins 100 """ def an(files, opt): """ Plots for the 2014 Z->mumu JEC AN.""" """ #MET for quantity in ['METpt', 'METphi']: plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'}) plot1d.datamcplot("npv", files, opt, changes = {'folder': 'all', 'title': 'CMS preliminary'}) for n in ['1', '2']: for quantity in ['pt', 'eta', 'phi']: plot1d.datamcplot('mu%s%s' % (n, quantity), files, opt, changes = {'title': 'CMS preliminary'}) if n is '2' and quantity is 'eta': plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'nbins': 10, 'correction': 'L1L2L3', 'title': 
'CMS preliminary'})
            else:
                plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'correction': 'L1L2L3', 'title': 'CMS preliminary'})

    for quantity in ['zpt', 'zeta', 'zy', 'zphi', 'zmass']:
        plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'})

    #response stuff
    plotresponse.responseratio(files, opt, over='zpt', types=['mpf'],
            changes={'y': [0.98, 1.03, 0.96, 1.03], 'x': [0, 400, 0, 400]})
    plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'],
            changes={'y': [0.95, 1.1, 0.93, 1.1]})
    plotresponse.responseratio(files, opt, over='npv', types=['mpf'],
            changes={'y': [0.95, 1.05, 0.92, 1.03], 'x': [0, 35, 0, 35]})

    plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'],
            changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400]})
    plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'],
            changes={'y': [0.91, 1.01, 0.93, 1.1]})
    plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'],
            changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 35, 0, 35]})
    """
    # Live part of an(): response distributions with Gaussian fits,
    # plus the extrapolation plots; everything in the string literals
    # above/below is disabled.
    for q in ['mpf', 'ptbalance']:
        plot1d.datamcplot(q, files, opt, changes={'correction': 'L1L2L3',
            'legloc': 'center right',
            'nbins': 100,
            'fit': 'gauss'})

    plotresponse.extrapol(files, opt, changes={'save_individually': True,
        'correction': 'L1L2L3'})
    """
    plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400], 'title': 'CMS preliminary'})
    plotfractions.fractions(files, opt, over='jet1abseta', changes = {'title': 'CMS preliminary'})
    plotfractions.fractions(files, opt, over='npv', changes = {'title': 'CMS preliminary'})

    for changes in [{'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'},
            {'alleta':True, 'rebin':10,
                'selection':'jet1abseta>2.5 && jet1abseta<2.964',
                'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]:

        if 'alleta' in changes:
            opt.out += '/ECOT'
            opt.user_options['out'] += '/ECOT'
            plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6)
            plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6)
            plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6)
            plotresponse.response_run(files, opt, changes=changes)
            opt.out = opt.out[:-5]
            opt.user_options['out'] = opt.user_options['out'][:-5]
        else:
            plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes)
            plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes)
            plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes)
            plotresponse.response_run(files, opt, changes=changes)
        changes['y'] = [0.84, 1.2]

    plot2d.twoD("qgtag_btag", files, opt,
        changes = {'title': 'CMS Preliminary', 'nbins':50}
    )

    plot_tagging.tagging_response(files, opt)
    plot_tagging.tagging_response_corrected(files, opt)
    """

    ## MCONLY
    if len(files) > 1:
        files = files[1:]
    """
    # PF composition as function of mc flavour
    flavour_comp(files, opt, changes={'title': 'CMS Simulation','mconly':True})

    # response vs flavour
    for var in [True, False]:
        plotresponse.response_physflavour(files, opt,
            changes={'title': 'CMS Simulation','mconly':True},
            add_neutrinopt=var,
            restrict_neutrals=var,
            extrapolation=var)

    plotfractions.flavour_composition(files, opt, changes={'title': 'CMS Simulation','mconly':True})
    plotfractions.flavour_composition_eta(files, opt, changes={'title': 'CMS Simulation','mconly':True, 'selection': 'zpt>95 && zpt<110'})

    changes = {'cutlabel' : 'ptetaalpha',
        'labels' : ['Pythia 6 Tune Z2*', 'Herwig++ Tune EE3C'],
        'y' : [0.98, 1.05],
        'markers' : ['o', 'd'],
        'colors' : ['red', 'blue'],
        'title' : 'CMS Simulation',
        'mconly' : True,
        'legloc' : 'lower left',
        'filename': 'recogen_physflavour_pythia-herwig'}
    files += [getroot.openfile("/storage/a/dhaitz/excalibur/work/mc_herwig/out/closure.root")]
    plot1d.datamcplot("recogen_physflavour", files, opt, changes=changes)
    """

def eleven(files, opt):
    """ Summary of the plots for the response studies with 2011 rereco.
    """
    runrange = [160000, 183000]
    plot1d.datamcplot('npv', files, opt, changes={'rebin': 1})
    plot1d.datamcplot('zmass', files, opt, changes={'fit': 'vertical', 'legloc': 'center right'})
    plotresponse.extrapol(files, opt)

    # response ratios vs zpt / |eta| / npv with uncertainty bands
    plotresponse.responseratio(files, opt, over='zpt', types=['mpf'],
            changes={'y': [0.98, 1.03, 0.96, 1.03], 'uncertaintyband': True, 'x': [0, 400, 0, 400]})
    plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'],
            changes={'y': [0.95, 1.1, 0.93, 1.1], 'uncertaintyband': True})
    plotresponse.responseratio(files, opt, over='npv', types=['mpf'],
            changes={'y': [0.95, 1.05, 0.92, 1.03], 'uncertaintyband': True, 'x': [0, 18, 0, 18]})

    plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'],
            changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400], 'uncertaintyband': True})
    plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'],
            changes={'y': [0.91, 1.01, 0.93, 1.1], 'uncertaintyband': True})
    plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'],
            changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 18, 0, 18], 'uncertaintyband': True})

    plot1d.datamcplot('npv_run', files, opt, changes={'x': runrange,
        'y': [0, 15], 'run': True, 'fit': True})

    plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400]})
    plotfractions.fractions(files, opt, over='jet1abseta')
    plotfractions.fractions(files, opt, over='npv', changes={'x': [-0.5, 24.5]})

    for changes in [{'x': runrange, 'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'},
            {'x': runrange, 'alleta':True, 'rebin':10,
                'selection':'jet1abseta>2.5 && jet1abseta<2.964',
                'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]:

        if 'alleta' in changes:
            # NOTE(review): unlike the (disabled) an() version, this branch
            # never restores opt.out after appending '/ECOT' -- confirm.
            opt.out += '/ECOT'
            opt.user_options['out'] += '/ECOT'
            plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6)
            plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6)
            plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6)
        else:
            plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes)
            plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes)
            plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes)
        changes['y'] = [0.84, 1.2]
        plotresponse.response_run(files, opt, changes=changes)


def rootfile(files, opt):
    """Function for the rootfile sent to the JEC group in early August 2013."""
    list_of_quantities = ['ptbalance_alpha', 'mpf_alpha',
        'ptbalance', 'mpf', 'zpt', 'npv', 'zmass', 'zpt_alpha', 'npv_alpha',
        'ptbalance_zpt', 'mpf_zpt',
        'ptbalance_npv', 'mpf_npv',
    ]

    # muon[0] = output name, muon[1] = muon selection cut
    for muon in [["zmumu", "1"], ["zmumu_muoncuts",
            "(mupluspt>25 && muminuspt>25 && abs(mupluseta)<1.0 && abs(muminuseta)<1.0)"]]:
        # alpha[1] = cut string, alpha[2] = name suffix
        for alpha in [[0, "alpha<0.2", "alpha0_2"], [1, "alpha<0.3", "alpha0_3"],
                [1, "alpha<0.4", "alpha0_4"]]:
            for quantity in list_of_quantities:
                changes = {'rebin': 1,
                    'out': 'out/root/',
                    'allalpha': True,
                    'root': "__".join([quantity, alpha[2]]),
                    'filename': muon[0],
                    'selection': "&&".join([alpha[1], muon[1]]),
                }
                if ("_zpt" in quantity) or ("_npv" in quantity):
                    changes['special_binning'] = True
                if "alpha" in quantity:
                    changes['rebin'] = 10

                plot1d.datamcplot(quantity, files, opt, changes=changes)

                changes['ratio'] = True
                changes['labels'] = ['ratio']
                plot1d.datamcplot(quantity, files, opt, changes=changes)


def ineff(files, opt):
    # Fraction of events with 0 / 2 matching partons as a function of Z pT.
    settings = plotbase.getSettings(opt, changes=None, settings=None, quantity="flavour_zpt")
    fig, ax = plotbase.newPlot()
    labels = ["no matching partons", "two matching partons"]
    colors = ['red', 'blue']
    markers = ['o', 'd']
    changes = {'subplot': True,
        'lumi': 0,
        'xynames': ['zpt', 'physflavourfrac'],
        'legloc': 'upper left',
    }

    for n, l, c, m in zip([0, 2], labels, colors, markers):
        quantity = "(nmatchingpartons3==%s)_zpt" % n
        changes['labels'] = [l]
        changes['colors'] = c
        changes['markers'] = m
        plot1d.datamcplot(quantity, files, opt, fig_axes=(fig, ax), changes=changes, settings=settings)
    settings['filename'] = plotbase.getDefaultFilename("physflavourfrac_zpt", opt, settings)
    # NOTE(review): every other call site uses plotbase.Save(fig, settings);
    # passing (fig, filename, opt) here looks inconsistent -- confirm signature.
    plotbase.Save(fig, settings['filename'], opt)


def flav(files, opt):
    # Per-flavour zpt distributions, written as ROOT objects, for each
    # eta bin and for both flavour definitions.
    etabins = [0, 1.3, 2.5, 3, 3.2, 5.2]
    etastrings = ['0-1_3', '1_3-2_5', '2_5-3', '3-3_2', '3_2-5_2']
    flavourdefs = ["algoflavour", "physflavour"]
    flavourdefinitions = ["algorithmic", "physics"]

    flist = ["(flavour>0&&flavour<4)", "(flavour==1)", "(flavour==2)", "(flavour==3)",
        "(flavour==4)", "(flavour==5)", "(flavour==21)", "(flavour==0)"]
    q_names = ['uds', 'u', 'd', 's', 'c', 'b', 'gluon', 'unmatched']
    changes = {}

    ############### FLAVOUR NOT 0!!!!!

    # barrel:
    # (disabled block kept as a no-op string literal)
    """changes['rebin'] = 1
    changes['filename']="flavour"
    changes['filename']="flavour"
    for f_id, quantity in zip(['uds','c','b','gluon'], flist):
        changes['root']=f_id
        plot1d.datamcplot("%s_zpt" % quantity, files, opt, changes=changes)
    """
    for flavourdef, flavourdefinition in zip(flavourdefs, flavourdefinitions):
        # iterate over eta bins:
        for filename, selection in zip(etastrings, getroot.etacuts(etabins)):
            changes['filename'] = "_".join([filename, flavourdefinition])
            changes['alleta'] = True
            changes['selection'] = "%s && %s" % (selection, "alpha<0.2")
            changes['rebin'] = 1
            for f_id, quantity in zip(q_names, flist):
                changes['root'] = f_id
                plot1d.datamcplot("%s_zpt" % quantity.replace("flavour", flavourdef),
                    files, opt, changes=changes)


def gif(files, opt):
    # One response plot per tenth run, with the lumi scaled linearly to the
    # run number -- frames for an animated gif.
    local_opt = copy.deepcopy(opt)
    runlist = listofruns.runlist[::10]
    for run, number in zip(runlist, range(len(runlist))):
        local_opt.lumi = (run - 190456) * 19500 / (209465 - 190456)
        # NOTE(review): prints the (probably None) return value --
        # looks like leftover debug output.
        print plotbase.plot1d.datamcplot('balresp', files, local_opt,
                 changes={'var': 'var_RunRange_0to%s' % run}, filename="%03d" % number)


def closure(files, opt):
    # Python-2 tuple parameters: each argument is a (value, error) pair.
    def divide((a, a_err), (b, b_err)):
        if (b != 0.0):
            R = a / b
        else:
            R = 0
        # NOTE(review): when R == 0 this still divides by a and b -- confirm
        # inputs can never be zero.
        Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2)
        return R, Rerr

    def multiply((a, a_err), (b, b_err)):
        R = a * b
        Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2)
        return R, Rerr

    changes = {}
changes = plotbase.getchanges(opt, changes) #get extrapol factors with alpha 035 #changes['var']='var_CutSecondLeadingToZPt_0_4' #changes['correction']='L1L2L3' balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMeanError()) mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError()) genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError()) intercept, ierr, slope, serr, chi2, ndf, conf_intervals = getroot.fitline2(getroot.getobjectfromnick('ptbalance_alpha', files[0], changes, rebin=1)) balresp_extrapol = (intercept, conf_intervals[0]) extrapol_reco_factor = divide(balresp_extrapol, balresp) intercept2, ierr2, slope2, serr2, chi22, ndf2, conf_intervals2 = getroot.fitline2(getroot.getobjectfromnick('genbalance_genalpha', files[0], changes, rebin=1)) genbal_extrapol = (intercept2, conf_intervals2[0]) extrapol_gen_factor = divide(genbal_extrapol, genbal) intercept3, ierr3, slope3, serr3, chi23, ndf3, conf_intervals3 = getroot.fitline2(getroot.getobjectfromnick('mpf_alpha', files[0], changes, rebin=1)) mpf_extrapol = (intercept3, conf_intervals3[0]) extrapol_mpf_factor = divide(mpf_extrapol, mpfresp) #del changes['var'] #del changes['correction'] #other quantities with alpha 02 recogen = (getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMeanError()) zresp = (getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMeanError()) balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, 
rebin=1).GetMeanError()) mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError()) mpfresp_raw = (getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMeanError()) genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError()) balparton = (getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMeanError()) partoncorr = divide(balparton, genbal) format = "%1.4f" print changes print "" print (r"balresp reco %s +- %s" % (format, format)) % balresp print (r"mpf %s +- %s" % (format, format)) % mpfresp print (r"balparton %s +- %s" % (format, format)) % balparton print (r"zresp %s +- %s" % (format, format)) % zresp print (r"recogen %s +- %s" % (format, format)) % recogen print (r"extrapolReco_factor %s +- %s" % (format, format)) % extrapol_reco_factor print (r"extrapolGen_factor %s +- %s" % (format, format)) % extrapol_gen_factor print (r"extrapolMPF_factor %s +- %s" % (format, format)) % extrapol_mpf_factor print (r"parton/genjet %s +- %s" % (format, format)) % divide(balparton, genbal) print "" print (r"pTgenjet / pTgenZ %s +- %s" % (format, format)) % genbal genbal = multiply(genbal, extrapol_gen_factor) print (r"* gen Level extrapolation %s +- %s" % (format, format)) % genbal #genbal = multiply(genbal, partoncorr) #print (r"* pTparton/pTgenjet correction %s +- %s" % (format, format) ) % genbal #genbal = divide(genbal, balparton) #print (r"* pTparton/pTZ correction %s +- %s" % (format, format) ) % genbal reco_bal = divide(multiply(genbal, recogen), zresp) print (r"* GenToReco for Jet and Z %s +- %s" % (format, format)) % reco_bal print "" print (r"pTrecojet / pTrecoZ %s +- 
%s" % (format, format)) % balresp balresp = multiply(balresp, extrapol_reco_factor) print (r"* reco Level extrapolation %s +- %s" % (format, format)) % balresp print "" print (r"MPF (typeI) %s +- %s" % (format, format)) % mpfresp #mpfresp = divide(mpfresp, zresp) #print (r"MPF (GenZ) %s +- %s" % (format, format) ) % mpfresp mpfresp = multiply(mpfresp, extrapol_mpf_factor) print (r"MPF (extrapol) %s +- %s" % (format, format)) % mpfresp print (r"MPF (Raw) %s +- %s" % (format, format)) % mpfresp_raw def extrapola(files, opt): fig, ax = plotbase.newPlot() changes = {} changes['var'] = "_var_CutSecondLeadingToZPt_0_3" local_opt = copy.deepcopy(opt) rebin = 5 if opt.rebin is not None: rebin = opt.rebin plot1d.datamcplot('ptbalance_alpha', files, local_opt, legloc='upper center', changes=changes, rebin=rebin, subplot=True, subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False) local_opt.colors = ['red', 'maroon'] plot1d.datamcplot('mpf_alpha', files, local_opt, legloc='upper center', changes=changes, rebin=rebin, subplot=True, xy_names=['alpha', 'response'], subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False, fit_offset=-0.1) file_name = plotbase.getDefaultFilename("extrapolation_", opt, changes) plotbase.Save(fig, file_name, opt) # function for comparing old and new corrections def comparison(datamc, opt): """file_names = [ '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root' ]""" colors = ['red', 'blue', 'blue', 
'red'] markers = ['*', 'o', 'o', '*'] #labels = [['MC_52xFast', 'data_52xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xFast', 'data_53xFast'], ['MC_53xOff', 'data_53xOff']] rebin = 1 import copy file_names = [ '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root', ] labels = [['MC_52xFast', 'data_52xFast'], ['MC_53xFast', 'data_53xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xOff', 'data_53xOff']] files = [] for f in file_names: files += [getroot.openfile(f, opt.verbose)] local_opt = copy.deepcopy(opt) local_opt.style = markers local_opt.colors = colors quantity = 'L1abs_npv' # ALL fig, axes = plotbase.newPlot(subplots=4) for a, f1, f2, l in zip(axes, files[::2], files[1::2], labels): local_opt.labels = l datamcplot(quantity, (f1, f2), local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, a), rebin=rebin, subplot=True, subtext="") filename = "L1_all__" + opt.algorithm plotbase.Save(fig, filename, opt) """ #Fastjet vs Offset fig = plotbase.plt.figure(figsize=(14,7)) axes = [fig.add_subplot(1,2,n) for n in [1,2]] local_opt.labels = labels[0] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[1] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, 
fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") #53 local_opt.labels = labels[2] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[3] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") filename = "L1_Fastjet_vs_Offset__"+opt.algorithm plotbase.Save(fig, filename, opt) #52X vs 53X fig = plotbase.plt.figure(figsize=(14,7)) axes = [fig.add_subplot(1,2,n) for n in [1,2]] local_opt.labels = labels[0] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[2] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[1] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") # local_opt.labels = labels[3] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") filename = "L1_52X_vs_53X__"+opt.algorithm plotbase.Save(fig, filename, opt) import plotresponse file_names = [ '/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', 
'/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', ] labels = [['data_52xFast', 'MC_52xFast'], [ 'data_53xFast', 'MC_53xFast'], [ 'data_52xOff', 'MC_52xOff'], ['data_53xOff', 'MC_53xOff']] files=[] for f in file_names: files += [getroot.openfile(f, opt.verbose)] for over, fit in zip(['zpt', 'jet1eta', 'npv'], [True, False, True]): fig, axes= plotbase.newPlot(subplots=4) fig2, axes2= plotbase.newPlot(subplots=4) for a1, a2, f1, f2, l in zip(axes, axes2, files[::2], files[1::2], labels): local_opt.labels = l changes ={}# {'correction':'L1L2L3'} plotresponse.responseplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig,a1), subplot=True, subtext="") plotresponse.ratioplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig2 ,a2), fit=fit, subplot=True, subtext="") filename = "Response_"+over+"_all__"+opt.algorithm plotbase.Save(fig, filename, opt) filename = "Ratio_"+over+"_all__"+opt.algorithm plotbase.Save(fig2, filename, opt)""" # function for 2d grid plots """def twoD_all_grid(quantity, datamc, opt): pt_thresholds = [12, 16, 20, 24, 28, 32, 36] var_list = ['var_JetPt_%1.fto%1.f' % (s1, s2) for (s1, s2) in zip(pt_thresholds, [1000, 1000, 1000, 1000, 1000, 1000, 1000])] var_list_2 = getroot.npvstrings(opt.npv) fig = plt.figure(figsize=(10.*len(var_list), 7.*len(var_list_2))) grid = AxesGrid(fig, 111, nrows_ncols = (len(var_list), len(var_list_2)), axes_pad = 0.4, share_all=True, label_mode = "L", #aspect = True, #cbar_pad = 0, #cbar_location = "right", #cbar_mode='single', ) for n1, var1 in enumerate(var_list): for n2, var2 in enumerate(var_list_2): change = {'var':var1+"_"+var2} index = len(var_list_2)*n1 + n2 change['incut']='allevents' 
twoD(quantity, datamc, opt, changes=change, fig_axes = [fig, grid[index]], subplot = True, axtitle = change['var'].replace('var_', '')) for grid_element, var_strings in zip(grid, opt.npv): text = r"$%s\leq\mathrm{NPV}\leq%s$" % var_strings grid_element.text(0.5, 5.5, text, ha='center', va='center', size ='40') for grid_element, pt_threshold in zip(grid[::len(var_list_2)], pt_thresholds): text = r"$p_\mathrm{T}^\mathrm{Jet1}$"+"\n"+r"$\geq%s\mathrm{GeV}$" % pt_threshold grid_element.text(-8.7, 0, text, ha='left', va='center', size ='30') #fig.suptitle("%s leading jet $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], opt.algorithm, opt.correction), size='50') fig.suptitle("%s %s $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], quantity[7:-16], opt.algorithm, opt.correction), size='30') file_name = "grid_"+opt.labels[0]+"_"+quantity +"_"+opt.algorithm + opt.correction fig.set_figwidth(fig.get_figwidth() * 1.2) plotbase.Save(fig, file_name, opt, crop=False, pad=1.5)""" def Fall12(files, opt): local_opt = copy.deepcopy(opt) filelist = [ ['/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'] ] labellist = [['data_Summer12', 'MC_Summer12'], ['data_Fall12V1', 'MC_Fall12V1'], ['data_Fall12V4', 'MC_Fall12V4']] over = 'zpt' for over in ['zpt', 'npv', 'jet1eta']: fig = plotbase.plt.figure(figsize=[21, 14]) fig.suptitle(opt.title, size='xx-large') for typ, row in zip(['bal', 'mpf'], [0, 4]): for filenames, labels, col in zip(filelist, labellist, [0, 1, 2]): ax1 = plotbase.plt.subplot2grid((7, 3), (row, col), rowspan=2) ax2 = 
plotbase.plt.subplot2grid((7, 3), (row + 2, col)) fig.add_axes(ax1) fig.add_axes(ax2) if over == 'jet1eta' and typ == 'bal': legloc = 'upper right' else: legloc = 'lower left' local_opt.labels = labels files = [] for f in filenames: files += [getroot.openfile(f, opt.verbose)] plotresponse.responseplot(files, local_opt, [typ], over=over, figaxes=(fig, ax1), legloc=legloc, subplot=True) plotresponse.ratioplot(files, local_opt, [typ], binborders=True, fit=True, over=over, subplot=True, figaxes=(fig, ax2), ratiosubplot=True) fig.subplots_adjust(hspace=0.05) ax1.set_xticks([]) ax1.set_xlabel("") ax2.set_yticks([1.00, 0.95, 0.90]) if col > 0: ax1.set_ylabel("") ax2.set_ylabel("") title = "" # " Jet Response ($p_T$ balance / MPF) vs. Z $p_T$, $N_{vtx}$ , Jet $\eta$ (" +opt.algorithm+" "+opt.correction+")" fig.suptitle(title, size='x-large') file_name = "comparison_ALL_" + over + opt.algorithm + opt.correction plotbase.Save(fig, file_name, opt) def factors(files, opt): local_opt = copy.deepcopy(opt) filelist = [ ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root'] ] labellist = [ ['Data FastJet V1', 'MC FastJet V1', 'Data Offset V1', 'MC Offset V1'], ['Data FastJet V4', 'MC FastJet V4', 'Data Offset V4', 'MC Offset V4']] """filelistt = [ ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root', 
'/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root'] ] labellistt = ['Data FastJet V1', 'Data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['Data Offset V1', 'Data Offset V4'], ['MC Offset V1','MC Offset V4' ]] names = ['DataV1', 'MCV1', 'DataV4', 'MCV4' ]""" files = [] #for sublist in filelist: # rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist] # files.append( rootfiles) for sublist in filelist: files.append([getroot.openfile(f, opt.verbose) for f in sublist]) fit = None rebin = 1 # for files, labellist, name in zip(files, labellist, names) fig, axes = plotbase.newPlot(subplots=2) quantity = 'L1abs_npv' local_opt.style = ['o', '*', 'o', '*'] local_opt.labels = labellist[0] local_opt.colors = ['blue', 'blue', 'red', 'red'] plot1d.datamcplot(quantity, files[0], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit, rebin=rebin, subplot=True, subtext="") local_opt.labels = labellist[1] plot1d.datamcplot(quantity, files[1], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit, rebin=rebin, subplot=True, subtext="") file_name = "L1_comparison_" # +name plotbase.Save(fig, file_name, opt) def factors2(files, opt): local_opt = copy.deepcopy(opt) filelist = [ ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root'], 
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4_L1Offset/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root'] ] labellistt = [['data FastJet V1', 'data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['data Offset V1', 'data Offset V4'], ['MC Offset V1', 'MC Offset V4'] ] names = ['dataV1', 'MCV1', 'dataV4', 'MCV4'] files = [] for sublist in filelist: rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist] files.append(rootfiles) #print files fit = 'chi2_linear' rebin = 1 fit_offset = -0.1 for files, labellist, name in zip(files, labellistt, names): print labellist fig, axes = plotbase.newPlot(subplots=2) quantity = 'L1abs_npv' local_opt.style = ['o', '*', 'o', '*'] local_opt.labels = [labellist[0]] local_opt.colors = ['blue', 'blue', 'red', 'red'] plot1d.datamcplot(quantity, [files[0]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit, rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="") local_opt.labels = [labellist[1]] plot1d.datamcplot(quantity, [files[1]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit, rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="") file_name = "L1_comparison_" + name plotbase.Save(fig, file_name, opt) import ROOT def allpu(files, opt, truth=True): print files settings = plotbase.getSettings(opt, quantity='npu') #print settings print settings['folder'] name = "_".join([settings['folder'], settings['algorithm'] + settings['correction']]) print name, files[1] name = name.replace("Res", "") t = files[1].Get(name) if not t: print "no 
tree", name, t.GetName() exit(1) # raw wei data weight if truth: histos = [getroot.getobject("pileup", files[2])] else: histos = [getroot.getobject("pileup;2", files[2])] histos[-1].Rebin(10) print histos[-1].GetNbinsX(), "pu2" histos[0].SetTitle("Data") histos += [ROOT.TH1D("mcraw", "MC", 1600, 0, 80)] if truth: histos += [ROOT.TH1D("mcraw", "MC", 1600, 0, 80)] t.Project("mcraw", "nputruth") else: histos += [ROOT.TH1D("mcraw", "MC", 80, 0, 80)] t.Project("mcraw", "npu") if truth: histos += [ROOT.TH1D("mcwei", "MC'", 1600, 0, 80)] t.Project("mcwei", "nputruth", "weight") else: histos += [ROOT.TH1D("mcwei", "MC'", 80, 0, 80)] t.Project("mcwei", "npu") binning = [[0, 1, 2, 3.5, 5], range(45, 80)] for h in histos: if h.GetNbinsX() > 1000: h.Rebin() if h.GetNbinsX() > 82: print h.GetNbinsX(), ">82! in", h.GetTitle() if not truth: break print "rebin:", binning b = binning if histos.index(h) == 1: b = binning + [range(5, 46)] print b for l in b: for a, b in zip(l[:-1], l[1:]): x1 = h.FindBin(a) x2 = h.FindBin(b) sumh = sum([h.GetBinContent(i) for i in range(x1, x2)]) / (x2 - x1) for i in range(x1, x2): h.SetBinContent(i, sumh) if truth: f = histos[1].Integral() / histos[1].Integral(histos[1].FindBin(8), histos[1].FindBin(40)) for i in range(3 + 0 * len(histos)): #histos[i].Rebin(4) print i ff = f / histos[i].Integral(histos[i].FindBin(8), histos[i].FindBin(40)) ff = 1.0 / histos[i].Integral() histos[i].Scale(ff) histos += [histos[0].Clone("dataraw")] histos[-1].SetTitle("Data/MC") histos[-1].Divide(histos[1]) if len(files) > 3: histos += [getroot.getobject("pileup", files[3])] histos[-1].SetTitle("weight") histos += [histos[2].Clone("rawmc")] histos[-1].Divide(histos[1]) histos[-1].SetTitle("MC'/MC") histos += [histos[0].Clone("datamc")] histos[-1].Divide(histos[2]) histos[-1].SetTitle("Data/MC'") plots = [getroot.root2histo(h) for h in histos] fig, ax, ratio = plotbase.newPlot(ratio=True) fig = plotbase.plt.figure(figsize=[7, 10]) ax = plotbase.plt.subplot2grid((3, 1), 
(0, 0), rowspan=2) ax.number = 1 ratio = plotbase.plt.subplot2grid((3, 1), (2, 0)) ratio.number = 2 fig.add_axes(ax) fig.add_axes(ratio) fig.subplots_adjust(hspace=0.05) colors = ['black', 'navy', 'red', 'green'] for p, c in zip(plots[:3], colors): ax.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6) colors[1] = 'gray' for p, c in zip(plots[3:], colors): r = ratio.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6) plotbase.labels(ax, opt, settings, settings['subplot']) plotbase.axislabels(ax, r"$n_\mathrm{PU}", settings['xynames'][1], settings=settings) xaxistext = r"observed number of pile-up interactions $n_\mathrm{PU}$" if truth: xaxistext = xaxistext.replace("observed", "true") plotbase.axislabels(ratio, xaxistext, "ratio", settings=settings) print ratio.number, r plotbase.setAxisLimits(ax, settings) plotbase.labels(ratio, opt, settings, settings['subplot']) plotbase.setAxisLimits(ratio, settings) #handles, labels = ratio.get_legend_handles_labels() ratio.legend(bbox_to_anchor=[0.8, 1], loc='upper center') ax.set_xticklabels([]) ax.set_xlabel("") settings['filename'] = plotbase.getDefaultFilename("npus", opt, settings) plotbase.Save(fig, settings) def pu(files, opt): allpu(files, opt) def puobserved(files, opt): allpu(files, opt, False)
gpl-2.0
1,261,465,762,515,416,000
40.024311
257
0.540758
false
3.169679
false
false
false
baccenfutter/cpassdb
cpassdb/protocols/client.py
1
19784
"""cpassdb - Client Protocol Classes""" __author__ = "Brian Wiborg <baccenfutter@c-base.org>" __license__ = "GNU/GPLv2" import os import sys import json import base64 import commands from twisted.internet import reactor from twisted.protocols.basic import LineReceiver class ClientProtocol(LineReceiver): """Abstract client protocol base-class This class serves as an abstract base-class for all other cpassdb client protocols. It provides common methods for all client protocols. """ # This variable holds the request structure. request = {} # In case the server encounters an error, that error will be saved into # this instance attribute. error = None def connectionMade(self): self.send_request(**self.request) def dataReceived(self, data): """Callback function for incoming data. This function should be called from ancestor functions as it respects failure responses and handles them accordingly. Additionally, it will load the received JSON string into a data structure and return that. So there is always a benefit of calling this method. :param data: str - The received line of data. """ if data.startswith('ERROR: '): self.error = data self.exit_code = 1 return self.terminate(data) try: return json.loads(data) except ValueError: print data self.terminate("ERROR: Can not decode JSON.") def connectionLost(self, reason=""): """Callback function for lost connections. For cpassdb clients, a lost connection means that there is no more data expected from the server, so the reactor should be stopped. :param reason: str - An optional reason. """ if reactor.running: reactor.stop() def terminate(self, error=None): """Helper function for terminating a connection. The provided error message is written to stderr. :param error: str - An optional error message. 
""" if error is not None: self.error = error self.exit_code = 1 if error: sys.stderr.write("{}\n".format(error)) self.transport.loseConnection() def gracefully_disconnect(self): """Helper function for gracefully terminating a connection. The gracefulness comes from sending a termination request to the server and having the server terminate the connection. """ self.transport.write("{}\n".format(self.sign_request({"type": "BYE"}))) def sign_request(self, request): """Help function for request signing. :param request: struct - Request structure. :return: str - GPG CLear-Text Armor """ request_string = json.dumps(request) status, armor = commands.getstatusoutput("echo '{}' | gpg --clearsign".format(request_string)) if status: return self.terminate("ERROR: Can not load private key.") return armor def send_request(self, *args, **kwargs): """This method must be overloaded by derived classes.""" raise NotImplementedError("This method must be overloaded by derived classes!") class MessyClientProtocol(ClientProtocol): """Abstract base-class for messy client protocols A messy client protocol is one that leaves behind dirty secrets after running. So this would be the case for operations such as adding a key to or removing it from a recipient group. All secrets that are encrypted for this particular recipient group does not match the current state any more; it is still decryptable by the old keys or not decryptable by the new ones. """ # This dict can be used by ancestors as a small state-machine. 
state_machine = { 'requested_dirty_secrets': False, } def build_request_get_dirty_secrets(self, dirty_secrets): print "{} secret(s) need to be cycled, requesting.".format(len(dirty_secrets)), return { 'type': 'GET', 'names': dirty_secrets, } def build_request_set_dirty_secrets(self, incoming_dirty_secrets): request = { 'type': 'SET', 'secrets': [], } for secret_name in incoming_dirty_secrets: secret = json.loads( commands.getoutput( "echo '{}' | gpg --decrypt 2>/dev/null".format( incoming_dirty_secrets[secret_name]['armor'] ) ) ) ttl = incoming_dirty_secrets[secret_name]['metadata']['ttl'] recipients = incoming_dirty_secrets[secret_name]['metadata']['recipients'] secret_object = { 'name': secret_name, 'secret': secret, 'metadata': { 'ttl': ttl, 'recipients': recipients, } } request['secrets'].append(secret_object) print '.', print return request def get_dirty_secrets(self, dirty_secrets): request = self.build_request_get_dirty_secrets(dirty_secrets) self.transport.write("{}\n".format(self.sign_request(request))) def set_dirty_secrets(self, dirty_secrets): request = self.build_request_set_dirty_secrets(dirty_secrets) self.transport.write("{}\n".format(self.sign_request(request))) def handle_dirty_secret_dialog(self, response): if self.state_machine['requested_dirty_secrets'] is False: self.get_dirty_secrets(response) self.state_machine['requested_dirty_secrets'] = True elif self.state_machine['requested_dirty_secrets'] is True: self.set_dirty_secrets(response) self.state_machine['requested_dirty_secrets'] = None else: print "Cycled {} secret(s).".format(len(response)) class SetSecret(ClientProtocol): """cpassdb client protocol class for setting a secret.""" def send_request(self, name, secret, ttl, recipients): """Send write request to server. :param name: str - Name of the secret (incl. categories). :param secret: struct - The secret data structure (usually a dict). :param ttl: int - Time to live (in days past today). 
:param recipients: list - List of all recipients. """ request = { "type": "SET", "secrets": [{ "name": name, "secret": secret, "metadata": { "ttl": ttl, "recipients": recipients, } }] } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. This protocol will return a list of written secrets upon successful execution or an error message upon failure. :param data: str - The incoming line of data. """ # Parse incoming data into response object. response = ClientProtocol.dataReceived(self, data) if response: print "Written:", ' '.join(response) self.gracefully_disconnect() class GetSecret(ClientProtocol): """cpassdb client protocol class for getting secrets.""" # Set this variable to true if you would like the data to be outputted as # a JSON string. The JSON output will always print the full secret object # including all metadata. as_json = False # Set this value to the integer value of line indentation you desire for # the JSON output. Defining this attribute only makes sense in combination # with the as_json class attribute. indent_json = None # Set this list to the fields you wish the output the be limited to. Using # this attribute only works on non-JSON output format. filter_fields = [] # Store a sorted list of all requested secrets, so they can be outputted in # the requested order. requested_secrets = [] def send_request(self, names): """Send read request to server. :param names: list - A list of secret-object names. """ request = { "type": "GET", "names": names, } self.transport.write("{}\n".format(self.sign_request(request))) def decrypt_secret_armor(self, gpg_armor): """Helper function for decrypting a GPG encrypted message armor. :param gpg_armor: str - The GPG armor. :return: struct - JSON-loaded secret data structure. 
""" return json.loads( commands.getoutput( "echo '{}' | gpg --decrypt 2>/dev/null".format(gpg_armor) ) ) def dataReceived(self, data): """Callback function for incoming data. This protocol will return a list of secret object data structures containing all metadata in the following format: [ { 'name': <secret-object-name>, 'secret': <secret-object-data-struct>, 'metadata': { 'ttl': <ttl>, 'recipients': [<recipient>, ...], } }, ... ] :param data: str - The line of incoming data. """ # Parse incoming data. response = ClientProtocol.dataReceived(self, data) if response: if self.as_json: print json.dumps([{ 'name': secret_name, 'secret': self.decrypt_secret_armor(response[secret_name]['armor']), 'metadata': response[secret_name]['metadata'], } for secret_name in response], indent=self.indent_json) else: for secret_name in response: secret = self.decrypt_secret_armor(response[secret_name]['armor']) if self.filter_fields: for field in self.filter_fields: if field in secret: print secret[field] else: print '###', secret_name, '###' for field in secret: print field + ':', secret[field] print self.gracefully_disconnect() class DelSecret(ClientProtocol): """cpassdb client protocol class for deleting a secret.""" def send_request(self, name): """Send delete request to server. :param name: str - Name of secret to delete (incl. its category) """ request = { "type": "DEL", "name": name, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. This protocol will reply with a success string upon successful execution. :param data: str - The actual line of incoming data. """ response = ClientProtocol.dataReceived(self, data) if response: print response self.gracefully_disconnect() class ListSecrets(ClientProtocol): """cpassdb client protocol class for listing secrets in a given category.""" def send_request(self, category=None): """Send list request to server. 
:param category: str - Name of category (default: None) """ request = { "type": "LIST", "path": category, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. This protocol will reply with a list of secret-object names upon successful execution. :param data: str - Actual line of incoming data. """ response = ClientProtocol.dataReceived(self, data) if response: if isinstance(response, dict): for d in response['dirs']: print d + os.path.sep for secret in response['secrets']: print secret elif isinstance(response, list): for secret in response: print secret else: raise NotImplementedError self.gracefully_disconnect() class InstallRootKey(ClientProtocol): """cpassdb client protocol class for installing the root-key. There is a special recipient group that - apart of being a regular recipient group - mark all cpassdb admins. Admins are allowed to perform key-management operations such as adding keys, deleting keys and adding/removing keys from recipient groups. The cpassdb admin group is always included as recipient to every secret-object, meaning that cpassdb admins can always decrypt every secret. The admin group is not stated inside the recipient field in the metadata if not explicitly defined for that recipient group. Due to this convention it is not possible to use the cpassdb server before at least one key-id has been imported and added to the admin group. That's what this protocol is for. It is really only used once per server, usually. """ def send_request(self, pubkey_armor): """Send root-key installation request to server. :param pubkey_armor: str - The GPG armor of the public key. """ request = { 'pubkey_armor': pubkey_armor, } request_string = json.dumps(request) self.transport.write("{}\n".format(request_string)) def dataReceived(self, data): """Callback function for incoming response data. 
This protocol usually replies with a success string upon successful execution. :param data: str - Actual line of incoming data. :return: """ response = ClientProtocol.dataReceived(self, data) print response self.gracefully_disconnect() class AddKey(MessyClientProtocol): """cpassdb client protocol class for adding a key.""" def send_request(self, pubkey_armor, groups): """Send add-key request to server. :param pubkey_armor: str - GPG armor of public key. :param groups: list - List of group to add this key-id to. """ request = { 'type': 'ADDKEY', 'pubkey_armor': base64.b64encode(pubkey_armor), 'groups': groups, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. :param data: str - Actual line of incoming data. """ response = ClientProtocol.dataReceived(self, data) if response: self.handle_dirty_secret_dialog(response) self.gracefully_disconnect() class DelKey(MessyClientProtocol): """cpassdb client protocol class for deleting a key.""" def send_request(self, pubkey_id): """Send delete-key request to server. :param pubkey_id: str - Key-id of key to delete. """ request = { "type": "DELKEY", "pubkey_id": pubkey_id, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. :param data: str - Actual line of incoming data. :return: """ response = ClientProtocol.dataReceived(self, data) if response: self.handle_dirty_secret_dialog(response) self.gracefully_disconnect() class ListKeys(ClientProtocol): """cpassdb client protocol class for listing all keys in the keyring.""" def send_request(self, keyid_length=8): """Send list-keys request to server. :param keyid_length: int - Length of the key-ids (common are 8 or 16). 
""" request = { "type": "KEYS", "keyid_length": keyid_length, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. :param data: str - Actual line of incoming data. """ response = ClientProtocol.dataReceived(self, data) if response: for key in response: print key[0], ' '.join(key[1]) self.gracefully_disconnect() class ListGroups(ClientProtocol): """cpassdb client protocol class for listing all recipient groups.""" def send_request(self, pubkey_id=None): """Send list-groups request to server. If the request is supplied with key-id, only the groups of that key-id will be returned. :param pubkey_id: str - Optional key-id. """ request = { "type": "GROUPS", "pubkey_id": pubkey_id, } self.transport.write('{}\n'.format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming reponse data. :param data: str - Actual line of incoming data. """ response = ClientProtocol.dataReceived(self, data) if response: for group in response: print group + ':', ' '.join(response[group]) self.gracefully_disconnect() class AddGroups(MessyClientProtocol): """cpassdb client protocol class for add a key-id to a list of groups. When the members of a recipient group change, all secrets of that recipient group must be re-encrypted. If the secrets where not re-encrypted they would not be readable by the keys they should be in the current state of the recipient group. This adds extra ping-pong complexity to this protocol that the other client protocols don't have. """ def send_request(self, pubkey_id, groups): """Send add-group request to server. :param pubkey_id: str - Key-id of concern. :param groups: - List of groups to add this key-id to. """ request = { 'type': 'ADDGRP', 'pubkey_id': pubkey_id, 'groups': groups, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. 
:param data: str - Actual line of incoming data. :return: """ response = ClientProtocol.dataReceived(self, data) if response: self.handle_dirty_secret_dialog(response) self.gracefully_disconnect() class DelGroups(AddGroups): """cpassdb client protocol class for deleting a key-id from a group.""" def send_request(self, pubkey_id, groups): """Send delete-group request to the server. :param pubkey_id: str - Key-id of concern. :param groups: - List of groups to free from the given key-id. """ request = { 'type': 'DELGRP', 'pubkey_id': pubkey_id, 'groups': groups, } self.transport.write("{}\n".format(self.sign_request(request))) def dataReceived(self, data): """Callback function for incoming response data. :param data: str - str - Actual line of incoming data. """ AddGroups.dataReceived(self, data)
gpl-2.0
-7,313,434,773,447,821,000
31.863787
102
0.591336
false
4.556426
false
false
false
nonapod/gzinflatekiller
gzinflatekiller.py
1
3114
#!/bin/env python #:############################################ #: GZINFLATEKILLER #: by Les Cordell #: #: Hunt through files containing a base64 #: GZInflate Command #: #: Written on 07/08/2013 #: last modified @ 07/08/2013 #:############################################ import sys, os, re #: Extensions constant, these are the files that our program will check EXTS = ['php'] #: Our Patterns constant contains all of our regular expressions that we want to check against and skip PATTERNS = [re.compile("<\?php eval\(gzinflate\(base64_decode\(\'.*\'\)\)\);\?>"), re.compile('^\r\n')] def gzInflateKill(): """ #: The main function that is run, it checks through the argv arguements first, #: it requires a directory enclosed in quotes. """ dirname = False #: Check provided directory name if (len(sys.argv) < 2): print "You must provide a directory name enclosed in quotes to run this script.\n" quit() elif (len(sys.argv) > 2): print "Too many arguements provided, you must provide a directory for this script to run" quit() elif (len(sys.argv) == 2): #: Store the directory name dirname = sys.argv[1] else: #: If there is an error return false print "There was an error running this script, please check that you have specified a directory enclosed in quotes." 
quit() #: Open the directory parseDir(dirname) quit() def parseDir(dirname): """ #: This is our directory parser, here we parse through every directory until we hit the last #: feeding the files off to the cleanFile function """ if os.path.exists(dirname): #: If our directory exists then we'll open it and return some files #: Walk through the directory for root, dirs, files in os.walk(dirname): if files: #: If we get any files for file in files: #: For each file in the list if file.split('.')[-1] in EXTS: #: Get the extension thisFile = os.path.join(root, file) if os.path.isfile(thisFile): print "cleaning: " + thisFile cleanFile(thisFile) if dirs: #: If we get any directories for dir in dirs: #: For each directory in the list parseDir(dir); #: Recursively run the function def cleanFile(filename): """ #: Here we will strip the injection from the php file """ newFile = [] #: First open the file for reading and get our new file with open(filename, 'r') as aFile: for line in aFile.readlines(): #: For each line check if it matches the injection or the new line if patternMatch(line): pass else: #: Append line to new file if no match newFile.append(line) aFile.close() #: close the file #: Now we open the file for reading if newFile: newFile = ''.join(newFile) # : join our new file with open(filename, 'w+b') as aFile: aFile.write(newFile) aFile.close() def patternMatch(line): """ #: We pass lines into this function, check them against our PATTERNS constant #: if we match any of them, we return a true, otherwise we return false """ for pattern in PATTERNS: if pattern.match(line): return True return False # BEGIN # if __name__ == '__main__': gzInflateKill();
mit
-5,594,238,935,680,430,000
28.942308
118
0.659923
false
3.395856
false
false
false
L1NT/django-training-log
log/models.py
1
5458
from django.db import models # Create your models here. class Sport(models.Model): """ don't use models.choices because we want the list to be transactional data example list: [ 'bike', 'run', 'swim', 'measurements', 'yoga', 'weights', # for multi-sport `Event`s: 'multisport', #EventType.sport 'transition', #Entry.sport ] """ sport = models.CharField(max_length=20) class Meta: ordering = ['sport'] def __unicode__(self): return self.sport def __str__(self): return self.sport class Measurements(models.Model): id = models.AutoField(primary_key=True) #added by default weight = models.FloatField(blank=True, null=True) class Equipment(models.Model): """ this is for things such as bikes, shoes, wheelsets; i.e. things with a determinable depreciation cost or maintenance periods """ name = models.CharField(max_length=50) cost = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2) acquired_date = models.DateField() disposal_date = models.DateField(blank=True, null=True) disposal_method = models.CharField(blank=True, max_length=7, choices=[ ('sold', 'sold'), ('donated', 'donated'), ('retired', 'retired'),# i.e. 
'broken' ]) disposal_proceeds = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2) expected_lifespan = models.DurationField(blank=True, null=True) maintenance_interval = models.DurationField(blank=True, null=True) def history(self): return EquipmentMaintenance.objects.filter(equipment=self.id) def __unicode__(self): return self.name def __str__(self): return self.name class EquipmentMaintenance(models.Model): date = models.DateField() description = models.CharField(max_length=250) equipment = models.ForeignKey(Equipment) cost = models.DecimalField(blank=True, null=True, max_digits=8, decimal_places=2) vendor = models.CharField(max_length=50, default='DIY') class EventType(models.Model): """ examples: '5k', 'Olympic', 'Criterium' """ event_type = models.CharField(max_length=20) sport = models.ForeignKey(Sport) class Meta: ordering = ['sport', 'event_type'] def __unicode__(self): return str(self.sport) + ': ' + self.event_type def __str__(self): return str(self.sport) + ': ' + self.event_type class Event(models.Model): name = models.CharField(max_length=35) location = models.CharField(max_length=50) event_type = models.ForeignKey(EventType, blank=True, null=True) bib_number = models.IntegerField(blank=True, null=True) dnf = models.BooleanField() finish_overall = models.IntegerField(blank=True, null=True) finishers_overall = models.IntegerField(blank=True, null=True) #maybe just use "handicapped" as the age group description?? finish_handicapped = models.IntegerField(blank=True, null=True) finish_gender = models.IntegerField(blank=True, null=True) finishers_gender = models.IntegerField(blank=True, null=True) finish_age_group = models.IntegerField(blank=True, null=True) finishers_age_group = models.IntegerField(blank=True, null=True) # category/age_group seem to be mutually-exclusive? 
category = models.CharField(max_length=10, blank=True, null=True) age_group = models.CharField(max_length=10, blank=True) results_url = models.URLField(blank=True, null=True) official_time = models.TimeField(blank=True, null=True) #used for total event time (brevets & triathlons) ## TODO: maybe this should be handled by multiple `Entry`s? # swim_distance = models.FloatField(blank=True) # bike_distance = models.FloatField(blank=True) # run_distance = models.FloatField(blank=True) # swim_time = models.TimeField(blank=True) # bike_time = models.TimeField(blank=True) # run_time = models.TimeField(blank=True) # t1_time = models.TimeField(blank=True) # t2_time = models.TimeField(blank=True) def get_absolute_url(self): return "/events?event=%d" % self.id def __unicode__(self): return self.name + ' ['+self.date.strftime('%b %d, %Y')+']' def __str__(self): return self.name + ' ['+self.date.strftime('%b %d, %Y')+']' class Entry(models.Model): #entry_id: date = models.DateField() sport = models.ForeignKey(Sport) event = models.ForeignKey(Event, blank=True, null=True) route = models.CharField(max_length=50, blank=True) # routes Model? notes = models.CharField(max_length=256, blank=True) equipment = models.ForeignKey(Equipment, blank=True, null=True) distance = models.FloatField(blank=True, null=True) time = models.TimeField(blank=True, null=True) avg_speed = models.FloatField(blank=True, null=True) max_speed = models.FloatField(blank=True, null=True) elevation_gain = models.IntegerField(blank=True, null=True) calories = models.IntegerField(blank=True, null=True) #pace: models.TimeField(blank=True, default=calc_pace(self.time/self.distance)) #could be calculated... class Meta: ordering = ['date', 'id'] def __unicode__(self): return self.date.strftime('%b %d, %Y') + ' ['+str(self.sport)+']' def __str__(self): return self.date.strftime('%b %d, %Y') + ' ['+str(self.sport)+']'
gpl-2.0
1,526,296,373,929,901,000
35.878378
109
0.662147
false
3.47422
false
false
false
chaosk/trinitee
trinitee/forums/migrations/0001_initial.py
1
14851
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Category' db.create_table('forums_category', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Category'], null=True, blank=True)), ('ordering', self.gf('django.db.models.fields.IntegerField')(default=1)), )) db.send_create_signal('forums', ['Category']) # Adding model 'Topic' db.create_table('forums_topic', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_topics', to=orm['auth.User'])), ('modified_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='modified_topics', null=True, to=orm['auth.User'])), ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Category'])), ('is_closed', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_sticky', self.gf('django.db.models.fields.BooleanField')(default=False)), ('first_post', self.gf('django.db.models.fields.related.OneToOneField')(related_name='topic_root', unique=True, to=orm['forums.Post'])), )) db.send_create_signal('forums', ['Topic']) # Adding model 'Post' db.create_table('forums_post', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('topic', 
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Topic'])), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_posts', to=orm['auth.User'])), ('modified_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='modified_posts', null=True, to=orm['auth.User'])), ('show_edits', self.gf('django.db.models.fields.BooleanField')(default=True)), ('content', self.gf('django.db.models.fields.TextField')()), ('content_html', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal('forums', ['Post']) # Adding model 'PostKarma' db.create_table('forums_postkarma', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('post', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Post'])), ('karma', self.gf('django.db.models.fields.IntegerField')(default=0)), )) db.send_create_signal('forums', ['PostKarma']) # Adding unique constraint on 'PostKarma', fields ['user', 'post'] db.create_unique('forums_postkarma', ['user_id', 'post_id']) # Adding model 'Poll' db.create_table('forums_poll', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='polls_started', to=orm['auth.User'])), ('expires_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('question', self.gf('django.db.models.fields.CharField')(max_length=255)), ('topic', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Topic'])), ('max_votes', 
self.gf('django.db.models.fields.PositiveIntegerField')(default=1)), )) db.send_create_signal('forums', ['Poll']) # Adding model 'Choice' db.create_table('forums_choice', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('poll', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Poll'])), ('choice', self.gf('django.db.models.fields.CharField')(max_length=255)), )) db.send_create_signal('forums', ['Choice']) # Adding model 'Vote' db.create_table('forums_vote', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('poll', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Poll'])), ('choice', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Choice'])), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), )) db.send_create_signal('forums', ['Vote']) # Adding unique constraint on 'Vote', fields ['poll', 'choice', 'user'] db.create_unique('forums_vote', ['poll_id', 'choice_id', 'user_id']) def backwards(self, orm): # Removing unique constraint on 'Vote', fields ['poll', 'choice', 'user'] db.delete_unique('forums_vote', ['poll_id', 'choice_id', 'user_id']) # Removing unique constraint on 'PostKarma', fields ['user', 'post'] db.delete_unique('forums_postkarma', ['user_id', 'post_id']) # Deleting model 'Category' db.delete_table('forums_category') # Deleting model 'Topic' db.delete_table('forums_topic') # Deleting model 'Post' db.delete_table('forums_post') # Deleting model 'PostKarma' db.delete_table('forums_postkarma') # Deleting model 'Poll' db.delete_table('forums_poll') # Deleting model 'Choice' db.delete_table('forums_choice') # Deleting model 'Vote' db.delete_table('forums_vote') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': 
('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 
'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'forums.category': { 'Meta': {'object_name': 'Category'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ordering': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Category']", 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'forums.choice': { 'Meta': {'object_name': 'Choice'}, 'choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Poll']"}) }, 'forums.poll': { 'Meta': {'object_name': 'Poll'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polls_started'", 'to': "orm['auth.User']"}), 'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'question': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Topic']"}) }, 'forums.post': { 
'Meta': {'object_name': 'Post'}, 'content': ('django.db.models.fields.TextField', [], {}), 'content_html': ('django.db.models.fields.TextField', [], {}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_posts'", 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_posts'", 'null': 'True', 'to': "orm['auth.User']"}), 'show_edits': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Topic']"}) }, 'forums.postkarma': { 'Meta': {'unique_together': "(('user', 'post'),)", 'object_name': 'PostKarma'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Post']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'forums.topic': { 'Meta': {'object_name': 'Topic'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Category']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_topics'", 'to': "orm['auth.User']"}), 'first_post': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'topic_root'", 'unique': 'True', 'to': "orm['forums.Post']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_closed': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), 'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_topics'", 'null': 'True', 'to': "orm['auth.User']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'forums.vote': { 'Meta': {'unique_together': "(('poll', 'choice', 'user'),)", 'object_name': 'Vote'}, 'choice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Choice']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Poll']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) } } complete_apps = ['forums']
bsd-3-clause
-7,617,905,021,076,581,000
64.422907
182
0.570265
false
3.634606
false
false
false
pgroudas/pants
src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile_strategy.py
1
8647
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os from abc import ABCMeta, abstractmethod from collections import OrderedDict, defaultdict from twitter.common.collections import OrderedSet from pants.base.build_environment import get_buildroot, get_scm from pants.base.exceptions import TaskError from pants.util.dirutil import safe_delete class JvmCompileStrategy(object): """An abstract base strategy for JVM compilation.""" __metaclass__ = ABCMeta class CompileContext(object): """A context for the compilation of a target. This can be used to differentiate between a partially completed compile in a temporary location and a finalized compile in its permanent location. """ def __init__(self, target, analysis_file, classes_dir, sources): self.target = target self.analysis_file = analysis_file self.classes_dir = classes_dir self.sources = sources @property def _id(self): return (self.target, self.analysis_file, self.classes_dir) def __eq__(self, other): return self._id == other._id def __hash__(self): return hash(self._id) # Common code. # ------------ @staticmethod def _analysis_for_target(analysis_dir, target): return os.path.join(analysis_dir, target.id + '.analysis') @staticmethod def _portable_analysis_for_target(analysis_dir, target): return JvmCompileStrategy._analysis_for_target(analysis_dir, target) + '.portable' @classmethod @abstractmethod def register_options(cls, register, language, supports_concurrent_execution): """Registration for strategy-specific options. The abstract base class does not register any options itself: those are left to JvmCompile. 
""" pass def __init__(self, context, options, workdir, analysis_tools, language, sources_predicate): self._language = language self.context = context self._analysis_tools = analysis_tools # Mapping of relevant (as selected by the predicate) sources by target. self._sources_by_target = None self._sources_predicate = sources_predicate # The ivy confs for which we're building. self._confs = options.confs self._clear_invalid_analysis = options.clear_invalid_analysis @abstractmethod def name(self): """A readable, unique name for this strategy.""" pass @abstractmethod def invalidation_hints(self, relevant_targets): """A tuple of partition_size_hint and locally_changed targets for the given inputs.""" pass @abstractmethod def compile_context(self, target): """Returns the default/stable compile context for the given target. Temporary compile contexts are private to the strategy. """ pass @abstractmethod def compute_classes_by_source(self, compile_contexts): """Compute a map of (context->(src->classes)) for the given compile_contexts. It's possible (although unfortunate) for multiple targets to own the same sources, hence the top level division. Srcs are relative to buildroot. Classes are absolute paths. """ pass @abstractmethod def compile_chunk(self, invalidation_check, all_targets, relevant_targets, invalid_targets, extra_compile_time_classpath_elements, compile_vts, register_vts, update_artifact_cache_vts_work): """Executes compilations for that invalid targets contained in a single language chunk.""" pass @abstractmethod def post_process_cached_vts(self, cached_vts): """Post processes VTS that have been fetched from the cache.""" pass @abstractmethod def compute_resource_mapping(self, compile_contexts): """Computes a merged ResourceMapping for the given compile contexts. Since classes should live in exactly one context, a merged mapping is unambiguous. 
""" pass def pre_compile(self): """Executed once before any compiles.""" pass def validate_analysis(self, path): """Throws a TaskError for invalid analysis files.""" try: self._analysis_parser.validate_analysis(path) except Exception as e: if self._clear_invalid_analysis: self.context.log.warn("Invalid analysis detected at path {} ... pants will remove these " "automatically, but\nyou may experience spurious warnings until " "clean-all is executed.\n{}".format(path, e)) safe_delete(path) else: raise TaskError("An internal build directory contains invalid/mismatched analysis: please " "run `clean-all` if your tools versions changed recently:\n{}".format(e)) def prepare_compile(self, cache_manager, all_targets, relevant_targets): """Prepares to compile the given set of targets. Has the side effects of pruning old analysis, and computing deleted sources. """ # Target -> sources (relative to buildroot). # TODO(benjy): Should sources_by_target be available in all Tasks? self._sources_by_target = self._compute_sources_by_target(relevant_targets) def class_name_for_class_file(self, compile_context, class_file_name): assert class_file_name.endswith(".class") assert class_file_name.startswith(compile_context.classes_dir) class_file_name = class_file_name[len(compile_context.classes_dir) + 1:-len(".class")] return class_file_name.replace("/", ".") def _compute_sources_by_target(self, targets): """Computes and returns a map target->sources (relative to buildroot).""" def resolve_target_sources(target_sources): resolved_sources = [] for target in target_sources: if target.has_sources(): resolved_sources.extend(target.sources_relative_to_buildroot()) return resolved_sources def calculate_sources(target): sources = [s for s in target.sources_relative_to_buildroot() if self._sources_predicate(s)] # TODO: Make this less hacky. Ideally target.java_sources will point to sources, not targets. 
if hasattr(target, 'java_sources') and target.java_sources: sources.extend(resolve_target_sources(target.java_sources)) return sources return {t: calculate_sources(t) for t in targets} def _sources_for_targets(self, targets): """Returns a cached map of target->sources for the specified targets.""" if self._sources_by_target is None: raise TaskError('self._sources_by_target not computed yet.') return {t: self._sources_by_target.get(t, []) for t in targets} def _sources_for_target(self, target): """Returns the cached sources for the given target.""" if self._sources_by_target is None: raise TaskError('self._sources_by_target not computed yet.') return self._sources_by_target.get(target, []) def _find_locally_changed_targets(self, sources_by_target): """Finds the targets whose sources have been modified locally. Returns a list of targets, or None if no SCM is available. """ # Compute the src->targets mapping. There should only be one target per source, # but that's not yet a hard requirement, so the value is a list of targets. # TODO(benjy): Might this inverse mapping be needed elsewhere too? targets_by_source = defaultdict(list) for tgt, srcs in sources_by_target.items(): for src in srcs: targets_by_source[src].append(tgt) ret = OrderedSet() scm = get_scm() if not scm: return None changed_files = scm.changed_files(include_untracked=True, relative_to=get_buildroot()) for f in changed_files: ret.update(targets_by_source.get(f, [])) return list(ret) @property def _analysis_parser(self): return self._analysis_tools.parser # Compute any extra compile-time-only classpath elements. # TODO(benjy): Model compile-time vs. runtime classpaths more explicitly. # TODO(benjy): Add a pre-execute goal for injecting deps into targets, so e.g., # we can inject a dep on the scala runtime library and still have it ivy-resolve. 
def _compute_extra_classpath(self, extra_compile_time_classpath_elements): def extra_compile_classpath_iter(): for conf in self._confs: for jar in extra_compile_time_classpath_elements: yield (conf, jar) return list(extra_compile_classpath_iter())
apache-2.0
-8,624,641,274,634,726,000
36.925439
99
0.686481
false
4.141284
false
false
false
ruhan/django-silk-mongoengine
silk/profiling/profiler.py
1
6695
import inspect import logging import time import traceback from django.conf import settings from django.utils import timezone import six from silk.collector import DataCollector from silk.config import SilkyConfig from silk.models import _time_taken Logger = logging.getLogger('silk') # noinspection PyPep8Naming class silk_meta_profiler(object): """Used in the profiling of Silk itself.""" def __init__(self): super(silk_meta_profiler, self).__init__() self.start_time = None @property def _should_meta_profile(self): return SilkyConfig().SILKY_META def __enter__(self): if self._should_meta_profile: self.start_time = timezone.now() def __exit__(self, exc_type, exc_val, exc_tb): if self._should_meta_profile: end_time = timezone.now() exception_raised = exc_type is not None if exception_raised: Logger.error('Exception when performing meta profiling, dumping trace below') traceback.print_exception(exc_type, exc_val, exc_tb) request = getattr(DataCollector().local, 'request', None) if request: curr = request.meta_time or 0 request.meta_time = curr + _time_taken(self.start_time, end_time) def __call__(self, target): if self._should_meta_profile: def wrapped_target(*args, **kwargs): request = DataCollector().request if request: start_time = timezone.now() result = target(*args, **kwargs) end_time = timezone.now() curr = request.meta_time or 0 request.meta_time = curr + _time_taken(start_time, end_time) else: result = target(*args, **kwargs) return result return wrapped_target return target # noinspection PyPep8Naming class silk_profile(object): def __init__(self, name=None, _dynamic=False): super(silk_profile, self).__init__() self.name = name self.profile = None self._queries_before = None self._queries_after = None self._dynamic = _dynamic def _query_identifiers_from_collector(self): return [x for x in DataCollector().queries] def _start_queries(self): """record queries that have been executed before profiling began""" self._queries_before = 
self._query_identifiers_from_collector() def _end_queries(self): """record queries that have been executed after profiling has finished""" self._queries_after = self._query_identifiers_from_collector() def __enter__(self): if self._silk_installed() and self._should_profile(): with silk_meta_profiler(): self._start_queries() if not self.name: raise ValueError('silk_profile used as a context manager must have a name') frame = inspect.currentframe() frames = inspect.getouterframes(frame) outer_frame = frames[1] path = outer_frame[1] line_num = outer_frame[2] request = DataCollector().request self.profile = { 'name': self.name, 'file_path': path, 'line_num': line_num, 'dynamic': self._dynamic, 'request': request, 'start_time': timezone.now(), } else: Logger.warn('Cannot execute silk_profile as silk is not installed correctly.') def _finalise_queries(self): collector = DataCollector() self._end_queries() assert self.profile, 'no profile was created' diff = set(self._queries_after).difference(set(self._queries_before)) self.profile['queries'] = diff collector.register_profile(self.profile) # noinspection PyUnusedLocal def __exit__(self, exc_type, exc_val, exc_tb): if self._silk_installed() and self._should_profile(): with silk_meta_profiler(): start_time = None exception_raised = exc_type is not None self.profile['exception_raised'] = exception_raised self.profile['end_time'] = timezone.now() self._finalise_queries() def _silk_installed(self): app_installed = 'silk' in settings.INSTALLED_APPS middleware_installed = 'silk.middleware.SilkyMiddleware' in settings.MIDDLEWARE_CLASSES return app_installed and middleware_installed def _should_profile(self): return DataCollector().request is not None def __call__(self, target): if self._silk_installed(): def wrapped_target(*args, **kwargs): with silk_meta_profiler(): try: func_code = six.get_function_code(target) except AttributeError: raise NotImplementedError('Profile not implemented to decorate type %s' % 
target.__class__.__name__) line_num = func_code.co_firstlineno file_path = func_code.co_filename func_name = target.__name__ if not self.name: self.name = func_name self.profile = { 'func_name': func_name, 'name': self.name, 'file_path': file_path, 'line_num': line_num, 'dynamic': self._dynamic, 'start_time': timezone.now(), 'request': DataCollector().request } self._start_queries() try: result = target(*args, **kwargs) except Exception: self.profile['exception_raised'] = True raise finally: with silk_meta_profiler(): self.profile['end_time'] = timezone.now() self._finalise_queries() return result return wrapped_target else: Logger.warn('Cannot execute silk_profile as silk is not installed correctly.') return target def distinct_queries(self): queries = [x for x in self._queries_after if not x in self._queries_before] return queries @silk_profile() def blah(): time.sleep(1) if __name__ == '__main__': blah()
mit
-1,680,985,588,628,297,200
35.391304
124
0.542644
false
4.579343
false
false
false
sandz-in/twilio_trello
twilio_sms_handler/views.py
1
1174
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.http import HttpResponse from django.views.decorators.http import require_POST from twilio.twiml.messaging_response import MessagingResponse # Create your views here. from twilio_sms_handler.TrelloQuery import TrelloQuery from twilio_trello.twilio_util import validate_twilio_request from django.views.decorators.csrf import csrf_exempt @require_POST @validate_twilio_request @csrf_exempt def sms_view(request): """Twilio Messaging URL - receives incoming messages from Twilio""" # Create a new TwiML response resp = MessagingResponse() # <Message> a text back to the person who texted us text = request.POST['Body'] split_text = text.lower().split(" ") if len(split_text) < 2: body = '''1)get boards 2)get lists <board-no> 3)get cards <board-no:list-no> ''' else: trello_query = TrelloQuery() action = '_'.join(split_text[:2]) try: body = getattr(trello_query, action)(split_text[2:]) except: body = "Incorrect input!!" resp.message(body) # Return the TwiML return HttpResponse(resp)
mit
3,368,238,826,495,782,000
29.102564
71
0.683986
false
3.463127
false
false
false
fake-name/ReadableWebProxy
WebMirror/management/rss_parser_funcs/feed_parse_extractKeztranslationsWordpressCom.py
1
1210
def extractKeztranslationsWordpressCom(item): ''' Parser for 'keztranslations.wordpress.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('FOD', 'Quickly Wear the Face of the Devil', 'translated'), ('ABO', 'ABO Cadets', 'translated'), ('dfc', 'The First Dragon Convention', 'translated'), ('ogu', 'My Family’s Omega Has Just Grown Up', 'translated'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) titlemap = [ ('ABO Vol', 'ABO Cadets', 'translated'), ('FOD Chapter', 'Quickly Wear the Face of the Devil', 'translated'), ('FOD Chap', 'Quickly Wear the Face of the Devil', 'translated'), ] for titlecomponent, name, tl_type in titlemap: if titlecomponent.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
bsd-3-clause
-7,868,676,419,566,919,000
36.78125
104
0.620861
false
3.212766
false
false
false
dvida/UWO-PA-Python-Course
Lecture 3/L3_lecture.py
1
5075
from __future__ import print_function ### READING FILES file_name = 'data.txt' # Reading in and parsing file contents data_list = [] with open(file_name) as f: # SKip the header (the first line) next(f) for line in f: # Remove newline char line = line.replace('\n', '') # Split the line into a list by a comma line = line.split(',') # Parse the line num = line[0] name = line[1].strip() epoch = int(line[2]) elements = list(map(float, line[3:9])) ref = line[9] # Add the line to the data list data_list.append([num, name, epoch, elements, ref]) print(num, name, epoch, elements, ref) ################################### print(data_list) # Wile E. Coyote rewrites history... for line in data_list: line[1] = 'Coyote' print(data_list) # But before we write the data back to disk... ################################### ### STRING FORMATTING ### Note for the lecture: ### C/P and explain how formatting works # Converting floats to strings x = 3.14159 print('{:4.2f}'.format(x)) # Signed formatting print('{:+5.2f}'.format(x)) # Zero padding print('{:06.2f}'.format(x)) # More decimals print('{:7.5f}'.format(x)) # More decimal places than the number precision y = 2.71 print('{:7.5f}'.format(y)) # Less decimal precision, but same size -> left padding print('{:7.2f}'.format(y)) # Integers (same singed and zero padding rules) z = 42 print('{:7d}'.format(z)) # Strings print('{:10}'.format('wile e')) # Align to the right print('{:>10}'.format('wile e')) # Named agruments print("{a} {b} {c}".format(a=5, b=8, c=10)) ################################### ### WRITING FILES # Writing the data back to the list new_file_name = 'true_data.txt' # Open a file for writing (if a file with the same name exists, it will erase its content!) 
with open(new_file_name, 'w') as f: # Write the header f.write('Num,Name,Epoch,q,e,i,w,Node,Tp,Ref\n') for line in data_list: # Composing a string str_line = ['{:>3}'.format(line[0]), line[1], '{:5d}'.format(line[2])] # Convert all elemets using the same format for element in line[3]: str_line.append('{:.3f}'.format(element)) # Add the reference str_line.append(line[-1]) print(str_line) # Convert the list to a comma delimited string final_line = ','.join(str_line) # Write the line f.write(final_line+'\n') ################################### # Appending to a file with open(new_file_name, 'a') as f: f.write('Wile E. was here') ################################### ### PYTHON MODULES # Python standard library: https://docs.python.org/3/library/ import math # Sqrt print(math.sqrt(2)) # Sine print(math.sin(math.pi)) # Log10 print(math.log10(100)) # Random module import random # Random integer in the 1 to 100 range print(random.randint(1, 100)) # Random float in the 0 to 1 range print(random.random()) # Shuffle a list a = [1, 2, 3, 4, 5] random.shuffle(a) print(a) # Sample 10 elements from a list b = range(1, 100) print(random.sample(b, 10)) # Sampling a gaussian distribution for i in range(10): print(random.gauss(0, 2)) ################################### ### Ways of importing modules # Module alias import math as m print(m.sqrt(2)) # Importing individual functions - PREFERED! from math import sqrt print(sqrt(2)) # Importing all functions from a module - NOT RECOMMENDED! 
from math import * print(sqrt(2)) print(pi) ################################### # FILE HANDLING - os library import os # Listing the contents of the current directory print(os.listdir('.')) # Printing the current directory print(os.getcwd()) # Changing the current directory one up os.chdir('..') print(os.getcwd()) # Directory separator # DO NOT USE / or \ print(os.sep) ### Making a new directory # Construct a new path to the directory new_dir_path = os.path.join(os.getcwd(), 'test') print(new_dir_path) # Make new dir if the dir does not exist if not os.path.exists(new_dir_path): os.mkdir(new_dir_path) else: print('The directory already exists!') ### # Make an example file in the new directory file_name = 'top_secret.txt' file_path = os.path.join(new_dir_path, file_name) with open(file_path, 'w') as f: pass # Delete the file if os.path.isfile(file_path): os.remove(file_path) else: print('The file does not exist!') ################################### # FILE HANDLING - shutil library import shutil # Make an example file with open(file_path, 'w') as f: pass # Copying files copy_path = 'unclassified.txt' shutil.copy2(file_path, copy_path) # Moving/renaming files new_name = 'public_release.txt' shutil.move(copy_path, new_name)
mit
-3,814,201,120,210,156,500
17.909804
91
0.575369
false
3.199874
false
false
false
terna/SLAPP3
6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/$$slapp$$/txtxFunctions.py
1
1888
import os def executeFormula(fIn, fOu, nrow, n, s): # v=0 #init. not required; it can interfere with the try/except structure pos = s.find("v") if pos == -1: print("missing 'v' in formula, row", nrow, "\nexecution stopped in error") fIn.close() fOu.close() os.sys.exit(1) pos = s.find("=") if pos == -1: print("missing '=' in formula, row", nrow, "\nexecution stopped in error") fIn.close() fOu.close() os.sys.exit(1) try: while s[0] == ' ': if s[0] == ' ': s = s[1:] pos = s.find('\n') # eliminating spaces after \n (formerly #) if any if pos != -1: while s[pos + 1] == ' ': s = s[:pos + 1] + s[pos + 2:] # print "[",n, s,"]", d = dict([('n', n), ('v', 0)]) exec(s, d) v = d['v'] return str(v) except BaseException: print("error in formula, row", nrow, "\nexecution stopped in error") fIn.close() fOu.close() os.sys.exit(1) def fill(s): s = list(s) if s == "": return s change = False s = list(s) for i in range(len(s)): if s[i] == '&': if not change: change = True else: change = False if s[i] == ' ' and change: s[i] = '&' return "".join(s) def splitUnfill(s): if s == "": return s # print s s = s.split() # print s for i in range(len(s)): s_tmp = list(s[i]) # print s_tmp, len(s_tmp) for j in range(len(s_tmp)): if s_tmp[j] == "&": s_tmp[j] = ' ' if s_tmp[j] == "#": s_tmp[j] = '\n' # inserting \n sign # print s_tmp s[i] = "".join(s_tmp) return s
cc0-1.0
2,214,278,890,246,831,600
21.211765
77
0.417373
false
3.238422
false
false
false
attdona/NAIS
pynais/msg.py
1
2142
import struct import pynais as ns class Profile: def __init__(self, uid=None, pwd=None): self.uid = uid self.pwd = pwd def __str__(self): return "Profile uid: [%s], pwd: [%s]" % (self.uid, self.pwd) def set_protobuf(self, obj): obj.uid = self.uid obj.pwd = self.pwd def build_from_protobuf(self, obj): self.uid = obj.uid self.pwd = obj.pwd return self class Config: """ board configuration items and connection parameters """ def __init__(self, network="local", board="", host='localhost', port=1883, alive_period=None, secure=False): self.network = network self.board = board self.host = host self.port = port self.alive_period = alive_period self.secure = secure def __str__(self): return "Config network: [%s], board: [%s], remote: [%s:%d]" % ( self.network, self.board, self.host, self.port) def set_protobuf(self, obj): obj.network = self.network obj.board = self.board obj.host = self.host obj.port = self.port if self.alive_period: obj.alive_period = self.alive_period obj.secure = self.secure def build_from_protobuf(self, obj): self.network = obj.network self.host = obj.host self.board = obj.board self.port = obj.port self.alive_period = obj.alive_period self.secure = obj.secure return self class Ack: """ a message acknowledgement Args: id (int): message request identifier (packet.id field value) """ def __init__(self, id=None, sts=None): self.id = id self.sts = sts def __str__(self): return "Ack ([%s] - sts:[%s])" % (ns.msg_type(self.id), self.sts) def set_protobuf(self, obj): obj.id = self.id if not self.sts==None: obj.status = self.sts def build_from_protobuf(self, obj): self.id = obj.id if (obj.HasField('status')): self.sts = obj.status return self
gpl-3.0
-5,864,915,119,413,480,000
25.775
78
0.548086
false
3.725217
false
false
false
liuslevis/handwrite_dataset_generator
3_gen_digit_data_label.py
1
2918
import os import shutil DEBUG = False img_suffix = ['.jpeg','.jpg','.png','.tiff'] def gen_img_unique_file_name(count,total): assert(count<=total) name = '' for i in range(len(str(total)) - len(str(count))): name+='0' offset = str(count) name+=offset return name def copy_img_file(src_path,save_data_path,filename): if DEBUG: print src_path,filename print src_path,filename if not os.path.isdir(save_data_path): os.mkdir(save_data_path) shutil.copyfile(src_path, os.path.join(save_data_path, filename)) def count_img_under_dir(path): count = 0 for label_paths in os.listdir(path): label_path = os.path.join(path, label_paths) if os.path.isdir(label_path) and label_path[-1] >= '0' and label_path[-1] <= '9' : label = int(label_path[-1]) assert( label >= 0 and label <=9) for digit_img in os.listdir(label_path): count+=1 return count def gen_label_file(dict,save_label_path): label_list = [] for label in dict.keys(): times = dict.get(label) print 'digit:',label,' has ',times,' imgs' label_list+=[label for i in range(times)] content = '' for label in label_list: content += str(label) + '\n' with open(save_label_path,'w') as f: f.write(content); f.close() print 'gen_label_file:',save_label_path def main(): save_label_path = './4_dataset/testLabel.txt' save_data_path = './4_dataset/' rootDir ='./3_cropped' dict = {} # store num of each digit labels total = count_img_under_dir(rootDir) uid = 0 suffix = "" print 'total_img:',total for label_paths in os.listdir(rootDir): label_path = os.path.join(rootDir, label_paths) if os.path.isdir(label_path) and label_path[-1] >= '0' and label_path[-1] <= '9' : label = int(label_path[-1]) if DEBUG: print '--------------label:%d--------------'%label assert( label >= 0 and label <=9) for img_path in os.listdir(label_path): if DEBUG: print img_path if suffix not in img_suffix: (filepath,filename)=os.path.split(img_path) suffix = os.path.splitext(filename)[-1] if suffix in img_suffix: count = dict.get(label) if None == count: dict.update({label:1}) 
else: count += 1 dict.update({label:count}) uid+=1 save_name = gen_img_unique_file_name(uid,total) copy_img_file(os.path.join(label_path,img_path),save_data_path, save_name+suffix ) print 'database' gen_label_file(dict,save_label_path) if __name__ == '__main__': main()
mit
667,820,483,972,718,700
29.726316
102
0.536326
false
3.567237
false
false
false
nrz/ylikuutio
external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py
2
2810
"""Randomize the minitaur_gym_alternating_leg_env when reset() is called. The randomization include swing_offset, extension_offset of all legs that mimics bent legs, desired_pitch from user input, battery voltage and motor damping. """ import os, inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(os.path.dirname(currentdir)) parentdir = os.path.dirname(os.path.dirname(parentdir)) os.sys.path.insert(0, parentdir) import numpy as np import tf.compat.v1 as tf from pybullet_envs.minitaur.envs import env_randomizer_base # Absolute range. NUM_LEGS = 4 BATTERY_VOLTAGE_RANGE = (14.8, 16.8) MOTOR_VISCOUS_DAMPING_RANGE = (0, 0.01) class MinitaurAlternatingLegsEnvRandomizer(env_randomizer_base.EnvRandomizerBase): """A randomizer that changes the minitaur_gym_alternating_leg_env.""" def __init__(self, perturb_swing_bound=0.1, perturb_extension_bound=0.1, perturb_desired_pitch_bound=0.01): super(MinitaurAlternatingLegsEnvRandomizer, self).__init__() self.perturb_swing_bound = perturb_swing_bound self.perturb_extension_bound = perturb_extension_bound self.perturb_desired_pitch_bound = perturb_desired_pitch_bound def randomize_env(self, env): perturb_magnitude = np.random.uniform(low=-self.perturb_swing_bound, high=self.perturb_swing_bound, size=NUM_LEGS) env.set_swing_offset(perturb_magnitude) tf.logging.info("swing_offset: {}".format(perturb_magnitude)) perturb_magnitude = np.random.uniform(low=-self.perturb_extension_bound, high=self.perturb_extension_bound, size=NUM_LEGS) env.set_extension_offset(perturb_magnitude) tf.logging.info("extension_offset: {}".format(perturb_magnitude)) perturb_magnitude = np.random.uniform(low=-self.perturb_desired_pitch_bound, high=self.perturb_desired_pitch_bound) env.set_desired_pitch(perturb_magnitude) tf.logging.info("desired_pitch: {}".format(perturb_magnitude)) randomized_battery_voltage = np.random.uniform(BATTERY_VOLTAGE_RANGE[0], BATTERY_VOLTAGE_RANGE[1]) 
env.minitaur.SetBatteryVoltage(randomized_battery_voltage) tf.logging.info("battery_voltage: {}".format(randomized_battery_voltage)) randomized_motor_damping = np.random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0], MOTOR_VISCOUS_DAMPING_RANGE[1]) env.minitaur.SetMotorViscousDamping(randomized_motor_damping) tf.logging.info("motor_damping: {}".format(randomized_motor_damping))
agpl-3.0
-7,600,583,277,604,451,000
45.065574
86
0.666904
false
3.39372
false
false
false
dreaming-dog/kaldi-long-audio-alignment
scripts/classes/entry_manager.py
1
2096
# Copyright 2017 Speech Lab, EE Dept., IITM (Author: Srinivas Venkattaramanujam) from entry import Entry class EntryManager: __statuses__ = ['PENDING','DONE'] def __init__(self): self.entries=[] def add_entry(self,entry): # Problem: # add new entries to the existing list such that: # 1) the start and end time of an entry is not the same # 2) All the words in the range are covered # 3) If two consecutive entries have the same status, merge # trivial cases: # 1) if list is empty, simply add to list # edge cases: # 1) While merging, if there is a status change, have to check previous entry, therefore don't do it inplace! remove the last entry, make changes and insert the entry if(len(self.entries)==0): self.entries.append(entry) else: # assert (last word+1) of previous entry and the first word of current entry match try: assert (self.entries[-1].word_end+1)==entry.word_begin except AssertionError: print "Words are not continous in ",self.entries[-1]," and ", entry exit(1) # check if to be merged. if not, just insert. if(entry.begin_time!=entry.end_time and self.entries[-1].status!=entry.status and (entry.end_time-entry.begin_time)>=0.1): self.entries.append(entry) else: # merge case prev_entry=self.entries[-1] self.entries=self.entries[:-1] entry=self.__merge__(prev_entry, entry) return self.add_entry(entry) def __min_status__(self, status1, status2): # _list=[EntryManager.__statuses__.index(status1), EntryManager.__statuses__.index(status2)] # print 'status 1,2', status1, status2 _list=[EntryManager.__statuses__.index(status1), EntryManager.__statuses__.index(status2)] return EntryManager.__statuses__[min(_list)] def __merge__(self,prev_entry, entry): # print 'merge called' return Entry(prev_entry.begin_time, entry.end_time, self.__min_status__(prev_entry.status, entry.status), prev_entry.word_begin, entry.word_end) def print_entries(self): #print the entries for e in self.entries: print e.begin_time, e.end_time, e.status, e.word_begin, e.word_end
apache-2.0
-5,431,410,990,582,398,000
42.666667
169
0.694179
false
3.114413
false
false
false
hirochachacha/apython
bpython/completion/completers/import_completer.py
1
9006
# The MIT License # # Copyright (c) 2009-2011 Andreas Stuehrk # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import with_statement import imp import itertools import os import sys import warnings try: from warnings import catch_warnings except ImportError: import contextlib @contextlib.contextmanager def catch_warnings(): """Stripped-down version of `warnings.catch_warnings()` (available in Py >= 2.6).""" filters = warnings.filters warnings.filters = list(filters) try: yield finally: warnings.filters = filters from bpython._py3compat import PY3 from six import next # The cached list of all known modules modules = dict() sorted_modules = [] fully_loaded = False def get_object(cw, line): if not cw: cw = "" tokens = line.split() completing_from = False if len(tokens) == 1: return if tokens[0] == 'from': if len(tokens) > 3: if '.' 
in cw: # This will result in a SyntaxError, so do not return # any matches return None completing_from = True cw = '%s.%s' % (tokens[1], cw) elif len(tokens) == 3: if 'import '.startswith(cw): return None else: # Will result in a SyntaxError return None match_objects = list() for name in sorted_modules: if not (name == cw and name.find('.', len(cw)) == -1): continue try: obj = sys.modules[name] except: if modules[name].endswith('.pyc'): f = modules[name][:-1] if os.path.isfile(f): obj = f else: obj = None else: obj = None if completing_from: name = name[len(tokens[1]) + 1:] try: obj = getattr(obj, name) except: obj = None match_objects.append(obj) if completing_from and tokens[1] in sys.modules: # from x import y -> search for attributes starting with y if # x is in sys.modules _, _, cw = cw.rpartition('.') module = sys.modules[tokens[1]] names = [name for name in dir(module) if name == cw] objects = [getattr(module, name) for name in names] match_objects.extend(objects) elif len(tokens) == 2: # from x.y or import x.y -> search for attributes starting # with y if x is in sys.modules and the attribute is also in # sys.modules module_name, _, cw = cw.rpartition('.') if module_name in sys.modules: module = sys.modules[module_name] for name in dir(module): if name != cw: continue submodule_name = '%s.%s' % (module_name, name) if submodule_name in sys.modules: match_objects.append(sys.modules[submodule_name]) if not match_objects: return None return match_objects[0] def complete(cw, line): """Construct a full list of possibly completions for imports.""" if not cw: return None tokens = line.split() completing_from = False if tokens[0] == 'from': if len(tokens) > 3: if '.' 
in cw: # This will result in a SyntaxError, so do not return # any matches return None completing_from = True cw = '%s.%s' % (tokens[1], cw) elif len(tokens) == 3: if 'import '.startswith(cw): return ['import '] else: # Will result in a SyntaxError return None matches = list() for name in sorted_modules: if not (name.startswith(cw) and name.find('.', len(cw)) == -1): continue if completing_from: name = name[len(tokens[1]) + 1:] matches.append(name) if completing_from and tokens[1] in sys.modules: # from x import y -> search for attributes starting with y if # x is in sys.modules _, _, cw = cw.rpartition('.') module = sys.modules[tokens[1]] names = [name for name in dir(module) if name.startswith(cw)] matches.extend(names) elif len(tokens) == 2: # from x.y or import x.y -> search for attributes starting # with y if x is in sys.modules and the attribute is also in # sys.modules module_name, _, cw = cw.rpartition('.') if module_name in sys.modules: module = sys.modules[module_name] for name in dir(module): if not name.startswith(cw): continue submodule_name = '%s.%s' % (module_name, name) if submodule_name in sys.modules: matches.append(submodule_name) if not matches: return [] return matches def find_modules(path): """Find all modules (and packages) for a given directory.""" if not os.path.isdir(path): # Perhaps a zip file return try: filenames = os.listdir(path) except EnvironmentError: filenames = [] for name in filenames: filename = name if not any(name.endswith(suffix[0]) for suffix in imp.get_suffixes()): # Possibly a package if '.' in name: continue elif os.path.isdir(os.path.join(path, name)): # Unfortunately, CPython just crashes if there is a directory # which ends with a python extension, so work around. 
continue for suffix in imp.get_suffixes(): if name.endswith(suffix[0]): name = name[:-len(suffix[0])] break if PY3 and name == "badsyntax_pep3120": # Workaround for issue #166 continue try: with catch_warnings(): warnings.simplefilter("ignore", ImportWarning) fo, pathname, _ = imp.find_module(name, [path]) except (ImportError, IOError, SyntaxError): continue except UnicodeEncodeError: # Happens with Python 3 when there is a filename in some # invalid encoding continue else: if fo is not None: fo.close() else: # Yay, package for subname, filename in find_modules(pathname): if subname != '__init__': yield '%s.%s' % (name, subname), os.path.join(pathname, filename) yield name, filename def find_all_modules(path=None): """Return a list with all modules in `path`, which should be a list of directory names. If path is not given, sys.path will be used.""" global sorted_modules i = itertools.repeat(None) if path is None: d = dict(zip(sys.builtin_module_names, i)) modules.update(d) path = sys.path for p in path: if not p: p = os.curdir for module, filename in find_modules(p): if not PY3 and not isinstance(module, unicode): try: module = module.decode(sys.getfilesystemencoding()) except UnicodeDecodeError: # Not importable anyway, ignore it continue modules[module] = os.path.join(p, filename) sorted_modules = sorted(modules) yield def find_coroutine(): global fully_loaded if fully_loaded: return None try: next(find_iterator) except StopIteration: fully_loaded = True return True def reload(): """Refresh the list of known modules.""" modules.clear() for _ in find_all_modules(): pass find_iterator = find_all_modules()
mit
6,365,923,528,338,857,000
31.989011
89
0.563957
false
4.406067
false
false
false
julzhk/codekata
instant_runoff_voting.py
1
1947
from collections import defaultdict, Counter


def runoff(voters):
    """Determine an election winner using Instant Runoff Voting.

    https://en.wikipedia.org/wiki/Instant-runoff_voting

    Each ballot lists candidates in descending order of preference.  In each
    round, every ballot counts toward its highest-ranked candidate that is
    still in the running.  A candidate holding more than half of the total
    votes wins.  Otherwise the candidate(s) tied for the fewest votes are
    eliminated and the ballots are re-tallied.  If every remaining candidate
    is eliminated at once (a complete tie), there is no winner.

    :param voters: list of ballots; each ballot is a list of candidate
        symbols in descending order of preference.  All ballots are assumed
        to rank the same set of candidates.
    :return: the winning candidate's symbol, or None on a complete tie
        (or if there are no ballots at all).
    """
    if not voters:
        return None
    remaining = set(voters[0])
    majority = len(voters) / 2.0
    while remaining:
        # Seed the tally with zeros so candidates that drew no first-place
        # votes this round are still eligible for elimination.
        tally = Counter({candidate: 0 for candidate in remaining})
        for ballot in voters:
            # Each ballot votes for its highest-ranked surviving candidate.
            for candidate in ballot:
                if candidate in remaining:
                    tally[candidate] += 1
                    break
        leader, leader_votes = tally.most_common(1)[0]
        if leader_votes > majority:
            return leader
        fewest = min(tally.values())
        # Eliminate everyone tied for last place; if that empties the field,
        # the loop ends and the election is declared a complete tie.
        remaining -= {c for c in tally if tally[c] == fewest}
    return None


voters = [
    ['c', 'a', 'b', 'd', 'e'],
    ['b', 'e', 'd', 'c', 'a'],
    ['b', 'e', 'c', 'a', 'd'],
    ['d', 'b', 'c', 'a', 'e'],
    ['c', 'b', 'd', 'a', 'e'],
]
assert(runoff(voters) == "b")
mit
-6,354,312,978,943,293,000
42.266667
113
0.659476
false
3.427817
false
false
false
factorlibre/carrier-delivery
delivery_carrier_ups/model/ups_config.py
1
2330
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2015 FactorLibre (http://www.factorlibre.com)
#                  Hugo Santos <hugo.santos@factorlibre.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api

# Label file formats accepted by the UPS shipping API.
UPS_LABEL_FORMAT = [
    ('EPL', 'EPL'),
    ('ZPL', 'ZPL'),
    ('GIF', 'GIF'),
    ('STARPL', 'STARPL'),
    ('SPL', 'SPL')
]


class UPSConfig(models.Model):
    """Credentials and unit settings for a UPS shipping account."""
    _name = 'ups.config'

    @api.model
    def _ups_weight_uom(self):
        """Selection values for the weight unit of measure."""
        return [(uom, uom) for uom in ('KGS', 'LBS')]

    @api.model
    def _ups_dimension_uom(self):
        """Selection values for the dimension unit of measure."""
        return [(uom, uom) for uom in ('CM', 'IN')]

    @api.model
    def _ups_label_file_format(self):
        """Selection values for the shipping-label file format."""
        return UPS_LABEL_FORMAT

    name = fields.Char('UPS Config Name', required=True)
    is_test = fields.Boolean('Is a test?')
    username = fields.Char('UPS Username', required=True)
    password = fields.Char('UPS Password', required=True)
    access_license = fields.Char('UPS Access license', required=True)
    shipper_number = fields.Char('UPS Shipper number', required=True)
    weight_uom = fields.Selection('_ups_weight_uom', required=True,
                                  default="KGS")
    dimension_uom = fields.Selection('_ups_dimension_uom', required=True,
                                     default='CM')
    label_file_format = fields.Selection('_ups_label_file_format',
                                         required=True, default='EPL')
agpl-3.0
545,612,102,985,848,500
34.30303
78
0.566953
false
3.982906
false
false
false
wenhuchen/ETHZ-Bootstrapped-Captioning
visual-concepts/coco/PythonAPI/pycocotools/coco.py
1
16953
__author__ = 'tylin' __version__ = '2.0' # Interface for accessing the Microsoft COCO dataset. # Microsoft COCO is a large image dataset designed for object detection, # segmentation, and caption generation. pycocotools is a Python API that # assists in loading, parsing and visualizing the annotations in COCO. # Please visit http://mscoco.org/ for more information on COCO, including # for the data, paper, and tutorials. The exact format of the annotations # is also described on the COCO website. For example usage of the pycocotools # please see pycocotools_demo.ipynb. In addition to this API, please download both # the COCO images and annotations in order to run the demo. # An alternative to using the API is to load the annotations directly # into Python dictionary # Using the API provides additional utility functions. Note that this API # supports both *instance* and *caption* annotations. In the case of # captions not all functions are defined (e.g. categories are undefined). # The following API functions are defined: # COCO - COCO api class that loads COCO annotation file and prepare data structures. # decodeMask - Decode binary mask M encoded via run-length encoding. # encodeMask - Encode binary mask M using run-length encoding. # getAnnIds - Get ann ids that satisfy given filter conditions. # getCatIds - Get cat ids that satisfy given filter conditions. # getImgIds - Get img ids that satisfy given filter conditions. # loadAnns - Load anns with the specified ids. # loadCats - Load cats with the specified ids. # loadImgs - Load imgs with the specified ids. # segToMask - Convert polygon segmentation to binary mask. # showAnns - Display the specified annotations. # loadRes - Load algorithm results and create API for accessing them. # download - Download COCO images from mscoco.org server. # Throughout the API "ann"=annotation, "cat"=category, and "img"=image. # Help on each functions can be accessed by: "help COCO>function". 
# See also COCO>decodeMask, # COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, # COCO>getImgIds, COCO>loadAnns, COCO>loadCats, # COCO>loadImgs, COCO>segToMask, COCO>showAnns # Microsoft COCO Toolbox. version 2.0 # Data, paper, and tutorials available at: http://mscoco.org/ # Code written by Piotr Dollar and Tsung-Yi Lin, 2014. # Licensed under the Simplified BSD License [see bsd.txt] import json import time import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from matplotlib.patches import Polygon import numpy as np import urllib import copy import itertools import mask import os from collections import defaultdict class COCO: def __init__(self, annotation_file=None): """ Constructor of Microsoft COCO helper class for reading and visualizing annotations. :param annotation_file (str): location of annotation file :param image_folder (str): location to the folder that hosts images. :return: """ # load dataset self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict() self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) if not annotation_file == None: print 'loading annotations into memory...' tic = time.time() dataset = json.load(open(annotation_file, 'r')) assert type(dataset)==dict, "annotation file format %s not supported"%(type(dataset)) print 'Done (t=%0.2fs)'%(time.time()- tic) self.dataset = dataset self.createIndex() def createIndex(self): # create index print 'creating index...' anns,cats,imgs = dict(),dict(),dict() imgToAnns,catToImgs = defaultdict(list),defaultdict(list) if 'annotations' in self.dataset: for ann in self.dataset['annotations']: imgToAnns[ann['image_id']].append(ann) anns[ann['id']] = ann if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat for ann in self.dataset['annotations']: catToImgs[ann['category_id']].append(ann['image_id']) print 'index created!' 
# create class members self.anns = anns self.imgToAnns = imgToAnns self.catToImgs = catToImgs self.imgs = imgs self.cats = cats def info(self): """ Print information about the annotation file. :return: """ for key, value in self.dataset['info'].items(): print '%s: %s'%(key, value) def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids def getCatIds(self, catNms=[], supNms=[], catIds=[]): """ filtering parameters. default skips that filter. 
:param catNms (str array) : get cats for given cat names :param supNms (str array) : get cats for given supercategory names :param catIds (int array) : get cats for given cat ids :return: ids (int array) : integer array of cat ids """ catNms = catNms if type(catNms) == list else [catNms] supNms = supNms if type(supNms) == list else [supNms] catIds = catIds if type(catIds) == list else [catIds] if len(catNms) == len(supNms) == len(catIds) == 0: cats = self.dataset['categories'] else: cats = self.dataset['categories'] cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms] cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms] cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds] ids = [cat['id'] for cat in cats] return ids def getImgIds(self, imgIds=[], catIds=[]): ''' Get img ids that satisfy given filter conditions. :param imgIds (int array) : get imgs for given ids :param catIds (int array) : get imgs with all given cats :return: ids (int array) : integer array of img ids ''' imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == 0: ids = self.imgs.keys() else: ids = set(imgIds) for i, catId in enumerate(catIds): if i == 0 and len(ids) == 0: ids = set(self.catToImgs[catId]) else: ids &= set(self.catToImgs[catId]) return list(ids) def loadAnns(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying anns :return: anns (object array) : loaded ann objects """ if type(ids) == list: return [self.anns[id] for id in ids] elif type(ids) == int: return [self.anns[ids]] def loadCats(self, ids=[]): """ Load cats with the specified ids. 
:param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if type(ids) == list: return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]] def loadImgs(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying img :return: imgs (object array) : loaded img objects """ if type(ids) == list: return [self.imgs[id] for id in ids] elif type(ids) == int: return [self.imgs[ids]] def showAnns(self, anns): """ Display the specified annotations. :param anns (array of object): annotations to display :return: None """ if len(anns) == 0: return 0 if 'segmentation' in anns[0] or 'keypoints' in anns[0]: datasetType = 'instances' elif 'caption' in anns[0]: datasetType = 'captions' else: raise Exception("datasetType not supported") if datasetType == 'instances': ax = plt.gca() ax.set_autoscale_on(False) polygons = [] color = [] for ann in anns: c = (np.random.random((1, 3))*0.6+0.4).tolist()[0] if 'segmentation' in ann: if type(ann['segmentation']) == list: # polygon for seg in ann['segmentation']: poly = np.array(seg).reshape((len(seg)/2, 2)) polygons.append(Polygon(poly)) color.append(c) else: # mask t = self.imgs[ann['image_id']] if type(ann['segmentation']['counts']) == list: rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width']) else: rle = [ann['segmentation']] m = mask.decode(rle) img = np.ones( (m.shape[0], m.shape[1], 3) ) if ann['iscrowd'] == 1: color_mask = np.array([2.0,166.0,101.0])/255 if ann['iscrowd'] == 0: color_mask = np.random.random((1, 3)).tolist()[0] for i in range(3): img[:,:,i] = color_mask[i] ax.imshow(np.dstack( (img, m*0.5) )) if 'keypoints' in ann and type(ann['keypoints']) == list: # turn skeleton into zero-based index sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1 kp = np.array(ann['keypoints']) x = kp[0::3] y = kp[1::3] v = kp[2::3] for sk in sks: if np.all(v[sk]>0): plt.plot(x[sk],y[sk], linewidth=3, 
color=c) plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2) plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2) p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) ax.add_collection(p) p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) ax.add_collection(p) elif datasetType == 'captions': for ann in anns: print ann['caption'] def loadRes(self, resFile): """ Load result file and return a result api object. :param resFile (str) : file name of result file :return: res (obj) : result api object """ res = COCO() res.dataset['images'] = [img for img in self.dataset['images']] print 'Loading and preparing results... ' tic = time.time() if type(resFile) == str or type(resFile) == unicode: anns = json.load(open(resFile)) elif type(resFile) == np.ndarray: anns = self.loadNumpyAnnotations(resFile) else: anns = resFile assert type(anns) == list, 'results in not an array of objects' annsImgIds = [ann['image_id'] for ann in anns] assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ 'Results do not correspond to current coco set' if 'caption' in anns[0]: imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns]) res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds] for id, ann in enumerate(anns): ann['id'] = id+1 elif 'bbox' in anns[0] and not anns[0]['bbox'] == []: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): bb = ann['bbox'] x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]] if not 'segmentation' in ann: ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] ann['area'] = bb[2]*bb[3] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'segmentation' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): # now only support compressed 
RLE format as segmentation results ann['area'] = mask.area([ann['segmentation']])[0] if not 'bbox' in ann: ann['bbox'] = mask.toBbox([ann['segmentation']])[0] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'keypoints' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): s = ann['keypoints'] x = s[0::3] y = s[1::3] x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y) ann['area'] = (x1-x0)*(y1-y0) ann['id'] = id + 1 ann['bbox'] = [x0,y0,x1-x0,y1-y0] print 'DONE (t=%0.2fs)'%(time.time()- tic) res.dataset['annotations'] = anns res.createIndex() return res def download( self, tarDir = None, imgIds = [] ): ''' Download COCO images from mscoco.org server. :param tarDir (str): COCO results directory name imgIds (list): images to be downloaded :return: ''' if tarDir is None: print 'Please specify target directory' return -1 if len(imgIds) == 0: imgs = self.imgs.values() else: imgs = self.loadImgs(imgIds) N = len(imgs) if not os.path.exists(tarDir): os.makedirs(tarDir) for i, img in enumerate(imgs): tic = time.time() fname = os.path.join(tarDir, img['file_name']) if not os.path.exists(fname): urllib.urlretrieve(img['coco_url'], fname) print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic) def loadNumpyAnnotations(self, data): """ Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class} :param data (numpy.ndarray) :return: annotations (python nested list) """ print("Converting ndarray to lists...") assert(type(data) == np.ndarray) print(data.shape) assert(data.shape[1] == 7) N = data.shape[0] ann = [] for i in range(N): if i % 1000000 == 0: print("%d/%d" % (i,N)) ann += [{ 'image_id' : int(data[i, 0]), 'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ], 'score' : data[i, 5], 'category_id': int(data[i, 6]), }] return ann
bsd-3-clause
-9,197,075,660,975,878,000
42.358056
128
0.54775
false
3.730854
false
false
false
blackball/an-test6
net/migrations/0004_update_calibrations.py
1
22054
# encoding: utf-8 import datetime from south.db import db from south.v2 import DataMigration from django.db import models from astrometry.net.settings import * from astrometry.util.util import Tan import math import os class Migration(DataMigration): def forwards(self, orm): "Write your forwards methods here." for calib in orm.Calibration.objects.all(): wcsfn = os.path.join(JOBDIR, '%08i' % calib.job.id) wcsfn = os.path.join(wcsfn, 'wcs.fits') wcs = Tan(str(wcsfn), 0) ra,dec = wcs.radec_center() radius = (wcs.pixel_scale() * math.hypot(wcs.imagew, wcs.imageh)/2. / 3600.) # Find cartesian coordinates ra *= math.pi/180 dec *= math.pi/180 tempr = math.cos(dec) calib.x = tempr*math.cos(ra) calib.y = tempr*math.sin(ra) calib.z = math.sin(dec) calib.r = radius/180*math.pi calib.save() def backwards(self, orm): "Write your backwards methods here." models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'net.album': { 'Meta': {'object_name': 'Album'}, 'comment_receiver': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.CommentReceiver']", 'unique': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'publicly_visible': ('django.db.models.fields.CharField', [], {'default': "'y'", 'max_length': '1'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'albums'", 'symmetrical': 'False', 'to': "orm['net.Tag']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'albums'", 'null': 'True', 'to': "orm['auth.User']"}), 'user_images': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'albums'", 'symmetrical': 'False', 'to': "orm['net.UserImage']"}) }, 'net.cachedfile': { 'Meta': {'object_name': 'CachedFile'}, 'disk_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.DiskFile']"}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'primary_key': 'True'}) }, 'net.calibration': { 'Meta': {'object_name': 'Calibration'}, 'decmax': ('django.db.models.fields.FloatField', [], {}), 'decmin': ('django.db.models.fields.FloatField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'r': ('django.db.models.fields.FloatField', [], {}), 'ramax': ('django.db.models.fields.FloatField', [], {}), 'ramin': ('django.db.models.fields.FloatField', [], {}), 'raw_tan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calibrations_raw'", 'null': 'True', 'to': "orm['net.TanWCS']"}), 'sip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.SipWCS']", 'null': 'True'}), 'sky_location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calibrations'", 'null': 'True', 'to': "orm['net.SkyLocation']"}), 'tweaked_tan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calibrations_tweaked'", 'null': 'True', 'to': "orm['net.TanWCS']"}), 'x': ('django.db.models.fields.FloatField', [], {}), 'y': ('django.db.models.fields.FloatField', [], {}), 'z': 
('django.db.models.fields.FloatField', [], {}) }, 'net.comment': { 'Meta': {'ordering': "['-created_at']", 'object_name': 'Comment'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments_left'", 'to': "orm['auth.User']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['net.CommentReceiver']"}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}) }, 'net.commentreceiver': { 'Meta': {'object_name': 'CommentReceiver'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}) }, 'net.diskfile': { 'Meta': {'object_name': 'DiskFile'}, 'file_hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'primary_key': 'True'}), 'file_type': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}), 'size': ('django.db.models.fields.PositiveIntegerField', [], {}) }, 'net.flag': { 'Meta': {'ordering': "['name']", 'object_name': 'Flag'}, 'explanation': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '56', 'primary_key': 'True'}) }, 'net.flaggeduserimage': { 'Meta': {'object_name': 'FlaggedUserImage'}, 'flag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Flag']"}), 'flagged_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'user_image': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['net.UserImage']"}) }, 'net.image': { 'Meta': {'object_name': 'Image'}, 'disk_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.DiskFile']"}), 'display_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'image_display_set'", 'null': 'True', 'to': "orm['net.Image']"}), 'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'image_thumbnail_set'", 'null': 'True', 'to': "orm['net.Image']"}), 'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}) }, 'net.job': { 'Meta': {'object_name': 'Job'}, 'calibration': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'job'", 'unique': 'True', 'null': 'True', 'to': "orm['net.Calibration']"}), 'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'error_message': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'queued_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'user_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'to': "orm['net.UserImage']"}) }, 'net.license': { 'Meta': {'object_name': 'License'}, 'allow_commercial_use': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '1'}), 'allow_modifications': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'license_name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'license_uri': ('django.db.models.fields.CharField', [], 
{'max_length': '1024'}) }, 'net.processsubmissions': { 'Meta': {'object_name': 'ProcessSubmissions'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'pid': ('django.db.models.fields.IntegerField', [], {}), 'watchdog': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}) }, 'net.queuedjob': { 'Meta': {'object_name': 'QueuedJob'}, 'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Job']"}), 'procsub': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'to': "orm['net.ProcessSubmissions']"}), 'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'net.queuedsubmission': { 'Meta': {'object_name': 'QueuedSubmission'}, 'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'procsub': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subs'", 'to': "orm['net.ProcessSubmissions']"}), 'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Submission']"}), 'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'net.sipwcs': { 'Meta': {'object_name': 'SipWCS'}, 'apterms': ('django.db.models.fields.TextField', [], {'default': "''"}), 'aterms': ('django.db.models.fields.TextField', [], {'default': "''"}), 'bpterms': ('django.db.models.fields.TextField', [], {'default': "''"}), 'bterms': ('django.db.models.fields.TextField', [], {'default': "''"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}), 'tan': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.TanWCS']", 'unique': 'True'}) }, 'net.skylocation': { 'Meta': 
{'object_name': 'SkyLocation'}, 'healpix': ('django.db.models.fields.BigIntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nside': ('django.db.models.fields.PositiveSmallIntegerField', [], {}) }, 'net.skyobject': { 'Meta': {'object_name': 'SkyObject'}, 'name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'primary_key': 'True'}) }, 'net.sourcelist': { 'Meta': {'object_name': 'SourceList', '_ormbases': ['net.Image']}, 'image_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.Image']", 'unique': 'True', 'primary_key': 'True'}), 'source_type': ('django.db.models.fields.CharField', [], {'max_length': '4'}) }, 'net.submission': { 'Meta': {'object_name': 'Submission'}, 'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Album']", 'null': 'True', 'blank': 'True'}), 'center_dec': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'center_ra': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'comment_receiver': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.CommentReceiver']", 'unique': 'True'}), 'deduplication_nonce': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'disk_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['net.DiskFile']"}), 'downsample_factor': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'error_message': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.License']"}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'parity': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}), 
'positional_error': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'processing_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'processing_retries': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'processing_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'publicly_visible': ('django.db.models.fields.CharField', [], {'default': "'y'", 'max_length': '1'}), 'radius': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'scale_err': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'scale_est': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'scale_lower': ('django.db.models.fields.FloatField', [], {'default': '0.10000000000000001', 'null': 'True', 'blank': 'True'}), 'scale_type': ('django.db.models.fields.CharField', [], {'default': "'ul'", 'max_length': '2'}), 'scale_units': ('django.db.models.fields.CharField', [], {'default': "'degwidth'", 'max_length': '20'}), 'scale_upper': ('django.db.models.fields.FloatField', [], {'default': '180', 'null': 'True', 'blank': 'True'}), 'submitted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'net.tag': { 'Meta': {'object_name': 'Tag'}, 'text': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'primary_key': 'True'}) }, 'net.taggeduserimage': { 'Meta': {'object_name': 'TaggedUserImage'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['net.Tag']"}), 'tagger': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}), 'user_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.UserImage']"}) }, 'net.tanwcs': { 'Meta': {'object_name': 'TanWCS'}, 'cd11': ('django.db.models.fields.FloatField', [], {}), 'cd12': ('django.db.models.fields.FloatField', [], {}), 'cd21': ('django.db.models.fields.FloatField', [], {}), 'cd22': ('django.db.models.fields.FloatField', [], {}), 'crpix1': ('django.db.models.fields.FloatField', [], {}), 'crpix2': ('django.db.models.fields.FloatField', [], {}), 'crval1': ('django.db.models.fields.FloatField', [], {}), 'crval2': ('django.db.models.fields.FloatField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'imageh': ('django.db.models.fields.FloatField', [], {}), 'imagew': ('django.db.models.fields.FloatField', [], {}) }, 'net.userimage': { 'Meta': {'object_name': 'UserImage'}, 'comment_receiver': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['net.CommentReceiver']", 'unique': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'flags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_images'", 'symmetrical': 'False', 'through': "orm['net.FlaggedUserImage']", 'to': "orm['net.Flag']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.Image']"}), 'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['net.License']"}), 'original_file_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'publicly_visible': ('django.db.models.fields.CharField', [], {'default': "'y'", 'max_length': '1'}), 'sky_objects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_images'", 'symmetrical': 'False', 'to': 
"orm['net.SkyObject']"}), 'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_images'", 'to': "orm['net.Submission']"}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_images'", 'symmetrical': 'False', 'through': "orm['net.TaggedUserImage']", 'to': "orm['net.Tag']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_images'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'net.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'apikey': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'default_license': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['net.License']"}), 'display_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}) } } complete_apps = ['net']
gpl-2.0
-8,568,439,465,542,566,000
73.255892
203
0.540446
false
3.638673
false
false
false
MSEMJEJME/Get-Dumped
renpy/statements.py
1
3307
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # This module contains code to support user-defined statements. import renpy # The statement registry. It's a map from tuples giving the prefixes of # statements to dictionaries giving the methods used for that statement. registry = { } parsers = renpy.parser.ParseTrie() def register(name, parse=None, lint=None, execute=None, predict=None, next=None, scry=None, block=False, init=False): #@ReservedAssignment name = tuple(name.split()) registry[name] = dict(parse=parse, lint=lint, execute=execute, predict=predict, next=next, scry=scry) # The function that is called to create an ast.UserStatement. 
def parse_user_statement(l, loc): renpy.exports.push_error_handler(l.error) try: rv = renpy.ast.UserStatement(loc, l.text, l.subblock) if not block: l.expect_noblock(" ".join(name) + " statement") l.advance() else: l.expect_block(" ".join(name) + " statement") l.advance() finally: renpy.exports.pop_error_handler() if init and not l.init: rv = renpy.ast.Init(loc, [ rv ], 0) return rv renpy.parser.statements.add(name, parse_user_statement) # The function that is called to get our parse data. def parse_data(l): return (name, registry[name]["parse"](l)) parsers.add(name, parse_data) def parse(node, line, subblock): block = [ (node.filename, node.linenumber, line, subblock) ] l = renpy.parser.Lexer(block) l.advance() renpy.exports.push_error_handler(l.error) try: pf = parsers.parse(l) if pf is None: l.error("Could not find user-defined statement at runtime.") return pf(l) finally: renpy.exports.pop_error_handler() def call(method, parsed, *args, **kwargs): name, parsed = parsed method = registry[name].get(method) if method is None: return None return method(parsed, *args, **kwargs)
gpl-2.0
-8,841,751,182,915,426,000
32.07
138
0.643786
false
4.087763
false
false
false
RayRuizhiLiao/ITK_4D
Modules/ThirdParty/pygccxml/src/pygccxml/parser/etree_scanner.py
1
2166
# Copyright 2014-2016 Insight Software Consortium. # Copyright 2004-2008 Roman Yakovenko. # Distributed under the Boost Software License, Version 1.0. # See http://www.boost.org/LICENSE_1_0.txt import warnings from . import scanner # keep py2exe happy import xml.etree.ElementTree import xml.etree.cElementTree as ElementTree class etree_saxifier_t(object): def __init__(self, etree, handler): """ Deprecated since 1.8.0. Will be removed in 1.9.0. """ warnings.warn("etree_saxifier_t is deprecated.\n", DeprecationWarning) self.__root_elem = etree.getroot() self.__handler = handler def saxify(self): self.__handler.startDocument() self.__recursive_saxify(self.__root_elem) self.__handler.endDocument() def __recursive_saxify(self, element): self.__handler.startElement(element.tag, element.attrib) for e in element: self.__recursive_saxify(e) self.__handler.endElement(element.tag) class etree_scanner_t(scanner.scanner_t): def __init__(self, xml_file, decl_factory, *args): """ Deprecated since 1.8.0. Will be removed in 1.9.0. """ warnings.warn( "etree_scanner_t is deprecated.\n" + "Please use ietree_scanner_t instead.", DeprecationWarning) scanner.scanner_t.__init__(self, xml_file, decl_factory, *args) def read(self): tree = ElementTree.parse(self.xml_file) saxifier = etree_saxifier_t(tree, self) saxifier.saxify() class ietree_scanner_t(scanner.scanner_t): def __init__(self, xml_file, decl_factory, *args): scanner.scanner_t.__init__(self, xml_file, decl_factory, *args) def read(self): context = ElementTree.iterparse( self.xml_file, events=("start", "end")) for event, elem in context: if event == 'start': self.startElement(elem.tag, elem.attrib) else: self.endElement(elem.tag) elem.clear() self.endDocument()
apache-2.0
-8,333,672,104,491,799,000
28.507042
78
0.591874
false
3.854093
false
false
false
dnlcrl/PyFunt
tools/cythonize.py
1
6618
#!/usr/bin/env python """ cythonize SOURCE: https://github.com/scipy/scipy/blob/master/setup.py Cythonize pyx files into C files as needed. Usage: cythonize [root_dir] Default [root_dir] is 'pyfunt'. Checks pyx files to see if they have been changed relative to their corresponding C files. If they have, then runs cython on these files to recreate the C files. The script thinks that the pyx files have changed relative to the C files by comparing hashes stored in a database file. Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in) files; while waiting for a proper build system. Uses file hashes to figure out if rebuild is needed. For now, this script should be run by developers when changing Cython files only, and the resulting C files checked in, so that end-users (and Python-only developers) do not get the Cython/Tempita dependencies. Originally written by Dag Sverre Seljebotn, and copied here from: https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py Note: this script does not check any of the dependent C libraries; it only operates on the Cython .pyx files. 
""" from __future__ import division, print_function, absolute_import import os import re import sys import hashlib import subprocess HASH_FILE = 'cythonize.dat' DEFAULT_ROOT = 'pyfunt' # WindowsError is not defined on unix systems try: WindowsError except NameError: WindowsError = None # # Rules # def process_pyx(fromfile, tofile): try: from Cython.Compiler.Version import version as cython_version from distutils.version import LooseVersion if LooseVersion(cython_version) < LooseVersion('0.22'): raise Exception('Building PyFunt requires Cython >= 0.22') except ImportError: pass flags = ['--fast-fail'] if tofile.endswith('.cxx'): flags += ['--cplus'] try: try: # if fromfile == 'im2col_cython.pyx': # print('compiling im2col_cython') # r = subprocess.call( # ['python', 'pyfunt/layers/setup.py', 'build_ext', '--inplace']) # else: r = subprocess.call( ['cython'] + flags + ["-o", tofile, fromfile]) if r != 0: raise Exception('Cython failed') except OSError: # There are ways of installing Cython that don't result in a cython # executable on the path, see gh-2397. 
r = subprocess.call([sys.executable, '-c', 'import sys; from Cython.Compiler.Main import ' 'setuptools_main as main; sys.exit(main())'] + flags + ["-o", tofile, fromfile]) if r != 0: raise Exception("Cython either isn't installed or it failed.") except OSError: raise OSError('Cython needs to be installed') def process_tempita_pyx(fromfile, tofile): try: try: from Cython import Tempita as tempita except ImportError: import tempita except ImportError: raise Exception('Building PyFunt requires Tempita: ' 'pip install --user Tempita') from_filename = tempita.Template.from_filename template = from_filename(fromfile, encoding=sys.getdefaultencoding()) pyxcontent = template.substitute() assert fromfile.endswith('.pyx.in') pyxfile = fromfile[:-len('.pyx.in')] + '.pyx' with open(pyxfile, "w") as f: f.write(pyxcontent) process_pyx(pyxfile, tofile) rules = { # fromext : function '.pyx': process_pyx, '.pyx.in': process_tempita_pyx } # # Hash db # def load_hashes(filename): # Return { filename : (sha1 of input, sha1 of output) } if os.path.isfile(filename): hashes = {} with open(filename, 'r') as f: for line in f: filename, inhash, outhash = line.split() hashes[filename] = (inhash, outhash) else: hashes = {} return hashes def save_hashes(hash_db, filename): with open(filename, 'w') as f: for key, value in sorted(hash_db.items()): f.write("%s %s %s\n" % (key, value[0], value[1])) def sha1_of_file(filename): h = hashlib.sha1() with open(filename, "rb") as f: h.update(f.read()) return h.hexdigest() # # Main program # def normpath(path): path = path.replace(os.sep, '/') if path.startswith('./'): path = path[2:] return path def get_hash(frompath, topath): from_hash = sha1_of_file(frompath) to_hash = sha1_of_file(topath) if os.path.exists(topath) else None return (from_hash, to_hash) def process(path, fromfile, tofile, processor_function, hash_db): fullfrompath = os.path.join(path, fromfile) fulltopath = os.path.join(path, tofile) current_hash = get_hash(fullfrompath, fulltopath) if 
current_hash == hash_db.get(normpath(fullfrompath), None): print('%s has not changed' % fullfrompath) return orig_cwd = os.getcwd() try: os.chdir(path) print('Processing %s to %s' % (fullfrompath, fulltopath)) processor_function(fromfile, tofile) finally: os.chdir(orig_cwd) # changed target file, recompute hash current_hash = get_hash(fullfrompath, fulltopath) # store hash in db hash_db[normpath(fullfrompath)] = current_hash def find_process_files(root_dir): hash_db = load_hashes(HASH_FILE) for cur_dir, dirs, files in os.walk(root_dir): for filename in files: in_file = os.path.join(cur_dir, filename + ".in") if filename.endswith('.pyx') and os.path.isfile(in_file): continue for fromext, function in rules.items(): if filename.endswith(fromext): toext = ".c" with open(os.path.join(cur_dir, filename), 'rb') as f: data = f.read() m = re.search( br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I | re.M) if m: toext = ".cxx" fromfile = filename tofile = filename[:-len(fromext)] + toext process(cur_dir, fromfile, tofile, function, hash_db) save_hashes(hash_db, HASH_FILE) def main(): try: root_dir = sys.argv[1] except IndexError: root_dir = DEFAULT_ROOT find_process_files(root_dir) if __name__ == '__main__': main()
mit
-7,944,039,619,602,638,000
29.219178
97
0.599577
false
3.711722
false
false
false
deepmind/interval-bound-propagation
interval_bound_propagation/src/simplex_bounds.py
1
7609
# coding=utf-8 # Copyright 2019 The Interval Bound Propagation Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Naive bound calculation for common neural network layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from interval_bound_propagation.src import bounds as basic_bounds from interval_bound_propagation.src import relative_bounds import sonnet as snt import tensorflow.compat.v1 as tf class SimplexBounds(basic_bounds.AbstractBounds): """Specifies a bounding simplex within an embedding space.""" def __init__(self, vertices, nominal, r): """Initialises the simplex bounds. Args: vertices: Tensor of shape (num_vertices, *input_shape) or of shape (batch_size, num_vertices, *input_shape) containing the vertices in embedding space. nominal: Tensor of shape (batch_size, *input_shape) specifying the unperturbed inputs in embedding space, where `*input_shape` denotes either (embedding_size,) for flat input (e.g. bag-of-words) or (input_length, embedding_channels) for sequence input. r: Scalar specifying the dilation factor of the simplex. The dilated simplex will have vertices `nominal + r * (vertices-nominal)`. 
""" super(SimplexBounds, self).__init__() self._vertices = vertices self._nominal = nominal self._r = r @property def vertices(self): return self._vertices @property def nominal(self): return self._nominal @property def r(self): return self._r @property def shape(self): return self.nominal.shape.as_list() @classmethod def convert(cls, bounds): if not isinstance(bounds, cls): raise ValueError('Cannot convert "{}" to "{}"'.format(bounds, cls.__name__)) return bounds def apply_batch_reshape(self, wrapper, shape): reshape = snt.BatchReshape(shape) if self.vertices.shape.ndims == self.nominal.shape.ndims: reshape_vertices = reshape else: reshape_vertices = snt.BatchReshape(shape, preserve_dims=2) return SimplexBounds(reshape_vertices(self.vertices), reshape(self.nominal), self.r) def apply_linear(self, wrapper, w, b): mapped_centres = tf.matmul(self.nominal, w) mapped_vertices = tf.tensordot(self.vertices, w, axes=1) lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -2) nominal_out = tf.matmul(self.nominal, w) if b is not None: nominal_out += b return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out) def apply_conv1d(self, wrapper, w, b, padding, stride): mapped_centres = tf.nn.conv1d(self.nominal, w, padding=padding, stride=stride) if self.vertices.shape.ndims == 3: # `self.vertices` has no batch dimension; its shape is # (num_vertices, input_length, embedding_channels). mapped_vertices = tf.nn.conv1d(self.vertices, w, padding=padding, stride=stride) elif self.vertices.shape.ndims == 4: # `self.vertices` has shape # (batch_size, num_vertices, input_length, embedding_channels). # Vertices are different for each example in the batch, # e.g. for word perturbations. 
mapped_vertices = snt.BatchApply( lambda x: tf.nn.conv1d(x, w, padding=padding, stride=stride))( self.vertices) else: raise ValueError('"vertices" must have either 3 or 4 dimensions.') lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -3) nominal_out = tf.nn.conv1d(self.nominal, w, padding=padding, stride=stride) if b is not None: nominal_out += b return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out) def apply_conv2d(self, wrapper, w, b, padding, strides): mapped_centres = tf.nn.convolution(self.nominal, w, padding=padding, strides=strides) if self.vertices.shape.ndims == 4: # `self.vertices` has no batch dimension; its shape is # (num_vertices, input_height, input_width, input_channels). mapped_vertices = tf.nn.convolution(self.vertices, w, padding=padding, strides=strides) elif self.vertices.shape.ndims == 5: # `self.vertices` has shape # (batch_size, num_vertices, input_height, input_width, input_channels). # Vertices are different for each example in the batch. 
mapped_vertices = snt.BatchApply( lambda x: tf.nn.convolution(x, w, padding=padding, strides=strides))( self.vertices) else: raise ValueError('"vertices" must have either 4 or 5 dimensions.') lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -4) nominal_out = tf.nn.convolution(self.nominal, w, padding=padding, strides=strides) if b is not None: nominal_out += b return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out) def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters): if fn.__name__ in ('add', 'reduce_mean', 'reduce_sum', 'avg_pool'): if self.vertices.shape.ndims == self.nominal.shape.ndims: vertices_fn = fn else: vertices_fn = snt.BatchApply(fn, n_dims=2) return SimplexBounds( vertices_fn(self.vertices, *[bounds.vertices for bounds in args]), fn(self.nominal, *[bounds.nominal for bounds in args]), self.r) elif fn.__name__ == 'quotient': return SimplexBounds( self.vertices / tf.expand_dims(parameters['denom'], axis=1), fn(self.nominal), self.r) else: return super(SimplexBounds, self).apply_increasing_monotonic_fn( wrapper, fn, *args, **parameters) def _simplex_bounds(mapped_vertices, mapped_centres, r, axis): """Calculates naive bounds on the given layer-mapped vertices. Args: mapped_vertices: Tensor of shape (num_vertices, *output_shape) or of shape (batch_size, num_vertices, *output_shape) containing the vertices in the layer's output space. mapped_centres: Tensor of shape (batch_size, *output_shape) containing the layer's nominal outputs. r: Scalar in [0, 1) specifying the radius (in vocab space) of the simplex. axis: Index of the `num_vertices` dimension of `mapped_vertices`. Returns: lb_out: Tensor of shape (batch_size, *output_shape) with lower bounds on the outputs of the affine layer. ub_out: Tensor of shape (batch_size, *output_shape) with upper bounds on the outputs of the affine layer. 
""" # Use the negative of r, instead of the complement of r, as # we're shifting the input domain to be centred at the origin. lb_out = -r * mapped_centres + r * tf.reduce_min(mapped_vertices, axis=axis) ub_out = -r * mapped_centres + r * tf.reduce_max(mapped_vertices, axis=axis) return lb_out, ub_out
apache-2.0
3,811,566,715,278,191,600
38.020513
79
0.655539
false
3.770565
false
false
false
sigurdga/nidarholm
organization/templatetags/grouplistings.py
1
2466
from django import template from django.contrib.auth.models import Group from organization.models import GroupCategory, Role import re register = template.Library() def roles_for_user_in_group(user, group): return Role.objects.filter(membership__user=user, membership__group=group) def phone_number_format(number): if number: m = re.search(r'^((?:4|9)\d{2})(\d{2})(\d{3})$', number) if m: return "%s %s %s" % (m.group(1), m.group(2), m.group(3)) else: n = re.search(r'^(\d{2})(\d{2})(\d{2})(\d{2})$', number) if n: return "%s %s %s %s" % (n.group(1), n.group(2), n.group(3), n.group(4)) else: return number @register.simple_tag def list_groups(request, group_name, groupcategory_name): """Give a group and a not related group category. Lists all groups in groupcategory, filtered on users in the given group. """ group = Group.objects.get(name__iexact=group_name) groupcategory = GroupCategory.objects.get(name=groupcategory_name) #TODO: Add 404 on exceptions ret = '<ul class="reset">' for groupprofile in groupcategory.groupprofile_set.all(): ret += "<li>" ret += "<h2>" + groupprofile.group.name + "</h2>" ret += "<table>" for u in groupprofile.group.user_set.all(): # groupprofile.group.user_set.filter(groups=group) is too eager #if u.groups.filter(id=group.id).exists(): if u.userprofile_set.filter(status__lt=4): ret += "<tr>" if request.organization.group in request.user.groups.all(): ret += "<td class=\"col4\"><a href=\"" + u.get_absolute_url() +"\">" + u.get_full_name() + "</a></td>" else: ret += "<td class=\"col4\">" + u.get_full_name() + "</td>" ret += "<td>" + ", ".join([ role.name for role in roles_for_user_in_group(u, group) ]) + "</td>" if request.user.groups.filter(id=group.id): ret += "<td class=\"col2\">%s</td>" % (phone_number_format(u.get_profile().cellphone) or "",) ret += "<td class=\"col5\">%s</td>" % (u.email,) ret += "<td>" + ", ".join([ role.name for role in roles_for_user_in_group(u, groupprofile.group) ]) + "</td>" ret += "</tr>" ret += "</table>" ret += 
"</li>" ret += "</ul>" return ret
agpl-3.0
-7,402,660,224,905,612,000
41.517241
125
0.539335
false
3.401379
false
false
false
lorensen/VTKExamples
src/Python/GeometricObjects/Polygon.py
1
1535
#!/usr/bin/env python import vtk def main(): colors = vtk.vtkNamedColors() # Setup four points points = vtk.vtkPoints() points.InsertNextPoint(0.0, 0.0, 0.0) points.InsertNextPoint(1.0, 0.0, 0.0) points.InsertNextPoint(1.0, 1.0, 0.0) points.InsertNextPoint(0.0, 1.0, 0.0) # Create the polygon polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) # Add the polygon to a list of polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a mapper and actor mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(colors.GetColor3d("Silver")) # Visualize renderer = vtk.vtkRenderer() renderWindow = vtk.vtkRenderWindow() renderWindow.SetWindowName("Polygon") renderWindow.AddRenderer(renderer) renderWindowInteractor = vtk.vtkRenderWindowInteractor() renderWindowInteractor.SetRenderWindow(renderWindow) renderer.AddActor(actor) renderer.SetBackground(colors.GetColor3d("Salmon")) renderWindow.Render() renderWindowInteractor.Start() if __name__ == '__main__': main()
apache-2.0
-4,295,740,643,100,989,000
26.909091
61
0.695765
false
3.449438
false
false
false
angelicadly/prog-script
tekton-master/backend/appengine/routes/rotas/rest.py
1
1044
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from gaebusiness.business import CommandExecutionException from tekton.gae.middleware.json_middleware import JsonResponse from rota_app import facade def index(): cmd = facade.list_rotas_cmd() rota_list = cmd() short_form=facade.rota_short_form() rota_short = [short_form.fill_with_model(m) for m in rota_list] return JsonResponse(rota_short) def save(**rota_properties): cmd = facade.save_rota_cmd(**rota_properties) return _save_or_update_json_response(cmd) def update(rota_id, **rota_properties): cmd = facade.update_rota_cmd(rota_id, **rota_properties) return _save_or_update_json_response(cmd) def delete(rota_id): facade.delete_rota_cmd(rota_id)() def _save_or_update_json_response(cmd): try: rota = cmd() except CommandExecutionException: return JsonResponse({'errors': cmd.errors}) short_form=facade.rota_short_form() return JsonResponse(short_form.fill_with_model(rota))
mit
-1,036,920,398,554,202,000
27.216216
67
0.704981
false
3.144578
false
false
false
akshayka/edxclassify
edxclassify/classifiers/feature_generation.py
1
3989
from edxclassify.feature_spec import FEATURE_COLUMNS from edxclassify.classifiers.word_lists import * from edxclassify.data_cleaners.dc_util import compress_likert import re import nltk from nltk.tokenize import sent_tokenize, word_tokenize def to_int(value, aux=None): if value == '': return 0 return int(value) def to_float(value, aux=None): if value == '': return 0 return 1 if float(value) > 0.94 else 0 def is_anonymous(value, aux=None): return 1 if value.lower() == 'true' else 0 def is_comment_thread(value, aux=None): return 1 if value.lower() == 'commentthread' else 0 def count_question_marks(document, aux=None): count = 0 for c in document: if c == '?': count = count + 1 return count # TODO: How do these play with logistic regression? # TODO: Idea -- feature for sentiment ~ 1 iff #pos > #neg def count_negative_words(document, token_patrn): words = re.findall(token_patrn, document) count = 0 for w in words: if w in NEGATIVE_WORDS: count = count + 1 return count def count_urgent_words(document, token_patrn): words = re.findall(token_patrn, document) count = 0 for w in words: if w in URGENT_WORDS: return 1 return 0 def count_opinion_words(document, token_patrn): words = re.findall(token_patrn, document) count = 0 for w in words: if w in OPINION_WORDS: count = count + 1 return count def count_nouns(document, aux=None): tagged_words = [] for s in sent_tokenize(document.decode('utf-8')): tagged_words.extend(nltk.pos_tag(word_tokenize(s))) count = 0 for word, tag in tagged_words: if tag == 'NN': count = count + 1 return count # TODO: We might want to discretize the grades and number of attempts class FeatureExtractor: def __init__(self, feature_name): self.feature_name = feature_name def fit(self, X, y=None): return self def transform(self, X, y=None): idx = FEATURE_COLUMNS[self.feature_name] return [row[idx] for row in X] class FeatureCurator: def __init__(self, feature_name, curate_function, aux=None): self.feature_name = feature_name self.curate = curate_function 
self.aux=aux def fit(self, X, y=None): return self def transform(self, X, y=None): return [{self.feature_name + ' feature': self.curate(value, self.aux)} for value in X] def fit_transform(self, X, y=None): return self.transform(X) class ChainedClassifier: def __init__(self, clf, column, guess): self.clf = clf self.column = column self.y_chain = None self.guess = guess def fit(self, X, y=None): # Note that the extracted values will be in # [0, 2] for non-binary variables (confusion, # sentiment, urgency), {0, 1} otherwise. if self.column == 'confusion' or\ self.column == 'sentiment' or\ self.column == 'urgency': self.y_chain = [compress_likert( record[FEATURE_COLUMNS[self.column]], binary=False)\ for record in X] else: self.y_chain = [int(record[FEATURE_COLUMNS[self.column]])\ for record in X] self.clf.train(X, self.y_chain) def transform(self, X, y=None): if self.y_chain is not None and not self.guess: predictions = self.y_chain # This is critical -- it ensures # that we don't use the gold set values when # predicting. self.y_chain = None else: predictions = self.clf.test(X) return [{self.column + ' prediction': value} for value in predictions] def fit_transform(self, X, y=None): self.fit(X) return self.transform(X)
gpl-2.0
2,360,410,365,754,236,000
28.992481
78
0.591376
false
3.659633
false
false
false
srmagura/goodnight-lead
gl_site/statistics/views.py
1
6287
# View imports from django.http import JsonResponse, HttpResponse from django.shortcuts import render from gl_site.custom_auth import login_required # Forms from gl_site.statistics.statistics_form import statistics_request_form, statistics_download_form # Data from .data_generation import format_graph_data, format_file_data, generate_data_from_sessions, get_queryset, validate_sessions from gl_site.statistics import inventory_keys # IO from django.core.files.base import ContentFile from io import BytesIO # JSON import json # Excel import xlsxwriter # Response statuses BAD_REQUEST = 400 FORBIDDEN = 403 METHOD_NOT_ALLOWED = 405 # Error messages METHOD_NOT_ALLOWED_MESSAGE = "Method not allowed." INVALID_DATA_SELECTION = "Invalid data selection." @login_required def view_statistics(request): """ View responsable for initially loading the statistics page """ # Get the proper queryset and generate the form querysets = get_queryset(request.user) form = statistics_request_form( querysets['organizations'], querysets['sessions'] ) downloads = statistics_download_form( querysets['organizations'], querysets['sessions'], auto_id='id_downloads_%s' ) return render(request, 'statistics/statistics.html', { 'form': form, 'downloads': downloads, 'statistics_active': True, }) @login_required def load_data(request): """ Returns a JSON respons containing statistics data """ # Deny non GET requests if (request.method != 'GET'): return JsonResponse([METHOD_NOT_ALLOWED_MESSAGE], status=METHOD_NOT_ALLOWED, safe=False) # Get the querysets accessable by the user querysets = get_queryset(request.user) # Build the submitted form from request data form = statistics_request_form( querysets['organizations'], querysets['sessions'], request.GET ) # Validate the form if (not form.is_valid()): return JsonResponse([INVALID_DATA_SELECTION], status=FORBIDDEN, safe=False) try: # Validate sessions sessions = validate_sessions( form.cleaned_data['organization'], form.cleaned_data['session'], request.user ) # 
Generate and format the data data = generate_data_from_sessions(sessions, request.user) data = format_graph_data(data) # Return the JSON encoded response return JsonResponse(data, safe=False) except LookupError as e: return JsonResponse([str(e)], status=BAD_REQUEST, safe=False) def download_data(request): # Get the querysets accessable by the user querysets = get_queryset(request.user) # Get the selected downloads downloads = statistics_download_form( querysets['organizations'], querysets['sessions'], request.GET, auto_id='id_downloads_%s' ) # If it is a valid choice if (downloads.is_valid()): data = [] try: # Validate sessions sessions = validate_sessions( downloads.cleaned_data['organization'], downloads.cleaned_data['session'], request.user ) # Generate the data data = generate_data_from_sessions(sessions, request.user) data = format_file_data(data) except LookupError: pass else: data_file = ContentFile('') # Finalize the output if (downloads.cleaned_data['file_type'] == 'application/xlsx'): # Create an excel workbook wrapped around python byte io. # Use in memory to prevent the use of temp files. output = BytesIO() workbook = xlsxwriter.Workbook(output, {'in_memory': True}) # Create a worksheet. 
worksheet = workbook.add_worksheet() # Set ID, Organization, and Session headers worksheet.write('A1', 'User ID') worksheet.write('B1', 'Organization') worksheet.write('C1', 'Session') # Add all user IDs (row number), organization, and session information row = 2 for user in data: worksheet.write('A{}'.format(row), row - 1) worksheet.write('B{}'.format(row), user['organization']) worksheet.write('C{}'.format(row), user['session']) row += 1 # Print inventory data starting at column D prefix = '' column = ord('D') for inventory in inventory_keys: # Print all metrics within the inventory for key in inventory['keys']: # If column is greater than 'Z' move to 'AA' if (column > ord('Z')): prefix = 'A' column = ord('A') # Write the column header: Inventory - Metric worksheet.write(prefix + chr(column) + '1', inventory['name'] + ' - ' + key) # Print metric data for each user row = 2 for user in data: inventory_name = inventory['name'] # Only print if the user has data for this inventory if (inventory_name in user and key in user[inventory_name]): cell = (prefix + chr(column) + '{}').format(row) worksheet.write(cell, user[inventory['name']][key]) # Move on to the next row row += 1 # Move on to the next column column += 1 # Close the workbook workbook.close() # Get the output bytes for creating a django file output = output.getvalue() # Set the appropriate application extension extension = '.xlsx' else: # Generate the JSON output string output = json.dumps(data) # Set the appropriate application extension extension = '.json' # Generate the data file data_file = ContentFile(output) # Create the response containing the file response = HttpResponse( data_file, content_type=downloads.cleaned_data['file_type'] ) response['Content-Disposition'] = 'attachment; filename=statistics{}'.format(extension) return response
gpl-3.0
-8,336,534,110,600,097,000
30.435
126
0.606171
false
4.474733
false
false
false
adamfast/faadata
faadata/aircraft/parser.py
1
3304
import datetime class AircraftManufacturerCode(object): def __init__(self, record): self.code = record[:7].strip() self.manufacturer = record[8:38].strip() self.model = record[39:59].strip() self.aircraft_type = record[60].strip() self.engine_type = record[62].strip() self.category = record[64].strip() self.builder_certification_code = record[66].strip() self.number_of_engines = record[68:70].strip() self.number_of_seats = record[71:74].strip() self.aircraft_weight = record[75:82].strip() self.cruising_speed = record[83:87].strip() class AircraftRegistration(object): def __init__(self, record): # first parse the fixed-width self.n_number = record[:5].strip() self.serial_number = record[6:36].strip() self.aircraft_mfr_model_code = record[37:44].strip() self.engine_mfr_model_code = record[45:50].strip() self.year_mfg = record[51:55].strip() if record[56].strip(): self.type_registrant = record[56].strip() else: self.type_registrant = None self.registrant_name = record[58:108].strip() self.street1 = record[109:142].strip() self.street2 = record[143:176].strip() self.city = record[177:195].strip() self.state = record[196:198].strip() self.zip_code = record[199:209].strip() self.region = record[210].strip() self.county = record[212:215].strip() self.country = record[216:218].strip() if record[219:227].strip(): self.last_activity_date = datetime.datetime.strptime(record[219:227], "%Y%m%d").date() else: self.last_activity_date = None if record[228:236].strip(): self.certificate_issue_date = datetime.datetime.strptime(record[228:236], "%Y%m%d").date() else: self.certificate_issue_date = None self.airworthiness_classification_code = record[237:238].strip() if record[248].strip(): self.aircraft_type = record[248].strip() else: self.aircraft_type = None if record[250:252].strip(): self.engine_type = record[250:252].strip() else: self.engine_type = None self.status_code = record[253:255].strip() self.mode_s_code = record[256:264].strip() self.fractional_ownership = 
record[265].strip() if record[267:275].strip(): self.airworthiness_date = datetime.datetime.strptime(record[267:275], "%Y%m%d").date() else: self.airworthiness_date = None self.other_name_1 = record[276:326].strip() self.other_name_2 = record[327:377].strip() self.other_name_3 = record[378:428].strip() self.other_name_4 = record[429:479].strip() self.other_name_5 = record[480:530].strip() if record[531:539].strip(): self.expiration_date = datetime.datetime.strptime(record[531:539], "%Y%m%d").date() else: self.expiration_date = None self.unique_id = record[540:548].strip() self.kit_manufacturer = record[549:579].strip() self.kit_model = record[580:600].strip() self.mode_s_code_hex = record[601:611].strip()
bsd-3-clause
-8,092,445,956,844,940,000
43.648649
102
0.592918
false
3.448852
false
false
false
Aravinthu/odoo
odoo/fields.py
1
104967
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. """ High-level objects for fields. """ from collections import OrderedDict, defaultdict from datetime import date, datetime from functools import partial from operator import attrgetter import itertools import logging import pytz try: from xmlrpc.client import MAXINT except ImportError: #pylint: disable=bad-python3-import from xmlrpclib import MAXINT import psycopg2 from .sql_db import LazyCursor from .tools import float_repr, float_round, frozendict, html_sanitize, human_size, pg_varchar, ustr, OrderedSet, pycompat, sql from .tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT from .tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT from .tools.translate import html_translate, _ DATE_LENGTH = len(date.today().strftime(DATE_FORMAT)) DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT)) EMPTY_DICT = frozendict() RENAMED_ATTRS = [('select', 'index'), ('digits_compute', 'digits')] _logger = logging.getLogger(__name__) _schema = logging.getLogger(__name__[:-7] + '.schema') Default = object() # default value for __init__() methods def copy_cache(records, env): """ Recursively copy the cache of ``records`` to the environment ``env``. """ src, dst = records.env.cache, env.cache todo, done = set(records), set() while todo: record = todo.pop() if record not in done: done.add(record) target = record.with_env(env) for field in src.get_fields(record): value = src.get(record, field) dst.set(target, field, value) if value and field.type in ('many2one', 'one2many', 'many2many', 'reference'): todo.update(field.convert_to_record(value, record)) def resolve_mro(model, name, predicate): """ Return the list of successively overridden values of attribute ``name`` in mro order on ``model`` that satisfy ``predicate``. 
""" result = [] for cls in type(model).__mro__: if name in cls.__dict__: value = cls.__dict__[name] if not predicate(value): break result.append(value) return result class MetaField(type): """ Metaclass for field classes. """ by_type = {} def __new__(meta, name, bases, attrs): """ Combine the ``_slots`` dict from parent classes, and determine ``__slots__`` for them on the new class. """ base_slots = {} for base in reversed(bases): base_slots.update(getattr(base, '_slots', ())) slots = dict(base_slots) slots.update(attrs.get('_slots', ())) attrs['__slots__'] = set(slots) - set(base_slots) attrs['_slots'] = slots return type.__new__(meta, name, bases, attrs) def __init__(cls, name, bases, attrs): super(MetaField, cls).__init__(name, bases, attrs) if not hasattr(cls, 'type'): return if cls.type and cls.type not in MetaField.by_type: MetaField.by_type[cls.type] = cls # compute class attributes to avoid calling dir() on fields cls.related_attrs = [] cls.description_attrs = [] for attr in dir(cls): if attr.startswith('_related_'): cls.related_attrs.append((attr[9:], attr)) elif attr.startswith('_description_'): cls.description_attrs.append((attr[13:], attr)) _global_seq = iter(itertools.count()) class Field(MetaField('DummyField', (object,), {})): """ The field descriptor contains the field definition, and manages accesses and assignments of the corresponding field on records. The following attributes may be provided when instanciating a field: :param string: the label of the field seen by users (string); if not set, the ORM takes the field name in the class (capitalized). 
:param help: the tooltip of the field seen by users (string) :param readonly: whether the field is readonly (boolean, by default ``False``) :param required: whether the value of the field is required (boolean, by default ``False``) :param index: whether the field is indexed in database (boolean, by default ``False``) :param default: the default value for the field; this is either a static value, or a function taking a recordset and returning a value; use ``default=None`` to discard default values for the field :param states: a dictionary mapping state values to lists of UI attribute-value pairs; possible attributes are: 'readonly', 'required', 'invisible'. Note: Any state-based condition requires the ``state`` field value to be available on the client-side UI. This is typically done by including it in the relevant views, possibly made invisible if not relevant for the end-user. :param groups: comma-separated list of group xml ids (string); this restricts the field access to the users of the given groups only :param bool copy: whether the field value should be copied when the record is duplicated (default: ``True`` for normal fields, ``False`` for ``one2many`` and computed fields, including property fields and related fields) :param string oldname: the previous name of this field, so that ORM can rename it automatically at migration .. _field-computed: .. rubric:: Computed fields One can define a field whose value is computed instead of simply being read from the database. The attributes that are specific to computed fields are given below. To define such a field, simply provide a value for the attribute ``compute``. 
:param compute: name of a method that computes the field :param inverse: name of a method that inverses the field (optional) :param search: name of a method that implement search on the field (optional) :param store: whether the field is stored in database (boolean, by default ``False`` on computed fields) :param compute_sudo: whether the field should be recomputed as superuser to bypass access rights (boolean, by default ``False``) The methods given for ``compute``, ``inverse`` and ``search`` are model methods. Their signature is shown in the following example:: upper = fields.Char(compute='_compute_upper', inverse='_inverse_upper', search='_search_upper') @api.depends('name') def _compute_upper(self): for rec in self: rec.upper = rec.name.upper() if rec.name else False def _inverse_upper(self): for rec in self: rec.name = rec.upper.lower() if rec.upper else False def _search_upper(self, operator, value): if operator == 'like': operator = 'ilike' return [('name', operator, value)] The compute method has to assign the field on all records of the invoked recordset. The decorator :meth:`odoo.api.depends` must be applied on the compute method to specify the field dependencies; those dependencies are used to determine when to recompute the field; recomputation is automatic and guarantees cache/database consistency. Note that the same method can be used for several fields, you simply have to assign all the given fields in the method; the method will be invoked once for all those fields. By default, a computed field is not stored to the database, and is computed on-the-fly. Adding the attribute ``store=True`` will store the field's values in the database. The advantage of a stored field is that searching on that field is done by the database itself. The disadvantage is that it requires database updates when the field must be recomputed. 
The inverse method, as its name says, does the inverse of the compute method: the invoked records have a value for the field, and you must apply the necessary changes on the field dependencies such that the computation gives the expected value. Note that a computed field without an inverse method is readonly by default. The search method is invoked when processing domains before doing an actual search on the model. It must return a domain equivalent to the condition: ``field operator value``. .. _field-related: .. rubric:: Related fields The value of a related field is given by following a sequence of relational fields and reading a field on the reached model. The complete sequence of fields to traverse is specified by the attribute :param related: sequence of field names Some field attributes are automatically copied from the source field if they are not redefined: ``string``, ``help``, ``readonly``, ``required`` (only if all fields in the sequence are required), ``groups``, ``digits``, ``size``, ``translate``, ``sanitize``, ``selection``, ``comodel_name``, ``domain``, ``context``. All semantic-free attributes are copied from the source field. By default, the values of related fields are not stored to the database. Add the attribute ``store=True`` to make it stored, just like computed fields. Related fields are automatically recomputed when their dependencies are modified. .. _field-company-dependent: .. rubric:: Company-dependent fields Formerly known as 'property' fields, the value of those fields depends on the company. In other words, users that belong to different companies may see different values for the field on a given record. :param company_dependent: whether the field is company-dependent (boolean) .. _field-incremental-definition: .. rubric:: Incremental definition A field is defined as class attribute on a model class. 
If the model is extended (see :class:`~odoo.models.Model`), one can also extend the field definition by redefining a field with the same name and same type on the subclass. In that case, the attributes of the field are taken from the parent class and overridden by the ones given in subclasses. For instance, the second class below only adds a tooltip on the field ``state``:: class First(models.Model): _name = 'foo' state = fields.Selection([...], required=True) class Second(models.Model): _inherit = 'foo' state = fields.Selection(help="Blah blah blah") """ type = None # type of the field (string) relational = False # whether the field is a relational one translate = False # whether the field is translated column_type = None # database column type (ident, spec) column_format = '%s' # placeholder for value in queries column_cast_from = () # column types that may be cast to this _slots = { 'args': EMPTY_DICT, # the parameters given to __init__() '_attrs': EMPTY_DICT, # the field's non-slot attributes '_module': None, # the field's module name '_setup_done': None, # the field's setup state: None, 'base' or 'full' '_sequence': None, # absolute ordering of the field 'automatic': False, # whether the field is automatically created ("magic" field) 'inherited': False, # whether the field is inherited (_inherits) 'name': None, # name of the field 'model_name': None, # name of the model of this field 'comodel_name': None, # name of the model of values (if relational) 'store': True, # whether the field is stored in database 'index': False, # whether the field is indexed in database 'manual': False, # whether the field is a custom field 'copy': True, # whether the field is copied over by BaseModel.copy() 'depends': (), # collection of field dependencies 'recursive': False, # whether self depends on itself 'compute': None, # compute(recs) computes field on recs 'compute_sudo': False, # whether field should be recomputed as admin 'inverse': None, # inverse(recs) inverses field on 
recs 'search': None, # search(recs, operator, value) searches on self 'related': None, # sequence of field names, for related fields 'related_sudo': True, # whether related fields should be read as admin 'company_dependent': False, # whether ``self`` is company-dependent (property field) 'default': None, # default(recs) returns the default value 'string': None, # field label 'help': None, # field tooltip 'readonly': False, # whether the field is readonly 'required': False, # whether the field is required 'states': None, # set readonly and required depending on state 'groups': None, # csv list of group xml ids 'change_default': False, # whether the field may trigger a "user-onchange" 'deprecated': None, # whether the field is deprecated 'related_field': None, # corresponding related field 'group_operator': None, # operator for aggregating values 'group_expand': None, # name of method to expand groups in read_group() 'prefetch': True, # whether the field is prefetched 'context_dependent': False, # whether the field's value depends on context } def __init__(self, string=Default, **kwargs): kwargs['string'] = string self._sequence = kwargs['_sequence'] = next(_global_seq) args = {key: val for key, val in kwargs.items() if val is not Default} self.args = args or EMPTY_DICT self._setup_done = None def new(self, **kwargs): """ Return a field of the same type as ``self``, with its own parameters. """ return type(self)(**kwargs) def __getattr__(self, name): """ Access non-slot field attribute. """ try: return self._attrs[name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): """ Set slot or non-slot field attribute. """ try: object.__setattr__(self, name, value) except AttributeError: if self._attrs: self._attrs[name] = value else: self._attrs = {name: value} # replace EMPTY_DICT def set_all_attrs(self, attrs): """ Set all field attributes at once (with slot defaults). 
""" # optimization: we assign slots only assign = object.__setattr__ for key, val in self._slots.items(): assign(self, key, attrs.pop(key, val)) if attrs: assign(self, '_attrs', attrs) def __delattr__(self, name): """ Remove non-slot field attribute. """ try: del self._attrs[name] except KeyError: raise AttributeError(name) def __str__(self): return "%s.%s" % (self.model_name, self.name) def __repr__(self): return "%s.%s" % (self.model_name, self.name) ############################################################################ # # Base field setup: things that do not depend on other models/fields # def setup_base(self, model, name): """ Base setup: things that do not depend on other models/fields. """ if self._setup_done and not self.related: # optimization for regular fields: keep the base setup self._setup_done = 'base' else: # do the base setup from scratch self._setup_attrs(model, name) if not self.related: self._setup_regular_base(model) self._setup_done = 'base' # # Setup field parameter attributes # def _can_setup_from(self, field): """ Return whether ``self`` can retrieve parameters from ``field``. """ return isinstance(field, type(self)) def _get_attrs(self, model, name): """ Return the field parameter attributes as a dictionary. 
""" # determine all inherited field attributes attrs = {} if not (self.args.get('automatic') or self.args.get('manual')): # magic and custom fields do not inherit from parent classes for field in reversed(resolve_mro(model, name, self._can_setup_from)): attrs.update(field.args) attrs.update(self.args) # necessary in case self is not in class attrs['args'] = self.args attrs['model_name'] = model._name attrs['name'] = name # initialize ``self`` with ``attrs`` if attrs.get('compute'): # by default, computed fields are not stored, not copied and readonly attrs['store'] = attrs.get('store', False) attrs['copy'] = attrs.get('copy', False) attrs['readonly'] = attrs.get('readonly', not attrs.get('inverse')) attrs['context_dependent'] = attrs.get('context_dependent', True) if attrs.get('related'): # by default, related fields are not stored and not copied attrs['store'] = attrs.get('store', False) attrs['copy'] = attrs.get('copy', False) if attrs.get('company_dependent'): # by default, company-dependent fields are not stored and not copied attrs['store'] = False attrs['copy'] = attrs.get('copy', False) attrs['default'] = self._default_company_dependent attrs['compute'] = self._compute_company_dependent if not attrs.get('readonly'): attrs['inverse'] = self._inverse_company_dependent attrs['search'] = self._search_company_dependent attrs['context_dependent'] = attrs.get('context_dependent', True) if attrs.get('translate'): # by default, translatable fields are context-dependent attrs['context_dependent'] = attrs.get('context_dependent', True) return attrs def _setup_attrs(self, model, name): """ Initialize the field parameter attributes. 
""" attrs = self._get_attrs(model, name) self.set_all_attrs(attrs) # check for renamed attributes (conversion errors) for key1, key2 in RENAMED_ATTRS: if key1 in attrs: _logger.warning("Field %s: parameter %r is no longer supported; use %r instead.", self, key1, key2) # prefetch only stored, column, non-manual and non-deprecated fields if not (self.store and self.column_type) or self.manual or self.deprecated: self.prefetch = False if not self.string and not self.related: # related fields get their string from their parent field self.string = ( name[:-4] if name.endswith('_ids') else name[:-3] if name.endswith('_id') else name ).replace('_', ' ').title() # self.default must be a callable if self.default is not None: value = self.default self.default = value if callable(value) else lambda model: value ############################################################################ # # Full field setup: everything else, except recomputation triggers # def setup_full(self, model): """ Full setup: everything else, except recomputation triggers. """ if self._setup_done != 'full': if not self.related: self._setup_regular_full(model) else: self._setup_related_full(model) self._setup_done = 'full' # # Setup of non-related fields # def _setup_regular_base(self, model): """ Setup the attributes of a non-related field. """ def make_depends(deps): return tuple(deps(model) if callable(deps) else deps) if isinstance(self.compute, pycompat.string_types): # if the compute method has been overridden, concatenate all their _depends self.depends = () for method in resolve_mro(model, self.compute, callable): self.depends += make_depends(getattr(method, '_depends', ())) else: self.depends = make_depends(getattr(self.compute, '_depends', ())) def _setup_regular_full(self, model): """ Setup the inverse field(s) of ``self``. """ pass # # Setup of related fields # def _setup_related_full(self, model): """ Setup the attributes of a related field. 
""" # fix the type of self.related if necessary if isinstance(self.related, pycompat.string_types): self.related = tuple(self.related.split('.')) # determine the chain of fields, and make sure they are all set up target = model for name in self.related: field = target._fields[name] field.setup_full(target) target = target[name] self.related_field = field # check type consistency if self.type != field.type: raise TypeError("Type of related field %s is inconsistent with %s" % (self, field)) # determine dependencies, compute, inverse, and search self.depends = ('.'.join(self.related),) self.compute = self._compute_related if not (self.readonly or field.readonly): self.inverse = self._inverse_related if field._description_searchable: # allow searching on self only if the related field is searchable self.search = self._search_related # copy attributes from field to self (string, help, etc.) for attr, prop in self.related_attrs: if not getattr(self, attr): setattr(self, attr, getattr(field, prop)) for attr, value in field._attrs.items(): if attr not in self._attrs: setattr(self, attr, value) # special case for states: copy it only for inherited fields if not self.states and self.inherited: self.states = field.states # special case for inherited required fields if self.inherited and field.required: self.required = True def traverse_related(self, record): """ Traverse the fields of the related field `self` except for the last one, and return it as a pair `(last_record, last_field)`. """ for name in self.related[:-1]: record = record[name][:1].with_prefetch(record._prefetch) return record, self.related_field def _compute_related(self, records): """ Compute the related field ``self`` on ``records``. 
""" # when related_sudo, bypass access rights checks when reading values others = records.sudo() if self.related_sudo else records for record, other in pycompat.izip(records, others): if not record.id and record.env != other.env: # draft records: copy record's cache to other's cache first copy_cache(record, other.env) other, field = self.traverse_related(other) record[self.name] = other[field.name] def _inverse_related(self, records): """ Inverse the related field ``self`` on ``records``. """ # store record values, otherwise they may be lost by cache invalidation! record_value = {record: record[self.name] for record in records} for record in records: other, field = self.traverse_related(record) if other: other[field.name] = record_value[record] def _search_related(self, records, operator, value): """ Determine the domain to search on field ``self``. """ return [('.'.join(self.related), operator, value)] # properties used by _setup_related_full() to copy values from related field _related_comodel_name = property(attrgetter('comodel_name')) _related_string = property(attrgetter('string')) _related_help = property(attrgetter('help')) _related_readonly = property(attrgetter('readonly')) _related_groups = property(attrgetter('groups')) _related_group_operator = property(attrgetter('group_operator')) @property def base_field(self): """ Return the base field of an inherited field, or ``self``. 
""" return self.related_field.base_field if self.inherited else self # # Company-dependent fields # def _default_company_dependent(self, model): return model.env['ir.property'].get(self.name, self.model_name) def _compute_company_dependent(self, records): Property = records.env['ir.property'] values = Property.get_multi(self.name, self.model_name, records.ids) for record in records: record[self.name] = values.get(record.id) def _inverse_company_dependent(self, records): Property = records.env['ir.property'] values = { record.id: self.convert_to_write(record[self.name], record) for record in records } Property.set_multi(self.name, self.model_name, values) def _search_company_dependent(self, records, operator, value): Property = records.env['ir.property'] return Property.search_multi(self.name, self.model_name, operator, value) # # Setup of field triggers # # The triggers of ``self`` are a collection of pairs ``(field, path)`` of # fields that depend on ``self``. When ``self`` is modified, it invalidates # the cache of each ``field``, and determines the records to recompute based # on ``path``. See method ``modified`` below for details. # def resolve_deps(self, model): """ Return the dependencies of ``self`` as tuples ``(model, field, path)``, where ``path`` is an optional list of field names. 
""" model0 = model result = [] # add self's own dependencies for dotnames in self.depends: if dotnames == self.name: _logger.warning("Field %s depends on itself; please fix its decorator @api.depends().", self) model, path = model0, dotnames.split('.') for i, fname in enumerate(path): field = model._fields[fname] result.append((model, field, path[:i])) model = model0.env.get(field.comodel_name) # add self's model dependencies for mname, fnames in model0._depends.items(): model = model0.env[mname] for fname in fnames: field = model._fields[fname] result.append((model, field, None)) # add indirect dependencies from the dependencies found above for model, field, path in list(result): for inv_field in model._field_inverses[field]: inv_model = model0.env[inv_field.model_name] inv_path = None if path is None else path + [field.name] result.append((inv_model, inv_field, inv_path)) return result def setup_triggers(self, model): """ Add the necessary triggers to invalidate/recompute ``self``. """ for model, field, path in self.resolve_deps(model): if field is not self: path_str = None if path is None else ('.'.join(path) or 'id') model._field_triggers.add(field, (self, path_str)) elif path: self.recursive = True model._field_triggers.add(field, (self, '.'.join(path))) ############################################################################ # # Field description # def get_description(self, env): """ Return a dictionary that describes the field ``self``. 
""" desc = {'type': self.type} for attr, prop in self.description_attrs: value = getattr(self, prop) if callable(value): value = value(env) if value is not None: desc[attr] = value return desc # properties used by get_description() _description_store = property(attrgetter('store')) _description_manual = property(attrgetter('manual')) _description_depends = property(attrgetter('depends')) _description_related = property(attrgetter('related')) _description_company_dependent = property(attrgetter('company_dependent')) _description_readonly = property(attrgetter('readonly')) _description_required = property(attrgetter('required')) _description_states = property(attrgetter('states')) _description_groups = property(attrgetter('groups')) _description_change_default = property(attrgetter('change_default')) _description_deprecated = property(attrgetter('deprecated')) @property def _description_searchable(self): return bool(self.store or self.search) @property def _description_sortable(self): return self.store or (self.inherited and self.related_field._description_sortable) def _description_string(self, env): if self.string and env.lang: model_name = self.base_field.model_name field_string = env['ir.translation'].get_field_string(model_name) return field_string.get(self.name) or self.string return self.string def _description_help(self, env): if self.help and env.lang: model_name = self.base_field.model_name field_help = env['ir.translation'].get_field_help(model_name) return field_help.get(self.name) or self.help return self.help ############################################################################ # # Conversion of values # def cache_key(self, record): """ Return the key to get/set the value of ``self`` on ``record`` in cache, the full cache key being ``(self, record.id, key)``. """ env = record.env return env if self.context_dependent else (env.cr, env.uid) def null(self, record): """ Return the null value for this field in the record format. 
""" return False def convert_to_column(self, value, record, values=None): """ Convert ``value`` from the ``write`` format to the SQL format. """ if value is None or value is False: return None return pycompat.to_native(value) def convert_to_cache(self, value, record, validate=True): """ Convert ``value`` to the cache format; ``value`` may come from an assignment, or have the format of methods :meth:`BaseModel.read` or :meth:`BaseModel.write`. If the value represents a recordset, it should be added for prefetching on ``record``. :param bool validate: when True, field-specific validation of ``value`` will be performed """ return value def convert_to_record(self, value, record): """ Convert ``value`` from the cache format to the record format. If the value represents a recordset, it should share the prefetching of ``record``. """ return value def convert_to_read(self, value, record, use_name_get=True): """ Convert ``value`` from the record format to the format returned by method :meth:`BaseModel.read`. :param bool use_name_get: when True, the value's display name will be computed using :meth:`BaseModel.name_get`, if relevant for the field """ return False if value is None else value def convert_to_write(self, value, record): """ Convert ``value`` from the record format to the format of method :meth:`BaseModel.write`. """ return self.convert_to_read(value, record) def convert_to_onchange(self, value, record, names): """ Convert ``value`` from the record format to the format returned by method :meth:`BaseModel.onchange`. :param names: a tree of field names (for relational fields only) """ return self.convert_to_read(value, record) def convert_to_export(self, value, record): """ Convert ``value`` from the record format to the export format. """ if not value: return '' return value if record._context.get('export_raw_data') else ustr(value) def convert_to_display_name(self, value, record): """ Convert ``value`` from the record format to a suitable display name. 
""" return ustr(value) ############################################################################ # # Update database schema # def update_db(self, model, columns): """ Update the database schema to implement this field. :param model: an instance of the field's model :param columns: a dict mapping column names to their configuration in database :return: ``True`` if the field must be recomputed on existing rows """ if not self.column_type: return column = columns.get(self.name) if not column and hasattr(self, 'oldname'): # column not found; check whether it exists under its old name column = columns.get(self.oldname) if column: sql.rename_column(model._cr, model._table, self.oldname, self.name) # create/update the column, not null constraint, indexes self.update_db_column(model, column) self.update_db_notnull(model, column) self.update_db_index(model, column) return not column def update_db_column(self, model, column): """ Create/update the column corresponding to ``self``. :param model: an instance of the field's model :param column: the column's configuration (dict) if it exists, or ``None`` """ if not column: # the column does not exist, create it sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string) return if column['udt_name'] == self.column_type[0]: return if column['udt_name'] in self.column_cast_from: sql.convert_column(model._cr, model._table, self.name, self.column_type[1]) else: newname = (self.name + '_moved{}').format i = 0 while sql.column_exists(model._cr, model._table, newname(i)): i += 1 if column['is_nullable'] == 'NO': sql.drop_not_null(model._cr, model._table, self.name) sql.rename_column(model._cr, model._table, self.name, newname(i)) sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string) def update_db_notnull(self, model, column): """ Add or remove the NOT NULL constraint on ``self``. 
:param model: an instance of the field's model :param column: the column's configuration (dict) if it exists, or ``None`` """ has_notnull = column and column['is_nullable'] == 'NO' if not column or (self.required and not has_notnull): # the column is new or it becomes required; initialize its values if model._table_has_rows(): model._init_column(self.name) if self.required and not has_notnull: sql.set_not_null(model._cr, model._table, self.name) elif not self.required and has_notnull: sql.drop_not_null(model._cr, model._table, self.name) def update_db_index(self, model, column): """ Add or remove the index corresponding to ``self``. :param model: an instance of the field's model :param column: the column's configuration (dict) if it exists, or ``None`` """ indexname = '%s_%s_index' % (model._table, self.name) if self.index: sql.create_index(model._cr, indexname, model._table, ['"%s"' % self.name]) else: sql.drop_index(model._cr, indexname, model._table) ############################################################################ # # Read from/write to database # def read(self, records): """ Read the value of ``self`` on ``records``, and store it in cache. """ return NotImplementedError("Method read() undefined on %s" % self) def write(self, records, value, create=False): """ Write the value of ``self`` on ``records``. The ``value`` must be in the format of method :meth:`BaseModel.write`. 
:param create: whether ``records`` have just been created (to enable some optimizations) """ return NotImplementedError("Method write() undefined on %s" % self) ############################################################################ # # Descriptor methods # def __get__(self, record, owner): """ return the value of field ``self`` on ``record`` """ if record is None: return self # the field is accessed through the owner class if record: # only a single record may be accessed record.ensure_one() try: value = record.env.cache.get(record, self) except KeyError: # cache miss, determine value and retrieve it if record.id: self.determine_value(record) else: self.determine_draft_value(record) value = record.env.cache.get(record, self) else: # null record -> return the null value for this field value = self.convert_to_cache(False, record, validate=False) return self.convert_to_record(value, record) def __set__(self, record, value): """ set the value of field ``self`` on ``record`` """ env = record.env # only a single record may be updated record.ensure_one() # adapt value to the cache level value = self.convert_to_cache(value, record) if env.in_draft or not record.id: # determine dependent fields spec = self.modified_draft(record) # set value in cache, inverse field, and mark record as dirty record.env.cache.set(record, self, value) if env.in_onchange: for invf in record._field_inverses[self]: invf._update(record[self.name], record) record._set_dirty(self.name) # determine more dependent fields, and invalidate them if self.relational: spec += self.modified_draft(record) env.cache.invalidate(spec) else: # Write to database write_value = self.convert_to_write(self.convert_to_record(value, record), record) record.write({self.name: write_value}) # Update the cache unless value contains a new record if not (self.relational and not all(value)): record.env.cache.set(record, self, value) ############################################################################ # # 
Computation of field values # def _compute_value(self, records): """ Invoke the compute method on ``records``. """ # initialize the fields to their corresponding null value in cache fields = records._field_computed[self] cache = records.env.cache for field in fields: for record in records: cache.set(record, field, field.convert_to_cache(False, record, validate=False)) if isinstance(self.compute, pycompat.string_types): getattr(records, self.compute)() else: self.compute(records) def compute_value(self, records): """ Invoke the compute method on ``records``; the results are in cache. """ fields = records._field_computed[self] with records.env.do_in_draft(), records.env.protecting(fields, records): try: self._compute_value(records) except (AccessError, MissingError): # some record is forbidden or missing, retry record by record for record in records: try: self._compute_value(record) except Exception as exc: record.env.cache.set_failed(record, [self], exc) def determine_value(self, record): """ Determine the value of ``self`` for ``record``. 
""" env = record.env if self.store and not (self.compute and env.in_onchange): # this is a stored field or an old-style function field if self.compute: # this is a stored computed field, check for recomputation recs = record._recompute_check(self) if recs: # recompute the value (only in cache) self.compute_value(recs) # HACK: if result is in the wrong cache, copy values if recs.env != env: computed = record._field_computed[self] for source, target in pycompat.izip(recs, recs.with_env(env)): try: values = {f.name: source[f.name] for f in computed} target._cache.update(target._convert_to_cache(values, validate=False)) except MissingError as exc: target._cache.set_failed(target._fields, exc) # the result is saved to database by BaseModel.recompute() return # read the field from database record._prefetch_field(self) elif self.compute: # this is either a non-stored computed field, or a stored computed # field in onchange mode if self.recursive: self.compute_value(record) else: recs = record._in_cache_without(self) recs = recs.with_prefetch(record._prefetch) self.compute_value(recs) else: # this is a non-stored non-computed field record.env.cache.set(record, self, self.convert_to_cache(False, record, validate=False)) def determine_draft_value(self, record): """ Determine the value of ``self`` for the given draft ``record``. """ if self.compute: fields = record._field_computed[self] with record.env.protecting(fields, record): self._compute_value(record) else: null = self.convert_to_cache(False, record, validate=False) record.env.cache.set_special(record, self, lambda: null) def determine_inverse(self, records): """ Given the value of ``self`` on ``records``, inverse the computation. """ if isinstance(self.inverse, pycompat.string_types): getattr(records, self.inverse)() else: self.inverse(records) def determine_domain(self, records, operator, value): """ Return a domain representing a condition on ``self``. 
""" if isinstance(self.search, pycompat.string_types): return getattr(records, self.search)(operator, value) else: return self.search(records, operator, value) ############################################################################ # # Notification when fields are modified # def modified_draft(self, records): """ Same as :meth:`modified`, but in draft mode. """ env = records.env # invalidate the fields on the records in cache that depend on # ``records``, except fields currently being computed spec = [] for field, path in records._field_triggers[self]: if not field.compute: # Note: do not invalidate non-computed fields. Such fields may # require invalidation in general (like *2many fields with # domains) but should not be invalidated in this case, because # we would simply lose their values during an onchange! continue target = env[field.model_name] protected = env.protected(field) if path == 'id' and field.model_name == records._name: target = records - protected elif path and env.in_onchange: target = (env.cache.get_records(target, field) - protected).filtered( lambda rec: rec if path == 'id' else rec._mapped_cache(path) & records ) else: target = env.cache.get_records(target, field) - protected if target: spec.append((field, target._ids)) return spec class Boolean(Field): type = 'boolean' column_type = ('bool', 'bool') def convert_to_column(self, value, record, values=None): return bool(value) def convert_to_cache(self, value, record, validate=True): return bool(value) def convert_to_export(self, value, record): if record._context.get('export_raw_data'): return value return ustr(value) class Integer(Field): type = 'integer' column_type = ('int4', 'int4') _slots = { 'group_operator': 'sum', } _description_group_operator = property(attrgetter('group_operator')) def convert_to_column(self, value, record, values=None): return int(value or 0) def convert_to_cache(self, value, record, validate=True): if isinstance(value, dict): # special case, when an integer 
field is used as inverse for a one2many return value.get('id', False) return int(value or 0) def convert_to_read(self, value, record, use_name_get=True): # Integer values greater than 2^31-1 are not supported in pure XMLRPC, # so we have to pass them as floats :-( if value and value > MAXINT: return float(value) return value def _update(self, records, value): # special case, when an integer field is used as inverse for a one2many cache = records.env.cache for record in records: cache.set(record, self, value.id or 0) def convert_to_export(self, value, record): if value or value == 0: return value if record._context.get('export_raw_data') else ustr(value) return '' class Float(Field): """ The precision digits are given by the attribute :param digits: a pair (total, decimal), or a function taking a database cursor and returning a pair (total, decimal) """ type = 'float' column_cast_from = ('int4', 'numeric', 'float8') _slots = { '_digits': None, # digits argument passed to class initializer 'group_operator': 'sum', } def __init__(self, string=Default, digits=Default, **kwargs): super(Float, self).__init__(string=string, _digits=digits, **kwargs) @property def column_type(self): # Explicit support for "falsy" digits (0, False) to indicate a NUMERIC # field with no fixed precision. The values are saved in the database # with all significant digits. # FLOAT8 type is still the default when there is no precision because it # is faster for most operations (sums, etc.) 
return ('numeric', 'numeric') if self.digits is not None else \ ('float8', 'double precision') @property def digits(self): if callable(self._digits): with LazyCursor() as cr: return self._digits(cr) else: return self._digits _related__digits = property(attrgetter('_digits')) _description_digits = property(attrgetter('digits')) _description_group_operator = property(attrgetter('group_operator')) def convert_to_column(self, value, record, values=None): result = float(value or 0.0) digits = self.digits if digits: precision, scale = digits result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale) return result def convert_to_cache(self, value, record, validate=True): # apply rounding here, otherwise value in cache may be wrong! value = float(value or 0.0) if not validate: return value digits = self.digits return float_round(value, precision_digits=digits[1]) if digits else value def convert_to_export(self, value, record): if value or value == 0.0: return value if record._context.get('export_raw_data') else ustr(value) return '' class Monetary(Field): """ The decimal precision and currency symbol are taken from the attribute :param currency_field: name of the field holding the currency this monetary field is expressed in (default: `currency_id`) """ type = 'monetary' column_type = ('numeric', 'numeric') column_cast_from = ('float8',) _slots = { 'currency_field': None, 'group_operator': 'sum', } def __init__(self, string=Default, currency_field=Default, **kwargs): super(Monetary, self).__init__(string=string, currency_field=currency_field, **kwargs) _related_currency_field = property(attrgetter('currency_field')) _description_currency_field = property(attrgetter('currency_field')) _description_group_operator = property(attrgetter('group_operator')) def _setup_regular_full(self, model): super(Monetary, self)._setup_regular_full(model) if not self.currency_field: # pick a default, trying in order: 'currency_id', 'x_currency_id' if 
'currency_id' in model._fields: self.currency_field = 'currency_id' elif 'x_currency_id' in model._fields: self.currency_field = 'x_currency_id' assert self.currency_field in model._fields, \ "Field %s with unknown currency_field %r" % (self, self.currency_field) def convert_to_column(self, value, record, values=None): # retrieve currency from values or record if values and self.currency_field in values: field = record._fields[self.currency_field] currency = field.convert_to_cache(values[self.currency_field], record) currency = field.convert_to_record(currency, record) else: # Note: this is wrong if 'record' is several records with different # currencies, which is functional nonsense and should not happen currency = record[:1][self.currency_field] value = float(value or 0.0) if currency: return float_repr(currency.round(value), currency.decimal_places) return value def convert_to_cache(self, value, record, validate=True): # cache format: float value = float(value or 0.0) if validate and record[self.currency_field]: # FIXME @rco-odoo: currency may not be already initialized if it is # a function or related field! value = record[self.currency_field].round(value) return value def convert_to_read(self, value, record, use_name_get=True): return value def convert_to_write(self, value, record): return value class _String(Field): """ Abstract class for string fields. """ _slots = { 'translate': False, # whether the field is translated } def __init__(self, string=Default, **kwargs): # translate is either True, False, or a callable if 'translate' in kwargs and not callable(kwargs['translate']): kwargs['translate'] = bool(kwargs['translate']) super(_String, self).__init__(string=string, **kwargs) _related_translate = property(attrgetter('translate')) def _description_translate(self, env): return bool(self.translate) def get_trans_terms(self, value): """ Return the sequence of terms to translate found in `value`. 
""" if not callable(self.translate): return [value] if value else [] terms = [] self.translate(terms.append, value) return terms def get_trans_func(self, records): """ Return a translation function `translate` for `self` on the given records; the function call `translate(record_id, value)` translates the field value to the language given by the environment of `records`. """ if callable(self.translate): rec_src_trans = records.env['ir.translation']._get_terms_translations(self, records) def translate(record_id, value): src_trans = rec_src_trans[record_id] return self.translate(src_trans.get, value) else: rec_trans = records.env['ir.translation']._get_ids( '%s,%s' % (self.model_name, self.name), 'model', records.env.lang, records.ids) def translate(record_id, value): return rec_trans.get(record_id) or value return translate def check_trans_value(self, value): """ Check and possibly sanitize the translated term `value`. """ if callable(self.translate): # do a "no-translation" to sanitize the value callback = lambda term: None return self.translate(callback, value) else: return value class Char(_String): """ Basic string field, can be length-limited, usually displayed as a single-line string in clients. :param int size: the maximum size of values stored for that field :param translate: enable the translation of the field's values; use ``translate=True`` to translate field values as a whole; ``translate`` may also be a callable such that ``translate(callback, value)`` translates ``value`` by using ``callback(term)`` to retrieve the translation of terms. 
""" type = 'char' column_cast_from = ('text',) _slots = { 'size': None, # maximum size of values (deprecated) } @property def column_type(self): return ('varchar', pg_varchar(self.size)) def update_db_column(self, model, column): if ( column and column['udt_name'] == 'varchar' and column['character_maximum_length'] and (self.size is None or column['character_maximum_length'] < self.size) ): # the column's varchar size does not match self.size; convert it sql.convert_column(model._cr, model._table, self.name, self.column_type[1]) super(Char, self).update_db_column(model, column) _related_size = property(attrgetter('size')) _description_size = property(attrgetter('size')) def _setup_regular_base(self, model): super(Char, self)._setup_regular_base(model) assert self.size is None or isinstance(self.size, int), \ "Char field %s with non-integer size %r" % (self, self.size) def convert_to_column(self, value, record, values=None): if value is None or value is False: return None # we need to convert the string to a unicode object to be able # to evaluate its length (and possibly truncate it) reliably return pycompat.to_text(value)[:self.size] def convert_to_cache(self, value, record, validate=True): if value is None or value is False: return False return pycompat.to_text(value)[:self.size] class Text(_String): """ Very similar to :class:`~.Char` but used for longer contents, does not have a size and usually displayed as a multiline text box. :param translate: enable the translation of the field's values; use ``translate=True`` to translate field values as a whole; ``translate`` may also be a callable such that ``translate(callback, value)`` translates ``value`` by using ``callback(term)`` to retrieve the translation of terms. 
""" type = 'text' column_type = ('text', 'text') column_cast_from = ('varchar',) def convert_to_cache(self, value, record, validate=True): if value is None or value is False: return False return ustr(value) class Html(_String): type = 'html' column_type = ('text', 'text') _slots = { 'sanitize': True, # whether value must be sanitized 'sanitize_tags': True, # whether to sanitize tags (only a white list of attributes is accepted) 'sanitize_attributes': True, # whether to sanitize attributes (only a white list of attributes is accepted) 'sanitize_style': False, # whether to sanitize style attributes 'strip_style': False, # whether to strip style attributes (removed and therefore not sanitized) 'strip_classes': False, # whether to strip classes attributes } def _setup_attrs(self, model, name): super(Html, self)._setup_attrs(model, name) # Translated sanitized html fields must use html_translate or a callable. if self.translate is True and self.sanitize: self.translate = html_translate _related_sanitize = property(attrgetter('sanitize')) _related_sanitize_tags = property(attrgetter('sanitize_tags')) _related_sanitize_attributes = property(attrgetter('sanitize_attributes')) _related_sanitize_style = property(attrgetter('sanitize_style')) _related_strip_style = property(attrgetter('strip_style')) _related_strip_classes = property(attrgetter('strip_classes')) _description_sanitize = property(attrgetter('sanitize')) _description_sanitize_tags = property(attrgetter('sanitize_tags')) _description_sanitize_attributes = property(attrgetter('sanitize_attributes')) _description_sanitize_style = property(attrgetter('sanitize_style')) _description_strip_style = property(attrgetter('strip_style')) _description_strip_classes = property(attrgetter('strip_classes')) def convert_to_column(self, value, record, values=None): if value is None or value is False: return None if self.sanitize: return html_sanitize( value, silent=True, sanitize_tags=self.sanitize_tags, 
sanitize_attributes=self.sanitize_attributes, sanitize_style=self.sanitize_style, strip_style=self.strip_style, strip_classes=self.strip_classes) return value def convert_to_cache(self, value, record, validate=True): if value is None or value is False: return False if validate and self.sanitize: return html_sanitize( value, silent=True, sanitize_tags=self.sanitize_tags, sanitize_attributes=self.sanitize_attributes, sanitize_style=self.sanitize_style, strip_style=self.strip_style, strip_classes=self.strip_classes) return value class Date(Field): type = 'date' column_type = ('date', 'date') column_cast_from = ('timestamp',) @staticmethod def today(*args): """ Return the current day in the format expected by the ORM. This function may be used to compute default values. """ return date.today().strftime(DATE_FORMAT) @staticmethod def context_today(record, timestamp=None): """ Return the current date as seen in the client's timezone in a format fit for date fields. This method may be used to compute default values. :param datetime timestamp: optional datetime value to use instead of the current date and time (must be a datetime, regular dates can't be converted between timezones.) :rtype: str """ today = timestamp or datetime.now() context_today = None tz_name = record._context.get('tz') or record.env.user.tz if tz_name: try: today_utc = pytz.timezone('UTC').localize(today, is_dst=False) # UTC = no DST context_today = today_utc.astimezone(pytz.timezone(tz_name)) except Exception: _logger.debug("failed to compute context/client-specific today date, using UTC value for `today`", exc_info=True) return (context_today or today).strftime(DATE_FORMAT) @staticmethod def from_string(value): """ Convert an ORM ``value`` into a :class:`date` value. """ if not value: return None value = value[:DATE_LENGTH] return datetime.strptime(value, DATE_FORMAT).date() @staticmethod def to_string(value): """ Convert a :class:`date` value into the format expected by the ORM. 
""" return value.strftime(DATE_FORMAT) if value else False def convert_to_cache(self, value, record, validate=True): if not value: return False if isinstance(value, pycompat.string_types): if validate: # force parsing for validation self.from_string(value) return value[:DATE_LENGTH] return self.to_string(value) def convert_to_export(self, value, record): if not value: return '' return self.from_string(value) if record._context.get('export_raw_data') else ustr(value) class Datetime(Field): type = 'datetime' column_type = ('timestamp', 'timestamp') column_cast_from = ('date',) @staticmethod def now(*args): """ Return the current day and time in the format expected by the ORM. This function may be used to compute default values. """ return datetime.now().strftime(DATETIME_FORMAT) @staticmethod def context_timestamp(record, timestamp): """Returns the given timestamp converted to the client's timezone. This method is *not* meant for use as a default initializer, because datetime fields are automatically converted upon display on client side. For default values :meth:`fields.datetime.now` should be used instead. :param datetime timestamp: naive datetime value (expressed in UTC) to be converted to the client timezone :rtype: datetime :return: timestamp converted to timezone-aware datetime in context timezone """ assert isinstance(timestamp, datetime), 'Datetime instance expected' tz_name = record._context.get('tz') or record.env.user.tz utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST if tz_name: try: context_tz = pytz.timezone(tz_name) return utc_timestamp.astimezone(context_tz) except Exception: _logger.debug("failed to compute context/client-specific timestamp, " "using the UTC value", exc_info=True) return utc_timestamp @staticmethod def from_string(value): """ Convert an ORM ``value`` into a :class:`datetime` value. 
""" if not value: return None value = value[:DATETIME_LENGTH] if len(value) == DATE_LENGTH: value += " 00:00:00" return datetime.strptime(value, DATETIME_FORMAT) @staticmethod def to_string(value): """ Convert a :class:`datetime` value into the format expected by the ORM. """ return value.strftime(DATETIME_FORMAT) if value else False def convert_to_cache(self, value, record, validate=True): if not value: return False if isinstance(value, pycompat.string_types): if validate: # force parsing for validation self.from_string(value) value = value[:DATETIME_LENGTH] if len(value) == DATE_LENGTH: value += " 00:00:00" return value return self.to_string(value) def convert_to_export(self, value, record): if not value: return '' return self.from_string(value) if record._context.get('export_raw_data') else ustr(value) def convert_to_display_name(self, value, record): assert record, 'Record expected' return Datetime.to_string(Datetime.context_timestamp(record, Datetime.from_string(value))) # http://initd.org/psycopg/docs/usage.html#binary-adaptation # Received data is returned as buffer (in Python 2) or memoryview (in Python 3). _BINARY = memoryview if pycompat.PY2: _BINARY = buffer #pylint: disable=buffer-builtin class Binary(Field): type = 'binary' _slots = { 'prefetch': False, # not prefetched by default 'context_dependent': True, # depends on context (content or size) 'attachment': False, # whether value is stored in attachment } @property def column_type(self): return None if self.attachment else ('bytea', 'bytea') _description_attachment = property(attrgetter('attachment')) def convert_to_column(self, value, record, values=None): # Binary values may be byte strings (python 2.6 byte array), but # the legacy OpenERP convention is to transfer and store binaries # as base64-encoded strings. The base64 string may be provided as a # unicode in some circumstances, hence the str() cast here. 
# This str() coercion will only work for pure ASCII unicode strings, # on purpose - non base64 data must be passed as a 8bit byte strings. if not value: return None if isinstance(value, bytes): return psycopg2.Binary(value) return psycopg2.Binary(pycompat.text_type(value).encode('ascii')) def convert_to_cache(self, value, record, validate=True): if isinstance(value, _BINARY): return bytes(value) if isinstance(value, pycompat.integer_types) and \ (record._context.get('bin_size') or record._context.get('bin_size_' + self.name)): # If the client requests only the size of the field, we return that # instead of the content. Presumably a separate request will be done # to read the actual content, if necessary. return human_size(value) return value def read(self, records): # values are stored in attachments, retrieve them assert self.attachment domain = [ ('res_model', '=', records._name), ('res_field', '=', self.name), ('res_id', 'in', records.ids), ] # Note: the 'bin_size' flag is handled by the field 'datas' itself data = {att.res_id: att.datas for att in records.env['ir.attachment'].sudo().search(domain)} cache = records.env.cache for record in records: cache.set(record, self, data.get(record.id, False)) def write(self, records, value, create=False): # retrieve the attachments that stores the value, and adapt them assert self.attachment if create: atts = records.env['ir.attachment'].sudo() else: atts = records.env['ir.attachment'].sudo().search([ ('res_model', '=', records._name), ('res_field', '=', self.name), ('res_id', 'in', records.ids), ]) with records.env.norecompute(): if value: # update the existing attachments atts.write({'datas': value}) # create the missing attachments for record in (records - records.browse(atts.mapped('res_id'))): atts.create({ 'name': self.name, 'res_model': record._name, 'res_field': self.name, 'res_id': record.id, 'type': 'binary', 'datas': value, }) else: atts.unlink() class Selection(Field): """ :param selection: specifies the 
possible values for this field. It is given as either a list of pairs (``value``, ``string``), or a model method, or a method name. :param selection_add: provides an extension of the selection in the case of an overridden field. It is a list of pairs (``value``, ``string``). The attribute ``selection`` is mandatory except in the case of :ref:`related fields <field-related>` or :ref:`field extensions <field-incremental-definition>`. """ type = 'selection' _slots = { 'selection': None, # [(value, string), ...], function or method name } def __init__(self, selection=Default, string=Default, **kwargs): super(Selection, self).__init__(selection=selection, string=string, **kwargs) @property def column_type(self): if (self.selection and isinstance(self.selection, list) and isinstance(self.selection[0][0], int)): return ('int4', 'integer') else: return ('varchar', pg_varchar()) def _setup_regular_base(self, model): super(Selection, self)._setup_regular_base(model) assert self.selection is not None, "Field %s without selection" % self def _setup_related_full(self, model): super(Selection, self)._setup_related_full(model) # selection must be computed on related field field = self.related_field self.selection = lambda model: field._description_selection(model.env) def _setup_attrs(self, model, name): super(Selection, self)._setup_attrs(model, name) # determine selection (applying 'selection_add' extensions) for field in reversed(resolve_mro(model, name, self._can_setup_from)): # We cannot use field.selection or field.selection_add here # because those attributes are overridden by ``_setup_attrs``. 
if 'selection' in field.args: self.selection = field.args['selection'] if 'selection_add' in field.args: # use an OrderedDict to update existing values selection_add = field.args['selection_add'] self.selection = list(OrderedDict(self.selection + selection_add).items()) def _description_selection(self, env): """ return the selection list (pairs (value, label)); labels are translated according to context language """ selection = self.selection if isinstance(selection, pycompat.string_types): return getattr(env[self.model_name], selection)() if callable(selection): return selection(env[self.model_name]) # translate selection labels if env.lang: name = "%s,%s" % (self.model_name, self.name) translate = partial( env['ir.translation']._get_source, name, 'selection', env.lang) return [(value, translate(label) if label else label) for value, label in selection] else: return selection def get_values(self, env): """ return a list of the possible values """ selection = self.selection if isinstance(selection, pycompat.string_types): selection = getattr(env[self.model_name], selection)() elif callable(selection): selection = selection(env[self.model_name]) return [value for value, _ in selection] def convert_to_cache(self, value, record, validate=True): if not validate: return value or False if value in self.get_values(record.env): return value elif not value: return False raise ValueError("Wrong value for %s: %r" % (self, value)) def convert_to_export(self, value, record): if not isinstance(self.selection, list): # FIXME: this reproduces an existing buggy behavior! 
return value if value else '' for item in self._description_selection(record.env): if item[0] == value: return item[1] return False class Reference(Selection): type = 'reference' @property def column_type(self): return ('varchar', pg_varchar()) def convert_to_cache(self, value, record, validate=True): # cache format: (res_model, res_id) or False def process(res_model, res_id): record._prefetch[res_model].add(res_id) return (res_model, res_id) if isinstance(value, BaseModel): if not validate or (value._name in self.get_values(record.env) and len(value) <= 1): return process(value._name, value.id) if value else False elif isinstance(value, pycompat.string_types): res_model, res_id = value.split(',') if record.env[res_model].browse(int(res_id)).exists(): return process(res_model, int(res_id)) else: return False elif not value: return False raise ValueError("Wrong value for %s: %r" % (self, value)) def convert_to_record(self, value, record): return value and record.env[value[0]].browse([value[1]], record._prefetch) def convert_to_read(self, value, record, use_name_get=True): return "%s,%s" % (value._name, value.id) if value else False def convert_to_export(self, value, record): return value.name_get()[0][1] if value else '' def convert_to_display_name(self, value, record): return ustr(value and value.display_name) class _Relational(Field): """ Abstract class for relational fields. """ relational = True _slots = { 'domain': [], # domain for searching values 'context': {}, # context for searching values } def _setup_regular_base(self, model): super(_Relational, self)._setup_regular_base(model) if self.comodel_name not in model.pool: _logger.warning("Field %s with unknown comodel_name %r", self, self.comodel_name) self.comodel_name = '_unknown' @property def _related_domain(self): if callable(self.domain): # will be called with another model than self's return lambda recs: self.domain(recs.env[self.model_name]) else: # maybe not correct if domain is a string... 
return self.domain _related_context = property(attrgetter('context')) _description_relation = property(attrgetter('comodel_name')) _description_context = property(attrgetter('context')) def _description_domain(self, env): return self.domain(env[self.model_name]) if callable(self.domain) else self.domain def null(self, record): return record.env[self.comodel_name] class Many2one(_Relational): """ The value of such a field is a recordset of size 0 (no record) or 1 (a single record). :param comodel_name: name of the target model (string) :param domain: an optional domain to set on candidate values on the client side (domain or string) :param context: an optional context to use on the client side when handling that field (dictionary) :param ondelete: what to do when the referred record is deleted; possible values are: ``'set null'``, ``'restrict'``, ``'cascade'`` :param auto_join: whether JOINs are generated upon search through that field (boolean, by default ``False``) :param delegate: set it to ``True`` to make fields of the target model accessible from the current model (corresponds to ``_inherits``) The attribute ``comodel_name`` is mandatory except in the case of related fields or field extensions. 
""" type = 'many2one' column_type = ('int4', 'int4') _slots = { 'ondelete': 'set null', # what to do when value is deleted 'auto_join': False, # whether joins are generated upon search 'delegate': False, # whether self implements delegation } def __init__(self, comodel_name=Default, string=Default, **kwargs): super(Many2one, self).__init__(comodel_name=comodel_name, string=string, **kwargs) def _setup_attrs(self, model, name): super(Many2one, self)._setup_attrs(model, name) # determine self.delegate if not self.delegate: self.delegate = name in model._inherits.values() def update_db(self, model, columns): comodel = model.env[self.comodel_name] if not model.is_transient() and comodel.is_transient(): raise ValueError('Many2one %s from Model to TransientModel is forbidden' % self) if model.is_transient() and not comodel.is_transient(): # Many2one relations from TransientModel Model are annoying because # they can block deletion due to foreign keys. So unless stated # otherwise, we default them to ondelete='cascade'. self.ondelete = self.ondelete or 'cascade' return super(Many2one, self).update_db(model, columns) def update_db_column(self, model, column): super(Many2one, self).update_db_column(model, column) model.pool.post_init(self.update_db_foreign_key, model, column) def update_db_foreign_key(self, model, column): comodel = model.env[self.comodel_name] # ir_actions is inherited, so foreign key doesn't work on it if not comodel._auto or comodel._table == 'ir_actions': return # create/update the foreign key, and reflect it in 'ir.model.constraint' process = sql.fix_foreign_key if column else sql.add_foreign_key new = process(model._cr, model._table, self.name, comodel._table, 'id', self.ondelete or 'set null') if new: conname = '%s_%s_fkey' % (model._table, self.name) model.env['ir.model.constraint']._reflect_constraint(model, conname, 'f', None, self._module) def _update(self, records, value): """ Update the cached value of ``self`` for ``records`` with ``value``. 
""" cache = records.env.cache for record in records: cache.set(record, self, self.convert_to_cache(value, record, validate=False)) def convert_to_column(self, value, record, values=None): return value or None def convert_to_cache(self, value, record, validate=True): # cache format: tuple(ids) def process(ids): return record._prefetch[self.comodel_name].update(ids) or ids if type(value) in IdType: return process((value,)) elif isinstance(value, BaseModel): if not validate or (value._name == self.comodel_name and len(value) <= 1): return process(value._ids) raise ValueError("Wrong value for %s: %r" % (self, value)) elif isinstance(value, tuple): # value is either a pair (id, name), or a tuple of ids return process(value[:1]) elif isinstance(value, dict): return process(record.env[self.comodel_name].new(value)._ids) else: return () def convert_to_record(self, value, record): return record.env[self.comodel_name]._browse(value, record.env, record._prefetch) def convert_to_read(self, value, record, use_name_get=True): if use_name_get and value: # evaluate name_get() as superuser, because the visibility of a # many2one field value (id and name) depends on the current record's # access rights, and not the value's access rights. try: # performance: value.sudo() prefetches the same records as value return value.sudo().name_get()[0] except MissingError: # Should not happen, unless the foreign key is missing. return False else: return value.id def convert_to_write(self, value, record): return value.id def convert_to_export(self, value, record): return value.name_get()[0][1] if value else '' def convert_to_display_name(self, value, record): return ustr(value.display_name) def convert_to_onchange(self, value, record, names): if not value.id: return False return super(Many2one, self).convert_to_onchange(value, record, names) class _RelationalMulti(_Relational): """ Abstract class for relational fields *2many. 
""" _slots = { 'context_dependent': True, # depends on context (active_test) } def _update(self, records, value): """ Update the cached value of ``self`` for ``records`` with ``value``. """ cache = records.env.cache for record in records: if cache.contains(record, self): val = self.convert_to_cache(record[self.name] | value, record, validate=False) cache.set(record, self, val) else: cache.set_special(record, self, self._update_getter(record, value)) def _update_getter(self, record, value): def getter(): # determine the current field's value, and update it in cache only cache = record.env.cache cache.remove(record, self) val = self.convert_to_cache(record[self.name] | value, record, validate=False) cache.set(record, self, val) return val return getter def convert_to_cache(self, value, record, validate=True): # cache format: tuple(ids) def process(ids): return record._prefetch[self.comodel_name].update(ids) or ids if isinstance(value, BaseModel): if not validate or (value._name == self.comodel_name): return process(value._ids) elif isinstance(value, (list, tuple)): # value is a list/tuple of commands, dicts or record ids comodel = record.env[self.comodel_name] # determine the value ids; by convention empty on new records ids = OrderedSet(record[self.name].ids if record.id else ()) # modify ids with the commands for command in value: if isinstance(command, (tuple, list)): if command[0] == 0: ids.add(comodel.new(command[2], command[1]).id) elif command[0] == 1: comodel.browse(command[1]).update(command[2]) ids.add(command[1]) elif command[0] == 2: # note: the record will be deleted by write() ids.discard(command[1]) elif command[0] == 3: ids.discard(command[1]) elif command[0] == 4: ids.add(command[1]) elif command[0] == 5: ids.clear() elif command[0] == 6: ids = OrderedSet(command[2]) elif isinstance(command, dict): ids.add(comodel.new(command).id) else: ids.add(command) # return result as a tuple return process(tuple(ids)) elif not value: return () raise 
ValueError("Wrong value for %s: %s" % (self, value)) def convert_to_record(self, value, record): return record.env[self.comodel_name]._browse(value, record.env, record._prefetch) def convert_to_read(self, value, record, use_name_get=True): return value.ids def convert_to_write(self, value, record): # make result with new and existing records result = [(6, 0, [])] for record in value: if not record.id: values = {name: record[name] for name in record._cache} values = record._convert_to_write(values) result.append((0, 0, values)) elif record._is_dirty(): values = {name: record[name] for name in record._get_dirty()} values = record._convert_to_write(values) result.append((1, record.id, values)) else: result[0][2].append(record.id) return result def convert_to_onchange(self, value, record, names): # return the recordset value as a list of commands; the commands may # give all fields values, the client is responsible for figuring out # which fields are actually dirty result = [(5,)] for record in value: vals = { name: value._fields[name].convert_to_onchange(record[name], record, subnames) for name, subnames in names.items() if name != 'id' } if not record.id: result.append((0, record.id.ref or 0, vals)) elif vals: result.append((1, record.id, vals)) else: result.append((4, record.id)) return result def convert_to_export(self, value, record): return ','.join(name for id, name in value.name_get()) if value else '' def convert_to_display_name(self, value, record): raise NotImplementedError() def _compute_related(self, records): """ Compute the related field ``self`` on ``records``. 
""" super(_RelationalMulti, self)._compute_related(records) if self.related_sudo: # determine which records in the relation are actually accessible target = records.mapped(self.name) target_ids = set(target.search([('id', 'in', target.ids)]).ids) accessible = lambda target: target.id in target_ids # filter values to keep the accessible records only for record in records: record[self.name] = record[self.name].filtered(accessible) def _setup_regular_base(self, model): super(_RelationalMulti, self)._setup_regular_base(model) if isinstance(self.domain, list): self.depends += tuple( self.name + '.' + arg[0] for arg in self.domain if isinstance(arg, (tuple, list)) and isinstance(arg[0], pycompat.string_types) ) class One2many(_RelationalMulti): """ One2many field; the value of such a field is the recordset of all the records in ``comodel_name`` such that the field ``inverse_name`` is equal to the current record. :param comodel_name: name of the target model (string) :param inverse_name: name of the inverse ``Many2one`` field in ``comodel_name`` (string) :param domain: an optional domain to set on candidate values on the client side (domain or string) :param context: an optional context to use on the client side when handling that field (dictionary) :param auto_join: whether JOINs are generated upon search through that field (boolean, by default ``False``) :param limit: optional limit to use upon read (integer) The attributes ``comodel_name`` and ``inverse_name`` are mandatory except in the case of related fields or field extensions. 
""" type = 'one2many' _slots = { 'inverse_name': None, # name of the inverse field 'auto_join': False, # whether joins are generated upon search 'limit': None, # optional limit to use upon read 'copy': False, # o2m are not copied by default } def __init__(self, comodel_name=Default, inverse_name=Default, string=Default, **kwargs): super(One2many, self).__init__( comodel_name=comodel_name, inverse_name=inverse_name, string=string, **kwargs ) def _setup_regular_full(self, model): super(One2many, self)._setup_regular_full(model) if self.inverse_name: # link self to its inverse field and vice-versa comodel = model.env[self.comodel_name] invf = comodel._fields[self.inverse_name] # In some rare cases, a ``One2many`` field can link to ``Int`` field # (res_model/res_id pattern). Only inverse the field if this is # a ``Many2one`` field. if isinstance(invf, Many2one): model._field_inverses.add(self, invf) comodel._field_inverses.add(invf, self) _description_relation_field = property(attrgetter('inverse_name')) def convert_to_onchange(self, value, record, names): names = names.copy() names.pop(self.inverse_name, None) return super(One2many, self).convert_to_onchange(value, record, names) def update_db(self, model, columns): if self.comodel_name in model.env: comodel = model.env[self.comodel_name] if self.inverse_name not in comodel._fields: raise UserError(_("No inverse field %r found for %r") % (self.inverse_name, self.comodel_name)) def read(self, records): # retrieve the lines in the comodel comodel = records.env[self.comodel_name].with_context(**self.context) inverse = self.inverse_name get_id = (lambda rec: rec.id) if comodel._fields[inverse].type == 'many2one' else int domain = self.domain(records) if callable(self.domain) else self.domain domain = domain + [(inverse, 'in', records.ids)] lines = comodel.search(domain, limit=self.limit) # group lines by inverse field (without prefetching other fields) group = defaultdict(list) for line in 
lines.with_context(prefetch_fields=False): # line[inverse] may be a record or an integer group[get_id(line[inverse])].append(line.id) # store result in cache cache = records.env.cache for record in records: cache.set(record, self, tuple(group[record.id])) def write(self, records, value, create=False): comodel = records.env[self.comodel_name].with_context(**self.context) inverse = self.inverse_name with records.env.norecompute(): for act in (value or []): if act[0] == 0: for record in records: act[2][inverse] = record.id comodel.create(act[2]) elif act[0] == 1: comodel.browse(act[1]).write(act[2]) elif act[0] == 2: comodel.browse(act[1]).unlink() elif act[0] == 3: inverse_field = comodel._fields[inverse] if inverse_field.ondelete == 'cascade': comodel.browse(act[1]).unlink() else: comodel.browse(act[1]).write({inverse: False}) elif act[0] == 4: record = records[-1] line = comodel.browse(act[1]) line_sudo = line.sudo().with_context(prefetch_fields=False) if int(line_sudo[inverse]) != record.id: line.write({inverse: record.id}) elif act[0] == 5: domain = self.domain(records) if callable(self.domain) else self.domain domain = domain + [(inverse, 'in', records.ids)] inverse_field = comodel._fields[inverse] if inverse_field.ondelete == 'cascade': comodel.search(domain).unlink() else: comodel.search(domain).write({inverse: False}) elif act[0] == 6: record = records[-1] comodel.browse(act[2]).write({inverse: record.id}) query = "SELECT id FROM %s WHERE %s=%%s AND id <> ALL(%%s)" % (comodel._table, inverse) comodel._cr.execute(query, (record.id, act[2] or [0])) lines = comodel.browse([row[0] for row in comodel._cr.fetchall()]) inverse_field = comodel._fields[inverse] if inverse_field.ondelete == 'cascade': lines.unlink() else: lines.write({inverse: False}) class Many2many(_RelationalMulti): """ Many2many field; the value of such a field is the recordset. 
:param comodel_name: name of the target model (string) The attribute ``comodel_name`` is mandatory except in the case of related fields or field extensions. :param relation: optional name of the table that stores the relation in the database (string) :param column1: optional name of the column referring to "these" records in the table ``relation`` (string) :param column2: optional name of the column referring to "those" records in the table ``relation`` (string) The attributes ``relation``, ``column1`` and ``column2`` are optional. If not given, names are automatically generated from model names, provided ``model_name`` and ``comodel_name`` are different! :param domain: an optional domain to set on candidate values on the client side (domain or string) :param context: an optional context to use on the client side when handling that field (dictionary) :param limit: optional limit to use upon read (integer) """ type = 'many2many' _slots = { 'relation': None, # name of table 'column1': None, # column of table referring to model 'column2': None, # column of table referring to comodel 'auto_join': False, # whether joins are generated upon search 'limit': None, # optional limit to use upon read } def __init__(self, comodel_name=Default, relation=Default, column1=Default, column2=Default, string=Default, **kwargs): super(Many2many, self).__init__( comodel_name=comodel_name, relation=relation, column1=column1, column2=column2, string=string, **kwargs ) def _setup_regular_base(self, model): super(Many2many, self)._setup_regular_base(model) if self.store: if not (self.relation and self.column1 and self.column2): # table name is based on the stable alphabetical order of tables comodel = model.env[self.comodel_name] if not self.relation: tables = sorted([model._table, comodel._table]) assert tables[0] != tables[1], \ "%s: Implicit/canonical naming of many2many relationship " \ "table is not possible when source and destination models " \ "are the same" % self self.relation = 
'%s_%s_rel' % tuple(tables) if not self.column1: self.column1 = '%s_id' % model._table if not self.column2: self.column2 = '%s_id' % comodel._table # check validity of table name check_pg_name(self.relation) def _setup_regular_full(self, model): super(Many2many, self)._setup_regular_full(model) if self.relation: m2m = model.pool._m2m # if inverse field has already been setup, it is present in m2m invf = m2m.get((self.relation, self.column2, self.column1)) if invf: comodel = model.env[self.comodel_name] model._field_inverses.add(self, invf) comodel._field_inverses.add(invf, self) else: # add self in m2m, so that its inverse field can find it m2m[(self.relation, self.column1, self.column2)] = self def update_db(self, model, columns): cr = model._cr # Do not reflect relations for custom fields, as they do not belong to a # module. They are automatically removed when dropping the corresponding # 'ir.model.field'. if not self.manual: model.pool.post_init(model.env['ir.model.relation']._reflect_relation, model, self.relation, self._module) if not sql.table_exists(cr, self.relation): comodel = model.env[self.comodel_name] query = """ CREATE TABLE "{rel}" ("{id1}" INTEGER NOT NULL, "{id2}" INTEGER NOT NULL, UNIQUE("{id1}","{id2}")); COMMENT ON TABLE "{rel}" IS %s; CREATE INDEX ON "{rel}" ("{id1}"); CREATE INDEX ON "{rel}" ("{id2}") """.format(rel=self.relation, id1=self.column1, id2=self.column2) cr.execute(query, ['RELATION BETWEEN %s AND %s' % (model._table, comodel._table)]) _schema.debug("Create table %r: m2m relation between %r and %r", self.relation, model._table, comodel._table) model.pool.post_init(self.update_db_foreign_keys, model) return True def update_db_foreign_keys(self, model): """ Add the foreign keys corresponding to the field's relation table. 
""" cr = model._cr comodel = model.env[self.comodel_name] reflect = model.env['ir.model.constraint']._reflect_constraint # create foreign key references with ondelete=cascade, unless the targets are SQL views if sql.table_kind(cr, model._table) != 'v': sql.add_foreign_key(cr, self.relation, self.column1, model._table, 'id', 'cascade') reflect(model, '%s_%s_fkey' % (self.relation, self.column1), 'f', None, self._module) if sql.table_kind(cr, comodel._table) != 'v': sql.add_foreign_key(cr, self.relation, self.column2, comodel._table, 'id', 'cascade') reflect(model, '%s_%s_fkey' % (self.relation, self.column2), 'f', None, self._module) def read(self, records): comodel = records.env[self.comodel_name] # String domains are supposed to be dynamic and evaluated on client-side # only (thus ignored here). domain = self.domain if isinstance(self.domain, list) else [] wquery = comodel._where_calc(domain) comodel._apply_ir_rules(wquery, 'read') order_by = comodel._generate_order_by(None, wquery) from_c, where_c, where_params = wquery.get_sql() query = """ SELECT {rel}.{id1}, {rel}.{id2} FROM {rel}, {from_c} WHERE {where_c} AND {rel}.{id1} IN %s AND {rel}.{id2} = {tbl}.id {order_by} {limit} OFFSET {offset} """.format(rel=self.relation, id1=self.column1, id2=self.column2, tbl=comodel._table, from_c=from_c, where_c=where_c or '1=1', limit=(' LIMIT %d' % self.limit) if self.limit else '', offset=0, order_by=order_by) where_params.append(tuple(records.ids)) # retrieve lines and group them by record group = defaultdict(list) records._cr.execute(query, where_params) for row in records._cr.fetchall(): group[row[0]].append(row[1]) # store result in cache cache = records.env.cache for record in records: cache.set(record, self, tuple(group[record.id])) def write(self, records, value, create=False): cr = records._cr comodel = records.env[self.comodel_name] parts = dict(rel=self.relation, id1=self.column1, id2=self.column2) clear = False # whether the relation should be cleared links = {} 
# {id: True (link it) or False (unlink it)} for act in (value or []): if not isinstance(act, (list, tuple)) or not act: continue if act[0] == 0: for record in records: links[comodel.create(act[2]).id] = True elif act[0] == 1: comodel.browse(act[1]).write(act[2]) elif act[0] == 2: comodel.browse(act[1]).unlink() elif act[0] == 3: links[act[1]] = False elif act[0] == 4: links[act[1]] = True elif act[0] == 5: clear = True links.clear() elif act[0] == 6: clear = True links = dict.fromkeys(act[2], True) if clear and not create: # remove all records for which user has access rights clauses, params, tables = comodel.env['ir.rule'].domain_get(comodel._name) cond = " AND ".join(clauses) if clauses else "1=1" query = """ DELETE FROM {rel} USING {tables} WHERE {rel}.{id1} IN %s AND {rel}.{id2}={table}.id AND {cond} """.format(table=comodel._table, tables=','.join(tables), cond=cond, **parts) cr.execute(query, [tuple(records.ids)] + params) # link records to the ids such that links[id] = True if any(links.values()): # beware of duplicates when inserting query = """ INSERT INTO {rel} ({id1}, {id2}) (SELECT a, b FROM unnest(%s) AS a, unnest(%s) AS b) EXCEPT (SELECT {id1}, {id2} FROM {rel} WHERE {id1} IN %s) """.format(**parts) ids = [id for id, flag in links.items() if flag] for sub_ids in cr.split_for_in_conditions(ids): cr.execute(query, (records.ids, list(sub_ids), tuple(records.ids))) # unlink records from the ids such that links[id] = False if not all(links.values()): query = """ DELETE FROM {rel} WHERE {id1} IN %s AND {id2} IN %s """.format(**parts) ids = [id for id, flag in links.items() if not flag] for sub_ids in cr.split_for_in_conditions(ids): cr.execute(query, (tuple(records.ids), sub_ids)) class Id(Field): """ Special case for field 'id'. 
""" type = 'integer' column_type = ('int4', 'int4') _slots = { 'string': 'ID', 'store': True, 'readonly': True, } def update_db(self, model, columns): pass # this column is created with the table def __get__(self, record, owner): if record is None: return self # the field is accessed through the class owner if not record: return False return record.ensure_one()._ids[0] def __set__(self, record, value): raise TypeError("field 'id' cannot be assigned") # imported here to avoid dependency cycle issues from odoo import SUPERUSER_ID from .exceptions import AccessError, MissingError, UserError from .models import check_pg_name, BaseModel, IdType
agpl-3.0
8,913,938,050,853,976,000
41.087811
126
0.585813
false
4.358009
false
false
false
RyadElssalihine/RyadElssalihine
user_manager/views.py
1
1508
# Create your views here. from rest_framework import status from rest_framework.decorators import api_view from rest_framework.response import Response from models import Profile,Application,Tab,Page,Footer from serializer import ProfileSerializer,TabSerializer,FooterSerializer from django.shortcuts import render from rest_framework.parsers import JSONParser from django.contrib.auth import authenticate, login ,logout from django.shortcuts import redirect from django.http import HttpResponse,HttpRequest from forms import ConnexionForm from django.core.urlresolvers import reverse import RyadEssalihine from django.contrib.auth.decorators import login_required @api_view(['POST']) def register(request): pass @api_view(['GET']) def user_list(request): profiles=Profile.objects.all() serializer=ProfileSerializer(profiles,many=True) return Response(serializer.data) @api_view(['GET']) def user_get(request): try: profiles = Profile.objects.get(user_id=request.user.id) except Profile.DoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) serializer=ProfileSerializer(profiles) return Response(serializer.data) @api_view(['GET']) def tabs_list(request): tabs=Tab.objects.all() serializer= TabSerializer(tabs,many=True) return Response(serializer.data) @api_view(['GET']) def footers_list(request): footers=Footer.objects.all() serializer=FooterSerializer(footers,many=True) return Response(serializer.data)
gpl-2.0
-4,443,966,142,931,526,700
27.45283
71
0.772546
false
3.896641
false
false
false
mareknetusil/twist
cbc/twist/kinematics.py
1
4324
__author__ = "Harish Narayanan" __copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__ __license__ = "GNU GPL Version 3 or any later version" from dolfin import * #from cbc.twist.coordinate_system import CartesianSystem # Renaming grad to Grad because it looks nicer in the reference # configuration from ufl import grad as ufl_grad # Deformation gradient def DeformationGradient(u): I = SecondOrderIdentity(u) return variable(I + Grad(u)) def Grad(v): return ufl_grad(v) # Infinitesimal strain tensor def InfinitesimalStrain(u): return variable(0.5*(Grad(u) + Grad(u).T)) # Second order identity tensor def SecondOrderIdentity(u): return variable(Identity(u.geometric_dimension())) # Determinant of the deformation gradient def Jacobian(u): F = DeformationGradient(u) return variable(det(F)) # Right Cauchy-Green tensor def RightCauchyGreen(u): F = DeformationGradient(u) return variable(F.T*F) # Green-Lagrange strain tensor def GreenLagrangeStrain(u): I = SecondOrderIdentity(u) C = RightCauchyGreen(u) return variable(0.5*(C - I)) # Left Cauchy-Green tensor def LeftCauchyGreen(u): F = DeformationGradient(u) return variable(F*F.T) # Euler-Almansi strain tensor def EulerAlmansiStrain(u): I = SecondOrderIdentity(u) b = LeftCauchyGreen(u) return variable(0.5*(I - inv(b))) # Invariants of an arbitrary tensor, A def Invariants(A): I1 = tr(A) I2 = 0.5*(tr(A)**2 - tr(A*A)) I3 = det(A) return [I1, I2, I3] # Invariants of the (right/left) Cauchy-Green tensor #TODO: NEEDS TESTING def CauchyGreenInvariants(u): C = RightCauchyGreen(u) [I1, I2, I3] = Invariants(C) return [variable(I1), variable(I2), variable(I3)] # Isochoric part of the deformation gradient #TODO: NEEDS TESTING def IsochoricDeformationGradient(u): F = DeformationGradient(u) J = Jacobian(u) return variable(J**(-1.0/3.0)*F) # Isochoric part of the right Cauchy-Green tensor #TODO: NEEDS TESTING def IsochoricRightCauchyGreen(u): C = RightCauchyGreen(u) J = Jacobian(u) return 
variable(J**(-2.0/3.0)*C) # Invariants of the ischoric part of the (right/left) Cauchy-Green # tensor. Note that I3bar = 1 by definition. #TODO: NEEDS TESTING def IsochoricCauchyGreenInvariants(u): Cbar = IsochoricRightCauchyGreen(u) [I1bar, I2bar, I3bar] = Invariants(Cbar) return [variable(I1bar), variable(I2bar)] # Principal stretches #TODO: NEEDS TESTING def PrincipalStretches(u): C = RightCauchyGreen(u) S = FunctionSpace(u.function_space().mesh(), "CG", 1) if (u.cell().geometric_dimension() == 2): D = sqrt(tr(C)*tr(C) - 4.0*det(C)) eig1 = sqrt(0.5*(tr(C) + D)) eig2 = sqrt(0.5*(tr(C) - D)) return [variable(eig1), variable(eig2)] if (u.cell().geometric_dimension() == 3): c = (1.0/3.0)*tr(C) D = C - c*SecondOrderIdentity(u) q = (1.0/2.0)*det(D) p = (1.0/6.0)*inner(D, D) ph = project(p, S) if (norm(ph) < DOLFIN_EPS): eig1 = sqrt(c) eig2 = sqrt(c) eig3 = sqrt(c) else: phi = (1.0/3.0)*atan(sqrt(p**3.0 - q**2.0)/q) if (phi < 0.0): phi = phi + DOLFIN_PI/3.0 end eig1 = sqrt(c + 2*sqrt(p)*cos(phi)) eig2 = sqrt(c - sqrt(p)*(cos(phi) + sqrt(3)*sin(phi))) eig3 = sqrt(c - sqrt(p)*(cos(phi) - sqrt(3)*sin(phi))) return [variable(eig1), variable(eig2), variable(eig3)] # Pull-back of a two-tensor from the current to the reference # configuration #TODO: NEEDS TESTING def PiolaTransform(A, u): J = Jacobian(u) F = DeformationGradient(u) B = J*A*inv(F).T return B # Push-forward of a two-tensor from the reference to the current # configuration #TODO: NEEDS TESTING def InversePiolaTransform(A, u): J = Jacobian(u) F = DeformationGradient(u) B = (1/J)*A*F.T return B # Computes M*C^nM # for n = 1 equals to the stretch in the direction M #TODO: NEEDS TESTING def DirectionalStretch(u, M, degree = 1): C = RightCauchyGreen(u) Cpow = SecondOrderIdentity(u) if degree >= 1: for i in range(degree): Cpow = C*Cpow directionalstretch = inner(M,Cpow*M) return variable(directionalstretch)
gpl-3.0
1,692,405,160,494,801,000
26.896774
83
0.639685
false
2.88459
true
false
false
snarfed/webmention-tools
bin/demo.py
1
1241
#!/usr/bin/env python # -*- coding: utf-8 -*- from webmentiontools.urlinfo import UrlInfo from webmentiontools.webmentionio import WebmentionIO # If you have an access token from webmention.io, # set it here. Some calls require it. webmention_io_token = None wio = WebmentionIO(webmention_io_token) # Get all links "mentioning" http://indiewebcamp.com/webmention target_url = 'http://indiewebcamp.com/webmention' ret = wio.linksToURL(target_url) if not ret: print wio.error else: for link in ret['links']: print print 'Webmention.io ID: %s' % link['id'] print ' Source: %s' % link['source'] print ' Verification Date: %s' % link['verified_date'] # Now use UrlInfo to get some more information about the source. # Most web apps showing webmentions, will probably do something # like this. info = UrlInfo(link['source']) print ' Source URL info:' print ' Title: %s' % info.title() print ' Pub Date: %s' % info.pubDate() print ' in-reply-to: %s' % info.inReplyTo() print ' Author image: %s' % info.image() print ' Snippet: %s' % info.snippetWithLink(target_url)
mit
8,639,230,686,331,052,000
33.472222
72
0.617244
false
3.428177
false
false
false
XiaochenCui/algorithm_submit
app/auth/views.py
1
5804
from flask import render_template, redirect, request, url_for, flash
# Fixed: the ``flask.ext.*`` namespace was deprecated and removed in
# Flask 1.0 -- the extension package must be imported directly.
from flask_login import login_user, logout_user, login_required, \
    current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, \
    PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm


@auth.before_app_request
def before_request():
    """Run before every request: refresh the user's last-seen timestamp and
    push unconfirmed accounts to the confirmation notice page, except for
    auth endpoints and static files.
    """
    if current_user.is_authenticated:
        current_user.ping()
        # ``request.endpoint`` can be None (e.g. unmatched URL); guard it
        # before slicing to avoid a TypeError.
        if not current_user.confirmed \
                and request.endpoint \
                and request.endpoint[:5] != 'auth.' \
                and request.endpoint != 'static':
            return redirect(url_for('auth.unconfirmed'))


@auth.route('/unconfirmed')
def unconfirmed():
    """Show the 'account not confirmed' notice to unconfirmed users."""
    if current_user.is_anonymous or current_user.confirmed:
        return redirect(url_for('main.index'))
    return render_template('auth/unconfirmed.html')


@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in by email/password, honouring the ``next`` redirect."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('帐号或密码不可用')
    return render_template('auth/login.html', form=form)


@auth.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the home page."""
    logout_user()
    flash('你已登出')
    return redirect(url_for('main.index'))


@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and send a confirmation-token email."""
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
        db.session.add(user)
        # commit now so the user has an id before the token is generated
        db.session.commit()
        token = user.generate_confirmation_token()
        send_email(user.email, '验证你的帐号',
                   'auth/email/confirm', user=user, token=token)
        flash('验证邮件已发送')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', form=form)


@auth.route('/confirm/<token>')
@login_required
def confirm(token):
    """Confirm the current user's account from the emailed token."""
    if current_user.confirmed:
        return redirect(url_for('main.index'))
    if current_user.confirm(token):
        flash('帐号已激活')
    else:
        flash('验证链接不可用或已过期')
    return redirect(url_for('main.index'))


@auth.route('/confirm')
@login_required
def resend_confirmation():
    """Email a fresh confirmation token to the current user."""
    token = current_user.generate_confirmation_token()
    send_email(current_user.email, '验证帐号',
               'auth/email/confirm', user=current_user, token=token)
    flash('新的验证邮件已经发送到你的邮箱')
    return redirect(url_for('main.index'))


@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Change the password after verifying the old one."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.old_password.data):
            current_user.password = form.password.data
            db.session.add(current_user)
            flash('密码更改成功')
            return redirect(url_for('main.index'))
        else:
            flash('密码不可用')
    return render_template("auth/change_password.html", form=form)


@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
    """Email a password-reset token (anonymous users only)."""
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetRequestForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user:
            token = user.generate_reset_token()
            send_email(user.email, '重新设置你的密码',
                       'auth/email/reset_password',
                       user=user, token=token,
                       next=request.args.get('next'))
        flash('重置密码的邮件已经发送到你的邮箱')
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)


@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
    """Set a new password from an emailed reset token (anonymous only)."""
    if not current_user.is_anonymous:
        return redirect(url_for('main.index'))
    form = PasswordResetForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is None:
            return redirect(url_for('main.index'))
        if user.reset_password(token, form.password.data):
            flash('Your password has been updated.')
            return redirect(url_for('auth.login'))
        else:
            return redirect(url_for('main.index'))
    return render_template('auth/reset_password.html', form=form)


@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
    """Send a change-of-address token to the new email after a password check."""
    form = ChangeEmailForm()
    if form.validate_on_submit():
        if current_user.verify_password(form.password.data):
            new_email = form.email.data
            token = current_user.generate_email_change_token(new_email)
            send_email(new_email, '验证邮箱',
                       'auth/email/change_email',
                       user=current_user, token=token)
            flash('验证邮件已经发送到你的邮箱')
            return redirect(url_for('main.index'))
        else:
            flash('邮箱或密码不可用')
    return render_template("auth/change_email.html", form=form)


@auth.route('/change-email/<token>')
@login_required
def change_email(token):
    """Apply a pending email change from the emailed token."""
    if current_user.change_email(token):
        flash('你的邮箱地址已经更新')
    else:
        flash('错误的请求')
    return redirect(url_for('main.index'))
mit
5,879,881,981,366,820,000
32.95092
78
0.630105
false
3.413942
false
false
false
kangwonlee/ECA
lab_07_linear_algebra/gauss_jordan.py
1
1766
# -*- coding: utf8 -*- from pprint import pprint import linear_algebra as la def gauss_jordan(A): # 행렬의 크기 n_row = len(A) n_column = len(A[0]) # 단위 행렬과의 Augmented Matrix 를 만듦 AI = [] for i_row in xrange(n_row): AI_row = [0.0] * (n_column * 2) for j_column in xrange(n_column): AI_row[j_column] = A[i_row][j_column] for j_column in xrange(n_column, n_column * 2): AI_row[j_column] = 0.0 AI_row[n_column + i_row] = 1.0 AI.append(AI_row) print "Augmented matrix" print '1234567890' * 7 pprint(AI, width=30) # pivot 반복문 for i_pivot in xrange(n_row): # pivot 행을 pivot 요소로 나눔. # pivot 요소는 1이 됨 ratio = 1.0 / float(AI[i_pivot][i_pivot]) for k_column in xrange(n_column * 2): AI[i_pivot][k_column] *= ratio # 행 반복문 for j_row in xrange(0, n_row): if j_row != i_pivot: ratio = -AI[j_row][i_pivot] # 열 반복문 for k_column in xrange(n_column * 2): AI[j_row][k_column] += ratio * AI[i_pivot][k_column] # 이 반복문이 끝나고 나면 주 대각선 이외의 요소는 모두 0 print "After Gauss Jordan" pprint(AI) # 오른쪽의 행렬을 떼어냄 result = [] for i_row in xrange(n_row): result.append(AI[i_row][n_column:]) return result if "__main__" == __name__: A = [[3, 2, 1], [2, 3, 2], [1, 2, 3]] A_inverse = gauss_jordan(A) print "A inverse" pprint(A_inverse) I_expected = la.multiply_matrix_matrix(A, A_inverse) print "I expected" pprint(I_expected)
apache-2.0
1,296,910,529,811,983,000
23.666667
72
0.514742
false
2.296192
false
false
false
Chris7/django-djangui
djangui/models/mixins.py
1
1697
from __future__ import absolute_import

from django.forms.models import model_to_dict
import six

__author__ = 'chris'


class UpdateScriptsMixin(object):
    """Reload the djangui script registry after every save."""

    def save(self, **kwargs):
        super(UpdateScriptsMixin, self).save(**kwargs)
        # imported lazily to avoid a circular import at module load time
        from ..backend.utils import load_scripts
        load_scripts()


class DjanguiPy2Mixin(object):
    """Provide ``__unicode__`` on Python 2 by delegating to ``__str__``."""

    def __unicode__(self):
        return unicode(self.__str__())


# from
# http://stackoverflow.com/questions/1355150/django-when-saving-how-can-you-check-if-a-field-has-changed
class ModelDiffMixin(object):
    """Track field values so a model instance can report which fields have
    changed since it was loaded or last saved.
    """

    def __init__(self, *args, **kwargs):
        super(ModelDiffMixin, self).__init__(*args, **kwargs)
        self.__initial = self._dict

    @property
    def diff(self):
        """Mapping of changed field name -> (initial value, current value)."""
        baseline = self.__initial
        current = self._dict
        return {
            name: (before, current[name])
            for name, before in baseline.items()
            if before != current[name]
        }

    @property
    def has_changed(self):
        return bool(self.diff)

    @property
    def changed_fields(self):
        return self.diff.keys()

    def get_field_diff(self, field_name):
        """Return the (old, new) pair for *field_name*, or None if unchanged."""
        return self.diff.get(field_name, None)

    def save(self, *args, **kwargs):
        """Save the model and reset the tracked baseline."""
        super(ModelDiffMixin, self).save(*args, **kwargs)
        self.__initial = self._dict

    @property
    def _dict(self):
        return model_to_dict(
            self, fields=[field.name for field in self._meta.fields])
gpl-3.0
4,384,404,271,976,613,400
27.3
104
0.602829
false
3.77951
false
false
false
manaris/jythonMusic
15. simpleCircleInstrument.py
1
2279
# simpleCircleInstrument.py # # Demonstrates how to use mouse and keyboard events to build a simple # drawing musical instrument. # from gui import * from music import * from math import sqrt ### initialize variables ###################### minPitch = C1 # instrument pitch range maxPitch = C8 # create display d = Display("Circle Instrument") # default dimensions (600 x 400) d.setColor( Color(51, 204, 255) ) # set background to turquoise beginX = 0 # holds starting x coordinate for next circle beginY = 0 # holds starting y coordinate # maximum circle diameter - same as diagonal of display maxDiameter = sqrt(d.getWidth()**2 + d.getHeight()**2) # calculate it ### define callback functions ###################### def beginCircle(x, y): # for when mouse is pressed global beginX, beginY beginX = x # remember new circle's coordinates beginY = y def endCircleAndPlayNote(endX, endY): # for when mouse is released global beginX, beginY, d, maxDiameter, minPitch, maxPitch # calculate circle parameters # first, calculate distance between begin and end points diameter = sqrt( (beginX-endX)**2 + (beginY-endY)**2 ) diameter = int(diameter) # in pixels - make it an integer radius = diameter/2 # get radius centerX = (beginX + endX)/2 # circle center is halfway between... 
centerY = (beginY + endY)/2 # ...begin and end points # draw circle with yellow color, unfilled, 3 pixels thick d.drawCircle(centerX, centerY, radius, Color.YELLOW, False, 3) # create note pitch = mapScale(diameter, 0, maxDiameter, minPitch, maxPitch, MAJOR_SCALE) # invert pitch (larger diameter, lower pitch) pitch = maxPitch - pitch # and play note Play.note(pitch, 0, 5000) # start immediately, hold for 5 secs def clearOnSpacebar(key): # for when a key is pressed global d # if they pressed space, clear display and stop the music if key == VK_SPACE: d.removeAll() # remove all shapes Play.allNotesOff() # stop all notes ### assign callback functions to display event handlers ############# d.onMouseDown( beginCircle ) d.onMouseUp( endCircleAndPlayNote ) d.onKeyDown( clearOnSpacebar )
gpl-3.0
2,095,986,917,681,406,200
31.571429
70
0.660816
false
3.628981
false
false
false
intelligent-agent/redeem
tests/gcode/test_M83.py
1
1638
from __future__ import absolute_import from .MockPrinter import MockPrinter from redeem.Path import Path class M83_Tests(MockPrinter): def test_gcodes_M83_from_absolute(self): """ set state as it should be after a G90, all axes absolute """ self.printer.axes_absolute = ["X", "Y", "Z", "E", "H", "A", "B", "C"] self.printer.axes_relative = [] self.printer.movement == Path.ABSOLUTE self.execute_gcode("M83") self.assertEqual(self.printer.movement, Path.MIXED) self.assertEqual(self.printer.axes_absolute, ["X", "Y", "Z"]) self.assertEqual(self.printer.axes_relative, ["E", "H", "A", "B", "C"]) def test_gcodes_M83_from_relative(self): """ set state as it should be after a G91, all axes relative """ self.printer.axes_absolute = [] self.printer.axes_relative = ["X", "Y", "Z", "E", "H", "A", "B", "C"] self.printer.movement == Path.RELATIVE self.execute_gcode("M83") self.assertEqual(self.printer.movement, Path.RELATIVE) self.assertEqual(self.printer.axes_relative, ["X", "Y", "Z", "E", "H", "A", "B", "C"]) self.assertEqual(self.printer.axes_absolute, []) def test_gcodes_M83_from_mixed(self): """ set state as it should be after a G90/M83, XYZ absolute and extruders relative """ self.printer.axes_absolute = ["X", "Y", "Z"] self.printer.axes_relative = ["E", "H", "A", "B", "C"] self.printer.movement == Path.MIXED self.execute_gcode("M83") self.assertEqual(self.printer.movement, Path.MIXED) self.assertEqual(self.printer.axes_relative, ["E", "H", "A", "B", "C"]) self.assertEqual(self.printer.axes_absolute, ["X", "Y", "Z"])
gpl-3.0
4,424,008,043,799,721,000
44.5
90
0.639194
false
3.005505
false
false
false
jayvdb/travis_log_fetch
tests/test_github.py
1
1132
"""Test Github resolution.""" from __future__ import absolute_import, unicode_literals from travis_log_fetch.config import ( _get_github, get_options, ) from travis_log_fetch.get import ( get_forks, ) import pytest # Note 'foo' is a real Github user, but they do not # have repos bar or baz class TestForks(object): def test_invalid(self): options = get_options() if not options.access_token: pytest.skip("github access token needed") _github = _get_github() pytest.raises(AssertionError, get_forks, _github, 'foo/bar') def test_zero(self): options = get_options() if not options.access_token: pytest.skip("github access token needed") _github = _get_github() forks = get_forks(_github, 'travispy/on_pypy') assert len(forks) == 0 def test_fork(self): options = get_options() if not options.access_token: pytest.skip("github access token needed") _github = _get_github() forks = get_forks(_github, 'menegazzo/travispy') assert 'jayvdb/travispy' in forks
mit
-6,854,272,443,919,678,000
27.3
68
0.621025
false
3.559748
true
false
false
jacobajit/ion
intranet/apps/events/views.py
1
11092
# -*- coding: utf-8 -*- import datetime import logging import bleach from django import http from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core import exceptions from django.shortcuts import get_object_or_404, redirect, render from .forms import AdminEventForm, EventForm from .models import Event logger = logging.getLogger(__name__) @login_required def events_view(request): """Events homepage. Shows a list of events occurring in the next week, month, and future. """ is_events_admin = request.user.has_admin_permission('events') if request.method == "POST": if "approve" in request.POST and is_events_admin: event_id = request.POST.get('approve') event = get_object_or_404(Event, id=event_id) event.rejected = False event.approved = True event.approved_by = request.user event.save() messages.success(request, "Approved event {}".format(event)) if "reject" in request.POST and is_events_admin: event_id = request.POST.get('reject') event = get_object_or_404(Event, id=event_id) event.approved = False event.rejected = True event.rejected_by = request.user event.save() messages.success(request, "Rejected event {}".format(event)) if is_events_admin and "show_all" in request.GET: viewable_events = (Event.objects.prefetch_related("groups")) else: viewable_events = (Event.objects.visible_to_user(request.user).prefetch_related("groups")) # get date objects for week and month today = datetime.date.today() delta = today - datetime.timedelta(days=today.weekday()) this_week = (delta, delta + datetime.timedelta(days=7)) this_month = (this_week[1], this_week[1] + datetime.timedelta(days=31)) events_categories = [ { "title": "This week", "events": viewable_events.filter(time__gte=this_week[0], time__lt=this_week[1]) }, { "title": "This month", "events": viewable_events.filter(time__gte=this_month[0], time__lt=this_month[1]) }, { "title": "Future", "events": viewable_events.filter(time__gte=this_month[1]) } ] if is_events_admin: 
unapproved_events = (Event.objects.filter(approved=False, rejected=False).prefetch_related("groups")) events_categories = [{"title": "Awaiting Approval", "events": unapproved_events}] + events_categories if is_events_admin and "show_all" in request.GET: events_categories.append({"title": "Past", "events": viewable_events.filter(time__lt=this_week[0])}) context = { "events": events_categories, "num_events": viewable_events.count(), "is_events_admin": is_events_admin, "events_admin": is_events_admin, "show_attend": True, "show_icon": True } return render(request, "events/home.html", context) @login_required def join_event_view(request, id): """Join event page. If a POST request, actually add or remove the attendance of the current user. Otherwise, display a page with confirmation. id: event id """ event = get_object_or_404(Event, id=id) if request.method == "POST": if not event.show_attending: return redirect("events") if "attending" in request.POST: attending = request.POST.get("attending") attending = (attending == "true") if attending: event.attending.add(request.user) else: event.attending.remove(request.user) return redirect("events") context = {"event": event, "is_events_admin": request.user.has_admin_permission('events')} return render(request, "events/join_event.html", context) @login_required def event_roster_view(request, id): """Show the event roster. Users with hidden eighth period permissions will not be displayed. Users will be able to view all other users, along with a count of the number of hidden users. (Same as 8th roster page.) Admins will see a full roster at the bottom. 
id: event id """ event = get_object_or_404(Event, id=id) full_roster = list(event.attending.all()) viewable_roster = [] num_hidden_members = 0 for p in full_roster: if p.can_view_eighth: viewable_roster.append(p) else: num_hidden_members += 1 context = { "event": event, "viewable_roster": viewable_roster, "full_roster": full_roster, "num_hidden_members": num_hidden_members, "is_events_admin": request.user.has_admin_permission('events'), } return render(request, "events/roster.html", context) @login_required def add_event_view(request): """Add event page. Currently, there is an approval process for events. If a user is an events administrator, they can create events directly. Otherwise, their event is added in the system but must be approved. """ is_events_admin = request.user.has_admin_permission('events') if not is_events_admin: return redirect("request_event") if request.method == "POST": form = EventForm(data=request.POST, all_groups=request.user.has_admin_permission('groups')) logger.debug(form) if form.is_valid(): obj = form.save() obj.user = request.user # SAFE HTML obj.description = bleach.linkify(obj.description) # auto-approve if admin obj.approved = True obj.approved_by = request.user messages.success(request, "Because you are an administrator, this event was auto-approved.") obj.created_hook(request) obj.save() return redirect("events") else: messages.error(request, "Error adding event") else: form = EventForm(all_groups=request.user.has_admin_permission('groups')) context = {"form": form, "action": "add", "action_title": "Add" if is_events_admin else "Submit", "is_events_admin": is_events_admin} return render(request, "events/add_modify.html", context) @login_required def request_event_view(request): """Request event page. Currently, there is an approval process for events. If a user is an events administrator, they can create events directly. Otherwise, their event is added in the system but must be approved. 
""" is_events_admin = False if request.method == "POST": form = EventForm(data=request.POST, all_groups=request.user.has_admin_permission('groups')) logger.debug(form) if form.is_valid(): obj = form.save() obj.user = request.user # SAFE HTML obj.description = bleach.linkify(obj.description) messages.success(request, "Your event needs to be approved by an administrator. If approved, it should appear on Intranet within 24 hours.") obj.created_hook(request) obj.save() return redirect("events") else: messages.error(request, "Error adding event") else: form = EventForm(all_groups=request.user.has_admin_permission('groups')) context = {"form": form, "action": "add", "action_title": "Submit", "is_events_admin": is_events_admin} return render(request, "events/add_modify.html", context) @login_required def modify_event_view(request, id=None): """Modify event page. You may only modify an event if you were the creator or you are an administrator. id: event id """ event = get_object_or_404(Event, id=id) is_events_admin = request.user.has_admin_permission('events') if not is_events_admin: raise exceptions.PermissionDenied if request.method == "POST": if is_events_admin: form = AdminEventForm(data=request.POST, instance=event, all_groups=request.user.has_admin_permission('groups')) else: form = EventForm(data=request.POST, instance=event, all_groups=request.user.has_admin_permission('groups')) logger.debug(form) if form.is_valid(): obj = form.save() obj.user = request.user # SAFE HTML obj.description = bleach.linkify(obj.description) obj.save() messages.success(request, "Successfully modified event.") # return redirect("events") else: messages.error(request, "Error adding event.") else: if is_events_admin: form = AdminEventForm(instance=event, all_groups=request.user.has_admin_permission('groups')) else: form = EventForm(instance=event, all_groups=request.user.has_admin_permission('groups')) context = {"form": form, "action": "modify", "action_title": "Modify", "id": id, 
"is_events_admin": is_events_admin} return render(request, "events/add_modify.html", context) @login_required def delete_event_view(request, id): """Delete event page. You may only delete an event if you were the creator or you are an administrator. Confirmation page if not POST. id: event id """ event = get_object_or_404(Event, id=id) if not request.user.has_admin_permission('events'): raise exceptions.PermissionDenied if request.method == "POST": try: event.delete() messages.success(request, "Successfully deleted event.") except Event.DoesNotExist: pass return redirect("events") else: return render(request, "events/delete.html", {"event": event}) @login_required def show_event_view(request): """ Unhide an event that was hidden by the logged-in user. events_hidden in the user model is the related_name for "users_hidden" in the EventUserMap model. """ if request.method == "POST": event_id = request.POST.get("event_id") if event_id: event = Event.objects.get(id=event_id) event.user_map.users_hidden.remove(request.user) event.user_map.save() return http.HttpResponse("Unhidden") return http.Http404() else: return http.HttpResponseNotAllowed(["POST"], "HTTP 405: METHOD NOT ALLOWED") @login_required def hide_event_view(request): """ Hide an event for the logged-in user. events_hidden in the user model is the related_name for "users_hidden" in the EventUserMap model. """ if request.method == "POST": event_id = request.POST.get("event_id") if event_id: event = Event.objects.get(id=event_id) event.user_map.users_hidden.add(request.user) event.user_map.save() return http.HttpResponse("Hidden") return http.Http404() else: return http.HttpResponseNotAllowed(["POST"], "HTTP 405: METHOD NOT ALLOWED")
gpl-2.0
-4,660,954,090,174,681,000
33.554517
152
0.627209
false
3.883754
false
false
false
TheVirtualLtd/bda.plone.shop
src/bda/plone/shop/vocabularies.py
1
6162
# -*- coding: utf-8 -*- from bda.plone.checkout.vocabularies import country_vocabulary from bda.plone.checkout.vocabularies import gender_vocabulary from bda.plone.payment import Payments from bda.plone.shipping import Shippings from bda.plone.shop import message_factory as _ from bda.plone.shop.utils import get_shop_article_settings from bda.plone.shop.utils import get_shop_tax_settings from zope.interface import provider from zope.schema.interfaces import IVocabularyFactory from zope.schema.vocabulary import SimpleTerm from zope.schema.vocabulary import SimpleVocabulary # This are the overall avaiable quantity units which then can be reduced in # control panel. If you need to provide more quantity units add it here or # patch this vocab AVAILABLE_QUANTITY_UNITS = { 'quantity': _('quantity', default='Quantity'), 'meter': _('meter', default='Meter'), 'kilo': _('kilo', default='Kilo'), 'liter': _('liter', default='Liter'), } @provider(IVocabularyFactory) def AvailableQuantityUnitVocabulary(context): # vocab is used in shop settings control panel items = AVAILABLE_QUANTITY_UNITS.items() return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def QuantityUnitVocabulary(context): # vocab is used for buyable items try: settings = get_shop_article_settings() except KeyError: # happens GS profile application if registry entries not present yet return AvailableQuantityUnitVocabulary(context) if not settings: return terms = [] for quantity_unit in settings.quantity_units: title = AVAILABLE_QUANTITY_UNITS.get(quantity_unit, quantity_unit) terms.append(SimpleTerm(value=quantity_unit, title=title)) return SimpleVocabulary(terms) # This are the overall avaiable VAT values which then can be reduced in # control panel. 
If you need to provide more vat values add it here or # patch this vocab AVAILABLE_VAT_VALUES = { '0': '0%', '2.5': '2,5%', '3.8': '3,8%', '8': '8%', '10': '10%', '15': '15%', '20': '20%', '25': '25%', } @provider(IVocabularyFactory) def AvailableVatVocabulary(context): # vocab is used in shop settings control panel items = AVAILABLE_VAT_VALUES.items() items = sorted(items, key=lambda x: x[0]) return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def VatVocabulary(context): # vocab is used for buyable items. try: settings = get_shop_tax_settings() except KeyError: # happens GS profile application if registry entries not present yet return AvailableVatVocabulary(context) settings.vat terms = [] if settings.vat: for vat in settings.vat: title = AVAILABLE_VAT_VALUES.get(vat, vat) terms.append(SimpleTerm(value=vat, title=title)) return SimpleVocabulary(terms) # This are the overall avaiable currency values available in # control panel. If you need to provide more currencies add it here or # patch this vocab AVAILABLE_CURRENCIES = { 'EUR': _('EUR', default='Euro'), 'USD': _('USD', default='US Dollar'), 'INR': _('INR', default='Indian Rupee'), 'CAD': _('CAD', default='Canadian Dollar'), 'CHF': _('CHF', default='Swiss Franc'), 'GBP': _('GBP', default='British Pound Sterling'), 'AUD': _('AUD', default='Australian Dollar'), 'NOK': _('NOK', default='Norwegian Krone'), 'SEK': _('SEK', default='Swedish Krona'), 'DKK': _('DKK', default='Danish Krone'), 'YEN': _('YEN', default='Japanese Yen'), 'NZD': _('NZD', default='New Zealand Dollar'), } @provider(IVocabularyFactory) def AvailableCurrenciesVocabulary(context): items = AVAILABLE_CURRENCIES.items() return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def CurrencyDisplayOptionsVocabulary(context): items = [ ('yes', _('yes', default='Yes')), ('no', _('no', default='No')), ('symbol', _('symbol', default='Symbol')), ] return 
SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def GenderVocabulary(context): return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in gender_vocabulary()]) @provider(IVocabularyFactory) def CountryVocabulary(context): """VocabularyFactory for countries from ISO3166 source. """ return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in country_vocabulary()]) @provider(IVocabularyFactory) def AvailableShippingMethodsVocabulary(context): shippings = Shippings(context).shippings items = [(shipping.sid, shipping.label) for shipping in shippings] return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def ShippingMethodsVocabulary(context): try: items = Shippings(context).vocab except (KeyError, TypeError): # happens GS profile application if registry entries not present yet return AvailableShippingMethodsVocabulary(context) return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def AvailablePaymentMethodsVocabulary(context): payments = Payments(context).payments items = [(payment.pid, payment.label) for payment in payments] return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def PaymentMethodsVocabulary(context): try: items = Payments(context).vocab except KeyError: # happens GS profile application if registry entries not present yet return AvailablePaymentMethodsVocabulary(context) return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items]) @provider(IVocabularyFactory) def SurchargeablePaymentMethodsVocabulary(context): payments = Payments(context).payments items = [(payment.pid, payment.label) for payment in payments] return SimpleVocabulary([SimpleTerm(value=k, title=v) for k, v in items])
bsd-3-clause
-2,564,873,223,783,599,600
34.011364
77
0.697339
false
3.535284
false
false
false
elaske/mufund
tests.py
1
3839
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: Evan Laske # @Date: 2014-03-01 21:45:31 # @Last Modified by: Evan Laske # @Last Modified time: 2015-09-15 23:51:12 import urllib import urllib2 from bs4 import BeautifulSoup import html5lib import re from StockQuote import StockQuote from MutualFundData import MutualFundData import logging import argparse def main(): parser = argparse.ArgumentParser() parser.add_argument('tickers', metavar='ticker', nargs='+', help='The ticker(s) of the funds to predict.') parser.add_argument('--logfile', dest='logfile', default='', help='Specify a log file to log info to.') parser.add_argument('--loglevel', dest='loglevel', default='', help='Specify a logging level to output.') args = parser.parse_args() # Logging configuration args logConfigArgs = dict() # If the log level was specified if args.loglevel: # Convert it to something usable numeric_level = getattr(logging, args.loglevel.upper(), None) # Double-check it's a valid logging level if not isinstance(numeric_level, int): raise ValueError('Invalid log level: %s' % args.loglevel) logConfigArgs['level'] = numeric_level # If there was any of the logging files specified... if args.logfile: logConfigArgs['filename'] = args.logfile # This will make the log file be overwritten each time. 
logConfigArgs['filemode'] = 'w' # If any of the logging arguments are specified, configure logging if args.logfile or args.loglevel: logging.basicConfig(**logConfigArgs) # Gather the data from the given stocks testStockQuote(args.tickers) # Test the mutual fund data gathering testMutualFund(args.tickers) def testStockQuote(tickers): """ """ for ticker in tickers: sq = StockQuote(ticker) print sq.ticker, sq.price, sq.change, sq.percent def testMutualFund(tickers): """ """ for ticker in tickers: mfd = MutualFundData(ticker) print mfd.price, mfd.change, mfd.percent holdings = mfd.holdings() print holdings for h in holdings: print 'Retrieving {0} data...'.format(h) sq = StockQuote(h) delta = float(holdings[h])*float(sq.percent)/100 holdings[h] = [holdings[h], sq.price, sq.change, sq.percent, delta] print delta, holdings[h], 'Complete.' print sq #print holdings print '\nESTIMATED CHANGE: {0}\nTOTAL COMPOSITION: {1}'.format( sum([v[4] for (k,v) in holdings.items()]), sum([float(v[0]) for (k,v) in holdings.items()])) def randomTest(): ticker = "FBIOX" quoteURL = 'http://quotes.morningstar.com/fund/f?t=' portfolioURL = 'http://portfolios.morningstar.com/fund/summary?t=' holdingsURL = 'http://portfolios.morningstar.com/fund/holdings?t=' googleFinanceURL = 'http://www.google.com/finance?q=' # Test with a stock #sq = StockQuote("goog") #print sq.price, sq.change, sq.percent #print sq # Test with a mutual fund sq = StockQuote("fiuix") print sq.price, sq.change, sq.percent mfd = MutualFundData("FBIOX") print mfd.price, mfd.change, mfd.percent holdings = mfd.holdings() #print holdings for h in holdings: print 'Retrieving {0} data...'.format(h) sq = StockQuote(h) delta = float(holdings[h])*float(sq.percent)/100 holdings[h] = [holdings[h], sq.price, sq.change, sq.percent, delta] print 'Complete.' 
#print holdings print '\nESTIMATED CHANGE: {0}\nTOTAL COMPOSITION: {1}'.format( sum([v[4] for (k,v) in holdings.items()]), sum([float(v[0]) for (k,v) in holdings.items()])) # Standard main call if __name__ == "__main__": main()
gpl-3.0
6,559,670,225,888,140,000
32.684211
110
0.636624
false
3.502737
true
false
false
Foldblade/EORS
Mypackage/back_to_yesterday.py
1
3091
# encoding:utf-8 ''' ———————————————————————————————— back_to_yesterday.py 对备份文件的回档,所谓‘回到昨天’功能。 实现原理:删除源文件。解压备份的zip,自动覆盖。 ———————————————————————————————— ''' import os import zipfile import shutil import time def back_to_yesterday(): where_script = os.path.split(os.path.realpath(__file__))[0] # print(where_script) where_rootmenu = where_script[:where_script.rfind('\\')] # print(where_rootmenu) def unzip(zipfilepath, unzippath): # zipfilepath 为需要解压的文件路径,unzippath为解压的目标目录 # e.g. unzip(where_rootmenu + '/cache/cache.zip', where_rootmenu + '/cache') f = zipfile.ZipFile(zipfilepath, 'r') for file in f.infolist(): d = file.date_time gettime = "%s/%s/%s %s:%s" % (d[0], d[1], d[2], d[3], d[4]) # 获取文件原修改时间 f.extract(file, unzippath) filep = os.path.join(unzippath, file.filename) timearry = time.mktime(time.strptime(gettime, '%Y/%m/%d %H:%M')) os.utime(filep, (timearry, timearry)) # 重写文件原修改时间 return def clear_unexist(dirname, zipfilename): zipfilepath = (where_rootmenu + '/backup/' + zipfilename) fileinzip = [] f = zipfile.ZipFile(zipfilepath, 'r') for filename in f.namelist(): # print(filename) fileinzip.append(filename) for parent, dirnames, filenames in os.walk(dirname): for filename in filenames: # print ("parent is:" + parent) # print("filename is:" + filename) # print ("the full name of the file is:" + os.path.join(parent,filename)) if filename not in fileinzip: os.remove(os.path.join(parent, filename)) # 删除压缩包内不存在的文件 return clear_unexist(where_rootmenu + '/cache', 'cache.zip') clear_unexist(where_rootmenu + '/data', 'data.zip') clear_unexist(where_rootmenu + '/output', 'output.zip') # 删除压缩包内不存在的文件 shutil.copyfile(where_rootmenu + '/backup/cache.zip', where_rootmenu + '/cache/cache.zip') shutil.copyfile(where_rootmenu + '/backup/output.zip', where_rootmenu + '/output/output.zip') shutil.copyfile(where_rootmenu + '/backup/data.zip', where_rootmenu + '/data/data.zip') # 拷贝备份zip到各自目录下 unzip(where_rootmenu + '/cache/cache.zip', where_rootmenu + '/cache') 
unzip(where_rootmenu + '/output/output.zip', where_rootmenu + '/output') unzip(where_rootmenu + '/data/data.zip', where_rootmenu + '/data') # 解压文件 os.remove(where_rootmenu + '/cache/cache.zip') os.remove(where_rootmenu + '/output/output.zip') os.remove(where_rootmenu + '/data/data.zip') # 删除拷贝的zip文件 print('成功穿越回昨日!!') return
gpl-3.0
-882,281,870,340,508,500
35.5
97
0.595705
false
2.798964
false
false
false
XiMuYouZi/PythonDemo
Crawler/Zhihu/zhihuuser/spiders/zhihu_user.py
1
4440
# -*- coding: utf-8 -*- # 爬取知乎全站的用户信息 import json from scrapy import Spider, Request from Crawler.Zhihu.zhihuuser.items import UserItem class ZhihuSpider(Spider): #忽略301,302重定向请求 # handle_httpstatus_list = [301, 302] name = "zhihu_user" allowed_domains = ["www.zhihu.com"] user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}' follows_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&offset={offset}&limit={limit}' followers_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?include={include}&offset={offset}&limit={limit}' start_user = 'excited-vczh' user_query = 'locations,employments,gender,educations,business,voteup_count,thanked_Count,follower_count,following_count,cover_url,following_topic_count,following_question_count,following_favlists_count,following_columns_count,answer_count,articles_count,pins_count,question_count,commercial_question_count,favorite_count,favorited_count,logs_count,marked_answers_count,marked_answers_text,message_thread_token,account_status,is_active,is_force_renamed,is_bind_sina,sina_weibo_url,sina_weibo_name,show_sina_weibo,is_blocking,is_blocked,is_following,is_followed,mutual_followees_count,vote_to_count,vote_from_count,thank_to_count,thank_from_count,thanked_count,description,hosted_live_count,participated_live_count,allow_message,industry_category,org_name,org_homepage,badge[?(type=best_answerer)].topics' follows_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics' followers_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics' def start_requests(self): yield Request(self.user_url.format(user=self.start_user, include=self.user_query), self.parse_user,dont_filter=True) yield Request(self.follows_url.format(user=self.start_user, include=self.follows_query, limit=20, offset=0), self.parse_follows,dont_filter=True) yield 
Request(self.followers_url.format(user=self.start_user, include=self.followers_query, limit=20, offset=0), self.parse_followers,dont_filter=True) def parse(self, response): print(response.text) #解析每个用户的信息 def parse_user(self, response): result = json.loads(response.text,strict=False) print('解析每个用户的信息\n: ',result) item = UserItem() #解析用户信息 for field in item.fields: if field in result.keys(): item[field] = result.get(field) yield item # 生成该用户的关注和粉丝用户的Request yield Request( self.follows_url.format(user=result.get('url_token'), include=self.follows_query, limit=20, offset=0), self.parse_follows) yield Request( self.followers_url.format(user=result.get('url_token'), include=self.followers_query, limit=20, offset=0), self.parse_followers) #解析他的关注列表 def parse_follows(self, response): results = json.loads(response.text,strict=False) print('解析他的关注列表\n: ',results) if 'data' in results.keys(): for result in results.get('data'): yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query), self.parse_user) if 'paging' in results.keys() and results.get('paging').get('is_end') == False: next_page = results.get('paging').get('next') yield Request(next_page, self.parse_follows) #解析他的粉丝列表 def parse_followers(self, response): results = json.loads(response.text,strict=False) print('解析他的粉丝列表\n: ',results) if 'data' in results.keys(): for result in results.get('data'): yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query), self.parse_user) if 'paging' in results.keys() and results.get('paging').get('is_end') == False: next_page = results.get('paging').get('next') yield Request(next_page, self.parse_followers)
mit
4,831,802,806,455,866,000
46.88764
808
0.666823
false
3.152367
false
false
false
sgzwiz/brython
tests/console.py
1
2190
import sys import time import random #this sucks.. cannot find dis since "root" path is blah/test #we might need to create a variable we pass via the brython function # to state what the root path is. # For now, we'll hardcode a relative path. :( sys.path.append("../Lib") import dis _rand=random.random() editor=JSObject(ace).edit("editor") editor.getSession().setMode("ace/mode/python") if sys.has_local_storage: from local_storage import storage else: storage = False def reset_src(): if storage: editor.setValue(storage["py_src"]) else: editor.setValue('for i in range(10):\n\tprint(i)') editor.scrollToRow(0) editor.gotoLine(0) def write(data): doc["console"].value += str(data) sys.stdout = object() sys.stdout.write = write sys.stderr = object() sys.stderr.write = write def to_str(xx): return str(xx) doc['version'].text = '.'.join(map(to_str,sys.version_info)) output = '' def show_console(): doc["console"].value = output doc["console"].cols = 60 def clear_text(): editor.setValue('') if sys.has_local_storage: storage["py_src"]='' doc["console"].value='' def run(): global output doc["console"].value='' src = editor.getValue() if storage: storage["py_src"]=src t0 = time.time() exec(src) output = doc["console"].value print('<completed in %s ms>' %(time.time()-t0)) # load a Python script def on_complete(req): editor.setValue(req.text) editor.scrollToRow(0) editor.gotoLine(0) def load(evt): _name=evt.target.value req = ajax() req.on_complete = on_complete req.open('GET',_name+'?foo=%s' % _rand,False) req.send() def show_js(): src = editor.getValue() doc["console"].value = dis.dis(src) def change_theme(evt): _theme=evt.target.value editor.setTheme(_theme) if storage: storage["ace_theme"]=_theme def reset_theme(): if storage: if storage["ace_theme"] is not None: if storage["ace_theme"].startswith("ace/theme/"): editor.setTheme(storage["ace_theme"]) doc["ace_theme"].value=storage["ace_theme"] reset_src() reset_theme()
bsd-3-clause
-4,744,852,278,866,266,000
19.660377
68
0.630594
false
3.254086
false
false
false
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/ratelimitbackend/backends.py
1
2730
import logging import warnings from datetime import datetime, timedelta from django.contrib.auth.backends import ModelBackend from django.core.cache import cache from .exceptions import RateLimitException logger = logging.getLogger('ratelimitbackend') class RateLimitMixin(object): """ A mixin to enable rate-limiting in an existing authentication backend. """ cache_prefix = 'ratelimitbackend-' minutes = 5 requests = 30 username_key = 'username' def authenticate(self, **kwargs): request = kwargs.pop('request', None) username = kwargs[self.username_key] if request is not None: counts = self.get_counters(request) if sum(counts.values()) >= self.requests: logger.warning( u"Login rate-limit reached: username '{0}', IP {1}".format( username, self.get_ip(request), ) ) raise RateLimitException('Rate-limit reached', counts) else: warnings.warn(u"No request passed to the backend, unable to " u"rate-limit. Username was '%s'" % username, stacklevel=2) user = super(RateLimitMixin, self).authenticate(**kwargs) if user is None and request is not None: logger.info( u"Login failed: username '{0}', IP {1}".format( username, self.get_ip(request), ) ) cache_key = self.get_cache_key(request) self.cache_incr(cache_key) return user def get_counters(self, request): return cache.get_many(self.keys_to_check(request)) def keys_to_check(self, request): now = datetime.now() return [ self.key( request, now - timedelta(minutes=minute), ) for minute in range(self.minutes + 1) ] def get_cache_key(self, request): return self.key(request, datetime.now()) def key(self, request, dt): return '%s%s-%s' % ( self.cache_prefix, self.get_ip(request), dt.strftime('%Y%m%d%H%M'), ) def get_ip(self, request): return request.META['REMOTE_ADDR'] def cache_incr(self, key): """ Non-atomic cache increment operation. Not optimal but consistent across different cache backends. 
""" cache.set(key, cache.get(key, 0) + 1, self.expire_after()) def expire_after(self): """Cache expiry delay""" return (self.minutes + 1) * 60 class RateLimitModelBackend(RateLimitMixin, ModelBackend): pass
agpl-3.0
6,461,471,023,397,237,000
29.674157
79
0.563736
false
4.299213
false
false
false
joetsoi/moonstone
python/main.py
1
1239
from collections import namedtuple from struct import unpack, unpack_from Segment = namedtuple('Segment', 'offset length') ViewportDimension = namedtuple('ViewportDimension', 'right left') class MainExe(object): def __init__(self, file_path): data_segment = Segment(0x138a0, 0xf460) with open(file_path, 'rb') as f: f.seek(data_segment.offset) data_segment_data = f.read(data_segment.length) self.bold_f_char_lookup = unpack( '>96B', data_segment_data[0x8006:0x8006 + (128 - 32)] ) self.screen_dimensions = ViewportDimension(*unpack( '<2H', data_segment_data[0x8002:0x8006] )) self.strings = { 'created by': unpack( '<5H', data_segment_data[0x8DCC:0x8DCC + 10] #should back 10 ), 'Loading...': unpack( '<5H', data_segment_data[0x8de0:0x8de0 + 10] ), 'Rob Anderson': unpack( '<5H', data_segment_data[0x8dd6:0x8dd6 + 10] ), } self.palette = unpack( '<32H', data_segment_data[0x892:0x892 + 0x40] )
agpl-3.0
326,647,625,653,657,340
27.813953
69
0.51816
false
3.529915
false
false
false
teoliphant/scipy
scipy/ndimage/filters.py
2
40010
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import math import numpy import _ni_support import _nd_image from scipy.misc import doccer __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace', 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude', 'gaussian_gradient_magnitude', 'correlate', 'convolve', 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', 'maximum_filter1d', 'minimum_filter', 'maximum_filter', 'rank_filter', 'median_filter', 'percentile_filter', 'generic_filter1d', 'generic_filter'] _input_doc = \ """input : array-like input array to filter""" _axis_doc = \ """axis : integer, optional axis of ``input`` along which to calculate. Default is -1""" _output_doc = \ """output : array, optional The ``output`` parameter passes an array in which to store the filter output.""" _size_foot_doc = \ """size : scalar or tuple, optional See footprint, below footprint : array, optional Either ``size`` or ``footprint`` must be defined. ``size`` gives the shape that is taken from the input array, at every element position, to define the input to the filter function. ``footprint`` is a boolean array that specifies (implicitly) a shape, but also which of the elements within this shape will get passed to the filter function. Thus ``size=(n,m)`` is equivalent to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number of dimensions of the input array, so that, if the input array is shape (10,10,10), and ``size`` is 2, then the actual size used is (2,2,2). """ _mode_doc = \ """mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The ``mode`` parameter determines how the array borders are handled, where ``cval`` is the value when mode is equal to 'constant'. Default is 'reflect'""" _cval_doc = \ """cval : scalar, optional Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0""" _origin_doc = \ """origin : scalar, optional The ``origin`` parameter controls the placement of the filter. 
Default 0""" _extra_arguments_doc = \ """extra_arguments : sequence, optional Sequence of extra positional arguments to pass to passed function""" _extra_keywords_doc = \ """extra_keywords : dict, optional dict of extra keyword arguments to pass to passed function""" docdict = { 'input':_input_doc, 'axis':_axis_doc, 'output':_output_doc, 'size_foot':_size_foot_doc, 'mode':_mode_doc, 'cval':_cval_doc, 'origin':_origin_doc, 'extra_arguments':_extra_arguments_doc, 'extra_keywords':_extra_keywords_doc, } docfiller = doccer.filldoc(docdict) @docfiller def correlate1d(input, weights, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional correlation along the given axis. The lines of the array along the given axis are correlated with the given weights. Parameters ---------- %(input)s weights : array one-dimensional sequence of numbers %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output, return_value = _ni_support._get_output(output, input) weights = numpy.asarray(weights, dtype=numpy.float64) if weights.ndim != 1 or weights.shape[0] < 1: raise RuntimeError('no filter weights given') if not weights.flags.contiguous: weights = weights.copy() axis = _ni_support._check_axis(axis, input.ndim) if ((len(weights) // 2 + origin < 0) or (len(weights) // 2 + origin > len(weights))): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate1d(input, weights, axis, output, mode, cval, origin) return return_value @docfiller def convolve1d(input, weights, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional convolution along the given axis. The lines of the array along the given axis are convolved with the given weights. 
Parameters ---------- %(input)s weights : ndarray one-dimensional sequence of numbers %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ weights = weights[::-1] origin = -origin if not len(weights) & 1: origin -= 1 return correlate1d(input, weights, axis, output, mode, cval, origin) @docfiller def gaussian_filter1d(input, sigma, axis = -1, order = 0, output = None, mode = "reflect", cval = 0.0): """One-dimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar standard deviation for Gaussian kernel %(axis)s order : {0, 1, 2, 3}, optional An order of 0 corresponds to convolution with a Gaussian kernel. An order of 1, 2, or 3 corresponds to convolution with the first, second or third derivatives of a Gaussian. Higher order derivatives are not implemented %(output)s %(mode)s %(cval)s """ if order not in range(4): raise ValueError('Order outside 0..3 not implemented') sd = float(sigma) # make the length of the filter equal to 4 times the standard # deviations: lw = int(4.0 * sd + 0.5) weights = [0.0] * (2 * lw + 1) weights[lw] = 1.0 sum = 1.0 sd = sd * sd # calculate the kernel: for ii in range(1, lw + 1): tmp = math.exp(-0.5 * float(ii * ii) / sd) weights[lw + ii] = tmp weights[lw - ii] = tmp sum += 2.0 * tmp for ii in range(2 * lw + 1): weights[ii] /= sum # implement first, second and third order derivatives: if order == 1 : # first derivative weights[lw] = 0.0 for ii in range(1, lw + 1): x = float(ii) tmp = -x / sd * weights[lw + ii] weights[lw + ii] = -tmp weights[lw - ii] = tmp elif order == 2: # second derivative weights[lw] *= -1.0 / sd for ii in range(1, lw + 1): x = float(ii) tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd weights[lw + ii] = tmp weights[lw - ii] = tmp elif order == 3: # third derivative weights[lw] = 0.0 sd2 = sd * sd for ii in range(1, lw + 1): x = float(ii) tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2 weights[lw + ii] = -tmp weights[lw - ii] = tmp return correlate1d(input, weights, axis, output, mode, cval, 0) 
@docfiller def gaussian_filter(input, sigma, order = 0, output = None, mode = "reflect", cval = 0.0): """Multi-dimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar or sequence of scalars standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. order : {0, 1, 2, 3} or sequence from same set, optional The order of the filter along each axis is given as a sequence of integers, or as a single number. An order of 0 corresponds to convolution with a Gaussian kernel. An order of 1, 2, or 3 corresponds to convolution with the first, second or third derivatives of a Gaussian. Higher order derivatives are not implemented %(output)s %(mode)s %(cval)s Notes ----- The multi-dimensional filter is implemented as a sequence of one-dimensional convolution filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. """ input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) orders = _ni_support._normalize_sequence(order, input.ndim) if not set(orders).issubset(set(range(4))): raise ValueError('Order outside 0..4 not implemented') sigmas = _ni_support._normalize_sequence(sigma, input.ndim) axes = range(input.ndim) axes = [(axes[ii], sigmas[ii], orders[ii]) for ii in range(len(axes)) if sigmas[ii] > 1e-15] if len(axes) > 0: for axis, sigma, order in axes: gaussian_filter1d(input, sigma, axis, order, output, mode, cval) input = output else: output[...] = input[...] return return_value @docfiller def prewitt(input, axis = -1, output = None, mode = "reflect", cval = 0.0): """Calculate a Prewitt filter. 
Parameters ---------- %(input)s %(axis)s %(output)s %(mode)s %(cval)s """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output, return_value = _ni_support._get_output(output, input) correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0,) return return_value @docfiller def sobel(input, axis = -1, output = None, mode = "reflect", cval = 0.0): """Calculate a Sobel filter. Parameters ---------- %(input)s %(axis)s %(output)s %(mode)s %(cval)s """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output, return_value = _ni_support._get_output(output, input) correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0) return return_value @docfiller def generic_laplace(input, derivative2, output = None, mode = "reflect", cval = 0.0, extra_arguments = (), extra_keywords = None): """Calculate a multidimensional laplace filter using the provided second derivative function. Parameters ---------- %(input)s derivative2 : callable Callable with the following signature:: derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. %(output)s %(mode)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) axes = range(input.ndim) if len(axes) > 0: derivative2(input, axes[0], output, mode, cval, *extra_arguments, **extra_keywords) for ii in range(1, len(axes)): tmp = derivative2(input, axes[ii], output.dtype, mode, cval, *extra_arguments, **extra_keywords) output += tmp else: output[...] = input[...] 
return return_value @docfiller def laplace(input, output = None, mode = "reflect", cval = 0.0): """Calculate a multidimensional laplace filter using an estimation for the second derivative based on differences. Parameters ---------- %(input)s %(output)s %(mode)s %(cval)s """ def derivative2(input, axis, output, mode, cval): return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) return generic_laplace(input, derivative2, output, mode, cval) @docfiller def gaussian_laplace(input, sigma, output = None, mode = "reflect", cval = 0.0): """Calculate a multidimensional laplace filter using gaussian second derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.. %(output)s %(mode)s %(cval)s """ input = numpy.asarray(input) def derivative2(input, axis, output, mode, cval, sigma): order = [0] * input.ndim order[axis] = 2 return gaussian_filter(input, sigma, order, output, mode, cval) return generic_laplace(input, derivative2, output, mode, cval, extra_arguments = (sigma,)) @docfiller def generic_gradient_magnitude(input, derivative, output = None, mode = "reflect", cval = 0.0, extra_arguments = (), extra_keywords = None): """Calculate a gradient magnitude using the provided function for the gradient. Parameters ---------- %(input)s derivative : callable Callable with the following signature:: derivative(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. `derivative` can assume that `input` and `output` are ndarrays. Note that the output from `derivative` is modified inplace; be careful to copy important inputs before returning them. 
%(output)s %(mode)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) axes = range(input.ndim) if len(axes) > 0: derivative(input, axes[0], output, mode, cval, *extra_arguments, **extra_keywords) numpy.multiply(output, output, output) for ii in range(1, len(axes)): tmp = derivative(input, axes[ii], output.dtype, mode, cval, *extra_arguments, **extra_keywords) numpy.multiply(tmp, tmp, tmp) output += tmp # This allows the sqrt to work with a different default casting if numpy.version.short_version > '1.6.1': numpy.sqrt(output, output, casting='unsafe') else: numpy.sqrt(output, output) else: output[...] = input[...] return return_value @docfiller def gaussian_gradient_magnitude(input, sigma, output = None, mode = "reflect", cval = 0.0): """Calculate a multidimensional gradient magnitude using gaussian derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.. 
%(output)s %(mode)s %(cval)s """ input = numpy.asarray(input) def derivative(input, axis, output, mode, cval, sigma): order = [0] * input.ndim order[axis] = 1 return gaussian_filter(input, sigma, order, output, mode, cval) return generic_gradient_magnitude(input, derivative, output, mode, cval, extra_arguments = (sigma,)) def _correlate_or_convolve(input, weights, output, mode, cval, origin, convolution): input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) weights = numpy.asarray(weights, dtype=numpy.float64) wshape = [ii for ii in weights.shape if ii > 0] if len(wshape) != input.ndim: raise RuntimeError('filter weights array has incorrect shape.') if convolution: weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] for ii in range(len(origins)): origins[ii] = -origins[ii] if not weights.shape[ii] & 1: origins[ii] -= 1 for origin, lenw in zip(origins, wshape): if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw): raise ValueError('invalid origin') if not weights.flags.contiguous: weights = weights.copy() output, return_value = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate(input, weights, output, mode, cval, origins) return return_value @docfiller def correlate(input, weights, output = None, mode = 'reflect', cval = 0.0, origin = 0): """ Multi-dimensional correlation. The array is correlated with the given kernel. Parameters ---------- input : array-like input array to filter weights : ndarray array of weights, same number of dimensions as input output : array, optional The ``output`` parameter passes an array in which to store the filter output. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The ``mode`` parameter determines how the array borders are handled, where ``cval`` is the value when mode is equal to 'constant'. 
Default is 'reflect' cval : scalar, optional Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0 origin : scalar, optional The ``origin`` parameter controls the placement of the filter. Default 0 See Also -------- convolve : Convolve an image with a kernel. """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, False) @docfiller def convolve(input, weights, output = None, mode = 'reflect', cval = 0.0, origin = 0): """ Multi-dimensional convolution. The array is convolved with the given kernel. Parameters ---------- input : array_like Input array to filter. weights : array_like Array of weights, same number of dimensions as input output : ndarray, optional The `output` parameter passes an array in which to store the filter output. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional the `mode` parameter determines how the array borders are handled. For 'constant' mode, values beyond borders are set to be `cval`. Default is 'reflect'. cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 origin : array_like, optional The `origin` parameter controls the placement of the filter. Default is 0. Returns ------- result : ndarray The result of convolution of `input` with `weights`. See Also -------- correlate : Correlate an image with a kernel. Notes ----- Each value in result is :math:`C_i = \\sum_j{I_{i+j-k} W_j}`, where W is the `weights` kernel, j is the n-D spatial index over :math:`W`, I is the `input` and k is the coordinate of the center of W, specified by `origin` in the input parameters. Examples -------- Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, because in this case borders (i.e. where the `weights` kernel, centered on any one value, extends beyond an edge of `input`. >>> a = np.array([[1, 2, 0, 0], .... [5, 3, 0, 4], .... [0, 0, 0, 7], .... 
[9, 3, 0, 0]]) >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]]) >>> from scipy import ndimage >>> ndimage.convolve(a, k, mode='constant', cval=0.0) array([[11, 10, 7, 4], [10, 3, 11, 11], [15, 12, 14, 7], [12, 3, 7, 0]]) Setting ``cval=1.0`` is equivalent to padding the outer edge of `input` with 1.0's (and then extracting only the original region of the result). >>> ndimage.convolve(a, k, mode='constant', cval=1.0) array([[13, 11, 8, 7], [11, 3, 11, 14], [16, 12, 14, 10], [15, 6, 10, 5]]) With ``mode='reflect'`` (the default), outer values are reflected at the edge of `input` to fill in missing values. >>> b = np.array([[2, 0, 0], [1, 0, 0], [0, 0, 0]]) >>> k = np.array([[0,1,0],[0,1,0],[0,1,0]]) >>> ndimage.convolve(b, k, mode='reflect') array([[5, 0, 0], [3, 0, 0], [1, 0, 0]]) This includes diagonally at the corners. >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]]) >>> ndimage.convolve(b, k) array([[4, 2, 0], [3, 2, 0], [1, 1, 0]]) With ``mode='nearest'``, the single nearest value in to an edge in `input` is repeated as many times as needed to match the overlapping `weights`. >>> c = np.array([[2, 0, 1], [1, 0, 0], [0, 0, 0]]) >>> k = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0]]) >>> ndimage.convolve(c, k, mode='nearest') array([[7, 0, 3], [5, 0, 2], [3, 0, 1]]) """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, True) @docfiller def uniform_filter1d(input, size, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional uniform filter along the given axis. The lines of the array along the given axis are filtered with a uniform filter of given size. 
Parameters ---------- %(input)s size : integer length of uniform filter %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output, return_value = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, origin) return return_value @docfiller def uniform_filter(input, size = 3, output = None, mode = "reflect", cval = 0.0, origin = 0): """Multi-dimensional uniform filter. Parameters ---------- %(input)s size : int or sequence of ints The sizes of the uniform filter are given for each axis as a sequence, or as a single number, in which case the size is equal for all axes. %(output)s %(mode)s %(cval)s %(origin)s Notes ----- The multi-dimensional filter is implemented as a sequence of one-dimensional uniform filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. """ input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) sizes = _ni_support._normalize_sequence(size, input.ndim) origins = _ni_support._normalize_sequence(origin, input.ndim) axes = range(input.ndim) axes = [(axes[ii], sizes[ii], origins[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if len(axes) > 0: for axis, size, origin in axes: uniform_filter1d(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] 
return return_value @docfiller def minimum_filter1d(input, size, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional minimum filter along the given axis. The lines of the array along the given axis are filtered with a minimum filter of given size. Parameters ---------- %(input)s size : int length along which to calculate 1D minimum %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output, return_value = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 1) return return_value @docfiller def maximum_filter1d(input, size, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculate a one-dimensional maximum filter along the given axis. The lines of the array along the given axis are filtered with a maximum filter of given size. 
Parameters ---------- %(input)s size : int length along which to calculate 1D maximum %(axis)s %(output)s %(mode)s %(cval)s %(origin)s """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output, return_value = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 0) return return_value def _min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, minimum): if structure is None: if footprint is None: if size is None: raise RuntimeError("no footprint provided") separable= True else: footprint = numpy.asarray(footprint) footprint = footprint.astype(bool) if numpy.alltrue(numpy.ravel(footprint),axis=0): size = footprint.shape footprint = None separable = True else: separable = False else: structure = numpy.asarray(structure, dtype=numpy.float64) separable = False if footprint is None: footprint = numpy.ones(structure.shape, bool) else: footprint = numpy.asarray(footprint) footprint = footprint.astype(bool) input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output, return_value = _ni_support._get_output(output, input) origins = _ni_support._normalize_sequence(origin, input.ndim) if separable: sizes = _ni_support._normalize_sequence(size, input.ndim) axes = range(input.ndim) axes = [(axes[ii], sizes[ii], origins[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if minimum: filter_ = minimum_filter1d else: filter_ = maximum_filter1d if len(axes) > 0: for axis, size, origin in axes: filter_(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] 
else: fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() if structure is not None: if len(structure.shape) != input.ndim: raise RuntimeError('structure array has incorrect shape') if not structure.flags.contiguous: structure = structure.copy() mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter(input, footprint, structure, output, mode, cval, origins, minimum) return return_value @docfiller def minimum_filter(input, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculates a multi-dimensional minimum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 1) @docfiller def maximum_filter(input, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculates a multi-dimensional maximum filter. 
Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 0) @docfiller def _rank_filter(input, rank, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0, operation = 'rank'): input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() filter_size = numpy.where(footprint, 1, 0).sum() if operation == 'median': rank = filter_size // 2 elif operation == 'percentile': percentile = rank if percentile < 0.0: percentile += 100.0 if percentile < 0 or percentile > 100: raise RuntimeError('invalid percentile') if percentile == 100.0: rank = filter_size - 1 else: rank = int(float(filter_size) * percentile / 100.0) if rank < 0: rank += filter_size if rank < 0 or rank >= filter_size: raise RuntimeError('rank not within filter footprint size') if rank == 0: return minimum_filter(input, None, footprint, output, mode, cval, origin) elif rank == filter_size - 1: return maximum_filter(input, None, footprint, output, mode, cval, origin) else: output, return_value = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.rank_filter(input, rank, footprint, output, mode, cval, origins) return return_value 
@docfiller def rank_filter(input, rank, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculates a multi-dimensional rank filter. Parameters ---------- %(input)s rank : integer The rank parameter may be less then zero, i.e., rank = -1 indicates the largest element. %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s """ return _rank_filter(input, rank, size, footprint, output, mode, cval, origin, 'rank') @docfiller def median_filter(input, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """ Calculates a multi-dimensional median filter. Parameters ---------- input : array-like input array to filter size : scalar or tuple, optional See footprint, below footprint : array, optional Either ``size`` or ``footprint`` must be defined. ``size`` gives the shape that is taken from the input array, at every element position, to define the input to the filter function. ``footprint`` is a boolean array that specifies (implicitly) a shape, but also which of the elements within this shape will get passed to the filter function. Thus ``size=(n,m)`` is equivalent to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number of dimensions of the input array, so that, if the input array is shape (10,10,10), and ``size`` is 2, then the actual size used is (2,2,2). output : array, optional The ``output`` parameter passes an array in which to store the filter output. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The ``mode`` parameter determines how the array borders are handled, where ``cval`` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0 origin : scalar, optional The ``origin`` parameter controls the placement of the filter. 
Default 0 """ return _rank_filter(input, 0, size, footprint, output, mode, cval, origin, 'median') @docfiller def percentile_filter(input, percentile, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0): """Calculates a multi-dimensional percentile filter. Parameters ---------- %(input)s percentile : scalar The percentile parameter may be less then zero, i.e., percentile = -20 equals percentile = 80 %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s """ return _rank_filter(input, percentile, size, footprint, output, mode, cval, origin, 'percentile') @docfiller def generic_filter1d(input, function, filter_size, axis = -1, output = None, mode = "reflect", cval = 0.0, origin = 0, extra_arguments = (), extra_keywords = None): """Calculate a one-dimensional filter along the given axis. generic_filter1d iterates over the lines of the array, calling the given function at each line. The arguments of the line are the input line, and the output line. The input and output lines are 1D double arrays. The input line is extended appropriately according to the filter size and origin. The output line must be modified in-place with the result. 
Parameters ---------- %(input)s function : callable function to apply along given axis filter_size : scalar length of the filter %(axis)s %(output)s %(mode)s %(cval)s %(origin)s %(extra_arguments)s %(extra_keywords)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output, return_value = _ni_support._get_output(output, input) if filter_size < 1: raise RuntimeError('invalid filter size') axis = _ni_support._check_axis(axis, input.ndim) if ((filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= filter_size)): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter1d(input, function, filter_size, axis, output, mode, cval, origin, extra_arguments, extra_keywords) return return_value @docfiller def generic_filter(input, function, size = None, footprint = None, output = None, mode = "reflect", cval = 0.0, origin = 0, extra_arguments = (), extra_keywords = None): """Calculates a multi-dimensional filter using the given function. At each element the provided function is called. The input values within the filter footprint at that element are passed to the function as a 1D array of double values. 
Parameters ---------- %(input)s function : callable function to apply at each element %(size_foot)s %(output)s %(mode)s %(cval)s %(origin)s %(extra_arguments)s %(extra_keywords)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint) footprint = footprint.astype(bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() output, return_value = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter(input, function, footprint, output, mode, cval, origins, extra_arguments, extra_keywords) return return_value
bsd-3-clause
-4,108,254,230,189,439,500
34.004374
79
0.597751
false
3.896192
false
false
false
vaniakosmos/memes-reposter
apps/imgur/migrations/0001_initial.py
1
1605
# Generated by Django 2.0.3 on 2018-06-30 17:27 import django.core.validators from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='ImgurConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('score_limit', models.IntegerField(default=1000, validators=[django.core.validators.MinValueValidator(0)])), ('good_tags', models.TextField(blank=True)), ('bad_tags', models.TextField(blank=True)), ('exclude_mode', models.BooleanField(default=True, help_text='If true posts with bad tags will be filtered out. Otherwise only posts from with good tags will pass the filter.')), ('channel_username', models.CharField(max_length=200, null=True)), ('chat_id', models.BigIntegerField(null=True)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('imgur_id', models.CharField(max_length=200)), ('title', models.TextField()), ('is_album', models.BooleanField()), ('tags', models.TextField()), ('images_links', models.TextField()), ], ), ]
mit
-4,660,612,646,727,245,000
38.146341
194
0.560748
false
4.533898
false
false
false
dsweet04/rekall
rekall-core/rekall/plugins/windows/heap_analysis.py
1
16866
# Rekall Memory Forensics # Copyright 2014 Google Inc. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or (at # your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # """The module implements user mode heap analysis. Recent versions of windows use the Low Fragmentation Heap (LFH). http://illmatics.com/Windows%208%20Heap%20Internals.pdf http://illmatics.com/Understanding_the_LFH.pdf http://www.leviathansecurity.com/blog/understanding-the-windows-allocator-a-redux/ """ from rekall import scan from rekall.plugins import core from rekall.plugins.windows import common from rekall_lib import utils class InspectHeap(common.WinProcessFilter): """Inspect the process heap. This prints a lot of interesting facts about the process heap. It is also the foundation to many other plugins which find things in the process heaps. NOTE: Currently we only support Windows 7 64 bit. 
""" name = "inspect_heap" __args = [ dict(name="free", type="Boolean", help="Also show freed chunks."), dict(name="heaps", type="ArrayIntParser", help="Only show these heaps (default show all)") ] mode = "mode_amd64" def __init__(self, *args, **kwargs): super(InspectHeap, self).__init__(*args, **kwargs) self.segments = utils.SortedCollection() def enumerate_lfh_heap_allocations(self, heap, skip_freed=False): """Dump the low fragmentation heap.""" seen_blocks = set() for lfh_block in heap.FrontEndHeap.SubSegmentZones.list_of_type( "_LFH_BLOCK_ZONE", "ListEntry"): block_length = lfh_block.FreePointer.v() - lfh_block.obj_end segments = heap.obj_profile.Array( target="_HEAP_SUBSEGMENT", offset=lfh_block.obj_end, size=block_length) for segment in segments: allocation_length = segment.BlockSize * 16 if segment.UserBlocks.v() in seen_blocks: break seen_blocks.add(segment.UserBlocks.v()) for entry in segment.UserBlocks.Entries: # http://www.leviathansecurity.com/blog/understanding-the-windows-allocator-a-redux/ # Skip freed blocks if requested. if skip_freed and entry.UnusedBytes & 0x38: continue UnusedBytes = entry.UnusedBytes & 0x3f - 0x8 # The actual length of user allocation is the difference # between the HEAP allocation bin size and the unused bytes # at the end of the allocation. data_len = allocation_length - UnusedBytes # The data length can not be larger than the allocation # minus the critical parts of _HEAP_ENTRY. Sometimes, # allocations overrun into the next element's _HEAP_ENTRY so # they can store data in the next entry's # entry.PreviousBlockPrivateData. In this case the # allocation length seems to be larger by 8 bytes. if data_len > allocation_length - 0x8: data_len -= 0x8 yield (heap.obj_profile.String(entry.obj_end, term=None, length=data_len), allocation_length) def enumerate_backend_heap_allocations(self, heap): """Enumerate all allocations for _EPROCESS instance.""" for seg in heap.Segments: seg_end = seg.LastValidEntry.v() # Ensure sanity. 
if seg.Heap.deref() != heap: continue # The segment is empty - often seg_end is zero here. if seg_end < seg.FirstEntry.v(): break for entry in seg.FirstEntry.walk_list("NextEntry", True): # If this is the last entry it goes until the end of the # segment. start = entry.obj_offset + 0x10 if start > seg_end: break allocation = entry.Allocation yield allocation def GenerateHeaps(self): task = self.session.GetParameter("process_context") resolver = self.session.address_resolver # Try to load the ntdll profile. ntdll_mod = resolver.GetModuleByName("ntdll") if not ntdll_mod: return ntdll_prof = ntdll_mod.profile # Set the ntdll profile on the _PEB member. peb = task.m("Peb").cast( "Pointer", target="_PEB", profile=ntdll_prof, vm=task.get_process_address_space()) for heap in peb.ProcessHeaps: yield heap def render(self, renderer): cc = self.session.plugins.cc() with cc: for task in self.filter_processes(): cc.SwitchProcessContext(task) renderer.section() renderer.format("{0:r}\n", task) for heap in self.GenerateHeaps(): self.render_process_heap_info(heap, renderer) def render_low_frag_info(self, heap, renderer): """Displays information about the low fragmentation front end.""" renderer.format("Low Fragmentation Front End Information:\n") renderer.table_header([ dict(name="Entry", style="address"), ("Alloc", "allocation_length", "4"), ("Length", "length", ">4"), dict(name="Data"), ]) # Render the LFH allocations in increasing allocation sizes. Collect # them first, then display by sorted allocation size, and offset. 
entries_by_size = {} for entry, allocation_length in self.enumerate_lfh_heap_allocations( heap): entries_by_size.setdefault(allocation_length, []).append(entry) for allocation_length, entries in sorted(entries_by_size.iteritems()): for entry in sorted(entries, key=lambda x: x.obj_offset): data = entry.v()[:64] renderer.table_row( entry, allocation_length, entry.length, utils.HexDumpedString(data), ) def render_process_heap_info(self, heap, renderer): if (self.plugin_args.heaps and heap.ProcessHeapsListIndex not in self.plugin_args.heaps): return if 1 <= heap.ProcessHeapsListIndex <= 64: renderer.format("Heap {0}: {1:#x} ({2})\nBackend Info:\n\n", heap.ProcessHeapsListIndex, heap.BaseAddress, heap.FrontEndHeapType) renderer.table_header([ dict(name="Segment", type="TreeNode", width=18, child=dict(style="address")), ("End", "segment_end", "[addr]"), ("Length", "length", "8"), dict(name="Data"), ]) for seg in heap.Segments: seg_start = seg.FirstEntry.obj_offset seg_end = seg.LastValidEntry.v() renderer.table_row( seg_start, seg_end, seg_end - seg_start, depth=1) for entry in seg.FirstEntry.walk_list("NextEntry", True): # If this is the last entry it goes until the end of the # segment. 
start = entry.obj_offset + 0x10 if start > seg_end: break if entry.Flags.LAST_ENTRY: end = seg.LastValidEntry.v() else: end = entry.obj_offset + entry.Size * 16 data = heap.obj_vm.read(start, min(16, end-start)) renderer.table_row( entry, end, end - start, utils.HexDumpedString(data), depth=2) if heap.FrontEndHeapType.LOW_FRAG: self.render_low_frag_info(heap, renderer) class ShowAllocation(common.WindowsCommandPlugin): """Show the allocation containing the address.""" name = "show_allocation" __args = [ dict(name="address", type="ArrayIntParser", positional=True, help="The address to display"), dict(name="preamble", type="IntParser", default=32, help="How many bytes prior to the address to display."), dict(name="length", type="IntParser", default=50 * 16, help="How many bytes after the address to display.") ] def BuildAllocationMap(self): """Build a map of all allocations for fast looksup.""" allocations = utils.RangedCollection() inspect_heap = self.session.plugins.inspect_heap() for heap in inspect_heap.GenerateHeaps(): # First do the backend allocations. for allocation in inspect_heap.enumerate_backend_heap_allocations( heap): # Include the header in the allocation. allocations.insert( allocation.obj_offset - 16, allocation.obj_offset + allocation.length + 16, (allocation.obj_offset, allocation.length, "B")) self.session.report_progress( "Enumerating backend allocation: %#x", lambda allocation=allocation: allocation.obj_offset) # Now do the LFH allocations (These will mask the subsegments in the # RangedCollection). for _ in inspect_heap.enumerate_lfh_heap_allocations( heap, skip_freed=False): allocation, allocation_length = _ self.session.report_progress( "Enumerating frontend allocation: %#x", lambda: allocation.obj_offset) # Front end allocations do not have their own headers. 
allocations.insert( allocation.obj_offset, allocation.obj_offset + allocation_length, (allocation.obj_offset, allocation_length, "F")) return allocations def __init__(self, *args, **kwargs): super(ShowAllocation, self).__init__(*args, **kwargs) self.offset = None # Get cached allocations for current process context. task = self.session.GetParameter("process_context") cache_key = "heap_allocations_%x" % task.obj_offset self.allocations = self.session.GetParameter(cache_key) if self.allocations == None: self.allocations = self.BuildAllocationMap() # Cache the allocations for next time. self.session.SetCache(cache_key, self.allocations) def GetAllocationForAddress(self, address): return self.allocations.get_containing_range(address) def CreateAllocationMap(self, start, length, alloc_start, alloc_type): address_map = core.AddressMap() # For backend allocs we highlight the heap entry before them. if alloc_type == "B": address_map.AddRange(alloc_start-16, alloc_start, "_HEAP_ENTRY") # Try to interpret pointers to other allocations and highlight them. count = length / 8 for pointer in self.profile.Array( offset=start, count=count, target="Pointer"): name = None alloc_start, alloc_length, alloc_type = ( self.allocations.get_containing_range(pointer.v())) if alloc_type is not None: # First check if the pointer points inside this allocation. if alloc_start == start + 16: name = "+%#x(%#x)" % (pointer.v() - start, pointer.v()) else: name = "%#x(%s@%#x)" % ( pointer.v(), alloc_length, alloc_start) else: # Maybe it is a resolvable address. name = ",".join(self.session.address_resolver.format_address( pointer.v(), max_distance=1024*1024)) if name: address_map.AddRange( pointer.obj_offset, pointer.obj_offset + 8, # Color it using a unique color related to the address. This # helps to visually relate the same address across different # dumps. 
"%s" % name, color_index=pointer.obj_offset) return address_map def render(self, renderer): for address in self.plugin_args.address: # If the user requested to view more than one address we do not # support plugin continuation (with v() plugin). if len(self.plugin_args.address) > 1: self.offset = None alloc_start, alloc_length, alloc_type = ( self.allocations.get_containing_range(address)) if not alloc_type: renderer.format("Allocation not found for address " "{0:style=address} in any heap.\n", address) alloc_start = address alloc_length = 50 * 16 alloc_type = None else: renderer.format( "Address {0:style=address} is {1} bytes into " "{2} allocation of size {3} " "({4:style=address} - {5:style=address})\n", address, address - alloc_start, alloc_type, alloc_length, alloc_start, alloc_start + alloc_length) # Start dumping preamble before the address if self.offset is not # specified. It will be specified when we run the plugin again using # v(). if self.offset is None: # Start dumping a little before the requested address, but do # not go before the start of the allocation. start = max(alloc_start, address - self.plugin_args.preamble) else: # Continue dumping from the last run. start = self.offset # Also show the _HEAP_ENTRY before backend allocations (Front end # allocations do not have a _HEAP_ENTRY). 
if alloc_type == "B": start -= 16 length = min(alloc_start + alloc_length - start, self.plugin_args.length) dump = self.session.plugins.dump( offset=start, length=length, address_map=self.CreateAllocationMap( start, length, alloc_start, alloc_type)) dump.render(renderer) self.offset = dump.offset class FindReferenceAlloc(common.WindowsCommandPlugin): """Show allocations that refer to an address.""" name = "show_referrer_alloc" __args = [ dict(name="address", type="IntParser", positional=True, required=True, help="The address to display") ] def get_referrers(self, address, maxlen=None): addr = self.profile.address() addr.write(address) pointer_scanner = scan.BaseScanner( address_space=self.session.GetParameter("default_address_space"), session=self.session, checks=[ ('StringCheck', dict(needle=addr.obj_vm.getvalue())) ]) # Just scan the entire userspace address space. This means we might find # hits outside the heap but this is usually useful as it would locate # static pointers in dlls. if maxlen is None: maxlen = self.session.GetParameter("highest_usermode_address") for hit in pointer_scanner.scan(maxlen=maxlen): yield hit def render(self, renderer): show_allocation = None for hit in self.get_referrers(self.address): show_allocation = self.session.plugins.show_allocation(hit) show_allocation.render(renderer) return show_allocation
gpl-2.0
5,344,717,608,450,970,000
37.594966
104
0.563441
false
4.506011
false
false
false
google/makani
gs/monitor2/apps/plugins/indicators/servo.py
1
17733
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """"Monitor indicators from the ground station.""" import collections import operator from makani.analysis.checks import avionics_util from makani.analysis.checks import check_range from makani.analysis.control import flap_limits from makani.avionics.common import pack_avionics_messages from makani.avionics.common import servo_types as servo_common from makani.avionics.firmware.monitors import servo_types from makani.avionics.network import aio_labels from makani.control import control_types from makani.gs.monitor2.apps.layout import indicator from makani.gs.monitor2.apps.layout import stoplights from makani.gs.monitor2.apps.plugins import common from makani.gs.monitor2.apps.plugins.indicators import avionics from makani.lib.python import c_helpers from makani.lib.python import struct_tree import numpy _SERVO_WARNING_HELPER = c_helpers.EnumHelper('ServoWarning', servo_common) _SERVO_ERROR_HELPER = c_helpers.EnumHelper('ServoError', servo_common) _SERVO_STATUS_HELPER = c_helpers.EnumHelper('ServoStatus', servo_common) _SERVO_LABELS_HELPER = c_helpers.EnumHelper('ServoLabel', aio_labels, prefix='kServo') _SERVO_ANALOG_VOLTAGE_HELPER = c_helpers.EnumHelper('ServoAnalogVoltage', servo_types) _SERVO_MON_WARNING_HELPER = c_helpers.EnumHelper('ServoMonitorWarning', servo_types) _SERVO_MON_ERROR_HELPER = c_helpers.EnumHelper('ServoMonitorError', servo_types) _ACTUATOR_STATE_HELPER = 
c_helpers.EnumHelper('ActuatorState', pack_avionics_messages, exclude='ActuatorStateCommand') class BaseServoIndicator(avionics.BaseActuatorIndicator): """Base class with utilities shared by servo indicators.""" def __init__(self, mode, label, precision, servo_labels=_SERVO_LABELS_HELPER.ShortNames(), show_label=True): super(BaseServoIndicator, self).__init__( mode, label, precision, servo_labels, 'Servo', _SERVO_LABELS_HELPER, common.MAX_NO_UPDATE_COUNT_SERVO_STATUS, full_comms_message_type='ServoStatus', tether_attribute='servo_statuses', show_label=show_label) class BaseArmedIndicator(BaseServoIndicator): """Base indicator for servos' armed status.""" def _GetSingleValue(self, arg_idx, *args): """Obtain a single value for one servo, invoked within _GetAvailableValues. Args: arg_idx: The index referring to the n-th servo. *args: The list of attributes to the indicator. The attributes vary in different modes. For FULL_COMMS_MODE, it is the list of ServoStatus messages for each servo, so args[arg_idx] refers to the servo's message struct. For SPARSE_COMMS_MODE, it is [TetherDown.servo_statuses, valid, timestamp_sec], so args[0][`EnumValue(A2)`] refers to the state of servo A2. Returns: The servo status of the n-th servo. """ if self._mode == common.FULL_COMMS_MODE: if struct_tree.IsValidElement(args[arg_idx]): return args[arg_idx].flags.status else: return None elif self._mode == common.SPARSE_COMMS_MODE: return self._GetTetherValue(args[0], self._node_labels[arg_idx], 'state') else: assert False @indicator.ReturnIfInputInvalid('--', stoplights.STOPLIGHT_UNAVAILABLE) def _Filter(self, *args): """Get the armed information of all servos. Args: *args: The list of attributes to the indicator. The attributes vary in different modes. For FULL_COMMS_MODE, it is the list of ServoStatus messages for each servo, so args[arg_idx] refers to the servo's message struct. 
For SPARSE_COMMS_MODE, it is [TetherDown.servo_statuses, valid, timestamp_sec], so args[0][`EnumValue(A2)`] refers to the state of servo A2. Returns: The text and stoplight to show. """ servo_status = self._GetAvailableValues(*args) if self._mode == common.FULL_COMMS_MODE: status_helper = _SERVO_STATUS_HELPER expecting = ['Armed'] elif self._mode == common.SPARSE_COMMS_MODE: status_helper = _ACTUATOR_STATE_HELPER expecting = ['Armed', 'Running'] else: assert False return self._CheckStatusFlags(servo_status, status_helper, expecting, stoplights.STOPLIGHT_ERROR) class BaseR22TemperatureIndicator(BaseServoIndicator): """Base indicator for servos' R22 temperatures.""" def __init__(self, *args, **kwargs): super(BaseR22TemperatureIndicator, self).__init__(*args, show_label=False, **kwargs) self._normal_ranges = check_range.BuildRanges([[None, 65]]) self._warning_ranges = check_range.BuildRanges([[None, 75]]) def _GetSingleValue(self, arg_idx, *args): if self._mode == common.FULL_COMMS_MODE: if struct_tree.IsValidElement(args[arg_idx]): return args[arg_idx].r22.temperature else: return None elif self._mode == common.SPARSE_COMMS_MODE: return self._GetTetherValue( args[0], self._node_labels[arg_idx], 'r22_temp') else: assert False @indicator.ReturnIfInputInvalid('', stoplights.STOPLIGHT_UNAVAILABLE) def _Filter(self, *args): temperatures, stoplight = self._GetFieldInfo( self._normal_ranges, self._warning_ranges, None, *args) return self._DictToString(temperatures), stoplight class BaseLvBusIndicator(indicator.BaseIndicator): """The base class for low voltage bus indicators.""" _voltage_names = ['LvA', 'LvB'] def __init__(self, servos, name): self._short_names = servos super(BaseLvBusIndicator, self).__init__(name) def _GatherVoltageData(self, messages): """Gather voltage data from the messages.""" voltages = collections.defaultdict(dict) any_value = False warning = False errors = [] for servo in self._short_names: if 'ServoStatus.Servo' + servo not in messages: continue 
any_value = True populated = messages[ 'ServoStatus.Servo%s.servo_mon.analog_populated' % servo] for voltage_name in self._voltage_names: # Guard against bad voltage names. if voltage_name not in _SERVO_ANALOG_VOLTAGE_HELPER: errors.append('Servo %s: Invalid voltage (%s)' % (servo, voltage_name)) continue index = _SERVO_ANALOG_VOLTAGE_HELPER.Value(voltage_name) if not avionics_util.TestMask(populated, index): continue voltages[voltage_name][servo] = messages[ 'ServoStatus.Servo%s.servo_mon.analog_data[%d]' % (servo, index)] warning |= avionics_util.CheckWarning( messages['ServoStatus.Servo%s.servo_mon.flags' % servo], _SERVO_MON_WARNING_HELPER.Value(voltage_name)) if errors: stoplight = stoplights.STOPLIGHT_ERROR elif not any_value: stoplight = stoplights.STOPLIGHT_UNAVAILABLE elif warning: stoplight = stoplights.STOPLIGHT_WARNING else: stoplight = stoplights.STOPLIGHT_NORMAL return voltages, stoplight, errors def Filter(self, messages): if not messages: return '--', stoplights.STOPLIGHT_UNAVAILABLE voltages, stoplight, errors = self._GatherVoltageData(messages) results = [' ' + ' '.join(v.rjust(4) for v in self._voltage_names)] for servo in self._short_names: servo_text = '%s:' % servo for voltage_name in self._voltage_names: if voltage_name in voltages and servo in voltages[voltage_name]: servo_text += ' %5.1f' % voltages[voltage_name][servo] else: servo_text += ' --'.rjust(6) results.append(servo_text) return '\n'.join(errors + results), stoplight class ArmedTailIndicator(BaseArmedIndicator): def __init__(self, mode): super(ArmedTailIndicator, self).__init__( mode, 'Tail Armed', 0, ['E1', 'E2', 'R1', 'R2']) class ArmedPortIndicator(BaseArmedIndicator): def __init__(self, mode): super(ArmedPortIndicator, self).__init__( mode, 'Port Armed', 0, ['A1', 'A2', 'A4']) class ArmedStarboardIndicator(BaseArmedIndicator): def __init__(self, mode): super(ArmedStarboardIndicator, self).__init__( mode, 'Starboard Armed', 0, ['A5', 'A7', 'A8']) class 
R22TemperatureTailIndicator(BaseR22TemperatureIndicator): def __init__(self, mode): super(R22TemperatureTailIndicator, self).__init__( mode, 'Tail R22 Temp', 0, ['E1', 'E2', 'R1', 'R2']) class R22TemperaturePortIndicator(BaseR22TemperatureIndicator): def __init__(self, mode): super(R22TemperaturePortIndicator, self).__init__( mode, 'Port R22 Temp', 0, ['A1', 'A2', 'A4']) class R22TemperatureStarboardIndicator(BaseR22TemperatureIndicator): def __init__(self, mode): super(R22TemperatureStarboardIndicator, self).__init__( mode, 'Star R22 Temp', 0, ['A5', 'A7', 'A8']) class LvBusTailIndicator(BaseLvBusIndicator): def __init__(self): super(LvBusTailIndicator, self).__init__( ['E1', 'E2', 'R1', 'R2'], 'Tail Bus [V]') class LvBusPortIndicator(BaseLvBusIndicator): def __init__(self): super(LvBusPortIndicator, self).__init__( ['A1', 'A2', 'A4'], 'Port Bus [V]') class LvBusStarboardIndicator(BaseLvBusIndicator): def __init__(self): super(LvBusStarboardIndicator, self).__init__( ['A5', 'A7', 'A8'], 'Starboard Bus [V]') class BasePosChart(avionics.ActuatorCmdDictChart): """The indicator to show servo position angles.""" def __init__(self, mode, name, servo_labels, show_cmd=True, **base_kwargs): super(BasePosChart, self).__init__( mode, name, servo_labels, 'Servo', _SERVO_LABELS_HELPER, common.MAX_NO_UPDATE_COUNT_SERVO_STATUS, show_cmd=show_cmd, full_comms_message_type='ServoStatus', tether_attribute='servo_statuses', precision=0, **base_kwargs) def _GetValuePerNode(self, arg_idx, *args): if self._mode == common.FULL_COMMS_MODE: return (numpy.rad2deg(args[arg_idx].angle_estimate) if struct_tree.IsValidElement(args[arg_idx]) else None) elif self._mode == common.SPARSE_COMMS_MODE: rad = self._GetTetherValue(args[0], self._node_labels[arg_idx], 'angle') return numpy.rad2deg(rad) if rad is not None else None else: assert False def _GetCmdValue(self, servo, controller_command): servo_idx = _SERVO_LABELS_HELPER.Value(servo) return 
numpy.rad2deg(controller_command.servo_angle[servo_idx]) class RudPosChart(BasePosChart): def __init__(self, mode, **widget_kwargs): nodes = ['R1', 'R2'] super(RudPosChart, self).__init__( mode, 'Rud Pos [&deg;]', nodes, show_cmd=True, **widget_kwargs) limits = flap_limits.FlapsToServos( flap_limits.GetControlCrosswindLimits())['R1'] limits = numpy.rad2deg(limits).tolist() self._SetLimits({ self._ObservationLabel(n): ( check_range.Interval(limits, inclusiveness=(False, False)), check_range.AllInclusiveRange()) for n in nodes }, [control_types.kFlightModeCrosswindNormal, control_types.kFlightModeCrosswindPrepTransOut]) class ElePosChart(BasePosChart): def __init__(self, mode, **widget_kwargs): nodes = ['E1', 'E2'] super(ElePosChart, self).__init__( mode, 'Ele Pos [&deg;]', nodes, show_cmd=True, **widget_kwargs) limits = flap_limits.FlapsToServos( flap_limits.GetControlCrosswindLimits())['E1'] limits = numpy.rad2deg(limits).tolist() self._SetLimits({ self._ObservationLabel(n): ( check_range.Interval(limits, inclusiveness=(False, False)), check_range.AllInclusiveRange()) for n in nodes }, [control_types.kFlightModeCrosswindNormal, control_types.kFlightModeCrosswindPrepTransOut]) class PortPosChart(BasePosChart): def __init__(self, mode, **widget_kwargs): super(PortPosChart, self).__init__( mode, 'Port Ail Pos [&deg;]', ['A1', 'A2', 'A4'], show_cmd=True, **widget_kwargs) self._SetLimits({ self._ObservationLabel(n): ( check_range.Interval( numpy.rad2deg(flap_limits.FlapsToServos( flap_limits.GetControlCrosswindLimits())[n]).tolist(), inclusiveness=(False, False)), check_range.AllInclusiveRange()) for n in ['A1', 'A2'] }, [control_types.kFlightModeCrosswindNormal, control_types.kFlightModeCrosswindPrepTransOut]) class StarboardPosChart(BasePosChart): def __init__(self, mode, **widget_kwargs): super(StarboardPosChart, self).__init__( mode, 'Star Ail Pos [&deg;]', ['A5', 'A7', 'A8'], show_cmd=True, **widget_kwargs) self._SetLimits({ self._ObservationLabel(n): ( 
check_range.Interval( numpy.rad2deg(flap_limits.FlapsToServos( flap_limits.GetControlCrosswindLimits())[n]).tolist(), inclusiveness=(False, False)), check_range.AllInclusiveRange()) for n in ['A7', 'A8'] }, [control_types.kFlightModeCrosswindNormal, control_types.kFlightModeCrosswindPrepTransOut]) class LvBusSummaryIndicator(BaseLvBusIndicator): """The summary class for low voltage bus indicators.""" _voltage_names = ['LvA', 'LvB'] def __init__(self): super(LvBusSummaryIndicator, self).__init__( _SERVO_LABELS_HELPER.ShortNames(), 'Servo LV Bus [V]') def Filter(self, messages): if not messages: return '--', stoplights.STOPLIGHT_UNAVAILABLE all_voltages, stoplight, errors = self._GatherVoltageData(messages) all_stats = {} for voltage_name in self._voltage_names: voltages = all_voltages[voltage_name] sorted_pairs = sorted(voltages.items(), key=operator.itemgetter(1)) num_units = len(voltages) all_stats[voltage_name] = { 'min': sorted_pairs[0] if voltages else None, 'max': sorted_pairs[-1] if voltages else None, 'median': sorted_pairs[num_units / 2] if voltages else None, } delimiter = ' ' results = [' '.rjust(7) + delimiter + delimiter.join(v.rjust(8) for v in self._voltage_names)] for metric in ['min', 'max', 'median']: text = metric.rjust(7) for voltage_name in self._voltage_names: stats = all_stats[voltage_name] text += delimiter if stats[metric] is not None: if isinstance(stats[metric], tuple): text += '{: 2.1f}({:2})'.format( stats[metric][1], stats[metric][0]) else: text += '{: 7.1f}'.format(stats[metric]) else: text += '--'.rjust(8) results.append(text) return '\n'.join(errors + results), stoplight class StatusIndicator(BaseServoIndicator): """Summary servo status.""" @indicator.RegisterModes(common.FULL_COMMS_MODE, common.SPARSE_COMMS_MODE) def __init__(self, mode, **format_kwargs): super(StatusIndicator, self).__init__(mode, 'Servo Status', 0) self._format_kwargs = format_kwargs def _GetSingleValue(self, arg_idx, *args): if self._mode == 
common.FULL_COMMS_MODE: if struct_tree.IsValidElement(args[arg_idx]): return [args[arg_idx].flags, args[arg_idx].servo_mon.flags] else: return None elif self._mode == common.SPARSE_COMMS_MODE: return self._GetTetherValue( args[0], self._node_labels[arg_idx], 'state') else: assert False @indicator.ReturnIfInputInvalid('', stoplights.STOPLIGHT_UNAVAILABLE) def _Filter(self, *attributes): any_warning_or_error = False warnings = collections.defaultdict(list) errors = collections.defaultdict(list) report_by_servo = collections.defaultdict(list) any_servo = False reports = self._GetAvailableValues(*attributes) for servo in _SERVO_LABELS_HELPER.ShortNames(): if servo not in reports or reports[servo] is None: continue if self._mode == common.FULL_COMMS_MODE: flags, mon_flags = reports[servo] any_servo = True if common.CheckFlags(servo, report_by_servo, warnings, errors, flags, _SERVO_WARNING_HELPER, _SERVO_ERROR_HELPER): any_warning_or_error = True if common.CheckFlags( servo, report_by_servo, warnings, errors, mon_flags, _SERVO_MON_WARNING_HELPER, _SERVO_MON_ERROR_HELPER): any_warning_or_error = True elif self._mode == common.SPARSE_COMMS_MODE: any_servo = True if reports[servo] & _ACTUATOR_STATE_HELPER.Value('Error'): any_warning_or_error = True report_by_servo[servo].append(('ERROR', 'status')) errors['status'].append(servo) return common.SummarizeWarningsAndErrors( any_servo, report_by_servo, warnings, errors, any_warning_or_error, **self._format_kwargs)
apache-2.0
5,210,630,127,275,359,000
35.189796
79
0.646366
false
3.526148
false
false
false
barak/autograd
examples/fluidsim/wing.py
1
6136
from __future__ import absolute_import from __future__ import print_function import autograd.numpy as np from autograd import value_and_grad from scipy.optimize import minimize import matplotlib.pyplot as plt import os from builtins import range rows, cols = 40, 60 # Fluid simulation code based on # "Real-Time Fluid Dynamics for Games" by Jos Stam # http://www.intpowertechcorp.com/GDC03.pdf def occlude(f, occlusion): return f * (1 - occlusion) def project(vx, vy, occlusion): """Project the velocity field to be approximately mass-conserving, using a few iterations of Gauss-Seidel.""" p = np.zeros(vx.shape) div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1) + np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0)) div = make_continuous(div, occlusion) for k in range(50): p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1) + np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0 p = make_continuous(p, occlusion) vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1)) vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0)) vx = occlude(vx, occlusion) vy = occlude(vy, occlusion) return vx, vy def advect(f, vx, vy): """Move field f according to x and y velocities (u and v) using an implicit Euler integrator.""" rows, cols = f.shape cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows)) center_xs = (cell_xs - vx).ravel() center_ys = (cell_ys - vy).ravel() # Compute indices of source cells. left_ix = np.floor(center_ys).astype(np.int) top_ix = np.floor(center_xs).astype(np.int) rw = center_ys - left_ix # Relative weight of right-hand cells. bw = center_xs - top_ix # Relative weight of bottom cells. left_ix = np.mod(left_ix, rows) # Wrap around edges of simulation. right_ix = np.mod(left_ix + 1, rows) top_ix = np.mod(top_ix, cols) bot_ix = np.mod(top_ix + 1, cols) # A linearly-weighted sum of the 4 surrounding cells. 
flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \ + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix]) return np.reshape(flat_f, (rows, cols)) def make_continuous(f, occlusion): non_occluded = 1 - occlusion num = np.roll(f, 1, axis=0) * np.roll(non_occluded, 1, axis=0)\ + np.roll(f, -1, axis=0) * np.roll(non_occluded, -1, axis=0)\ + np.roll(f, 1, axis=1) * np.roll(non_occluded, 1, axis=1)\ + np.roll(f, -1, axis=1) * np.roll(non_occluded, -1, axis=1) den = np.roll(non_occluded, 1, axis=0)\ + np.roll(non_occluded, -1, axis=0)\ + np.roll(non_occluded, 1, axis=1)\ + np.roll(non_occluded, -1, axis=1) return f * non_occluded + (1 - non_occluded) * num / ( den + 0.001) def sigmoid(x): return 0.5*(np.tanh(x) + 1.0) # Output ranges from 0 to 1. def simulate(vx, vy, num_time_steps, occlusion, ax=None, render=False): occlusion = sigmoid(occlusion) # Disallow occlusion outside a certain area. mask = np.zeros((rows, cols)) mask[10:30, 10:30] = 1.0 occlusion = occlusion * mask # Initialize smoke bands. 
red_smoke = np.zeros((rows, cols)) red_smoke[rows/4:rows/2] = 1 blue_smoke = np.zeros((rows, cols)) blue_smoke[rows/2:3*rows/4] = 1 print("Running simulation...") vx, vy = project(vx, vy, occlusion) for t in range(num_time_steps): plot_matrix(ax, red_smoke, occlusion, blue_smoke, t, render) vx_updated = advect(vx, vx, vy) vy_updated = advect(vy, vx, vy) vx, vy = project(vx_updated, vy_updated, occlusion) red_smoke = advect(red_smoke, vx, vy) red_smoke = occlude(red_smoke, occlusion) blue_smoke = advect(blue_smoke, vx, vy) blue_smoke = occlude(blue_smoke, occlusion) plot_matrix(ax, red_smoke, occlusion, blue_smoke, num_time_steps, render) return vx, vy def plot_matrix(ax, r, g, b, t, render=False): if ax: plt.cla() ax.imshow(np.concatenate((r[...,np.newaxis], g[...,np.newaxis], b[...,np.newaxis]), axis=2)) ax.set_xticks([]) ax.set_yticks([]) plt.draw() if render: plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight') plt.pause(0.001) if __name__ == '__main__': simulation_timesteps = 20 print("Loading initial and target states...") init_vx = np.ones((rows, cols)) init_vy = np.zeros((rows, cols)) # Initialize the occlusion to be a block. init_occlusion = -np.ones((rows, cols)) init_occlusion[15:25, 15:25] = 0.0 init_occlusion = init_occlusion.ravel() def drag(vx): return np.mean(init_vx - vx) def lift(vy): return np.mean(vy - init_vy) def objective(params): cur_occlusion = np.reshape(params, (rows, cols)) final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion) return -lift(final_vy) / drag(final_vx) # Specify gradient of objective function using autograd. 
objective_with_grad = value_and_grad(objective) fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, frameon=False) def callback(weights): cur_occlusion = np.reshape(weights, (rows, cols)) simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax) print("Rendering initial flow...") callback(init_occlusion) print("Optimizing initial conditions...") result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG', options={'maxiter':50, 'disp':True}, callback=callback) print("Rendering optimized flow...") final_occlusion = np.reshape(result.x, (rows, cols)) simulate(init_vx, init_vy, simulation_timesteps, final_occlusion, ax, render=True) print("Converting frames to an animated GIF...") # Using imagemagick. os.system("convert -delay 5 -loop 0 step*.png " "-delay 250 step{0:03d}.png wing.gif".format(simulation_timesteps)) os.system("rm step*.png")
mit
7,440,294,014,902,484,000
36.414634
100
0.608703
false
2.987342
false
false
false
MarvinTeichmann/KittiBox
tests/test_anno_load.py
1
14271
from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import json import logging import os import sys import random from random import shuffle import numpy as np import scipy as scp import scipy.misc sys.path.insert(1, '../incl') from scipy.misc import imread, imresize from utils.data_utils import (annotation_jitter, annotation_to_h5) from utils.annolist import AnnotationLib as AnnoLib import threading from collections import namedtuple import tensorflow as tf flags = tf.app.flags FLAGS = flags.FLAGS tf.app.flags.DEFINE_boolean( 'save', False, ('Whether to save the run. In case --nosave (default) ' 'output will be saved to the folder TV_DIR_RUNS/debug, ' 'hence it will get overwritten by further runs.')) flags.DEFINE_string('name', None, 'Append a name Tag to run.') fake_anno = namedtuple('fake_anno_object', ['rects']) from PIL import Image, ImageDraw rect = namedtuple('Rectangel', ['left', 'top', 'right', 'bottom']) def _get_ignore_rect(x, y, cell_size): left = x*cell_size right = (x+1)*cell_size top = y*cell_size bottom = (y+1)*cell_size return rect(left, top, right, bottom) def _rescale_boxes(current_shape, anno, target_height, target_width): x_scale = target_width / float(current_shape[1]) y_scale = target_height / float(current_shape[0]) for r in anno.rects: assert r.x1 < r.x2 r.x1 *= x_scale r.x2 *= x_scale assert r.y1 < r.y2 r.y1 *= y_scale r.y2 *= y_scale return anno def read_kitti_anno(label_file): """ Reads a kitti annotation file. Args: label_file: Path to file Returns: Lists of rectangels: Cars and don't care area. 
""" labels = [line.rstrip().split(' ') for line in open(label_file)] rect_list = [] for label in labels: if not (label[0] == 'Car' or label[0] == 'Van' or label[0] == 'DontCare'): continue if label[0] == 'DontCare': class_id = -1 else: class_id = 1 object_rect = AnnoLib.AnnoRect( x1=float(label[4]), y1=float(label[5]), x2=float(label[6]), y2=float(label[7])) assert object_rect.x1 < object_rect.x2 assert object_rect.y1 < object_rect.y2 object_rect.classID = class_id rect_list.append(object_rect) return rect_list def _load_idl_tf(idlfile, hypes, jitter=False, random_shuffel=True): """Take the idlfile and net configuration and create a generator that outputs a jittered version of a random image from the annolist that is mean corrected.""" annolist = AnnoLib.parse(idlfile) annos = [] for anno in annolist: anno.imageName = os.path.join( os.path.dirname(os.path.realpath(idlfile)), anno.imageName) annos.append(anno) random.seed(0) if hypes['data']['truncate_data']: annos = annos[:10] for epoch in itertools.count(): if random_shuffel: random.shuffle(annos) for anno in annos: im = imread(anno.imageName) if im.shape[2] == 4: im = im[:, :, :3] if im.shape[0] != hypes["image_height"] or \ im.shape[1] != hypes["image_width"]: if epoch == 0: anno = _rescale_boxes(im.shape, anno, hypes["image_height"], hypes["image_width"]) im = imresize( im, (hypes["image_height"], hypes["image_width"]), interp='cubic') if jitter: jitter_scale_min = 0.9 jitter_scale_max = 1.1 jitter_offset = 16 im, anno = annotation_jitter( im, anno, target_width=hypes["image_width"], target_height=hypes["image_height"], jitter_scale_min=jitter_scale_min, jitter_scale_max=jitter_scale_max, jitter_offset=jitter_offset) boxes, flags = annotation_to_h5(hypes, anno, hypes["grid_width"], hypes["grid_height"], hypes["rnn_len"]) boxes = boxes.reshape([hypes["grid_height"], hypes["grid_width"], 4]) flags = flags.reshape(hypes["grid_height"], hypes["grid_width"]) yield {"image": im, "boxes": boxes, "flags": flags, 
"rects": anno.rects, "anno": anno} def _generate_mask(hypes, ignore_rects): width = hypes["image_width"] height = hypes["image_height"] grid_width = hypes["grid_width"] grid_height = hypes["grid_height"] mask = np.ones([grid_height, grid_width]) for rect in ignore_rects: left = int(rect.x1/width*grid_width) right = int(rect.x2/width*grid_width) top = int(rect.y1/height*grid_height) bottom = int(rect.y2/height*grid_height) for x in range(left, right+1): for y in range(top, bottom+1): mask[y, x] = 0 return mask def _load_kitti_txt(kitti_txt, hypes, jitter=False, random_shuffel=True): """Take the txt file and net configuration and create a generator that outputs a jittered version of a random image from the annolist that is mean corrected.""" base_path = os.path.realpath(os.path.dirname(kitti_txt)) files = [line.rstrip() for line in open(kitti_txt)] if hypes['data']['truncate_data']: files = files[:10] random.seed(0) for epoch in itertools.count(): if random_shuffel: random.shuffle(files) for file in files: image_file, gt_image_file = file.split(" ") image_file = os.path.join(base_path, image_file) assert os.path.exists(image_file), \ "File does not exist: %s" % image_file gt_image_file = os.path.join(base_path, gt_image_file) assert os.path.exists(gt_image_file), \ "File does not exist: %s" % gt_image_file rect_list = read_kitti_anno(gt_image_file) anno = fake_anno(rect_list) im = scp.misc.imread(image_file) if im.shape[2] == 4: im = im[:, :, :3] if im.shape[0] != hypes["image_height"] or \ im.shape[1] != hypes["image_width"]: if epoch == 0: anno = _rescale_boxes(im.shape, anno, hypes["image_height"], hypes["image_width"]) im = imresize( im, (hypes["image_height"], hypes["image_width"]), interp='cubic') if jitter: jitter_scale_min = 0.9 jitter_scale_max = 1.1 jitter_offset = 16 im, anno = annotation_jitter( im, anno, target_width=hypes["image_width"], target_height=hypes["image_height"], jitter_scale_min=jitter_scale_min, jitter_scale_max=jitter_scale_max, 
jitter_offset=jitter_offset) pos_list = [rect for rect in anno.rects if rect.classID == 1] pos_anno = fake_anno(pos_list) boxes, confs = annotation_to_h5(hypes, pos_anno, hypes["grid_width"], hypes["grid_height"], hypes["rnn_len"]) mask_list = [rect for rect in anno.rects if rect.classID == -1] mask = _generate_mask(hypes, mask_list) boxes = boxes.reshape([hypes["grid_height"], hypes["grid_width"], 4]) confs = confs.reshape(hypes["grid_height"], hypes["grid_width"]) yield {"image": im, "boxes": boxes, "confs": confs, "rects": pos_list, "mask": mask} def _make_sparse(n, d): v = np.zeros((d,), dtype=np.float32) v[n] = 1. return v def _load_data_gen(hypes, phase, jitter): grid_size = hypes['grid_width'] * hypes['grid_height'] data_file = hypes["data"]['%s_idl' % phase] data_dir = hypes['dirs']['data_dir'] data_file = os.path.join(data_dir, data_file) data = _load_idl_tf(data_file, hypes, jitter={'train': jitter, 'val': False}[phase]) for d in data: output = {} rnn_len = hypes["rnn_len"] flags = d['flags'][0, :, 0, 0:rnn_len, 0] boxes = np.transpose(d['boxes'][0, :, :, 0:rnn_len, 0], (0, 2, 1)) assert(flags.shape == (grid_size, rnn_len)) assert(boxes.shape == (grid_size, rnn_len, 4)) output['image'] = d['image'] confs = [[_make_sparse(int(detection), d=hypes['num_classes']) for detection in cell] for cell in flags] output['confs'] = np.array(confs) output['boxes'] = boxes output['flags'] = flags yield output def test_new_kitti(): idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl" kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt" with open('../hypes/kittiBox.json', 'r') as f: logging.info("f: %s", f) hypes = json.load(f) hypes["rnn_len"] = 1 hypes["image_height"] = 200 hypes["image_width"] = 800 gen1 = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False) gen2 = _load_idl_tf(idlfile, hypes, random_shuffel=False) print('testing generators') for i in range(20): data1 = gen1.next() data2 = gen2.next() rects1 = data1['rects'] rects2 = data2['rects'] 
assert len(rects1) <= len(rects2) if not len(rects1) == len(rects2): print('ignoring flags') continue else: print('comparing flags') assert(np.all(data1['image'] == data2['image'])) # assert(np.all(data1['boxes'] == data2['boxes'])) if np.all(data1['flags'] == data2['flags']): print('same') else: print('diff') def draw_rect(draw, rect, color): rect_cords = ((rect.left, rect.top), (rect.left, rect.bottom), (rect.right, rect.bottom), (rect.right, rect.top), (rect.left, rect.top)) draw.line(rect_cords, fill=color, width=2) def draw_encoded(image, confs, mask=None, rects=None, cell_size=32): image = image.astype('uint8') im = Image.fromarray(image) shape = confs.shape if mask is None: mask = np.ones(shape) # overimage = mycm(confs_pred, bytes=True) poly = Image.new('RGBA', im.size) pdraw = ImageDraw.Draw(poly) for y in range(shape[0]): for x in range(shape[1]): outline = (0, 0, 0, 255) if confs[y, x]: fill = (0, 255, 0, 100) else: fill = (0, 0, 0, 0) rect = _get_ignore_rect(x, y, cell_size) pdraw.rectangle(rect, fill=fill, outline=fill) if not mask[y, x]: pdraw.line(((rect.left, rect.bottom), (rect.right, rect.top)), fill=(0, 0, 0, 255), width=2) pdraw.line(((rect.left, rect.top), (rect.right, rect.bottom)), fill=(0, 0, 0, 255), width=2) color = (0, 0, 255) for rect in rects: rect_cords = ((rect.x1, rect.y1), (rect.x1, rect.y2), (rect.x2, rect.y2), (rect.x2, rect.y1), (rect.x1, rect.y1)) pdraw.line(rect_cords, fill=color, width=2) im.paste(poly, mask=poly) return np.array(im) def draw_kitti_jitter(): idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl" kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt" with open('../hypes/kittiBox.json', 'r') as f: logging.info("f: %s", f) hypes = json.load(f) hypes["rnn_len"] = 1 gen = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False) data = gen.next() for i in range(20): data = gen.next() image = draw_encoded(image=data['image'], confs=data['confs'], rects=data['rects'], mask=data['mask']) scp.misc.imshow(image) 
scp.misc.imshow(data['mask']) def draw_idl(): idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl" kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt" with open('../hypes/kittiBox.json', 'r') as f: logging.info("f: %s", f) hypes = json.load(f) hypes["rnn_len"] = 1 gen = _load_idl_tf(idlfile, hypes, random_shuffel=False) data = gen.next() for i in range(20): data = gen.next() image = draw_encoded(image=data['image'], confs=data['flags'], rects=data['rects']) scp.misc.imshow(image) def draw_both(): idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl" kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt" with open('../hypes/kittiBox.json', 'r') as f: logging.info("f: %s", f) hypes = json.load(f) hypes["rnn_len"] = 1 gen1 = _load_idl_tf(idlfile, hypes, random_shuffel=False) gen2 = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False) data1 = gen1.next() data2 = gen2.next() for i in range(20): data1 = gen1.next() data2 = gen2.next() image1 = draw_encoded(image=data1['image'], confs=data1['flags'], rects=data1['rects']) image2 = draw_encoded(image=data2['image'], confs=data2['confs'], rects=data2['rects'], mask=data2['mask']) scp.misc.imshow(image1) scp.misc.imshow(image2) if __name__ == '__main__': draw_both()
mit
-198,509,635,553,043,260
31.434091
78
0.529676
false
3.366596
false
false
false
baayso/learn-python3
basic/the_list.py
1
1216
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # list是一种有序的集合,可以随时添加和删除其中的元素 classmates = ['Michael', 'Bob', 'Tracy'] print(classmates) print(len(classmates)) print(classmates[0]) print(classmates[1]) print(classmates[2]) print(classmates[-1]) print(classmates[-2]) print(classmates[-3]) print() # 追加元素到末尾 classmates.append('Adam') print(classmates) print() # 把元素插入到指定的位置 classmates.insert(1, 'Jack') print(classmates) print() # 删除list末尾的元素 classmates.pop() print(classmates) print() # 删除指定位置的元素 classmates.pop(1) print(classmates) print() # 把某个元素替换成别的元素,直接赋值给对应的索引位置 classmates[1] = 'Sarah' print(classmates) print() # list里面的元素的数据类型也可以不同 L = ['Apple', 123, True] print(L) print() # list元素也可以是另一个list s = ['python', 'java', ['asp', 'php'], 'scheme'] print(len(s)) print(s) print(len(s[2])) print(s[2][1]) print() # 注意s只有4个元素,其中s[2]又是一个list p = ['asp', 'php'] s = ['python', 'java', p, 'scheme'] print(len(s)) print(s) print() # 空list L = [] print(len(L))
apache-2.0
-2,483,030,746,383,875,600
12.71831
48
0.676591
false
1.723894
false
true
false
studenteternal/SoftLayer
get_list.py
1
1502
#!/usr/bin/python import yaml import SoftLayer from pprint import pprint credsFile = open("softcreds.yaml",'r') creds = yaml.load(credsFile) #print creds['username'] #print creds['api_key'] client = SoftLayer.Client(username=(creds['username']), api_key=(creds['api_key'])) n = 1 count = 1 kill_file = open("kill-file",'a') while n < 2: server_name = 'jbsampsobuntutemp' + str(count) n = n + 1 server_return = client['Virtual_Guest'].createObject({ 'datacenter': {'name': 'mex01'}, 'hostname': server_name, 'domain': 'test.com', 'startCpus': 1, 'maxMemory': 4096, 'hourlyBillingFlag': 'true', 'localDiskFlag': 'false', 'networkComponents': [{'maxSpeed': 1000}], 'privateNetworkOnlyFlag': 'false', 'blockDevices': [{'device': '0', 'diskImage': {'capacity': 100}}], 'operatingSystemReferenceCode': 'UBUNTU_latest', 'primaryBackendNetworkComponent': {'networkVlan': {'id': 773482}}, # 'tags': 'jbsampso,temp', # 'postInstallScriptUri': 'https://mex01.objectstorage.softlayer.net/v1/AUTH_3d7f3c03-9b34-418d-96f1-09a45712c21c/Jbsampso_startup_scripts/post_test.sh', }) count = count + 1 kill_file.write(str(server_return['id']) + '\n') # print server_return # server_return = server_return.split(',') # print server_return[15] # client['Virtual_Guest'].setUserMetadata(id=server_return['id']{ # 'metadata': {'jbsampso, temp'}} kill_file.close() credsFile.close() #pprint( server_return ) #print server_return['id']
mit
-2,538,125,823,673,107,500
28.45098
154
0.661784
false
2.839319
false
false
false
beia/beialand
projects/CitisimWebApp/app/src/users.py
1
2358
#!flask/bin/python from flask_bcrypt import Bcrypt from flask_login import UserMixin from main import login_manager from main import mydb import MySQLdb @login_manager.user_loader def load_user(user_id): user = User() return user.getUserByID(int(user_id)) class User(UserMixin): id = None username = None email = None password = None def __init__(self): print "Empty constructor" def getUserByID(self, id): mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor) mycursor.execute("select * from Users u where u.userID = " + str(id)) row = mycursor.fetchone() if(row is None): return None self.id = row['userID'] self.username = row['userName'] self.email = row['userEmail'] self.password = row['userPass'] return self def getUserByEmail(self, email): mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor) mycursor.execute("select * from Users u where u.userEmail = '" + str(email) + "'") row = mycursor.fetchone() if(row is None): return None self.id = row['userID'] self.username = row['userName'] self.email = row['userEmail'] self.password = row['userPass'] return self def userAuthentication(self, email, password): mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor) mycursor.execute("select * from Users u where u.userEmail = '" + str(email) + "'") row = mycursor.fetchone() if(row is None): return False bcrypt = Bcrypt() return bcrypt.check_password_hash(row['userPass'], password) def checkIfEmailExists(self, email): mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor) mycursor.execute("select * from Users u where u.userEmail = '" + str(email) + "'") row = mycursor.fetchone() if(row is None): return False return True def addUser(self, name, email, password): bcrypt = Bcrypt() mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor) mycursor.execute("insert into Users (userName, userEmail, userPass) values ('"+str(name)+"','"+str(email)+"','"+bcrypt.generate_password_hash(password)+"')") mydb.connection.commit()
gpl-3.0
5,566,151,524,797,660,000
29.623377
165
0.621713
false
3.778846
false
false
false
gitterHQ/ansible
v2/ansible/playbook/attribute.py
1
1053
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. #from ansible.common.errors import AnsibleError class Attribute(object): def __init__(self, isa=None, private=False, default=None): self.isa = isa self.private = private self.value = None self.default = default def __call__(self): return self.value class FieldAttribute(Attribute): pass
gpl-3.0
8,832,590,789,999,572,000
30.909091
70
0.720798
false
4.003802
false
false
false
abusesa/idiokit
idiokit/xmpp/jid.py
1
6351
# Module for XMPP JID processing as defined in on RFC 3920 # (http://www.ietf.org/rfc/rfc3920.txt) And RFC 3454 # (http://www.ietf.org/rfc/rfc3454.txt). # # This module was originally written using both the above RFCs and the # xmppstringprep module of the pyxmpp package # (http://pyxmpp.jajcus.net/) as well as the # twisted.words.protocols.jabber.xmpp_stringprep module of Twisted # (http://twistedmatrix.com/) as a reference. import re import threading import stringprep from encodings import idna from unicodedata import ucd_3_2_0 as unicodedata class JIDError(Exception): pass def check_prohibited_and_unassigned(chars, prohibited_tables): in_table_a1 = stringprep.in_table_a1 for pos, ch in enumerate(chars): if any(table(ch) for table in prohibited_tables): raise JIDError("prohibited character {0!r} at index {1}".format(ch, pos)) if in_table_a1(ch): raise JIDError("unassigned characted {0!r} at index {1}".format(ch, pos)) def check_bidirectional(chars): in_table_d1 = stringprep.in_table_d1 in_table_d2 = stringprep.in_table_d2 # RFC 3454: If a string contains any RandALCat character, the # string MUST NOT contain any LCat character. if not any(in_table_d1(ch) for ch in chars): return if any(in_table_d2(ch) for ch in chars): raise JIDError("string contains RandALCat and LCat characters") # RFC 3454: If a string contains any RandALCat character, a # RandALCat character MUST be the first character of the string, # and a RandALCat character MUST be the last character of the # string. 
if not (in_table_d1(chars[0]) and in_table_d1(chars[-1])): raise JIDError("string must start and end with RandALCat characters") NODEPREP_PROHIBITED = ( stringprep.in_table_c11, stringprep.in_table_c12, stringprep.in_table_c21, stringprep.in_table_c22, stringprep.in_table_c3, stringprep.in_table_c4, stringprep.in_table_c5, stringprep.in_table_c6, stringprep.in_table_c7, stringprep.in_table_c8, stringprep.in_table_c9, frozenset(u"\"&'/:<>@").__contains__ ) def nodeprep(string): in_table_b1 = stringprep.in_table_b1 map_table_b2 = stringprep.map_table_b2 string = u"".join(map_table_b2(ch) for ch in string if not in_table_b1(ch)) string = unicodedata.normalize("NFKC", string) check_prohibited_and_unassigned(string, NODEPREP_PROHIBITED) check_bidirectional(string) return string RESOURCEPREP_PROHIBITED = ( stringprep.in_table_c12, stringprep.in_table_c21, stringprep.in_table_c22, stringprep.in_table_c3, stringprep.in_table_c4, stringprep.in_table_c5, stringprep.in_table_c6, stringprep.in_table_c7, stringprep.in_table_c8, stringprep.in_table_c9 ) def resourceprep(string): in_table_b1 = stringprep.in_table_b1 string = u"".join(ch for ch in string if not in_table_b1(ch)) string = unicodedata.normalize("NFKC", string) check_prohibited_and_unassigned(string, RESOURCEPREP_PROHIBITED) check_bidirectional(string) return string JID_REX = re.compile(r"^(?:(.*?)@)?([^\.\/]+(?:\.[^\.\/]+)*)(?:/(.*))?$", re.U) def split_jid(jid): match = JID_REX.match(jid) if not match: raise JIDError("not a valid JID") return match.groups() def check_length(identifier, value): if len(value) > 1023: raise JIDError("{0} identifier too long".format(identifier)) return value def prep_node(node): if not node: return None node = nodeprep(node) return check_length("node", node) def prep_resource(resource): if not resource: return None resource = resourceprep(resource) return check_length("resource", resource) def prep_domain(domain): labels = domain.split(".") try: labels = map(idna.nameprep, labels) 
labels = map(idna.ToASCII, labels) except UnicodeError as ue: raise JIDError("not an internationalized label: {0}".format(ue)) labels = map(idna.ToUnicode, labels) domain = ".".join(labels) return check_length("domain", domain) def unicodify(item): if item is None: return None return unicode(item) class JID(object): cache = dict() cache_size = 2 ** 14 cache_lock = threading.Lock() __slots__ = "_node", "_domain", "_resource" node = property(lambda x: x._node) domain = property(lambda x: x._domain) resource = property(lambda x: x._resource) def __new__(cls, node=None, domain=None, resource=None): node = unicodify(node) domain = unicodify(domain) resource = unicodify(resource) with cls.cache_lock: cache_key = node, domain, resource if cache_key in cls.cache: return cls.cache[cache_key] if node is None and domain is None: raise JIDError("either a full JID or at least a domain expected") elif domain is None: if resource is not None: raise JIDError("resource not expected with a full JID") node, domain, resource = split_jid(node) obj = super(JID, cls).__new__(cls) obj._node = prep_node(node) obj._domain = prep_domain(domain) obj._resource = prep_resource(resource) with cls.cache_lock: if len(cls.cache) >= cls.cache_size: cls.cache.clear() cls.cache[cache_key] = obj return obj def bare(self): return JID(self.node, self.domain) def __reduce__(self): return JID, (self.node, self.domain, self.resource) def __eq__(self, other): if not isinstance(other, JID): return NotImplemented return self is other or unicode(self) == unicode(other) def __ne__(self, other): result = self.__eq__(other) if result is NotImplemented: return result return not result def __hash__(self): return hash(unicode(self)) def __repr__(self): return "{0}({1!r})".format(type(self).__name__, unicode(self)) def __unicode__(self): jid = self.domain if self.node is not None: jid = self.node + "@" + jid if self.resource is not None: jid = jid + "/" + self.resource return jid
mit
-8,818,032,369,330,515,000
27.608108
85
0.634861
false
3.374601
false
false
false
moustakas/impy
teaching/siena_class_roster.py
1
3362
import requests #import BeautifulSoup from bs4 import BeautifulSoup import HTMLParser from HTMLParser import HTMLParser import sys import os # Open the file. r = open(sys.argv[1]) if not os.path.isdir('Detail_Class_List_files'): os.rename('Detail Class List_files', 'Detail_Class_List_files') # Try to parse the webpage by looking for the tables. soup = BeautifulSoup(r) print "\documentclass{article}" print "\usepackage{graphicx}" print "\usepackage{subfig}" print "\hoffset=-1.50in" print "\setlength{\\textwidth}{7.5in}" print "\setlength{\\textheight}{9in}" print "\setlength{\\voffset}{0pt}" print "\setlength{\\topmargin}{0pt}" print "\setlength{\headheight}{0pt}" print "\setlength{\headsep}{0pt}" h2s = soup.find_all('h2') caption = 'Default' for h in h2s: if h.string.find('Class Roster For')>=0: caption = h.string tables = soup.find_all('table') icount = 0 closed_figure = False for table in tables: if table['class'][0]=='datadisplaytable': rows = table.findAll('tr') image = None name = None for row in rows: cols = row.findAll('td') for col in cols: img = col.findAll('img') a = col.findAll('a') #if len(a)>0: # import pdb ; pdb.set_trace() if len(img)>0 and img[0]['src'].find('jpg')>=0: image = img[0]['src'] image = image.replace(' ','_').replace('%20', '_') if not os.path.isfile(image): import pdb ; pdb.set_trace() #if os.path.isfile(image): # import pdb ; pdb.set_trace() if len(a)>0 and 'mailto' in a[0]['href']: name = a[0]['target'] #if len(a)>0 and a[0]['class']==['leftaligntext']: # name = a[0].string # print(image, a[0].string) #import pdb ; pdb.set_trace() if name is not None and image is not None: if icount%25==0: if icount > 0: print "\\clearpage" else: print "\\begin{document}" print "\\begin{figure}" print "\centering" closed_figure = False if os.stat(image).st_size < 250: #image = './file_not_found.jpg' image = './smiley.png' if icount%5==4: print "\subfloat[%s]{\includegraphics[height=0.19\\textwidth]{%s}}\\\\" % (name,image) else: print 
"\subfloat[%s]{\includegraphics[height=0.19\\textwidth]{%s}}\\hfill" % (name,image) image = None name = None if icount%25==24: print "\caption{%s}" % (caption) print "\end{figure}" closed_figure = True icount += 1 if not closed_figure: print "\caption{%s}" % (caption) print "\end{figure}" print "\end{document}"
gpl-2.0
-457,119,040,262,636,200
29.844037
113
0.467281
false
4.1
false
false
false
vitan/hue
desktop/libs/hadoop/src/hadoop/cluster.py
1
7247
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging from hadoop import conf from hadoop.fs import webhdfs, LocalSubFileSystem from hadoop.job_tracker import LiveJobTracker from desktop.lib.paths import get_build_dir LOG = logging.getLogger(__name__) FS_CACHE = None MR_CACHE = None MR_NAME_CACHE = 'default' def _make_filesystem(identifier): choice = os.getenv("FB_FS") if choice == "testing": path = os.path.join(get_build_dir(), "fs") if not os.path.isdir(path): LOG.warning(("Could not find fs directory: %s. 
Perhaps you need to run manage.py filebrowser_test_setup?") % path) return LocalSubFileSystem(path) else: cluster_conf = conf.HDFS_CLUSTERS[identifier] return webhdfs.WebHdfs.from_config(cluster_conf) def _make_mrcluster(identifier): cluster_conf = conf.MR_CLUSTERS[identifier] return LiveJobTracker.from_conf(cluster_conf) def get_hdfs(identifier="default"): global FS_CACHE get_all_hdfs() return FS_CACHE[identifier] def get_defaultfs(): fs = get_hdfs() if fs.logical_name: return fs.logical_name else: return fs.fs_defaultfs def get_all_hdfs(): global FS_CACHE if FS_CACHE is not None: return FS_CACHE FS_CACHE = {} for identifier in conf.HDFS_CLUSTERS.keys(): FS_CACHE[identifier] = _make_filesystem(identifier) return FS_CACHE def get_default_mrcluster(): """ Get the default JT (not necessarily HA). """ global MR_CACHE global MR_NAME_CACHE try: all_mrclusters() return MR_CACHE.get(MR_NAME_CACHE) except KeyError: # Return an arbitrary cluster candidates = all_mrclusters() if candidates: return candidates.values()[0] return None def get_default_yarncluster(): """ Get the default RM (not necessarily HA). """ global MR_NAME_CACHE try: return conf.YARN_CLUSTERS[MR_NAME_CACHE] except KeyError: return get_yarn() def get_next_ha_mrcluster(): """ Return the next available JT instance and cache its name. This method currently works for distincting between active/standby JT as a standby JT does not respond. A cleaner but more complicated way would be to do something like the MRHAAdmin tool and org.apache.hadoop.ha.HAServiceStatus#getServiceStatus(). 
""" global MR_NAME_CACHE candidates = all_mrclusters() has_ha = sum([conf.MR_CLUSTERS[name].SUBMIT_TO.get() for name in conf.MR_CLUSTERS.keys()]) >= 2 mrcluster = get_default_mrcluster() if mrcluster is None: return None current_user = mrcluster.user for name in conf.MR_CLUSTERS.keys(): config = conf.MR_CLUSTERS[name] if config.SUBMIT_TO.get(): jt = candidates[name] if has_ha: try: jt.setuser(current_user) status = jt.cluster_status() if status.stateAsString == 'RUNNING': MR_NAME_CACHE = name LOG.warn('Picking HA JobTracker: %s' % name) return (config, jt) else: LOG.info('JobTracker %s is not RUNNING, skipping it: %s' % (name, status)) except Exception, ex: LOG.info('JobTracker %s is not available, skipping it: %s' % (name, ex)) else: return (config, jt) return None def get_mrcluster(identifier="default"): global MR_CACHE all_mrclusters() return MR_CACHE[identifier] def all_mrclusters(): global MR_CACHE if MR_CACHE is not None: return MR_CACHE MR_CACHE = {} for identifier in conf.MR_CLUSTERS.keys(): MR_CACHE[identifier] = _make_mrcluster(identifier) return MR_CACHE def get_yarn(): global MR_NAME_CACHE if MR_NAME_CACHE in conf.YARN_CLUSTERS and conf.YARN_CLUSTERS[MR_NAME_CACHE].SUBMIT_TO.get(): return conf.YARN_CLUSTERS[MR_NAME_CACHE] for name in conf.YARN_CLUSTERS.keys(): yarn = conf.YARN_CLUSTERS[name] if yarn.SUBMIT_TO.get(): return yarn def get_next_ha_yarncluster(): """ Return the next available YARN RM instance and cache its name. 
""" from hadoop.yarn.resource_manager_api import ResourceManagerApi global MR_NAME_CACHE has_ha = sum([conf.YARN_CLUSTERS[name].SUBMIT_TO.get() for name in conf.YARN_CLUSTERS.keys()]) >= 2 for name in conf.YARN_CLUSTERS.keys(): config = conf.YARN_CLUSTERS[name] if config.SUBMIT_TO.get(): rm = ResourceManagerApi(config.RESOURCE_MANAGER_API_URL.get(), config.SECURITY_ENABLED.get(), config.SSL_CERT_CA_VERIFY.get()) if has_ha: try: cluster_info = rm.cluster() if cluster_info['clusterInfo']['haState'] == 'ACTIVE': MR_NAME_CACHE = name LOG.warn('Picking RM HA: %s' % name) from hadoop.yarn import resource_manager_api resource_manager_api._api_cache = None # Reset cache from hadoop.yarn import mapreduce_api mapreduce_api._api_cache = None return (config, rm) else: LOG.info('RM %s is not RUNNING, skipping it: %s' % (name, cluster_info)) except Exception, ex: LOG.info('RM %s is not available, skipping it: %s' % (name, ex)) else: return (config, rm) return None def get_cluster_for_job_submission(): """ Check the 'submit_to' for each MR/Yarn cluster, and return the config section of first one that enables submission. Support MR1/MR2 HA. """ yarn = get_next_ha_yarncluster() if yarn: return yarn mr = get_next_ha_mrcluster() if mr is not None: return mr return None def get_cluster_conf_for_job_submission(): cluster = get_cluster_for_job_submission() if cluster: config, rm = cluster return config else: return None def get_cluster_addr_for_job_submission(): """ Check the 'submit_to' for each MR/Yarn cluster, and return the logical name or host:port of first one that enables submission. """ if is_yarn(): if get_yarn().LOGICAL_NAME.get(): return get_yarn().LOGICAL_NAME.get() conf = get_cluster_conf_for_job_submission() if conf is None: return None return "%s:%s" % (conf.HOST.get(), conf.PORT.get()) def is_yarn(): return get_yarn() is not None def clear_caches(): """ Clears cluster's internal caches. Returns something that can be given back to restore_caches. 
""" global FS_CACHE, MR_CACHE old = FS_CACHE, MR_CACHE FS_CACHE, MR_CACHE = None, None return old def restore_caches(old): """ Restores caches from the result of a previous clear_caches call. """ global FS_CACHE, MR_CACHE FS_CACHE, MR_CACHE = old
apache-2.0
5,338,848,230,225,687,000
25.840741
132
0.673796
false
3.397562
true
false
false
facebookexperimental/eden
eden/integration/snapshot/verify.py
1
9902
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2. # pyre-strict import abc import os import stat as stat_mod import typing from pathlib import Path from typing import Dict, Iterator, List, Mapping, Optional, TypeVar, Union from eden.integration.lib import hgrepo _AnyPath = Union[Path, str] class _DefaultObject: pass _DEFAULT_OBJECT: _DefaultObject = _DefaultObject() class ExpectedFileBase(metaclass=abc.ABCMeta): def __init__( self, path: _AnyPath, contents: bytes, perms: int, file_type: int ) -> None: self.path: Path = Path(path) self.contents: bytes = contents self.permissions: int = perms self.file_type: int = file_type def verify( self, verifier: "SnapshotVerifier", path: Path, stat_info: os.stat_result ) -> None: found_perms = stat_mod.S_IMODE(stat_info.st_mode) if found_perms != self.permissions: verifier.error( f"{self.path}: expected permissions to be {self.permissions:#o}, " f"found {found_perms:#o}" ) found_file_type = stat_mod.S_IFMT(stat_info.st_mode) if found_file_type != self.file_type: verifier.error( f"{self.path}: expected file type to be {self.file_type:#o}, " f"found {found_file_type:#o}" ) else: self.verify_contents(verifier, path) @abc.abstractmethod def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None: pass def _error(self, msg: str) -> None: raise ValueError(msg) class ExpectedFile(ExpectedFileBase): def __init__(self, path: _AnyPath, contents: bytes, perms: int = 0o644) -> None: super().__init__(path, contents, perms, stat_mod.S_IFREG) def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None: with path.open("rb") as f: actual_contents = f.read() if actual_contents != self.contents: verifier.error( f"file contents mismatch for {self.path}:\n" f"expected: {self.contents!r}\n" f"actual: {actual_contents!r}" ) class ExpectedSymlink(ExpectedFileBase): def 
__init__(self, path: _AnyPath, contents: bytes, perms: int = 0o777) -> None: super().__init__(path, contents, perms, stat_mod.S_IFLNK) def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None: actual_contents = os.readlink(bytes(path)) if actual_contents != self.contents: verifier.error( f"symlink contents mismatch for {self.path}:\n" f"expected: {self.contents!r}\n" f"actual: {actual_contents!r}" ) class ExpectedSocket(ExpectedFileBase): def __init__(self, path: _AnyPath, perms: int = 0o755) -> None: super().__init__(path, b"", perms, stat_mod.S_IFSOCK) def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None: pass _ExpectedFile = TypeVar("_ExpectedFile", bound=ExpectedFileBase) class ExpectedFileSet(Mapping[Path, ExpectedFileBase]): """ ExpectedFileSet is basically a container of ExpectedFileBase objects, but also provides some helper methods for accessing and updating entries by path. """ def __init__(self) -> None: self._entries: Dict[Path, ExpectedFileBase] = {} def __len__(self) -> int: return len(self._entries) def __iter__(self) -> Iterator[Path]: return iter(self._entries.keys()) def __getitem__(self, path: _AnyPath) -> ExpectedFileBase: key = Path(path) return self._entries[key] def __delitem__(self, path: _AnyPath) -> None: key = Path(path) del self._entries[key] # pyre-fixme[14]: `__contains__` overrides method defined in `Mapping` # inconsistently. def __contains__(self, path: object) -> bool: if isinstance(path, str): key = Path(path) elif isinstance(path, Path): key = path else: return False return key in self._entries @typing.overload def pop(self, path: _AnyPath) -> ExpectedFileBase: ... @typing.overload # noqa: F811 def pop(self, path: _AnyPath, default: ExpectedFileBase) -> ExpectedFileBase: ... @typing.overload # noqa: F811 def pop(self, path: _AnyPath, default: None) -> Optional[ExpectedFileBase]: ... 
def pop( # noqa: F811 self, path: _AnyPath, default: Union[ExpectedFileBase, None, _DefaultObject] = _DEFAULT_OBJECT, ) -> Optional[ExpectedFileBase]: key = Path(path) if default is _DEFAULT_OBJECT: return self._entries.pop(key) else: tmp = typing.cast(Optional[ExpectedFileBase], default) return self._entries.pop(key, tmp) def add_file( self, path: _AnyPath, contents: bytes, perms: int = 0o644 ) -> ExpectedFile: return self.add(ExpectedFile(path=path, contents=contents, perms=perms)) def add_symlink( self, path: _AnyPath, contents: bytes, perms: int = 0o777 ) -> ExpectedSymlink: return self.add(ExpectedSymlink(path=path, contents=contents, perms=perms)) def add_socket(self, path: _AnyPath, perms: int = 0o755) -> ExpectedSocket: return self.add(ExpectedSocket(path=path, perms=perms)) def add(self, entry: _ExpectedFile) -> _ExpectedFile: assert entry.path not in self self._entries[entry.path] = entry return entry def set_file( self, path: _AnyPath, contents: bytes, perms: int = 0o644 ) -> ExpectedFile: return self.set(ExpectedFile(path=path, contents=contents, perms=perms)) def set_symlink( self, path: _AnyPath, contents: bytes, perms: int = 0o777 ) -> ExpectedSymlink: return self.set(ExpectedSymlink(path=path, contents=contents, perms=perms)) def set_socket(self, path: _AnyPath, perms: int = 0o755) -> ExpectedSocket: return self.set(ExpectedSocket(path=path, perms=perms)) def set(self, entry: _ExpectedFile) -> _ExpectedFile: self._entries[entry.path] = entry return entry class SnapshotVerifier: def __init__(self) -> None: self.errors: List[str] = [] self.quiet: bool = False def error(self, message: str) -> None: self.errors.append(message) if not self.quiet: print(f"==ERROR== {message}") def verify_directory(self, path: Path, expected: ExpectedFileSet) -> None: """Confirm that the contents of a directory match the expected file state.""" found_files = enumerate_directory(path) for expected_entry in expected.values(): file_stat = found_files.pop(expected_entry.path, 
None) if file_stat is None: self.error(f"{expected_entry.path}: file not present in snapshot") continue full_path = path / expected_entry.path try: expected_entry.verify(self, full_path, file_stat) except AssertionError as ex: self.error(f"{expected_entry.path}: {ex}") continue for path, stat_info in found_files.items(): if stat_mod.S_ISDIR(stat_info.st_mode): # Don't require directories to be listed explicitly in the input files continue if str(path.parents[0]) == ".hg": # Don't complain about files inside the .hg directory that the caller # did not explicitly specify. Mercurial can create a variety of files # here, and we don't care about checking the exact list of files it # happened to create when the snapshot was generated. continue self.error(f"{path}: unexpected file present in snapshot") def verify_hg_status( self, repo: hgrepo.HgRepository, expected: Dict[str, str], check_ignored: bool = True, ) -> None: actual_status = repo.status(include_ignored=check_ignored) for path, expected_char in expected.items(): actual_char = actual_status.pop(path, None) if expected_char != actual_char: self.error( f"{path}: unexpected hg status difference: " f"reported as {actual_char}, expected {expected_char}" ) for path, actual_char in actual_status.items(): self.error( f"{path}: unexpected hg status difference: " f"reported as {actual_char}, expected None" ) def enumerate_directory(path: Path) -> Dict[Path, os.stat_result]: """ Recursively walk a directory and return a dictionary of all of the files and directories it contains. Returns a dictionary of [path -> os.stat_result] The returned paths are relative to the input directory. 
""" entries: Dict[Path, os.stat_result] = {} _enumerate_directory_helper(path, Path(), entries) return entries def _enumerate_directory_helper( root_path: Path, rel_path: Path, results: Dict[Path, os.stat_result] ) -> None: for entry in os.scandir(root_path / rel_path): # Current versions of typeshed don't know about the follow_symlinks argument, # so ignore type errors on the next line. stat_info: os.stat_result = entry.stat(follow_symlinks=False) entry_path: Path = rel_path / entry.name results[entry_path] = stat_info if stat_mod.S_ISDIR(stat_info.st_mode): _enumerate_directory_helper(root_path, entry_path, results)
gpl-2.0
-8,863,614,257,235,671,000
34.113475
86
0.608261
false
3.893826
false
false
false
codelv/enaml-native
src/enamlnative/widgets/popup_window.py
1
4574
""" Copyright (c) 2017, Jairus Martin. Distributed under the terms of the MIT License. The full license is in the file LICENSE, distributed with this software. Created on Mar 17, 2018 @author: jrm """ from atom.api import ( Typed, ForwardTyped, Str, Float, Coerced, Bool, Enum, observe, ) from enaml.core.declarative import d_ from enaml.widgets.toolkit_object import ToolkitObject, ProxyToolkitObject from .view import coerce_size, coerce_gravity class ProxyPopupWindow(ProxyToolkitObject): """ The abstract definition of a proxy dialgo object. """ #: A reference to the Label declaration. declaration = ForwardTyped(lambda: PopupWindow) def set_height(self, height): raise NotImplementedError def set_width(self, width): raise NotImplementedError def set_x(self, x): raise NotImplementedError def set_y(self, y): raise NotImplementedError def set_position(self, position): raise NotImplementedError def set_focusable(self, enabled): raise NotImplementedError def set_touchable(self, enabled): raise NotImplementedError def set_outside_touchable(self, enabled): raise NotImplementedError def set_background_color(self, color): raise NotImplementedError def set_show(self, show): raise NotImplementedError def set_style(self, style): raise NotImplementedError def set_animation(self, style): raise NotImplementedError class PopupWindow(ToolkitObject): """ A popup window that may contain a view. """ #: Width and height or a string "match_parent" or "fill_parent" width = d_(Coerced(int, coercer=coerce_size)) height = d_(Coerced(int, coercer=coerce_size)) #: Layout gravity gravity = d_(Coerced(int, coercer=coerce_gravity)) #: Position x = d_(Float(strict=False)) y = d_(Float(strict=False)) #: Set whether the popup window can be focused focusable = d_(Bool()) #: Set whether the popup is touchable touchable = d_(Bool(True)) #: Controls whether the pop-up will be informed of touch events outside #: of its window. 
outside_touchable = d_(Bool(True)) #: Start the popup and display it on screen (or hide if False) show = d_(Bool()) #: Background color of the window (white by default) background_color = d_(Str()) #: If relative, show as a dropdown on the parent view, otherwise #: show at the position given by `x` and `y`. position = d_(Enum('relative', 'absolute')) #: Animation style for the PopupWindow using the @style format #: (ex. @style/MyAnimation animation = d_(Str()) #: PopupWindow style using the @style format #: (ex. @style/Theme_Light_NoTitleBar_Fullscreen style = d_(Str()) #: A reference to the proxy object. proxy = Typed(ProxyPopupWindow) # ------------------------------------------------------------------------- # Observers # ------------------------------------------------------------------------- @observe('width', 'height', 'x', 'y', 'position', 'focusable', 'touchable', 'outside_touchable', 'show', 'animation', 'style', 'background_color') def _update_proxy(self, change): """ An observer which sends the state change to the proxy. """ # The superclass implementation is sufficient. super(PopupWindow, self)._update_proxy(change) def popup(self): """ Show the window from code. This will initialize and activate if needed. Examples -------- >>> enamldef ContextMenu(PopupWindow): popup: attr result: lambda text: None Button: text = "One" clicked :: dialog.show = False dialog.result(self.text) Button: text = "Two" clicked :: dialog.show = False dialog.result(self.text) def on_result(value): print("User clicked: {}".format(value)) ContextMenu(result=on_result).popup() Notes ------ This does NOT block. Callbacks should be used to handle click events or the `show` state should be observed to know when it is closed. """ if not self.is_initialized: self.initialize() if not self.proxy_is_active: self.activate_proxy() self.show = True
mit
-8,627,291,073,902,098,000
27.234568
79
0.593791
false
4.40231
false
false
false
gst/alignak
alignak/macroresolver.py
2
23875
# -*- coding: utf-8 -*- # # Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors # # This file is part of Alignak. # # Alignak is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Alignak is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Alignak. If not, see <http://www.gnu.org/licenses/>. # # # This file incorporates work covered by the following copyright and # permission notice: # # Copyright (C) 2009-2014: # Hartmut Goebel, h.goebel@goebel-consult.de # Nicolas Dupeux, nicolas@dupeux.net # Gerhard Lausser, gerhard.lausser@consol.de # Grégory Starck, g.starck@gmail.com # Frédéric Pégé, frederic.pege@gmail.com # Sebastien Coavoux, s.coavoux@free.fr # Olivier Hanesse, olivier.hanesse@gmail.com # Jean Gabes, naparuba@gmail.com # Zoran Zaric, zz@zoranzaric.de # David Gil, david.gil.marcos@gmail.com # This file is part of Shinken. # # Shinken is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Shinken is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Shinken. If not, see <http://www.gnu.org/licenses/>. 
""" This class resolve Macro in commands by looking at the macros list in Class of elements. It give a property that call be callable or not. It not callable, it's a simple property and replace the macro with the value If callable, it's a method that is called to get the value. for example, to get the number of service in a host, you call a method to get the len(host.services) """ import re import time from alignak.borg import Borg class MacroResolver(Borg): """MacroResolver class is used to resolve macros (in command call). See above for details""" my_type = 'macroresolver' # Global macros macros = { 'TOTALHOSTSUP': '_get_total_hosts_up', 'TOTALHOSTSDOWN': '_get_total_hosts_down', 'TOTALHOSTSUNREACHABLE': '_get_total_hosts_unreachable', 'TOTALHOSTSDOWNUNHANDLED': '_get_total_hosts_unhandled', 'TOTALHOSTSUNREACHABLEUNHANDLED': '_get_total_hosts_unreachable_unhandled', 'TOTALHOSTPROBLEMS': '_get_total_host_problems', 'TOTALHOSTPROBLEMSUNHANDLED': '_get_total_host_problems_unhandled', 'TOTALSERVICESOK': '_get_total_service_ok', 'TOTALSERVICESWARNING': '_get_total_services_warning', 'TOTALSERVICESCRITICAL': '_get_total_services_critical', 'TOTALSERVICESUNKNOWN': '_get_total_services_unknown', 'TOTALSERVICESWARNINGUNHANDLED': '_get_total_services_warning_unhandled', 'TOTALSERVICESCRITICALUNHANDLED': '_get_total_services_critical_unhandled', 'TOTALSERVICESUNKNOWNUNHANDLED': '_get_total_services_unknown_unhandled', 'TOTALSERVICEPROBLEMS': '_get_total_service_problems', 'TOTALSERVICEPROBLEMSUNHANDLED': '_get_total_service_problems_unhandled', 'LONGDATETIME': '_get_long_date_time', 'SHORTDATETIME': '_get_short_date_time', 'DATE': '_get_date', 'TIME': '_get_time', 'TIMET': '_get_timet', 'PROCESSSTARTTIME': '_get_process_start_time', 'EVENTSTARTTIME': '_get_events_start_time', } output_macros = [ 'HOSTOUTPUT', 'HOSTPERFDATA', 'HOSTACKAUTHOR', 'HOSTACKCOMMENT', 'SERVICEOUTPUT', 'SERVICEPERFDATA', 'SERVICEACKAUTHOR', 'SERVICEACKCOMMENT' ] def init(self, conf): """Init 
macroresolver instance with conf. Must be called once. :param conf: conf to load :type conf: :return: None """ # For searching class and elements for ondemand # we need link to types self.conf = conf self.lists_on_demand = [] self.hosts = conf.hosts # For special void host_name handling... self.host_class = self.hosts.inner_class self.lists_on_demand.append(self.hosts) self.services = conf.services self.contacts = conf.contacts self.lists_on_demand.append(self.contacts) self.hostgroups = conf.hostgroups self.lists_on_demand.append(self.hostgroups) self.commands = conf.commands self.servicegroups = conf.servicegroups self.lists_on_demand.append(self.servicegroups) self.contactgroups = conf.contactgroups self.lists_on_demand.append(self.contactgroups) self.illegal_macro_output_chars = conf.illegal_macro_output_chars # Try cache :) # self.cache = {} def _get_macros(self, chain): """Get all macros of a chain Cut '$' char and create a dict with the following structure:: { 'MacroSTR1' : {'val': '', 'type': 'unknown'} 'MacroSTR2' : {'val': '', 'type': 'unknown'} } :param chain: chain to parse :type chain: str :return: dict with macro parsed as key :rtype: dict """ # if chain in self.cache: # return self.cache[chain] regex = re.compile(r'(\$)') elts = regex.split(chain) macros = {} in_macro = False for elt in elts: if elt == '$': in_macro = not in_macro elif in_macro: macros[elt] = {'val': '', 'type': 'unknown'} # self.cache[chain] = macros if '' in macros: del macros[''] return macros def _get_value_from_element(self, elt, prop): """Get value from a element's property the property may be a function to call. 
:param elt: element :type elt: object :param prop: element property :type prop: str :return: getattr(elt, prop) or getattr(elt, prop)() (call) :rtype: str """ try: value = getattr(elt, prop) if callable(value): return unicode(value()) else: return unicode(value) except AttributeError, exp: # Return no value return '' except UnicodeError, exp: if isinstance(value, str): return unicode(value, 'utf8', errors='ignore') else: return '' def _delete_unwanted_caracters(self, chain): """Remove not wanted char from chain unwanted char are illegal_macro_output_chars attribute :param chain: chain to remove char from :type chain: str :return: chain cleaned :rtype: str """ for char in self.illegal_macro_output_chars: chain = chain.replace(char, '') return chain def get_env_macros(self, data): """Get all environment macros from data For each object in data :: * Fetch all macros in object.__class__.macros * Fetch all customs macros in o.custom :param data: data to get macro :type data: :return: dict with macro name as key and macro value as value :rtype: dict """ env = {} for obj in data: cls = obj.__class__ macros = cls.macros for macro in macros: if macro.startswith("USER"): break prop = macros[macro] value = self._get_value_from_element(obj, prop) env['NAGIOS_%s' % macro] = value if hasattr(obj, 'customs'): # make NAGIOS__HOSTMACADDR from _MACADDR for cmacro in obj.customs: new_env_name = 'NAGIOS__' + obj.__class__.__name__.upper() + cmacro[1:].upper() env[new_env_name] = obj.customs[cmacro] return env def resolve_simple_macros_in_string(self, c_line, data, args=None): """Replace macro in the command line with the real value :param c_line: command line to modify :type c_line: str :param data: objects list, use to look for a specific macro :type data: :param args: args given to the command line, used to get "ARGN" macros. 
:type args: :return: command line with '$MACRO$' replaced with values :rtype: str """ # Now we prepare the classes for looking at the class.macros data.append(self) # For getting global MACROS if hasattr(self, 'conf'): data.append(self.conf) # For USERN macros clss = [d.__class__ for d in data] # we should do some loops for nested macros # like $USER1$ hiding like a ninja in a $ARG2$ Macro. And if # $USER1$ is pointing to $USER34$ etc etc, we should loop # until we reach the bottom. So the last loop is when we do # not still have macros :) still_got_macros = True nb_loop = 0 while still_got_macros: nb_loop += 1 # Ok, we want the macros in the command line macros = self._get_macros(c_line) # We can get out if we do not have macros this loop still_got_macros = (len(macros) != 0) # print "Still go macros:", still_got_macros # Put in the macros the type of macro for all macros self._get_type_of_macro(macros, clss) # Now we get values from elements for macro in macros: # If type ARGN, look at ARGN cutting if macros[macro]['type'] == 'ARGN' and args is not None: macros[macro]['val'] = self._resolve_argn(macro, args) macros[macro]['type'] = 'resolved' # If class, get value from properties if macros[macro]['type'] == 'class': cls = macros[macro]['class'] for elt in data: if elt is not None and elt.__class__ == cls: prop = cls.macros[macro] macros[macro]['val'] = self._get_value_from_element(elt, prop) # Now check if we do not have a 'output' macro. If so, we must # delete all special characters that can be dangerous if macro in self.output_macros: macros[macro]['val'] = \ self._delete_unwanted_caracters(macros[macro]['val']) if macros[macro]['type'] == 'CUSTOM': cls_type = macros[macro]['class'] # Beware : only cut the first _HOST value, so the macro name can have it on it.. 
macro_name = re.split('_' + cls_type, macro, 1)[1].upper() # Ok, we've got the macro like MAC_ADDRESS for _HOSTMAC_ADDRESS # Now we get the element in data that have the type HOST # and we check if it got the custom value for elt in data: if elt is not None and elt.__class__.my_type.upper() == cls_type: if '_' + macro_name in elt.customs: macros[macro]['val'] = elt.customs['_' + macro_name] # Then look on the macromodulations, in reserver order, so # the last to set, will be the firt to have. (yes, don't want to play # with break and such things sorry...) mms = getattr(elt, 'macromodulations', []) for macromod in mms[::-1]: # Look if the modulation got the value, # but also if it's currently active if '_' + macro_name in macromod.customs and macromod.is_active(): macros[macro]['val'] = macromod.customs['_' + macro_name] if macros[macro]['type'] == 'ONDEMAND': macros[macro]['val'] = self._resolve_ondemand(macro, data) # We resolved all we can, now replace the macro in the command call for macro in macros: c_line = c_line.replace('$' + macro + '$', macros[macro]['val']) # A $$ means we want a $, it's not a macro! 
# We replace $$ by a big dirty thing to be sure to not misinterpret it c_line = c_line.replace("$$", "DOUBLEDOLLAR") if nb_loop > 32: # too much loop, we exit still_got_macros = False # We now replace the big dirty token we made by only a simple $ c_line = c_line.replace("DOUBLEDOLLAR", "$") # print "Retuning c_line", c_line.strip() return c_line.strip() def resolve_command(self, com, data): """Resolve command macros with data :param com: check / event handler or command call object :type com: object :param data: objects list, use to look for a specific macro :type data: :return: command line with '$MACRO$' replaced with values :rtype: str """ c_line = com.command.command_line return self.resolve_simple_macros_in_string(c_line, data, args=com.args) def _get_type_of_macro(self, macros, clss): r"""Set macros types Example:: ARG\d -> ARGN, HOSTBLABLA -> class one and set Host in class) _HOSTTOTO -> HOST CUSTOM MACRO TOTO SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of the service Load of host srv-1 :param macros: macros list :type macros: list[str] :param clss: classes list, used to tag class macros :type clss: :return: None """ for macro in macros: # ARGN Macros if re.match(r'ARG\d', macro): macros[macro]['type'] = 'ARGN' continue # USERN macros # are managed in the Config class, so no # need to look that here elif re.match(r'_HOST\w', macro): macros[macro]['type'] = 'CUSTOM' macros[macro]['class'] = 'HOST' continue elif re.match(r'_SERVICE\w', macro): macros[macro]['type'] = 'CUSTOM' macros[macro]['class'] = 'SERVICE' # value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1] continue elif re.match(r'_CONTACT\w', macro): macros[macro]['type'] = 'CUSTOM' macros[macro]['class'] = 'CONTACT' continue # On demand macro elif len(macro.split(':')) > 1: macros[macro]['type'] = 'ONDEMAND' continue # OK, classical macro... 
for cls in clss: if macro in cls.macros: macros[macro]['type'] = 'class' macros[macro]['class'] = cls continue def _resolve_argn(self, macro, args): """Get argument from macro name ie : $ARG3$ -> args[2] :param macro: macro to parse :type macro: :param args: args given to command line :type args: :return: argument at position N-1 in args table (where N is the int parsed) :rtype: None | str """ # first, get the number of args _id = None matches = re.search(r'ARG(?P<id>\d+)', macro) if matches is not None: _id = int(matches.group('id')) - 1 try: return args[_id] except IndexError: return '' def _resolve_ondemand(self, macro, data): """Get on demand macro value :param macro: macro to parse :type macro: :param data: data to get value from :type data: :return: macro value :rtype: str """ # print "\nResolving macro", macro elts = macro.split(':') nb_parts = len(elts) macro_name = elts[0] # Len 3 == service, 2 = all others types... if nb_parts == 3: val = '' # print "Got a Service on demand asking...", elts (host_name, service_description) = (elts[1], elts[2]) # host_name can be void, so it's the host in data # that is important. 
We use our self.host_class to # find the host in the data :) if host_name == '': for elt in data: if elt is not None and elt.__class__ == self.host_class: host_name = elt.host_name # Ok now we get service serv = self.services.find_srv_by_name_and_hostname(host_name, service_description) if serv is not None: cls = serv.__class__ prop = cls.macros[macro_name] val = self._get_value_from_element(serv, prop) # print "Got val:", val return val # Ok, service was easy, now hard part else: val = '' elt_name = elts[1] # Special case: elt_name can be void # so it's the host where it apply if elt_name == '': for elt in data: if elt is not None and elt.__class__ == self.host_class: elt_name = elt.host_name for od_list in self.lists_on_demand: cls = od_list.inner_class # We search our type by looking at the macro if macro_name in cls.macros: prop = cls.macros[macro_name] i = od_list.find_by_name(elt_name) if i is not None: val = self._get_value_from_element(i, prop) # Ok we got our value :) break return val return '' def _get_long_date_time(self): """Get long date time Example : Fri 15 May 11:42:39 CEST 2009 :return: long date local time :rtype: str TODO: Should be moved to util TODO: Should consider timezone """ return time.strftime("%a %d %b %H:%M:%S %Z %Y").decode('UTF-8', 'ignore') def _get_short_date_time(self): """Get short date time Example : 10-13-2000 00:30:28 :return: short date local time :rtype: str TODO: Should be moved to util TODO: Should consider timezone """ return time.strftime("%d-%m-%Y %H:%M:%S") def _get_date(self): """Get date Example : 10-13-2000 :return: local date :rtype: str TODO: Should be moved to util TODO: Should consider timezone """ return time.strftime("%d-%m-%Y") def _get_time(self): """Get date time Example : 00:30:28 :return: date local time :rtype: str TODO: Should be moved to util TODO: Should consider timezone """ return time.strftime("%H:%M:%S") def _get_timet(self): """Get epoch time Example : 1437143291 :return: timestamp :rtype: str 
TODO: Should be moved to util TODO: Should consider timezone """ return str(int(time.time())) def _tot_hosts_by_state(self, state): """Generic function to get the number of host in the specified state :param state: state to filter on :type state: :return: number of host in state *state* :rtype: int TODO: Should be moved """ return sum(1 for h in self.hosts if h.state == state) _get_total_hosts_up = lambda s: s._tot_hosts_by_state('UP') _get_total_hosts_down = lambda s: s._tot_hosts_by_state('DOWN') _get_total_hosts_unreachable = lambda s: s._tot_hosts_by_state('UNREACHABLE') def _get_total_hosts_unreachable_unhandled(self): """DOES NOTHING( Should get the number of unreachable hosts not handled) :return: 0 always :rtype: int TODO: Implement this """ return 0 def _get_total_hosts_problems(self): """Get the number of hosts that are a problem :return: number of hosts with is_problem attribute True :rtype: int """ return sum(1 for h in self.hosts if h.is_problem) def _get_total_hosts_problems_unhandled(self): """DOES NOTHING( Should get the number of host problems not handled) :return: 0 always :rtype: int TODO: Implement this """ return 0 def _tot_services_by_state(self, state): """Generic function to get the number of service in the specified state :param state: state to filter on :type state: :return: number of service in state *state* :rtype: int TODO: Should be moved """ return sum(1 for s in self.services if s.state == state) _get_total_service_ok = lambda s: s._tot_services_by_state('OK') _get_total_service_warning = lambda s: s._tot_services_by_state('WARNING') _get_total_service_critical = lambda s: s._tot_services_by_state('CRITICAL') _get_total_service_unknown = lambda s: s._tot_services_by_state('UNKNOWN') def _get_total_services_warning_unhandled(self): """DOES NOTHING (Should get the number of warning services not handled) :return: 0 always :rtype: int TODO: Implement this """ return 0 def _get_total_services_critical_unhandled(self): """DOES NOTHING 
(Should get the number of critical services not handled) :return: 0 always :rtype: int TODO: Implement this """ return 0 def _get_total_services_unknown_unhandled(self): """DOES NOTHING (Should get the number of unknown services not handled) :return: 0 always :rtype: int TODO: Implement this """ return 0 def _get_total_service_problems(self): """Get the number of services that are a problem :return: number of services with is_problem attribute True :rtype: int """ return sum(1 for s in self.services if s.is_problem) def _get_total_service_problems_unhandled(self): """DOES NOTHING (Should get the number of service problems not handled) :return: 0 always :rtype: int TODO: Implement this """ return 0 def _get_process_start_time(self): """DOES NOTHING ( Should get process start time) :return: 0 always :rtype: int TODO: Implement this """ return 0 def _get_events_start_time(self): """DOES NOTHING ( Should get events start time) :return: 0 always :rtype: int TODO: Implement this """ return 0
agpl-3.0
-6,827,662,738,583,955,000
35.442748
100
0.556054
false
4.076161
false
false
false
vitormazzi/django-jython
doj/backends/zxjdbc/oracle/query.py
1
6130
""" Custom Query class for Oracle. Derived from: django.db.models.sql.query.Query """ import datetime from django.db.backends import util # Cache. Maps default query class to new Oracle query class. _classes = {} def query_class(QueryClass, Database): """ Returns a custom django.db.models.sql.query.Query subclass that is appropriate for Oracle. The 'Database' module (cx_Oracle) is passed in here so that all the setup required to import it only needs to be done by the calling module. """ global _classes try: return _classes[QueryClass] except KeyError: pass class OracleQuery(QueryClass): def resolve_columns(self, row, fields=()): index_start = len(self.extra_select.keys()) values = [self.convert_values(v, type(v)) for v in row[:index_start]] for value, field in map(None, row[index_start:], fields): values.append(self.convert_values(value, field)) return values def convert_values(self, value, field): from django.db.models.fields import DateField, DateTimeField, \ TimeField, BooleanField, NullBooleanField, DecimalField, FloatField, Field # Oracle stores empty strings as null. We need to undo this in # order to adhere to the Django convention of using the empty # string instead of null, but only if the field accepts the # empty string. 
if value is None: pass elif value is None and isinstance(field, Field) and field.empty_strings_allowed: value = u'' # Convert 1 or 0 to True or False elif isinstance(value, float): value = float(value) # Added 04-26-2009 to repair "Invalid literal for int() base 10" error elif isinstance(value,int): value = int(value) elif field is not None and field.get_internal_type() == 'AutoField': value = int(float(value)) elif value in (1, 0) and field is not None and field.get_internal_type() in ('BooleanField', 'NullBooleanField'): value = bool(value) # Force floats to the correct type elif field is not None and field.get_internal_type() == 'FloatField': value = float(value) # Convert floats to decimals elif field is not None and field.get_internal_type() == 'DecimalField': value = util.typecast_decimal(field.format_number(value)) elif field is not None and field.get_internal_type() == 'SmallIntegerField': value = util.typecast_decimal(field.format_number(value)) return value def as_sql(self, with_limits=True, with_col_aliases=False): """ Creates the SQL for this query. Returns the SQL string and list of parameters. This is overriden from the original Query class to handle the additional SQL Oracle requires to emulate LIMIT and OFFSET. If 'with_limits' is False, any limit/offset information is not included in the query. """ # The `do_offset` flag indicates whether we need to construct # the SQL needed to use limit/offset with Oracle. do_offset = with_limits and (self.high_mark is not None or self.low_mark) if not do_offset: sql, params = super(OracleQuery, self).as_sql(with_limits=False, with_col_aliases=with_col_aliases) else: # `get_columns` needs to be called before `get_ordering` to # populate `_select_alias`. 
self.pre_sql_setup() self.get_columns() #ordering = self.get_ordering() # # Removed Ordering on 03/27/2009 as it caused error: # TypeError: sequence item 0: expected string, list found # ordering = False # Oracle's ROW_NUMBER() function requires an ORDER BY clause. if ordering: rn_orderby = ', '.join(ordering) else: # Create a default ORDER BY since none was specified. qn = self.quote_name_unless_alias opts = self.model._meta rn_orderby = '%s.%s' % (qn(opts.db_table), qn(opts.fields[0].db_column or opts.fields[0].column)) # Ensure the base query SELECTs our special "_RN" column self.extra_select['_RN'] = ('ROW_NUMBER() OVER (ORDER BY %s)' % rn_orderby, '') sql, params = super(OracleQuery, self).as_sql(with_limits=False, with_col_aliases=True) # Wrap the base query in an outer SELECT * with boundaries on # the "_RN" column. This is the canonical way to emulate LIMIT # and OFFSET on Oracle. sql = 'SELECT * FROM (%s) WHERE "_RN" > %d' % (sql, self.low_mark) if self.high_mark is not None: sql = '%s AND "_RN" <= %d' % (sql, self.high_mark) return sql, params def set_limits(self, low=None, high=None): super(OracleQuery, self).set_limits(low, high) # We need to select the row number for the LIMIT/OFFSET sql. # A placeholder is added to extra_select now, because as_sql is # too late to be modifying extra_select. However, the actual sql # depends on the ordering, so that is generated in as_sql. self.extra_select['_RN'] = ('1', '') def clear_limits(self): super(OracleQuery, self).clear_limits() if '_RN' in self.extra_select: del self.extra_select['_RN'] _classes[QueryClass] = OracleQuery return OracleQuery
bsd-3-clause
7,167,331,611,330,312,000
44.073529
125
0.558564
false
4.490842
false
false
false
simplegeo/sqlalchemy
lib/sqlalchemy/dialects/mssql/base.py
1
48621
# mssql.py """Support for the Microsoft SQL Server database. Connecting ---------- See the individual driver sections below for details on connecting. Auto Increment Behavior ----------------------- ``IDENTITY`` columns are supported by using SQLAlchemy ``schema.Sequence()`` objects. In other words:: Table('test', mss_engine, Column('id', Integer, Sequence('blah',100,10), primary_key=True), Column('name', String(20)) ).create() would yield:: CREATE TABLE test ( id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY, name VARCHAR(20) NULL, ) Note that the ``start`` and ``increment`` values for sequences are optional and will default to 1,1. Implicit ``autoincrement`` behavior works the same in MSSQL as it does in other dialects and results in an ``IDENTITY`` column. * Support for ``SET IDENTITY_INSERT ON`` mode (automagic on / off for ``INSERT`` s) * Support for auto-fetching of ``@@IDENTITY/@@SCOPE_IDENTITY()`` on ``INSERT`` Collation Support ----------------- MSSQL specific string types support a collation parameter that creates a column-level specific collation for the column. The collation parameter accepts a Windows Collation Name or a SQL Collation Name. Supported types are MSChar, MSNChar, MSString, MSNVarchar, MSText, and MSNText. For example:: Column('login', String(32, collation='Latin1_General_CI_AS')) will yield:: login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL LIMIT/OFFSET Support -------------------- MSSQL has no support for the LIMIT or OFFSET keysowrds. LIMIT is supported directly through the ``TOP`` Transact SQL keyword:: select.limit will yield:: SELECT TOP n If using SQL Server 2005 or above, LIMIT with OFFSET support is available through the ``ROW_NUMBER OVER`` construct. For versions below 2005, LIMIT with OFFSET usage will fail. Nullability ----------- MSSQL has support for three levels of column nullability. 
The default nullability allows nulls and is explicit in the CREATE TABLE construct:: name VARCHAR(20) NULL If ``nullable=None`` is specified then no specification is made. In other words the database's configured default is used. This will render:: name VARCHAR(20) If ``nullable`` is ``True`` or ``False`` then the column will be ``NULL` or ``NOT NULL`` respectively. Date / Time Handling -------------------- DATE and TIME are supported. Bind parameters are converted to datetime.datetime() objects as required by most MSSQL drivers, and results are processed from strings if needed. The DATE and TIME types are not available for MSSQL 2005 and previous - if a server version below 2008 is detected, DDL for these types will be issued as DATETIME. Compatibility Levels -------------------- MSSQL supports the notion of setting compatibility levels at the database level. This allows, for instance, to run a database that is compatibile with SQL2000 while running on a SQL2005 database server. ``server_version_info`` will always retrun the database server version information (in this case SQL2005) and not the compatibiility level information. Because of this, if running under a backwards compatibility mode SQAlchemy may attempt to use T-SQL statements that are unable to be parsed by the database server. 
Known Issues ------------ * No support for more than one ``IDENTITY`` column per table """ import datetime, decimal, inspect, operator, sys, re import itertools from sqlalchemy import sql, schema as sa_schema, exc, util from sqlalchemy.sql import select, compiler, expression, \ operators as sql_operators, \ functions as sql_functions, util as sql_util from sqlalchemy.engine import default, base, reflection from sqlalchemy import types as sqltypes from sqlalchemy import processors from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \ FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\ VARBINARY, BLOB from sqlalchemy.dialects.mssql import information_schema as ischema MS_2008_VERSION = (10,) MS_2005_VERSION = (9,) MS_2000_VERSION = (8,) RESERVED_WORDS = set( ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization', 'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade', 'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce', 'collate', 'column', 'commit', 'compute', 'constraint', 'contains', 'containstable', 'continue', 'convert', 'create', 'cross', 'current', 'current_date', 'current_time', 'current_timestamp', 'current_user', 'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default', 'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double', 'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec', 'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor', 'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full', 'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity', 'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert', 'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like', 'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not', 'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource', 'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer', 'over', 'percent', 'pivot', 
'plan', 'precision', 'primary', 'print', 'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext', 'reconfigure', 'references', 'replication', 'restore', 'restrict', 'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount', 'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select', 'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics', 'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top', 'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union', 'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values', 'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with', 'writetext', ]) class REAL(sqltypes.Float): """A type for ``real`` numbers.""" __visit_name__ = 'REAL' def __init__(self): super(REAL, self).__init__(precision=24) class TINYINT(sqltypes.Integer): __visit_name__ = 'TINYINT' # MSSQL DATE/TIME types have varied behavior, sometimes returning # strings. MSDate/TIME check for everything, and always # filter bind parameters into datetime objects (required by pyodbc, # not sure about other dialects). 
class _MSDate(sqltypes.Date): def bind_processor(self, dialect): def process(value): if type(value) == datetime.date: return datetime.datetime(value.year, value.month, value.day) else: return value return process _reg = re.compile(r"(\d+)-(\d+)-(\d+)") def result_processor(self, dialect, coltype): def process(value): if isinstance(value, datetime.datetime): return value.date() elif isinstance(value, basestring): return datetime.date(*[ int(x or 0) for x in self._reg.match(value).groups() ]) else: return value return process class TIME(sqltypes.TIME): def __init__(self, precision=None, **kwargs): self.precision = precision super(TIME, self).__init__() __zero_date = datetime.date(1900, 1, 1) def bind_processor(self, dialect): def process(value): if isinstance(value, datetime.datetime): value = datetime.datetime.combine( self.__zero_date, value.time()) elif isinstance(value, datetime.time): value = datetime.datetime.combine(self.__zero_date, value) return value return process _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d+))?") def result_processor(self, dialect, coltype): def process(value): if isinstance(value, datetime.datetime): return value.time() elif isinstance(value, basestring): return datetime.time(*[ int(x or 0) for x in self._reg.match(value).groups()]) else: return value return process class _DateTimeBase(object): def bind_processor(self, dialect): def process(value): if type(value) == datetime.date: return datetime.datetime(value.year, value.month, value.day) else: return value return process class _MSDateTime(_DateTimeBase, sqltypes.DateTime): pass class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime): __visit_name__ = 'SMALLDATETIME' class DATETIME2(_DateTimeBase, sqltypes.DateTime): __visit_name__ = 'DATETIME2' def __init__(self, precision=None, **kwargs): self.precision = precision # TODO: is this not an Interval ? 
class DATETIMEOFFSET(sqltypes.TypeEngine): __visit_name__ = 'DATETIMEOFFSET' def __init__(self, precision=None, **kwargs): self.precision = precision class _StringType(object): """Base for MSSQL string types.""" def __init__(self, collation=None): self.collation = collation class TEXT(_StringType, sqltypes.TEXT): """MSSQL TEXT type, for variable-length text up to 2^31 characters.""" def __init__(self, *args, **kw): """Construct a TEXT. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.Text.__init__(self, *args, **kw) class NTEXT(_StringType, sqltypes.UnicodeText): """MSSQL NTEXT type, for variable-length unicode text up to 2^30 characters.""" __visit_name__ = 'NTEXT' def __init__(self, *args, **kwargs): """Construct a NTEXT. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kwargs.pop('collation', None) _StringType.__init__(self, collation) length = kwargs.pop('length', None) sqltypes.UnicodeText.__init__(self, length, **kwargs) class VARCHAR(_StringType, sqltypes.VARCHAR): """MSSQL VARCHAR type, for variable-length non-Unicode data with a maximum of 8,000 characters.""" def __init__(self, *args, **kw): """Construct a VARCHAR. :param length: Optinal, maximum data length, in characters. :param convert_unicode: defaults to False. If True, convert ``unicode`` data sent to the database to a ``str`` bytestring, and convert bytestrings coming back from the database into ``unicode``. Bytestrings are encoded using the dialect's :attr:`~sqlalchemy.engine.base.Dialect.encoding`, which defaults to `utf-8`. If False, may be overridden by :attr:`sqlalchemy.engine.base.Dialect.convert_unicode`. :param collation: Optional, a column-level collation for this string value. 
Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.VARCHAR.__init__(self, *args, **kw) class NVARCHAR(_StringType, sqltypes.NVARCHAR): """MSSQL NVARCHAR type. For variable-length unicode character data up to 4,000 characters.""" def __init__(self, *args, **kw): """Construct a NVARCHAR. :param length: Optional, Maximum data length, in characters. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.NVARCHAR.__init__(self, *args, **kw) class CHAR(_StringType, sqltypes.CHAR): """MSSQL CHAR type, for fixed-length non-Unicode data with a maximum of 8,000 characters.""" def __init__(self, *args, **kw): """Construct a CHAR. :param length: Optinal, maximum data length, in characters. :param convert_unicode: defaults to False. If True, convert ``unicode`` data sent to the database to a ``str`` bytestring, and convert bytestrings coming back from the database into ``unicode``. Bytestrings are encoded using the dialect's :attr:`~sqlalchemy.engine.base.Dialect.encoding`, which defaults to `utf-8`. If False, may be overridden by :attr:`sqlalchemy.engine.base.Dialect.convert_unicode`. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.CHAR.__init__(self, *args, **kw) class NCHAR(_StringType, sqltypes.NCHAR): """MSSQL NCHAR type. For fixed-length unicode character data up to 4,000 characters.""" def __init__(self, *args, **kw): """Construct an NCHAR. :param length: Optional, Maximum data length, in characters. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. 
""" collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.NCHAR.__init__(self, *args, **kw) class IMAGE(sqltypes.LargeBinary): __visit_name__ = 'IMAGE' class BIT(sqltypes.TypeEngine): __visit_name__ = 'BIT' class MONEY(sqltypes.TypeEngine): __visit_name__ = 'MONEY' class SMALLMONEY(sqltypes.TypeEngine): __visit_name__ = 'SMALLMONEY' class UNIQUEIDENTIFIER(sqltypes.TypeEngine): __visit_name__ = "UNIQUEIDENTIFIER" class SQL_VARIANT(sqltypes.TypeEngine): __visit_name__ = 'SQL_VARIANT' # old names. MSDateTime = _MSDateTime MSDate = _MSDate MSReal = REAL MSTinyInteger = TINYINT MSTime = TIME MSSmallDateTime = SMALLDATETIME MSDateTime2 = DATETIME2 MSDateTimeOffset = DATETIMEOFFSET MSText = TEXT MSNText = NTEXT MSString = VARCHAR MSNVarchar = NVARCHAR MSChar = CHAR MSNChar = NCHAR MSBinary = BINARY MSVarBinary = VARBINARY MSImage = IMAGE MSBit = BIT MSMoney = MONEY MSSmallMoney = SMALLMONEY MSUniqueIdentifier = UNIQUEIDENTIFIER MSVariant = SQL_VARIANT ischema_names = { 'int' : INTEGER, 'bigint': BIGINT, 'smallint' : SMALLINT, 'tinyint' : TINYINT, 'varchar' : VARCHAR, 'nvarchar' : NVARCHAR, 'char' : CHAR, 'nchar' : NCHAR, 'text' : TEXT, 'ntext' : NTEXT, 'decimal' : DECIMAL, 'numeric' : NUMERIC, 'float' : FLOAT, 'datetime' : DATETIME, 'datetime2' : DATETIME2, 'datetimeoffset' : DATETIMEOFFSET, 'date': DATE, 'time': TIME, 'smalldatetime' : SMALLDATETIME, 'binary' : BINARY, 'varbinary' : VARBINARY, 'bit': BIT, 'real' : REAL, 'image' : IMAGE, 'timestamp': TIMESTAMP, 'money': MONEY, 'smallmoney': SMALLMONEY, 'uniqueidentifier': UNIQUEIDENTIFIER, 'sql_variant': SQL_VARIANT, } class MSTypeCompiler(compiler.GenericTypeCompiler): def _extend(self, spec, type_): """Extend a string-type declaration with standard SQL COLLATE annotations. 
""" if getattr(type_, 'collation', None): collation = 'COLLATE %s' % type_.collation else: collation = None if type_.length: spec = spec + "(%d)" % type_.length return ' '.join([c for c in (spec, collation) if c is not None]) def visit_FLOAT(self, type_): precision = getattr(type_, 'precision', None) if precision is None: return "FLOAT" else: return "FLOAT(%(precision)s)" % {'precision': precision} def visit_REAL(self, type_): return "REAL" def visit_TINYINT(self, type_): return "TINYINT" def visit_DATETIMEOFFSET(self, type_): if type_.precision: return "DATETIMEOFFSET(%s)" % type_.precision else: return "DATETIMEOFFSET" def visit_TIME(self, type_): precision = getattr(type_, 'precision', None) if precision: return "TIME(%s)" % precision else: return "TIME" def visit_DATETIME2(self, type_): precision = getattr(type_, 'precision', None) if precision: return "DATETIME2(%s)" % precision else: return "DATETIME2" def visit_SMALLDATETIME(self, type_): return "SMALLDATETIME" def visit_unicode(self, type_): return self.visit_NVARCHAR(type_) def visit_unicode_text(self, type_): return self.visit_NTEXT(type_) def visit_NTEXT(self, type_): return self._extend("NTEXT", type_) def visit_TEXT(self, type_): return self._extend("TEXT", type_) def visit_VARCHAR(self, type_): return self._extend("VARCHAR", type_) def visit_CHAR(self, type_): return self._extend("CHAR", type_) def visit_NCHAR(self, type_): return self._extend("NCHAR", type_) def visit_NVARCHAR(self, type_): return self._extend("NVARCHAR", type_) def visit_date(self, type_): if self.dialect.server_version_info < MS_2008_VERSION: return self.visit_DATETIME(type_) else: return self.visit_DATE(type_) def visit_time(self, type_): if self.dialect.server_version_info < MS_2008_VERSION: return self.visit_DATETIME(type_) else: return self.visit_TIME(type_) def visit_large_binary(self, type_): return self.visit_IMAGE(type_) def visit_IMAGE(self, type_): return "IMAGE" def visit_boolean(self, type_): return 
self.visit_BIT(type_) def visit_BIT(self, type_): return "BIT" def visit_MONEY(self, type_): return "MONEY" def visit_SMALLMONEY(self, type_): return 'SMALLMONEY' def visit_UNIQUEIDENTIFIER(self, type_): return "UNIQUEIDENTIFIER" def visit_SQL_VARIANT(self, type_): return 'SQL_VARIANT' class MSExecutionContext(default.DefaultExecutionContext): _enable_identity_insert = False _select_lastrowid = False _result_proxy = None _lastrowid = None def pre_exec(self): """Activate IDENTITY_INSERT if needed.""" if self.isinsert: tbl = self.compiled.statement.table seq_column = tbl._autoincrement_column insert_has_sequence = seq_column is not None if insert_has_sequence: self._enable_identity_insert = \ seq_column.key in self.compiled_parameters[0] else: self._enable_identity_insert = False self._select_lastrowid = insert_has_sequence and \ not self.compiled.returning and \ not self._enable_identity_insert and \ not self.executemany if self._enable_identity_insert: self.cursor.execute("SET IDENTITY_INSERT %s ON" % self.dialect.identifier_preparer.format_table(tbl)) def post_exec(self): """Disable IDENTITY_INSERT if enabled.""" if self._select_lastrowid: if self.dialect.use_scope_identity: self.cursor.execute( "SELECT scope_identity() AS lastrowid", ()) else: self.cursor.execute("SELECT @@identity AS lastrowid", ()) # fetchall() ensures the cursor is consumed without closing it row = self.cursor.fetchall()[0] self._lastrowid = int(row[0]) if (self.isinsert or self.isupdate or self.isdelete) and \ self.compiled.returning: self._result_proxy = base.FullyBufferedResultProxy(self) if self._enable_identity_insert: self.cursor.execute( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer. 
format_table(self.compiled.statement.table) ) def get_lastrowid(self): return self._lastrowid def handle_dbapi_exception(self, e): if self._enable_identity_insert: try: self.cursor.execute( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer.\ format_table(self.compiled.statement.table) ) except: pass def get_result_proxy(self): if self._result_proxy: return self._result_proxy else: return base.ResultProxy(self) class MSSQLCompiler(compiler.SQLCompiler): returning_precedes_values = True extract_map = util.update_copy( compiler.SQLCompiler.extract_map, { 'doy': 'dayofyear', 'dow': 'weekday', 'milliseconds': 'millisecond', 'microseconds': 'microsecond' }) def __init__(self, *args, **kwargs): super(MSSQLCompiler, self).__init__(*args, **kwargs) self.tablealiases = {} def visit_now_func(self, fn, **kw): return "CURRENT_TIMESTAMP" def visit_current_date_func(self, fn, **kw): return "GETDATE()" def visit_length_func(self, fn, **kw): return "LEN%s" % self.function_argspec(fn, **kw) def visit_char_length_func(self, fn, **kw): return "LEN%s" % self.function_argspec(fn, **kw) def visit_concat_op(self, binary, **kw): return "%s + %s" % \ (self.process(binary.left, **kw), self.process(binary.right, **kw)) def visit_match_op(self, binary, **kw): return "CONTAINS (%s, %s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw)) def get_select_precolumns(self, select): """ MS-SQL puts TOP, it's version of LIMIT here """ if select._distinct or select._limit: s = select._distinct and "DISTINCT " or "" if select._limit: if not select._offset: s += "TOP %s " % (select._limit,) return s return compiler.SQLCompiler.get_select_precolumns(self, select) def limit_clause(self, select): # Limit in mssql is after the select keyword return "" def visit_select(self, select, **kwargs): """Look for ``LIMIT`` and OFFSET in a select statement, and if so tries to wrap it in a subquery with ``row_number()`` criterion. 
""" if not getattr(select, '_mssql_visit', None) and select._offset: # to use ROW_NUMBER(), an ORDER BY is required. orderby = self.process(select._order_by_clause) if not orderby: raise exc.InvalidRequestError('MSSQL requires an order_by when ' 'using an offset.') _offset = select._offset _limit = select._limit select._mssql_visit = True select = select.column( sql.literal_column("ROW_NUMBER() OVER (ORDER BY %s)" \ % orderby).label("mssql_rn") ).order_by(None).alias() limitselect = sql.select([c for c in select.c if c.key!='mssql_rn']) limitselect.append_whereclause("mssql_rn>%d" % _offset) if _limit is not None: limitselect.append_whereclause("mssql_rn<=%d" % (_limit + _offset)) return self.process(limitselect, iswrapper=True, **kwargs) else: return compiler.SQLCompiler.visit_select(self, select, **kwargs) def _schema_aliased_table(self, table): if getattr(table, 'schema', None) is not None: if table not in self.tablealiases: self.tablealiases[table] = table.alias() return self.tablealiases[table] else: return None def visit_table(self, table, mssql_aliased=False, **kwargs): if mssql_aliased: return super(MSSQLCompiler, self).visit_table(table, **kwargs) # alias schema-qualified tables alias = self._schema_aliased_table(table) if alias is not None: return self.process(alias, mssql_aliased=True, **kwargs) else: return super(MSSQLCompiler, self).visit_table(table, **kwargs) def visit_alias(self, alias, **kwargs): # translate for schema-qualified table aliases self.tablealiases[alias.original] = alias kwargs['mssql_aliased'] = True return super(MSSQLCompiler, self).visit_alias(alias, **kwargs) def visit_extract(self, extract, **kw): field = self.extract_map.get(extract.field, extract.field) return 'DATEPART("%s", %s)' % \ (field, self.process(extract.expr, **kw)) def visit_rollback_to_savepoint(self, savepoint_stmt): return ("ROLLBACK TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt)) def visit_column(self, column, result_map=None, **kwargs): if 
column.table is not None and \ (not self.isupdate and not self.isdelete) or self.is_subquery(): # translate for schema-qualified table aliases t = self._schema_aliased_table(column.table) if t is not None: converted = expression._corresponding_column_or_error( t, column) if result_map is not None: result_map[column.name.lower()] = \ (column.name, (column, ), column.type) return super(MSSQLCompiler, self).\ visit_column(converted, result_map=None, **kwargs) return super(MSSQLCompiler, self).visit_column(column, result_map=result_map, **kwargs) def visit_binary(self, binary, **kwargs): """Move bind parameters to the right-hand side of an operator, where possible. """ if ( isinstance(binary.left, expression._BindParamClause) and binary.operator == operator.eq and not isinstance(binary.right, expression._BindParamClause) ): return self.process( expression._BinaryExpression(binary.right, binary.left, binary.operator), **kwargs) else: if ( (binary.operator is operator.eq or binary.operator is operator.ne) and ( (isinstance(binary.left, expression._FromGrouping) and isinstance(binary.left.element, expression._ScalarSelect)) or (isinstance(binary.right, expression._FromGrouping) and isinstance(binary.right.element, expression._ScalarSelect)) or isinstance(binary.left, expression._ScalarSelect) or isinstance(binary.right, expression._ScalarSelect) ) ): op = binary.operator == operator.eq and "IN" or "NOT IN" return self.process( expression._BinaryExpression(binary.left, binary.right, op), **kwargs) return super(MSSQLCompiler, self).visit_binary(binary, **kwargs) def returning_clause(self, stmt, returning_cols): if self.isinsert or self.isupdate: target = stmt.table.alias("inserted") else: target = stmt.table.alias("deleted") adapter = sql_util.ClauseAdapter(target) def col_label(col): adapted = adapter.traverse(col) if isinstance(col, expression._Label): return adapted.label(c.key) else: return self.label_select_column(None, adapted, asfrom=False) columns = [ self.process( 
col_label(c), within_columns_clause=True, result_map=self.result_map ) for c in expression._select_iterables(returning_cols) ] return 'OUTPUT ' + ', '.join(columns) def label_select_column(self, select, column, asfrom): if isinstance(column, expression.Function): return column.label(None) else: return super(MSSQLCompiler, self).\ label_select_column(select, column, asfrom) def for_update_clause(self, select): # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which # SQLAlchemy doesn't use return '' def order_by_clause(self, select, **kw): order_by = self.process(select._order_by_clause, **kw) # MSSQL only allows ORDER BY in subqueries if there is a LIMIT if order_by and (not self.is_subquery() or select._limit): return " ORDER BY " + order_by else: return "" class MSSQLStrictCompiler(MSSQLCompiler): """A subclass of MSSQLCompiler which disables the usage of bind parameters where not allowed natively by MS-SQL. A dialect may use this compiler on a platform where native binds are used. """ ansi_bind_rules = True def visit_in_op(self, binary, **kw): kw['literal_binds'] = True return "%s IN %s" % ( self.process(binary.left, **kw), self.process(binary.right, **kw) ) def visit_notin_op(self, binary, **kw): kw['literal_binds'] = True return "%s NOT IN %s" % ( self.process(binary.left, **kw), self.process(binary.right, **kw) ) def visit_function(self, func, **kw): kw['literal_binds'] = True return super(MSSQLStrictCompiler, self).visit_function(func, **kw) def render_literal_value(self, value, type_): """ For date and datetime values, convert to a string format acceptable to MSSQL. That seems to be the so-called ODBC canonical date format which looks like this: yyyy-mm-dd hh:mi:ss.mmm(24h) For other data types, call the base class implementation. """ # datetime and date are both subclasses of datetime.date if issubclass(type(value), datetime.date): # SQL Server wants single quotes around the date string. 
return "'" + str(value) + "'" else: return super(MSSQLStrictCompiler, self).\ render_literal_value(value, type_) class MSDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): colspec = (self.preparer.format_column(column) + " " + self.dialect.type_compiler.process(column.type)) if column.nullable is not None: if not column.nullable or column.primary_key: colspec += " NOT NULL" else: colspec += " NULL" if column.table is None: raise exc.InvalidRequestError( "mssql requires Table-bound columns " "in order to generate DDL") seq_col = column.table._autoincrement_column # install a IDENTITY Sequence if we have an implicit IDENTITY column if seq_col is column: sequence = isinstance(column.default, sa_schema.Sequence) and \ column.default if sequence: start, increment = sequence.start or 1, \ sequence.increment or 1 else: start, increment = 1, 1 colspec += " IDENTITY(%s,%s)" % (start, increment) else: default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default return colspec def visit_drop_index(self, drop): return "\nDROP INDEX %s.%s" % ( self.preparer.quote_identifier(drop.element.table.name), self.preparer.quote( self._validate_identifier(drop.element.name, False), drop.element.quote) ) class MSIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS def __init__(self, dialect): super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[', final_quote=']') def _escape_identifier(self, value): return value def quote_schema(self, schema, force=True): """Prepare a quoted table and schema name.""" result = '.'.join([self.quote(x, force) for x in schema.split('.')]) return result class MSDialect(default.DefaultDialect): name = 'mssql' supports_default_values = True supports_empty_insert = False execution_ctx_cls = MSExecutionContext use_scope_identity = True max_identifier_length = 128 schema_name = "dbo" colspecs = { sqltypes.DateTime : _MSDateTime, 
sqltypes.Date : _MSDate, sqltypes.Time : TIME, } ischema_names = ischema_names supports_native_boolean = False supports_unicode_binds = True postfetch_lastrowid = True server_version_info = () statement_compiler = MSSQLCompiler ddl_compiler = MSDDLCompiler type_compiler = MSTypeCompiler preparer = MSIdentifierPreparer def __init__(self, query_timeout=None, use_scope_identity=True, max_identifier_length=None, schema_name=u"dbo", **opts): self.query_timeout = int(query_timeout or 0) self.schema_name = schema_name self.use_scope_identity = use_scope_identity self.max_identifier_length = int(max_identifier_length or 0) or \ self.max_identifier_length super(MSDialect, self).__init__(**opts) def do_savepoint(self, connection, name): util.warn("Savepoint support in mssql is experimental and " "may lead to data loss.") connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION") connection.execute("SAVE TRANSACTION %s" % name) def do_release_savepoint(self, connection, name): pass def initialize(self, connection): super(MSDialect, self).initialize(connection) if self.server_version_info[0] not in range(8, 17): # FreeTDS with version 4.2 seems to report here # a number like "95.10.255". Don't know what # that is. So emit warning. util.warn( "Unrecognized server version info '%s'. Version specific " "behaviors may not function properly. If using ODBC " "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, " "is configured in the FreeTDS configuration." % ".".join(str(x) for x in self.server_version_info) ) if self.server_version_info >= MS_2005_VERSION and \ 'implicit_returning' not in self.__dict__: self.implicit_returning = True def _get_default_schema_name(self, connection): user_name = connection.scalar("SELECT user_name() as user_name;") if user_name is not None: # now, get the default schema query = """ SELECT default_schema_name FROM sys.database_principals WHERE name = ? 
AND type = 'S' """ try: default_schema_name = connection.scalar(query, [user_name]) if default_schema_name is not None: return unicode(default_schema_name) except: pass return self.schema_name def has_table(self, connection, tablename, schema=None): current_schema = schema or self.default_schema_name columns = ischema.columns if current_schema: whereclause = sql.and_(columns.c.table_name==tablename, columns.c.table_schema==current_schema) else: whereclause = columns.c.table_name==tablename s = sql.select([columns], whereclause) c = connection.execute(s) return c.first() is not None @reflection.cache def get_schema_names(self, connection, **kw): s = sql.select([ischema.schemata.c.schema_name], order_by=[ischema.schemata.c.schema_name] ) schema_names = [r[0] for r in connection.execute(s)] return schema_names @reflection.cache def get_table_names(self, connection, schema=None, **kw): current_schema = schema or self.default_schema_name tables = ischema.tables s = sql.select([tables.c.table_name], sql.and_( tables.c.table_schema == current_schema, tables.c.table_type == u'BASE TABLE' ), order_by=[tables.c.table_name] ) table_names = [r[0] for r in connection.execute(s)] return table_names @reflection.cache def get_view_names(self, connection, schema=None, **kw): current_schema = schema or self.default_schema_name tables = ischema.tables s = sql.select([tables.c.table_name], sql.and_( tables.c.table_schema == current_schema, tables.c.table_type == u'VIEW' ), order_by=[tables.c.table_name] ) view_names = [r[0] for r in connection.execute(s)] return view_names # The cursor reports it is closed after executing the sp. @reflection.cache def get_indexes(self, connection, tablename, schema=None, **kw): current_schema = schema or self.default_schema_name col_finder = re.compile("(\w+)") full_tname = "%s.%s" % (current_schema, tablename) indexes = [] s = sql.text("exec sp_helpindex '%s'" % full_tname) rp = connection.execute(s) if rp.closed: # did not work for this setup. 
return [] for row in rp: if 'primary key' not in row['index_description']: indexes.append({ 'name' : row['index_name'], 'column_names' : col_finder.findall(row['index_keys']), 'unique': 'unique' in row['index_description'] }) return indexes @reflection.cache def get_view_definition(self, connection, viewname, schema=None, **kw): current_schema = schema or self.default_schema_name views = ischema.views s = sql.select([views.c.view_definition], sql.and_( views.c.table_schema == current_schema, views.c.table_name == viewname ), ) rp = connection.execute(s) if rp: view_def = rp.scalar() return view_def @reflection.cache def get_columns(self, connection, tablename, schema=None, **kw): # Get base columns current_schema = schema or self.default_schema_name columns = ischema.columns if current_schema: whereclause = sql.and_(columns.c.table_name==tablename, columns.c.table_schema==current_schema) else: whereclause = columns.c.table_name==tablename s = sql.select([columns], whereclause, order_by=[columns.c.ordinal_position]) c = connection.execute(s) cols = [] while True: row = c.fetchone() if row is None: break (name, type, nullable, charlen, numericprec, numericscale, default, collation) = ( row[columns.c.column_name], row[columns.c.data_type], row[columns.c.is_nullable] == 'YES', row[columns.c.character_maximum_length], row[columns.c.numeric_precision], row[columns.c.numeric_scale], row[columns.c.column_default], row[columns.c.collation_name] ) coltype = self.ischema_names.get(type, None) kwargs = {} if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText, MSNText, MSBinary, MSVarBinary, sqltypes.LargeBinary): kwargs['length'] = charlen if collation: kwargs['collation'] = collation if coltype == MSText or \ (coltype in (MSString, MSNVarchar) and charlen == -1): kwargs.pop('length') if coltype is None: util.warn( "Did not recognize type '%s' of column '%s'" % (type, name)) coltype = sqltypes.NULLTYPE if issubclass(coltype, sqltypes.Numeric) and \ coltype is not 
MSReal: kwargs['scale'] = numericscale kwargs['precision'] = numericprec coltype = coltype(**kwargs) cdict = { 'name' : name, 'type' : coltype, 'nullable' : nullable, 'default' : default, 'autoincrement':False, } cols.append(cdict) # autoincrement and identity colmap = {} for col in cols: colmap[col['name']] = col # We also run an sp_columns to check for identity columns: cursor = connection.execute("sp_columns @table_name = '%s', " "@table_owner = '%s'" % (tablename, current_schema)) ic = None while True: row = cursor.fetchone() if row is None: break (col_name, type_name) = row[3], row[5] if type_name.endswith("identity") and col_name in colmap: ic = col_name colmap[col_name]['autoincrement'] = True colmap[col_name]['sequence'] = dict( name='%s_identity' % col_name) break cursor.close() if ic is not None and self.server_version_info >= MS_2005_VERSION: table_fullname = "%s.%s" % (current_schema, tablename) cursor = connection.execute( "select ident_seed('%s'), ident_incr('%s')" % (table_fullname, table_fullname) ) row = cursor.first() if row is not None and row[0] is not None: colmap[ic]['sequence'].update({ 'start' : int(row[0]), 'increment' : int(row[1]) }) return cols @reflection.cache def get_primary_keys(self, connection, tablename, schema=None, **kw): current_schema = schema or self.default_schema_name pkeys = [] # information_schema.referential_constraints RR = ischema.ref_constraints # information_schema.table_constraints TC = ischema.constraints # information_schema.constraint_column_usage: # the constrained column C = ischema.key_constraints.alias('C') # information_schema.constraint_column_usage: # the referenced column R = ischema.key_constraints.alias('R') # Primary key constraints s = sql.select([C.c.column_name, TC.c.constraint_type], sql.and_(TC.c.constraint_name == C.c.constraint_name, C.c.table_name == tablename, C.c.table_schema == current_schema) ) c = connection.execute(s) for row in c: if 'PRIMARY' in row[TC.c.constraint_type.name]: 
pkeys.append(row[0]) return pkeys @reflection.cache def get_foreign_keys(self, connection, tablename, schema=None, **kw): current_schema = schema or self.default_schema_name # Add constraints #information_schema.referential_constraints RR = ischema.ref_constraints # information_schema.table_constraints TC = ischema.constraints # information_schema.constraint_column_usage: # the constrained column C = ischema.key_constraints.alias('C') # information_schema.constraint_column_usage: # the referenced column R = ischema.key_constraints.alias('R') # Foreign key constraints s = sql.select([C.c.column_name, R.c.table_schema, R.c.table_name, R.c.column_name, RR.c.constraint_name, RR.c.match_option, RR.c.update_rule, RR.c.delete_rule], sql.and_(C.c.table_name == tablename, C.c.table_schema == current_schema, C.c.constraint_name == RR.c.constraint_name, R.c.constraint_name == RR.c.unique_constraint_name, C.c.ordinal_position == R.c.ordinal_position ), order_by = [ RR.c.constraint_name, R.c.ordinal_position]) # group rows by constraint ID, to handle multi-column FKs fkeys = [] fknm, scols, rcols = (None, [], []) def fkey_rec(): return { 'name' : None, 'constrained_columns' : [], 'referred_schema' : None, 'referred_table' : None, 'referred_columns' : [] } fkeys = util.defaultdict(fkey_rec) for r in connection.execute(s).fetchall(): scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r rec = fkeys[rfknm] rec['name'] = rfknm if not rec['referred_table']: rec['referred_table'] = rtbl if schema is not None or current_schema != rschema: rec['referred_schema'] = rschema local_cols, remote_cols = \ rec['constrained_columns'],\ rec['referred_columns'] local_cols.append(scol) remote_cols.append(rcol) return fkeys.values()
mit
-1,975,082,025,182,092,500
34.962278
80
0.55947
false
4.218376
false
false
false
pmuller/ipkg
ipkg/versions.py
1
1177
import __builtin__ # because we override sorted in this module import pkg_resources def compare(a, b): if a < b: return -1 elif a == b: return 0 else: # a > b return 1 def extract(item): if isinstance(item, dict): version = item['version'] revision = item['revision'] else: version = item.version revision = item.revision return parse(version), parse(str(revision)) def parse(version): """Parses a ``version`` string. Currently a simple wrapper around ``pkg_resources.parse_version()``, for API purpose. Parsing could change later. """ return pkg_resources.parse_version(version) def sorted(versions, parser=parse, reverse=False): """Returned sorted ``versions``. """ return __builtin__.sorted(versions, key=parser, cmp=compare, reverse=reverse) def most_recent(versions, parser=parse): """Returns the most recent version among ``versions``. * ``versions`` must be an iterable of versions. * ``parser`` defaults to ``parse`` which parses version strings. """ return sorted(versions, reverse=True)[0]
mit
-3,061,816,019,314,012,000
24.042553
72
0.620221
false
4.188612
false
false
false
pmdp/GIW
mongodb-1/consultas.py
1
10427
# -*- coding: utf-8 -*- from bottle import run, get, request, template from pymongo import MongoClient from os import linesep mongoclient = MongoClient() db = mongoclient.giw #Columnas para las tablas de los ejercicios 2, 3, 4, 5 y 7 all_table_data = ['Nombre de usuario', 'e-mail', 'Página web', 'Tarjeta de crédito', 'Hash de contraseña', 'Nombre', 'Apellido', 'Dirección', 'Aficiones', 'Fecha de nacimiento'] #Columnas para el ejercicio 6 mid_table_data = ['id', 'e-mail', 'Fecha de nacimiento'] #Función que recibe un cursor de mongo y prepara una lista para luego mostrarla por html def get_results_data(c): data = [] #Por cada elemento en el cursor devuelto en la consulta for r in c: userData = [] userData.append(r['_id']) userData.append(r['email']) userData.append(r['webpage']) creditCardData = u"Número: " + r['credit_card']['number'] + linesep creditCardData += u"Fecha de expiración: " + r['credit_card']['expire']['month'] + '/' + r['credit_card']['expire']['year'] userData.append(creditCardData) userData.append(r['password']) userData.append(r['name']) userData.append(r['surname']) addressData = "Pais: " + r['address']['country'] + linesep addressData += "Zip: " + r['address']['zip'] + linesep addressData += "Calle: " + r['address']['street'] + linesep addressData += "Num: " + r['address']['num'] userData.append(addressData) likesData = '' for like in r['likes']: likesData += str(like) + linesep userData.append(likesData) userData.append(r['birthdate']) data.append(userData) return data #Función que recibe una lista con los argumentos que deberían haber llegado al servidor # también recibe un variable que dice si todos los argumentos son obligatorios o no def validate_arguments(args_list, all_needed=False): args = request.query invalid_args = [] valid_args = [] # Comprueba que todos los argumentos pasados son válidos for a in args: # Si no es válido lo añade a la lista de argumentos inválidos if a not in args_list: invalid_args.append(a) #Si no lo mete en la 
lista de argumentos válidos else: valid_args.append(a) if len(invalid_args) != 0: return False, show_args_error(invalid_args) elif not all_needed and len(valid_args) > 0: return True, '' elif all_needed and len(valid_args) == len(args) and len(args) > 0: return True, '' else: return False, "<p style='color:red'>No se han recibido los argumentos necesarios</p>" #Función que muestra un mensaje de error con los argumento inválidos def show_args_error(invalid_args): out = "<p style='color:red'>Argumentos inválidos:</p>\n" out += "<ul>" for i in invalid_args: out += "<li>" + i + "</li>" out += "</ul>" return out @get('/find_user') def find_user(): # http://localhost:8080/find_user?username=burgoscarla valid, msg = validate_arguments(['username'], all_needed=True) if valid: #Coge el nombre de usuario de la petición GET username = request.query.username c = db.usuarios #Busca todos un único usuario con ese id res = c.find_one({"_id":username}) #Si existe dicho usuario rellena las listas con los datos de la BD if res: #Lista para datos simples simple_data = list() #Lista para todos los datos de dirección address = list() #Lista para todos los datos de la tarjeta de crédito credit_card = list() #Lista de todo lo que le gusta al usuario likes = list() for key, value in res.items(): if key == 'credit_card': credit_card.append('month : ' + value['expire']['month']) credit_card.append('year : ' + value['expire']['year']) credit_card.append('number : ' + value['number']) elif key == 'address': for k, v in value.items(): address.append(k + ' : ' + v) elif key == 'likes': for l in value: likes.append(l) else: simple_data.append(key + ' : ' + value) return template('datos', title=username, simple_data=simple_data, address=address, credit_card=credit_card, likes=likes) #Si no existe devuelve un error else: return "<p style='color:red'>El usuario <strong>" + username + " </strong> no existe en la BD.</p>" else: return msg @get('/find_users') def find_users(): # 
http://localhost:8080/find_users?name=Luz # http://localhost:8080/find_users?name=Luz&surname=Romero # http://localhost:8080/find_users?name=Luz&food=hotdog valid, msg = validate_arguments(['name', 'surname', 'birthday']) if valid: #Si no hay ningún elemento inválido procede con la consulta name = request.query.name surname = request.query.surname birth = request.query.birthday #Diccionario donde van a ir los datos a buscar data = dict() if name: data['name'] = name if surname: data['surname'] = surname if birth: data['birthdate'] = birth c = db.usuarios res = c.find(data) data = get_results_data(res) return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data) else: return msg @get('/find_users_or') def find_users_or(): # http://localhost:8080/find_users_or?name=Luz&surname=Corral valid, msg = validate_arguments(['name', 'surname', 'birthday']) # Si no hay ningún elemento inválido procede con la consulta if valid: name = request.query.name surname = request.query.surname birth = request.query.birthday # Diccionario donde van a ir los datos a buscar data = [] if name: data.append({'name': name}) if surname: data.append({'surname': surname}) if birth: data.append({'birthdate': birth}) c = db.usuarios res = c.find({'$or': data}) data = get_results_data(res) return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data) else: return msg @get('/find_like') def find_like(): # http://localhost:8080/find_like?like=football valid, msg = validate_arguments(['like'], all_needed=True) # Si no hay ningún elemento inválido procede con la consulta if valid: like = request.query.like c = db.usuarios res = c.find({'likes': like}) data = get_results_data(res) return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data) else: return msg @get('/find_country') def find_country(): # http://localhost:8080/find_country?country=Irlanda valid, msg = validate_arguments(['country'], 
all_needed=True) # Si no hay ningún elemento inválido procede con la consulta if valid: country = request.query.country c = db.usuarios res = c.find({'address.country': country}) data = get_results_data(res) return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data) else: return msg @get('/find_email_birthdate') def email_birthdate(): # http://localhost:8080/find_email_birthdate?from=1973-01-01&to=1990-12-31 valid, msg = validate_arguments(['from', 'to'], all_needed=True) # Si no hay ningún elemento inválido procede con la consulta if valid: from_date = request.query['from'] to_date = request.query.to c = db.usuarios # Fecha de nacimiento mayor que fromDate y menor que toDate query = {'birthdate': {'$gt': from_date, '$lt': to_date}} # query que busca las fechas de nacimiento ordenadas por fecha de nacimiento y por _id # y solo proyecta los datos necesarios res = c.find(query, {'_id': 1, 'email': 1, 'birthdate': 1 }).sort([('birthdate', 1), ('_id', 1)]) data = [] for r in res: user_data = [] user_data.append(r['_id']) user_data.append(r['email']) user_data.append(r['birthdate']) data.append(user_data) return template('table', num_results=str(res.count()), table_titles=mid_table_data, rows=data) else: return msg @get('/find_country_likes_limit_sorted') def find_country_likes_limit_sorted(): # http://localhost:8080/find_country_likes_limit_sorted?country=Irlanda&likes=movies,animals&limit=4&ord=asc valid, msg = validate_arguments(['country', 'likes', 'limit', 'ord'], all_needed=True) # Si no hay ningún elemento inválido procede con la consulta if valid: country = request.query.country likes = request.query.likes limit = request.query.limit order = request.query.ord # Almacenamos en una lista todos los likes q se pasan por parametro. Hacemos lista para que $all pueda leer bien. 
gustos = [] cadena = "" for i in likes: if i != ',': cadena += i else: gustos.append(cadena) cadena = "" gustos.append(cadena) # en funcion del tipo de ordenacion se le da un valor entero a la variable order if order == 'asc': order = 1 elif order == 'desc': order = -1 c = db.usuarios query = {'$and': [{'address.country': country}, {'likes': {'$all': gustos}}]} # query que busca en funcion de un country y de los gustos ordenando por fechas de nacimiento y con limite = limit res = c.find(query).sort('birthdate', int(order)).limit(int(limit)) data = get_results_data(res) return template('table', num_results=str(res.count()), table_titles=all_table_data, rows=data) else: return msg if __name__ == "__main__": # No cambiar host ni port ni debug run(host='localhost',port=8080,debug=True)
gpl-3.0
4,137,989,952,177,386,000
37.779851
177
0.592322
false
3.424382
false
false
false
tuxfux-hlp-notes/python-batches
archieves/Batch-63/12-Logging/seventh.py
1
2105
#!/usr/bin/python # logging.basicConfig? # logging.Formatter? # man data or time.strftime(). # https://docs.python.org/2/library/subprocess.html # cronjob or scheduler # import logging.handlers for rest all handlers. from subprocess import Popen,PIPE from logging.handlers import SysLogHandler import logging #logging.basicConfig(filename='my_logs.txt',filemode='a',level=logging.DEBUG, # format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',datefmt='%c') # Loggers expose the interface that application code directly uses. # ex:logger - root # Handlers send the log records (created by loggers) to the appropriate destination. # https://docs.python.org/2/howto/logging.html#useful-handlers # ex: filename='my_logs.txt',filemode='a' # Filters provide a finer grained facility for determining which log records to output. # ex: level=logging.DEBUG # Formatters specify the layout of log records in the final output. # ex: format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',datefmt='%c' # create logger logger = logging.getLogger('disk_monitor') # logger name logger.setLevel(logging.DEBUG) # Filter for logger # create console handler and set level to debug ch = SysLogHandler(address="/dev/log") # handler ch.setLevel(logging.DEBUG) # filter for handler # create formatter formatter = logging.Formatter('- %(name)s - %(levelname)s - %(message)s') # add formatter to ch ch.setFormatter(formatter) # handler and formatter # add ch to logger logger.addHandler(ch) # logger and handler # Main p1 = Popen(['df','-h','/'],stdout=PIPE) p2 = Popen(['tail','-n','1'],stdin=p1.stdout,stdout=PIPE) disk_size = int(p2.communicate()[0].split()[4].split('%')[0]) if disk_size < 50: logger.info("The disk looks health at {}".format(disk_size)) elif disk_size < 70: logger.warning("The disk is getting filled up {}".format(disk_size)) elif disk_size < 80: logger.error("your application is sleeping now {}".format(disk_size)) elif disk_size < 100: logger.critical("your application is not 
working {}".format(disk_size))
gpl-3.0
4,377,696,708,954,028,500
32.951613
87
0.705463
false
3.496678
false
false
false
KristianJensen/cameo
cameo/network_analysis/networkx_based.py
1
3295
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, print_function __all__ = ['model_to_network', 'reactions_to_network', 'remove_highly_connected_nodes'] import networkx as nx from cameo.network_analysis.util import distance_based_on_molecular_formula def model_to_network(model, *args, **kwargs): """Convert a model into a networkx graph. Calls reactions_to_network with model.reactions. Parameters ---------- model : SolverBasedModel The model. Returns ------- networkx.MultiDiGraph See Also -------- reactions_to_network """ return reactions_to_network(model.reactions, *args, **kwargs) def reactions_to_network(reactions, max_distance=0.3): """Convert a list of reactions into a networkx graph. Parameters ---------- reactions : list The list of reactions. max_distance : float, optional A threshold on the normalized distance between two compounds. If distance is above this threshold, no edge between those compounds will be created. Returns ------- networkx.MultiDiGraph See Also -------- distance_based_on_molecular_formula """ edges = list() for reaction in reactions: substrates = list(reaction.reactants) for substrate in substrates: products = list(reaction.products) for product in products: try: distance = distance_based_on_molecular_formula(substrate, product, normalize=True) except ValueError: distance = 0. 
if distance <= max_distance: if reaction.reversibility: edges.append((product, substrate, dict(reaction=reaction))) edges.append((substrate, product, dict(reaction=reaction))) elif reaction.lower_bound > 0: edges.append((substrate, product, dict(reaction=reaction))) else: edges.append((product, substrate, dict(reaction=reaction))) multi_graph = nx.MultiDiGraph(edges) return multi_graph def remove_highly_connected_nodes(network, max_degree=10, ignore=[]): """Remove highly connected nodes (changes network in place). Parameters ---------- network : networkx graph max_degree : int (default 10) Remove nodes with degree > max_degree ignore : list List of nodes to ignore. Returns ------- None """ to_remove = [node for node, degree in network.degree_iter() if degree > max_degree and node not in ignore] network.remove_nodes_from(to_remove)
apache-2.0
-9,001,907,852,848,431,000
31.303922
122
0.640061
false
4.364238
false
false
false
chand3040/cloud_that
lms/djangoapps/shoppingcart/processors/PayPal.py
1
21754
""" Implementation the PayPal processor. To enable this implementation, add the following to lms.auth.json: CC_PROCESSOR_NAME = "PayPal" CC_PROCESSOR = { "PayPal": { "PURCHASE_ENDPOINT": "sandbox or live url of paypal", "CLIENT_ID": "<paypal client_id>", "CLIENT_SECRET": "<paypal client secret>", "MODE": "sandbox | live", "RETURN_URL": 'host/dashboard', "NOTIFY_URL": 'host/paypal', "CANCEL_URL": 'where to redirect if user cancels order' } } """ import time import hmac import binascii import re import json import urlparse import logging from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from hashlib import sha1 from textwrap import dedent from django.conf import settings from django.utils.translation import ugettext as _ from edxmako.shortcuts import render_to_string from shoppingcart.models import Order from shoppingcart.processors.exceptions import * from shoppingcart.processors.helpers import get_processor_config from microsite_configuration import microsite from django.core.urlresolvers import reverse from paypal.standard.models import ST_PP_COMPLETED, ST_PP_CANCELLED, ST_PP_DENIED from paypal.standard.ipn.signals import valid_ipn_received from paypal.standard.pdt.views import process_pdt logger = logging.getLogger(__name__) def process_postpay_callback(request): """ The top level call to this module, basically This function is handed the callback request after the customer has entered the CC info and clicked "buy" on the external Hosted Order Page. It is expected to verify the callback and determine if the payment was successful. It returns {'success':bool, 'order':Order, 'error_html':str} If successful this function must have the side effect of marking the order purchased and calling the purchased_callbacks of the cart items. If unsuccessful this function should not have those side effects but should try to figure out why and return a helpful-enough error message in error_html. 
Author: Naresh Makwana created on: 07-Apr-2015 """ logger.info('Handling GET request %s', request.GET) logger.info('Handling POST request %s', request.POST) pdt_obj, failed = process_pdt(request) logger.info('invoice %s', getattr(pdt_obj, 'invoice')) logger.info('mc_currency %s', getattr(pdt_obj, 'mc_currency')) logger.info('payment_status %s', getattr(pdt_obj, 'payment_status')) try: result = payment_accepted(pdt_obj) if result['accepted']: # SUCCESS CASE first, rest are some sort of oddity record_purchase(pdt_obj, result['order']) return {'success': True, 'order': result['order'], 'error_html': ''} else: return {'success': False, 'order': result['order'], 'error_html': get_processor_decline_html(pdt_obj)} except CCProcessorException as error: return {'success': False, 'order': None, # due to exception we may not have the order 'error_html': get_processor_exception_html(error)} def processor_hash(value): """ Performs the base64(HMAC_SHA1(key, value)) used by CyberSource Hosted Order Page """ shared_secret = get_processor_config().get('SHARED_SECRET', '') hash_obj = hmac.new(shared_secret.encode('utf-8'), value.encode('utf-8'), sha1) return binascii.b2a_base64(hash_obj.digest())[:-1] # last character is a '\n', which we don't want def sign(params, signed_fields_key='orderPage_signedFields', full_sig_key='orderPage_signaturePublic'): """ params needs to be an ordered dict, b/c cybersource documentation states that order is important. 
Reverse engineered from PHP version provided by cybersource """ merchant_id = get_processor_config().get('MERCHANT_ID', '') order_page_version = get_processor_config().get('ORDERPAGE_VERSION', '7') serial_number = get_processor_config().get('SERIAL_NUMBER', '') params['merchantID'] = merchant_id params['orderPage_timestamp'] = int(time.time() * 1000) params['orderPage_version'] = order_page_version params['orderPage_serialNumber'] = serial_number fields = u",".join(params.keys()) values = u",".join([u"{0}={1}".format(i, params[i]) for i in params.keys()]) fields_sig = processor_hash(fields) values += u",signedFieldsPublicSignature=" + fields_sig params[full_sig_key] = processor_hash(values) params[signed_fields_key] = fields return params def verify_signatures(ipn_obj): """ Use the signature we receive in the POST back from PayPal to verify the identity of the sender (PayPal) and that the contents of the message have not been tampered with. Args: params (dictionary): The POST parameters we received from PayPal. Returns: dict: Contains the parameters we will use elsewhere, converted to the appropriate types Raises: CCProcessorSignatureException: The calculated signature does not match the signature we received. 
CCProcessorDataException: The parameters we received from CyberSource were not valid (missing keys, wrong types) """ # First see if the user cancelled the transaction # if so, then not all parameters will be passed back so we can't yet verify signatures if getattr(ipn_obj, 'payment_status') == ST_PP_CANCELLED: raise CCProcessorUserCancelled() # if the user decline the transaction # if so, then amount will not be passed back so we can't yet verify signatures if getattr(ipn_obj, 'payment_status') == ST_PP_DENIED: raise CCProcessorUserDeclined() return ipn_obj def render_purchase_form_html(cart, **kwargs): """ Renders the HTML of the hidden POST form that must be used to initiate a purchase with CyberSource """ return render_to_string('shoppingcart/paypal_form.html', { 'action': get_purchase_endpoint(), 'params': get_signed_purchase_params(cart), }) def get_signed_purchase_params(cart, **kwargs): return sign(get_purchase_params(cart)) def get_purchase_params(cart): cart_items = cart.orderitem_set.all() total_cost = cart.total_cost amount = "{0:0.2f}".format(total_cost) cart_items = cart.orderitem_set.all() params = OrderedDict() params['business'] = settings.PAYPAL_RECEIVER_EMAIL params['invoice'] = "{0:d}".format(cart.id) params['item_number'] = "{0:d}".format(cart.id) params['notify_url'] = get_processor_config().get('NOTIFY_URL', '') params['return'] = get_processor_config().get('RETURN_URL', '') params['cancel_return'] = get_processor_config().get('CANCEL_URL', '') params['currency_code'] = cart.currency.upper() params['orderPage_transactionType'] = 'sale' params['orderNumber'] = "{0:d}".format(cart.id) params['no_shipping'] = 1 params['charset'] = 'utf-8' params['upload'] = 1 for counter, cart_item in enumerate(cart_items): params['item_name_'+str(counter+1)] = cart_item.line_desc params['amount_'+str(counter+1)] = cart_item.list_price params['quantity_'+str(counter+1)] = cart_item.qty params['cmd'] = '_cart' return params def get_purchase_endpoint(): return 
get_processor_config().get('PURCHASE_ENDPOINT', '') def payment_accepted(ipn_obj): """ Check that paypal has accepted the payment params: a dictionary of POST parameters returned by paypal in their post-payment callback returns: true if the payment was correctly accepted, for the right amount false if the payment was not accepted raises: CCProcessorDataException if the returned message did not provide required parameters CCProcessorWrongAmountException if the amount charged is different than the order amount """ #make sure required keys are present and convert their values to the right type valid_params = {} for key, key_type in [('invoice', int), ('mc_currency', str), ('payment_status', str)]: if not hasattr(ipn_obj, key): raise CCProcessorDataException( _("The payment processor did not return a required parameter: {0}").format(key) ) try: valid_params[key] = key_type(getattr(ipn_obj, key)) except ValueError: raise CCProcessorDataException( _("The payment processor returned a badly-typed value {0} for param {1}.").format(getattr(ipn_obj, key), key) ) try: order = Order.objects.get(id=valid_params['invoice']) except Order.DoesNotExist: raise CCProcessorDataException(_("The payment processor accepted an order whose number is not in our system.")) if valid_params['payment_status'] == ST_PP_COMPLETED: try: # Moved reading of charged_amount here from the valid_params loop above because # only 'ACCEPT' messages have a 'mc_gross' parameter charged_amt = Decimal(getattr(ipn_obj, 'mc_gross')) except InvalidOperation: raise CCProcessorDataException( _("The payment processor returned a badly-typed value {0} for param {1}.").format( getattr(ipn_obj, 'mc_gross'), 'mc_gross' ) ) if charged_amt == order.total_cost and valid_params['mc_currency'] == order.currency.upper(): return {'accepted': True, 'amt_charged': charged_amt, 'currency': valid_params['mc_currency'].lower(), 'order': order} else: raise CCProcessorWrongAmountException( _("The amount charged by the processor {0} {1} 
is different than the total cost of the order {2} {3}.") .format( charged_amt, valid_params['mc_currency'], order.total_cost, order.currency ) ) else: return {'accepted': False, 'amt_charged': 0, 'currency': 'usd', 'order': order} def record_purchase(ipn_obj, order): """ Record the purchase and run purchased_callbacks """ ccnum_str = getattr(ipn_obj, 'card_accountNumber', '') m = re.search("\d", ccnum_str) if m: ccnum = ccnum_str[m.start():] else: ccnum = "####" order.purchase( first=getattr(ipn_obj, 'first_name', ''), last=getattr(ipn_obj, 'last_name', ''), street1=getattr(ipn_obj, 'billTo_street1', ''), street2=getattr(ipn_obj, 'address_street', ''), city=getattr(ipn_obj, 'address_city', ''), state=getattr(ipn_obj, 'address_state', ''), country=getattr(ipn_obj, 'address_country', ''), postalcode=getattr(ipn_obj, 'billTo_postalCode', ''), ccnum=ccnum, cardtype=CARDTYPE_MAP[getattr(ipn_obj, 'card_cardType', 'UNKNOWN')], processor_reply_dump=dict(urlparse.parse_qsl(str(getattr(ipn_obj, 'query', 'UNKNOWN=UNKNOWN')))) ) def get_processor_decline_html(ipn_obj): """Have to parse through the error codes to return a helpful message""" # see if we have an override in the microsites payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL) msg = _( "Sorry! Our payment processor did not accept your payment. " "The decision they returned was {decision_text}, " "and the reason was {reason_text}. " "You were not charged. " "Please try a different form of payment. " "Contact us with payment-related questions at {email}." 
) formatted = msg.format( decision_text='<span class="decision">{}</span>'.format(getattr(ipn_obj, 'payment_status')), reason_text='<span class="reason">{code}:{msg}</span>'.format( code=params['reasonCode'], msg=REASONCODE_MAP[getattr(ipn_obj,'reason_code')], ), email=payment_support_email, ) return '<p class="error_msg">{}</p>'.format(formatted) def get_processor_exception_html(exception): """Return error HTML associated with exception""" # see if we have an override in the microsites payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL) if isinstance(exception, CCProcessorDataException): msg = _( "Sorry! Our payment processor sent us back a payment confirmation " "that had inconsistent data!" "We apologize that we cannot verify whether the charge went through " "and take further action on your order." "The specific error message is: {error_message}. " "Your credit card may possibly have been charged. " "Contact us with payment-specific questions at {email}." ) formatted = msg.format( error_message='<span class="exception_msg">{msg}</span>'.format( msg=exception.message, ), email=payment_support_email, ) return '<p class="error_msg">{}</p>'.format(formatted) elif isinstance(exception, CCProcessorWrongAmountException): msg = _( "Sorry! Due to an error your purchase was charged for " "a different amount than the order total! " "The specific error message is: {error_message}. " "Your credit card has probably been charged. " "Contact us with payment-specific questions at {email}." ) formatted = msg.format( error_message='<span class="exception_msg">{msg}</span>'.format( msg=exception.message, ), email=payment_support_email, ) return '<p class="error_msg">{}</p>'.format(formatted) elif isinstance(exception, CCProcessorSignatureException): msg = _( "Sorry! 
Our payment processor sent us back a corrupted message " "regarding your charge, so we are unable to validate that " "the message actually came from the payment processor. " "The specific error message is: {error_message}. " "We apologize that we cannot verify whether the charge went through " "and take further action on your order. " "Your credit card may possibly have been charged. " "Contact us with payment-specific questions at {email}." ) formatted = msg.format( error_message='<span class="exception_msg">{msg}</span>'.format( msg=exception.message, ), email=payment_support_email, ) return '<p class="error_msg">{}</p>'.format(formatted) # fallthrough case, which basically never happens return '<p class="error_msg">EXCEPTION!</p>' CARDTYPE_MAP = defaultdict(lambda: "UNKNOWN") CARDTYPE_MAP.update( { '001': 'Visa', '002': 'MasterCard', '003': 'American Express', '004': 'Discover', '005': 'Diners Club', '006': 'Carte Blanche', '007': 'JCB', '014': 'EnRoute', '021': 'JAL', '024': 'Maestro', '031': 'Delta', '033': 'Visa Electron', '034': 'Dankort', '035': 'Laser', '036': 'Carte Bleue', '037': 'Carta Si', '042': 'Maestro', '043': 'GE Money UK card' } ) REASONCODE_MAP = defaultdict(lambda: "UNKNOWN REASON") REASONCODE_MAP.update( { '100': _('Successful transaction.'), '101': _('The request is missing one or more required fields.'), '102': _('One or more fields in the request contains invalid data.'), '104': dedent(_( """ The merchantReferenceCode sent with this authorization request matches the merchantReferenceCode of another authorization request that you sent in the last 15 minutes. Possible fix: retry the payment after 15 minutes. """)), '150': _('Error: General system failure. Possible fix: retry the payment after a few minutes.'), '151': dedent(_( """ Error: The request was received but there was a server timeout. This error does not include timeouts between the client and the server. Possible fix: retry the payment after some time. 
""")), '152': dedent(_( """ Error: The request was received, but a service did not finish running in time Possible fix: retry the payment after some time. """)), '201': _('The issuing bank has questions about the request. Possible fix: retry with another form of payment'), '202': dedent(_( """ Expired card. You might also receive this if the expiration date you provided does not match the date the issuing bank has on file. Possible fix: retry with another form of payment """)), '203': dedent(_( """ General decline of the card. No other information provided by the issuing bank. Possible fix: retry with another form of payment """)), '204': _('Insufficient funds in the account. Possible fix: retry with another form of payment'), # 205 was Stolen or lost card. Might as well not show this message to the person using such a card. '205': _('Unknown reason'), '207': _('Issuing bank unavailable. Possible fix: retry again after a few minutes'), '208': dedent(_( """ Inactive card or card not authorized for card-not-present transactions. Possible fix: retry with another form of payment """)), '210': _('The card has reached the credit limit. Possible fix: retry with another form of payment'), '211': _('Invalid card verification number. Possible fix: retry with another form of payment'), # 221 was The customer matched an entry on the processor's negative file. # Might as well not show this message to the person using such a card. '221': _('Unknown reason'), '231': _('Invalid account number. Possible fix: retry with another form of payment'), '232': dedent(_( """ The card type is not accepted by the payment processor. Possible fix: retry with another form of payment """)), '233': _('General decline by the processor. Possible fix: retry with another form of payment'), '234': _( "There is a problem with our CyberSource merchant configuration. 
Please let us know at {0}" ).format(settings.PAYMENT_SUPPORT_EMAIL), # reason code 235 only applies if we are processing a capture through the API. so we should never see it '235': _('The requested amount exceeds the originally authorized amount.'), '236': _('Processor Failure. Possible fix: retry the payment'), # reason code 238 only applies if we are processing a capture through the API. so we should never see it '238': _('The authorization has already been captured'), # reason code 239 only applies if we are processing a capture or credit through the API, # so we should never see it '239': _('The requested transaction amount must match the previous transaction amount.'), '240': dedent(_( """ The card type sent is invalid or does not correlate with the credit card number. Possible fix: retry with the same card or another form of payment """)), # reason code 241 only applies when we are processing a capture or credit through the API, # so we should never see it '241': _('The request ID is invalid.'), # reason code 242 occurs if there was not a previously successful authorization request or # if the previously successful authorization has already been used by another capture request. # This reason code only applies when we are processing a capture through the API # so we should never see it '242': dedent(_( """ You requested a capture through the API, but there is no corresponding, unused authorization record. """)), # we should never see 243 '243': _('The transaction has already been settled or reversed.'), # reason code 246 applies only if we are processing a void through the API. so we should never see it '246': dedent(_( """ The capture or credit is not voidable because the capture or credit information has already been submitted to your processor. Or, you requested a void for a type of transaction that cannot be voided. """)), # reason code 247 applies only if we are processing a void through the API. 
so we should never see it '247': _('You requested a credit for a capture that was previously voided'), '250': dedent(_( """ Error: The request was received, but there was a timeout at the payment processor. Possible fix: retry the payment. """)), '520': dedent(_( """ The authorization request was approved by the issuing bank but declined by CyberSource.' Possible fix: retry with a different form of payment. """)), } )
agpl-3.0
322,017,873,674,394,940
42.077228
125
0.623472
false
4.254645
true
false
false
gimler/techism2
techism2/ical/views.py
1
3450
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""iCalendar (RFC 5545) export of upcoming Techism events."""

from django.http import HttpResponse
from django.core.urlresolvers import reverse
from techism2 import service
from datetime import datetime, timedelta
import icalendar
import time


def ical(request):
    """Return an HTTP response with all events starting within the next
    90 days, serialized as a single iCalendar (text/calendar) feed.

    Events are ordered by their begin time; all date/time fields are
    emitted in UTC via the model's *_utc accessors.
    """
    # Only export events that begin within the next 90 days.
    ninety_days = datetime.utcnow() + timedelta(days=90)
    event_list = service.get_event_query_set().filter(date_time_begin__lte=ninety_days).order_by('date_time_begin')

    cal = icalendar.Calendar()
    cal['prodid'] = icalendar.vText(u'-//Techism//Techism//DE')
    cal['version'] = icalendar.vText(u'2.0')
    # x-wr-* are non-standard but widely supported calendar name/description.
    cal['x-wr-calname'] = icalendar.vText(u'Techism')
    cal['x-wr-caldesc'] = icalendar.vText(u'Techism - IT-Events in München')

    for e in event_list:
        event = icalendar.Event()

        # TODO should we generate an UUID when creating the event?
        # UID is derived from the database primary key, so it is stable
        # across exports of the same event.
        uid = u'%s@techism.de' % (str(e.id))
        event['uid'] = icalendar.vText(uid)
        event['dtstamp'] = icalendar.vDatetime(datetime.utcnow())

        # The sequence field must be incremented each time the event is modifed.
        # The trick here is to subtract the create TS from the modify TS and
        # use the difference as sequence.
        sequence = 0
        if e.date_time_created and e.date_time_modified:
            createTimestamp = time.mktime(e.get_date_time_created_utc().timetuple())
            modifyTimestamp = time.mktime(e.get_date_time_modified_utc().timetuple())
            sequence = modifyTimestamp - createTimestamp
        event['sequence'] = icalendar.vInt(sequence)

        # created and last-modified
        if e.date_time_created:
            event['created'] = icalendar.vDatetime(e.get_date_time_created_utc())
        if e.date_time_modified:
            event['last-modified'] = icalendar.vDatetime(e.get_date_time_modified_utc())

        # TENTATIVE, CONFIRMED, CANCELLED
        event['status'] = icalendar.vText(u'CONFIRMED')

        # Optional fields: only emit them when the model actually has a value.
        if e.title:
            event['summary'] = icalendar.vText(e.title)
        if e.description:
            event['description'] = icalendar.vText(e.description)
        if e.date_time_begin:
            event['dtstart'] = icalendar.vDatetime(e.get_date_time_begin_utc())
        if e.date_time_end:
            event['dtend'] = icalendar.vDatetime(e.get_date_time_end_utc())
        if e.url:
            # Point URL at the local detail page, not the external event URL.
            relative_url = reverse('event-show', args=[e.id])
            absolute_url = request.build_absolute_uri(relative_url)
            event['url'] = icalendar.vUri(absolute_url)

        # geo value isn't used by iCal readers :-(
        # maybe a trick is to add the geo coordinates to the location field using the following format:
        # $latitude, $longitude ($name, $street, $city)
        if e.location:
            location = u'%s, %s, %s' % (e.location.name, e.location.street, e.location.city)
            event['location'] = icalendar.vText(location)
        if e.location and e.location.latitude and e.location.longitude:
            event['geo'] = icalendar.vGeo((e.location.latitude, e.location.longitude))

        cal.add_component(event)

    response = HttpResponse(cal.as_string())
    response['Content-Type'] = 'text/calendar; charset=UTF-8'
    # Aggressively disable caching so calendar clients always re-fetch.
    response['Cache-Control'] = 'no-cache, no-store, max-age=0, must-revalidate'
    response['Pragma'] = 'no-cache'
    response['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
    return response
apache-2.0
7,792,382,559,504,474,000
43.217949
115
0.627428
false
3.577801
false
false
false
infobip/infobip-api-python-client
infobip/api/model/nc/notify/NumberContextResponse.py
1
1285
# -*- coding: utf-8 -*- """This is a generated class and is not intended for modification! """ from datetime import datetime from infobip.util.models import DefaultObject, serializable from infobip.api.model.nc.notify.NumberContextResponseDetails import NumberContextResponseDetails class NumberContextResponse(DefaultObject): @property @serializable(name="results", type=NumberContextResponseDetails) def results(self): """ Property is a list of: NumberContextResponseDetails """ return self.get_field_value("results") @results.setter def results(self, results): """ Property is a list of: NumberContextResponseDetails """ self.set_field_value("results", results) def set_results(self, results): self.results = results return self @property @serializable(name="bulkId", type=unicode) def bulk_id(self): """ Property is of type: unicode """ return self.get_field_value("bulk_id") @bulk_id.setter def bulk_id(self, bulk_id): """ Property is of type: unicode """ self.set_field_value("bulk_id", bulk_id) def set_bulk_id(self, bulk_id): self.bulk_id = bulk_id return self
apache-2.0
2,975,082,414,965,617,700
25.791667
97
0.637354
false
4.131833
false
false
false
glenc/sp.py
src/sp/utils.py
1
3239
# Set up References
import clr
clr.AddReference("System")
clr.AddReference("Microsoft.SharePoint")

from System import Uri
from Microsoft.SharePoint import *
from Microsoft.SharePoint.Administration import SPWebApplication


# Enumeration
# These are simple enumeration methods for walking over various SharePoint
# objects and collections.

def enum(col, fn):
    """Enumerate a collection and call function fn for each item."""
    for x in col:
        fn(x)


def enum_sites(webapp, fn):
    """
    Enumerate all site collections in the specified web application
    and call the specified function with each site collection.
    """
    # just in case we were passed a URL, get the web app
    webapp = get_webapplication(webapp)
    enum(webapp.Sites, fn)


def enum_webs(site, fn):
    """
    Enumerate all webs beneath the site or web specified and call the
    specified function with each web.
    """
    # do different things based on the type of object provided
    if type(site) is SPWeb:
        enum(site.Webs, fn)
    else:
        site = get_site(site)
        enum(site.RootWeb.Webs, fn)


def enum_all_webs(site, fn):
    """Enumerate all webs in a site collection"""
    site = get_site(site)
    enum(site.AllWebs, fn)


def enum_lists(web, fn):
    """Enumerate all lists in the web specified"""
    web = get_web(web)
    enum(web.Lists, fn)


# Get Object Helper Methods
# These methods take in some sort of object identifier (usually a URL)
# and return the appropriate object instance

def get_webapplication(url):
    """Gets a web application by its URL"""
    if type(url) is SPWebApplication:
        return url
    return SPWebApplication.Lookup(Uri(url))


def get_site(url):
    """Gets a site collection by its URL"""
    if type(url) is SPSite:
        return url
    return SPSite(url)


def get_web(url):
    """Gets a web by its URL"""
    if type(url) is SPWeb:
        return url
    if type(url) is SPSite:
        return url.RootWeb

    site = get_site(url)
    # Derive the server-relative portion of the URL to open the sub-web.
    relative_url = url.replace(site.Url, "")
    return site.OpenWeb(relative_url)


def get_list(web, list_name):
    """Gets a list within a web"""
    web = get_web(web)
    return first(web.Lists, lambda l: l.Title == list_name)


def try_get_site(url):
    """Tries to get a site collection but returns false if no site was found"""
    try:
        site = get_site(url)
        return True, site
    except:
        # SPSite raises when the URL does not resolve to a site collection;
        # this helper deliberately swallows that and reports (False, None).
        return False, None


def try_get_web(url):
    """Tries to get a web but returns false if no web was found"""
    web = get_web(url)
    if web.Exists:
        return True, web
    else:
        return False, None


def try_get_list(web, list_name):
    """Tries to get a list but returns false if no list was found"""
    l = get_list(web, list_name)
    return l is not None, l


# Find Object Helper Methods
# These methods are used to find objects in collections

def list_exists(web, list_name):
    """Checks if a list exists"""
    web = get_web(web)
    match = first(web.Lists, lambda l: l.Title == list_name)
    return match is not None


# List/Collection helper methods

def collect(collection, fn):
    """Collects items where the function evaluates as true"""
    results = []
    for item in collection:
        if fn(item):
            # BUG FIX: was `results << item`, which raises TypeError on a
            # Python list (`<<` is not an append operator).
            results.append(item)
    return results


def first(collection, fn):
    """Finds the first item in the collection where the function evaluates as true"""
    for item in collection:
        if fn(item):
            return item
    return None
bsd-3-clause
7,495,485,383,914,679,000
20.885135
82
0.710096
false
3.15692
false
false
false
dukhlov/oslo.messaging
oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py
1
3834
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from oslo_messaging._drivers import base
from oslo_messaging._drivers.zmq_driver.server.consumers\
    import zmq_consumer_base
from oslo_messaging._drivers.zmq_driver.server import zmq_incoming_message
from oslo_messaging._drivers.zmq_driver import zmq_address
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_messaging._i18n import _LE, _LI

LOG = logging.getLogger(__name__)

zmq = zmq_async.import_zmq()


class RouterIncomingMessage(base.RpcIncomingMessage):
    """Incoming message for non-blocking (cast/fanout) requests.

    These message types never send a reply, so reply() is a no-op and
    acknowledge() only logs.
    """

    def __init__(self, context, message, socket, reply_id, msg_id,
                 poller):
        super(RouterIncomingMessage, self).__init__(context, message)
        self.socket = socket
        self.reply_id = reply_id
        self.msg_id = msg_id
        self.message = message
        # The poller paused this socket while the message was being read;
        # resume it so subsequent requests can be received.
        poller.resume_polling(socket)

    def reply(self, reply=None, failure=None, log_failure=True):
        """Reply is not needed for non-call messages"""

    def acknowledge(self):
        LOG.debug("Not sending acknowledge for %s", self.msg_id)

    def requeue(self):
        """Requeue is not supported"""


class RouterConsumer(zmq_consumer_base.SingleSocketConsumer):
    """Server-side consumer bound to a single zmq.ROUTER socket.

    Registers listening targets with the matchmaker (via TargetsManager)
    and converts raw multipart zmq frames into incoming message objects.
    """

    def __init__(self, conf, poller, server):
        super(RouterConsumer, self).__init__(conf, poller, server, zmq.ROUTER)
        self.matchmaker = server.matchmaker
        # Advertised address: configured host combined with the bound port.
        self.host = zmq_address.combine_address(self.conf.rpc_zmq_host,
                                                self.port)
        self.targets = zmq_consumer_base.TargetsManager(
            conf, self.matchmaker, self.host, zmq.ROUTER)
        LOG.info(_LI("[%s] Run ROUTER consumer"), self.host)

    def listen(self, target):
        # Register this host as a listener for the given target so clients
        # can discover it through the matchmaker.
        LOG.info(_LI("[%(host)s] Listen to target %(target)s"),
                 {'host': self.host, 'target': target})
        self.targets.listen(target)

    def cleanup(self):
        # Unregister all targets in addition to the base socket cleanup.
        super(RouterConsumer, self).cleanup()
        self.targets.cleanup()

    def _receive_request(self, socket):
        # ROUTER framing: [sender identity][empty delimiter][pickled request].
        # The recv order here must match the client's send order exactly.
        reply_id = socket.recv()
        empty = socket.recv()
        assert empty == b'', 'Bad format: empty delimiter expected'
        request = socket.recv_pyobj()
        return request, reply_id

    def receive_message(self, socket):
        """Read one request from the socket and wrap it as an incoming message.

        Returns a ZmqIncomingRequest for CALL-type requests, a
        RouterIncomingMessage for non-blocking types, and None for unknown
        types or on a ZMQError (both of which are logged).
        """
        try:
            request, reply_id = self._receive_request(socket)
            LOG.debug("[%(host)s] Received %(type)s, %(id)s, %(target)s",
                      {"host": self.host, "type": request.msg_type,
                       "id": request.message_id, "target": request.target})

            if request.msg_type == zmq_names.CALL_TYPE:
                return zmq_incoming_message.ZmqIncomingRequest(
                    socket, reply_id, request, self.poller)
            elif request.msg_type in zmq_names.NON_BLOCKING_TYPES:
                return RouterIncomingMessage(
                    request.context, request.message, socket, reply_id,
                    request.message_id, self.poller)
            else:
                LOG.error(_LE("Unknown message type: %s"), request.msg_type)
        except zmq.ZMQError as e:
            LOG.error(_LE("Receiving message failed: %s"), str(e))
apache-2.0
3,290,392,908,728,405,500
37.727273
78
0.6265
false
3.968944
false
false
false
stormi/tsunami
src/primaires/objet/commandes/remplir/__init__.py
1
4652
# -*-coding:Utf-8 -* # Copyright (c) 2010 LE GOFF Vincent # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT # OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
"""Package contenant la commande 'remplir'.""" from primaires.interpreteur.commande.commande import Commande class CmdRemplir(Commande): """Commande 'remplir'""" def __init__(self): """Constructeur de la commande""" Commande.__init__(self, "remplir", "fill") self.nom_categorie = "objets" self.schema = "<plat:nom_objet> avec/with (<nombre>) <nom_objet>" self.aide_courte = "remplit un plat de nourriture" self.aide_longue = \ "Cette commande permet de manipuler des plats (assiette, " \ "bol voire poêlon, marmite) en y mettant des objets de type " \ "nourriture. Un repas pris de cette manière sera meilleur " \ "et plus nourrissant." def ajouter(self): """Méthode appelée lors de l'ajout de la commande à l'interpréteur""" nom_objet = self.noeud.get_masque("nom_objet") nom_objet.proprietes["conteneurs"] = \ "(personnage.equipement.inventaire_simple.iter_objets_qtt(" \ "True), )" nom_objet.proprietes["quantite"] = "True" nom_objet.proprietes["conteneur"] = "True" plat = self.noeud.get_masque("plat") plat.prioritaire = True plat.proprietes["conteneurs"] = \ "(personnage.equipement.inventaire, " \ "personnage.salle.objets_sol)" plat.proprietes["types"] = "('conteneur de nourriture', )" def interpreter(self, personnage, dic_masques): """Méthode d'interprétation de commande""" personnage.agir("poser") nombre = 1 if dic_masques["nombre"]: nombre = dic_masques["nombre"].nombre objets = list(dic_masques["nom_objet"].objets_qtt_conteneurs)[:nombre] dans = dic_masques["plat"].objet pose = 0 poids_total = dans.poids for objet, qtt, conteneur in objets: if not objet.peut_prendre: personnage << "Vous ne pouvez pas prendre {} avec vos " \ "mains...".format(objet.get_nom()) return if not objet.est_de_type("nourriture"): personnage << "|err|Ceci n'est pas de la nourriture.|ff|" return poids_total += objet.poids if poids_total > dans.poids_max: if pose == 0: personnage << "Vous ne pouvez rien y poser de plus." 
return else: break pose += 1 if qtt > nombre: qtt = nombre conteneur.retirer(objet, qtt) dans.nourriture.append(objet) if pose < qtt: pose = qtt personnage << "Vous déposez {} dans {}.".format( objet.get_nom(pose), dans.nom_singulier) personnage.salle.envoyer("{{}} dépose {} dans {}.".format( objet.get_nom(pose), dans.nom_singulier), personnage)
bsd-3-clause
-5,609,864,221,507,276,000
41.981481
79
0.615252
false
3.595662
false
false
false
SVilgelm/CloudFerry
cloudferry/lib/base/action/is_end_iter.py
1
1174
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.


from cloudferry.lib.base.action import action
from cloudferry.lib.utils import utils as utl


class IsEndIter(action.Action):
    """Action that checks whether an iteration info dict still has items.

    After run() executes, ``num_element`` is 1 when the resource list found
    under ``iter_info_name``/``resource_name`` is non-empty, and 0 when the
    iteration is exhausted.
    """

    def __init__(self, init,
                 iter_info_name='info_iter',
                 resource_name=utl.INSTANCES_TYPE):
        # Keys used to locate the iteration state inside run()'s kwargs.
        self.iter_info_name = iter_info_name
        self.resource_name = resource_name
        super(IsEndIter, self).__init__(init)

    def run(self, **kwargs):
        """Set num_element from the truthiness of the remaining resources."""
        remaining = kwargs[self.iter_info_name][self.resource_name]
        self.num_element = 1 if remaining else 0
        return {}
apache-2.0
8,738,482,862,329,376,000
32.542857
70
0.682283
false
3.799353
false
false
false
punchagan/zulip
zerver/webhooks/pagerduty/view.py
1
7008
# Webhooks for external integrations.
from typing import Any, Dict, Sequence

from django.http import HttpRequest, HttpResponse

from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile

# Mapping of PagerDuty v1 webhook "type" values to past-tense verbs used in
# the rendered Zulip message.
PAGER_DUTY_EVENT_NAMES = {
    "incident.trigger": "triggered",
    "incident.acknowledge": "acknowledged",
    "incident.unacknowledge": "unacknowledged",
    "incident.resolve": "resolved",
    "incident.assign": "assigned",
    "incident.escalate": "escalated",
    "incident.delegate": "delineated",
}

# Same mapping for PagerDuty v2 webhook "event" values (a smaller set).
PAGER_DUTY_EVENT_NAMES_V2 = {
    "incident.trigger": "triggered",
    "incident.acknowledge": "acknowledged",
    "incident.resolve": "resolved",
    "incident.assign": "assigned",
}

ASSIGNEE_TEMPLATE = "[{username}]({url})"

INCIDENT_WITH_SERVICE_AND_ASSIGNEE = (
    "Incident [{incident_num}]({incident_url}) {action} by [{service_name}]"
    "({service_url}) (assigned to {assignee_info}):\n\n``` quote\n{trigger_message}\n```"
)

INCIDENT_WITH_ASSIGNEE = """
Incident [{incident_num}]({incident_url}) {action} by {assignee_info}:

``` quote
{trigger_message}
```
""".strip()

INCIDENT_ASSIGNED = """
Incident [{incident_num}]({incident_url}) {action} to {assignee_info}:

``` quote
{trigger_message}
```
""".strip()

INCIDENT_RESOLVED_WITH_AGENT = """
Incident [{incident_num}]({incident_url}) resolved by {resolving_agent_info}:

``` quote
{trigger_message}
```
""".strip()

INCIDENT_RESOLVED = """
Incident [{incident_num}]({incident_url}) resolved:

``` quote
{trigger_message}
```
""".strip()


def build_pagerduty_formatdict(message: Dict[str, Any]) -> Dict[str, Any]:
    """Extract template fields from a PagerDuty v1 webhook message.

    Returns the dict of substitutions used by the INCIDENT_* templates.
    """
    format_dict: Dict[str, Any] = {}
    format_dict["action"] = PAGER_DUTY_EVENT_NAMES[message["type"]]

    format_dict["incident_id"] = message["data"]["incident"]["id"]
    format_dict["incident_num"] = message["data"]["incident"]["incident_number"]
    format_dict["incident_url"] = message["data"]["incident"]["html_url"]

    format_dict["service_name"] = message["data"]["incident"]["service"]["name"]
    format_dict["service_url"] = message["data"]["incident"]["service"]["html_url"]

    if message["data"]["incident"].get("assigned_to_user", None):
        assigned_to_user = message["data"]["incident"]["assigned_to_user"]
        format_dict["assignee_info"] = ASSIGNEE_TEMPLATE.format(
            # v1 payloads expose an email; use the local part as the username.
            username=assigned_to_user["email"].split("@")[0],
            url=assigned_to_user["html_url"],
        )
    else:
        format_dict["assignee_info"] = "nobody"

    if message["data"]["incident"].get("resolved_by_user", None):
        resolved_by_user = message["data"]["incident"]["resolved_by_user"]
        format_dict["resolving_agent_info"] = ASSIGNEE_TEMPLATE.format(
            username=resolved_by_user["email"].split("@")[0],
            url=resolved_by_user["html_url"],
        )

    # Combine subject and description (either may be missing) into the
    # quoted trigger message.
    trigger_message = []
    trigger_summary_data = message["data"]["incident"]["trigger_summary_data"]
    if trigger_summary_data is not None:
        trigger_subject = trigger_summary_data.get("subject", "")
        if trigger_subject:
            trigger_message.append(trigger_subject)

        trigger_description = trigger_summary_data.get("description", "")
        if trigger_description:
            trigger_message.append(trigger_description)

    format_dict["trigger_message"] = "\n".join(trigger_message)
    return format_dict


def build_pagerduty_formatdict_v2(message: Dict[str, Any]) -> Dict[str, Any]:
    """Extract template fields from a PagerDuty v2 webhook message."""
    format_dict = {}
    format_dict["action"] = PAGER_DUTY_EVENT_NAMES_V2[message["event"]]

    format_dict["incident_id"] = message["incident"]["id"]
    format_dict["incident_num"] = message["incident"]["incident_number"]
    format_dict["incident_url"] = message["incident"]["html_url"]

    format_dict["service_name"] = message["incident"]["service"]["name"]
    format_dict["service_url"] = message["incident"]["service"]["html_url"]

    assignments = message["incident"]["assignments"]
    if assignments:
        # Only the first assignee is shown, even for multi-assignee incidents.
        assignee = assignments[0]["assignee"]
        format_dict["assignee_info"] = ASSIGNEE_TEMPLATE.format(
            username=assignee["summary"], url=assignee["html_url"]
        )
    else:
        format_dict["assignee_info"] = "nobody"

    last_status_change_by = message["incident"].get("last_status_change_by")
    if last_status_change_by is not None:
        format_dict["resolving_agent_info"] = ASSIGNEE_TEMPLATE.format(
            username=last_status_change_by["summary"],
            url=last_status_change_by["html_url"],
        )

    trigger_description = message["incident"].get("description")
    if trigger_description is not None:
        format_dict["trigger_message"] = trigger_description
    return format_dict


# NOTE(review): "formated" is a typo for "formatted", but the name is part of
# this module's interface; renaming would be a separate refactor.
def send_formated_pagerduty(
    request: HttpRequest, user_profile: UserProfile, message_type: str, format_dict: Dict[str, Any]
) -> None:
    """Pick the message template for the event type and send it to Zulip."""
    if message_type in ("incident.trigger", "incident.unacknowledge"):
        template = INCIDENT_WITH_SERVICE_AND_ASSIGNEE
    elif message_type == "incident.resolve" and format_dict.get("resolving_agent_info") is not None:
        template = INCIDENT_RESOLVED_WITH_AGENT
    elif message_type == "incident.resolve" and format_dict.get("resolving_agent_info") is None:
        template = INCIDENT_RESOLVED
    elif message_type == "incident.assign":
        template = INCIDENT_ASSIGNED
    else:
        template = INCIDENT_WITH_ASSIGNEE

    subject = "Incident {incident_num}".format(**format_dict)
    body = template.format(**format_dict)
    check_send_webhook_message(request, user_profile, subject, body)


@webhook_view("PagerDuty")
@has_request_variables
def api_pagerduty_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: Dict[str, Sequence[Dict[str, Any]]] = REQ(argument_type="body"),
) -> HttpResponse:
    """Entry point for PagerDuty webhooks; handles both v1 and v2 payloads.

    The payload version is detected per message: v1 messages carry a "type"
    key, v2 messages carry an "event" key. Each loop bails out on the first
    message that belongs to the other version.
    """
    for message in payload["messages"]:
        message_type = message.get("type")

        # If the message has no "type" key, then this payload came from a
        # Pagerduty Webhook V2.
        if message_type is None:
            break

        if message_type not in PAGER_DUTY_EVENT_NAMES:
            raise UnsupportedWebhookEventType(message_type)

        format_dict = build_pagerduty_formatdict(message)
        send_formated_pagerduty(request, user_profile, message_type, format_dict)

    for message in payload["messages"]:
        event = message.get("event")

        # If the message has no "event" key, then this payload came from a
        # Pagerduty Webhook V1.
        if event is None:
            break

        if event not in PAGER_DUTY_EVENT_NAMES_V2:
            raise UnsupportedWebhookEventType(event)

        format_dict = build_pagerduty_formatdict_v2(message)
        send_formated_pagerduty(request, user_profile, event, format_dict)

    return json_success()
apache-2.0
2,138,208,620,822,185,200
34.21608
100
0.661244
false
3.604938
false
false
false
peterkuma/tjrapid
ob/views.py
1
1560
# -*- coding: utf-8 -*-

import os

from django.shortcuts import render
from django.template import RequestContext
from django.utils import translation
from django.shortcuts import get_object_or_404
from django.http import Http404, HttpResponseRedirect

from main.models import *
from ob.models import *

# BUG FIX: `attachment` below uses os.path.basename, but `os` was never
# imported explicitly — it only worked if one of the wildcard imports above
# happened to re-export it. Import it directly to be safe.


def events(request, category_name):
    """List all events that belong to the given category."""
    category = get_object_or_404(Category, name=category_name)
    events = Event.objects.filter(category=category)
    return render(request, 'ob/events.html', {
        'events': events,
        'category': category,
        },
        RequestContext(request)
    )


def event(request, name, category_name):
    """Show the detail page for a single event within a category."""
    category = get_object_or_404(Category, name=category_name)
    event = get_object_or_404(Event, category=category, name=name)
    return render(request, 'ob/event.html', {
        'event': event,
        'category': category,
        },
        RequestContext(request)
    )


def attachment(request, category_name, event_name, name):
    """Redirect to the stored file of an event attachment by file name.

    Raises Http404 when no attachment of the event matches `name`.
    """
    category = get_object_or_404(Category, name=category_name)
    event = get_object_or_404(Event, category=category, name=event_name)
    # Match on the base name only, since FileField names include the
    # upload directory prefix.
    for a in event.attachments.all():
        if os.path.basename(a.file.name) == name:
            return HttpResponseRedirect(a.file.url)
    raise Http404


def members(request, category_name):
    """Show the members page split into men's (M*) and women's (W*) classes."""
    members_m = Member.objects.filter(category__startswith='M')
    members_w = Member.objects.filter(category__startswith='W')
    category = Category.objects.get(name=category_name)
    return render(request, 'ob/members.html', {
        'members_m': members_m,
        'members_w': members_w,
        'category': category,
        },
        RequestContext(request)
    )
mit
-8,542,457,093,815,724,000
25.896552
69
0.730128
false
3.203285
false
false
false
rlbabyuk/integration_tests
cfme/scripting/ipyshell.py
1
1483
# -*- coding: utf-8 -*-
import sys

from . import quickstart
from IPython.terminal.interactiveshell import TerminalInteractiveShell

IMPORTS = [
    'from utils import conf',
    'from fixtures.pytest_store import store',
    'from utils.appliance.implementations.ui import navigate_to',
    'from utils import providers'
]


def main():
    """Use quickstart to ensure we have correct env, then execute imports in ipython and done."""
    quickstart.main(quickstart.parser.parse_args(['--mk-virtualenv', sys.prefix]))
    print('Welcome to IPython designed for running CFME QE code.')
    shell = TerminalInteractiveShell.instance()
    # Pre-load the standard set of convenience imports, echoing each one.
    for statement in IMPORTS:
        print('> {}'.format(statement))
        shell.run_cell(statement)
    from utils.path import conf_path
    startup_file = conf_path.join('miq_python_startup.py')
    if not startup_file.exists():
        print(
            'You can create your own python file with imports you use frequently. '
            'Just create a conf/miq_python_startup.py file in your repo. '
            'This file can contain arbitrary python code that is executed in this context.')
    else:
        # Execute the user's optional startup file inside the shell context.
        with open(startup_file.strpath, 'r') as handle:
            startup_code = handle.read()
        print('Importing custom code:\n{}'.format(startup_code.strip()))
        shell.run_cell(startup_code)
    shell.interact()


if __name__ == '__main__':
    main()
gpl-2.0
-2,106,811,254,778,584,000
37.025641
97
0.675657
false
3.872063
false
false
false
google-research/google-research
enas_lm/src/tpu/data_utils.py
1
3091
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data input pipeline for TPUs."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pickle

import numpy as np
import tensorflow.compat.v1 as tf

gfile = tf.gfile


def _pad_to_batch(batch_size, data):
  """Pad `Tensor`s in data so that `N == batch_size` and return `mask`."""
  x = data['x']
  curr_batch_size = tf.shape(x)[0]
  # NOTE(review): `curr_batch_size` is a Tensor, so this Python-level `==`
  # against an int likely never evaluates True in TF1 graph mode; the
  # padding branch below handles the equal-size case correctly anyway
  # (batch_diff == 0) — confirm before relying on this early return.
  if curr_batch_size == batch_size:
    masks = tf.ones([batch_size], dtype=tf.float32)
    return data, masks
  batch_diff = batch_size - curr_batch_size
  padded_data = {}
  for key, val in data.items():
    # Pad only the leading (batch) dimension; all trailing dims unchanged.
    val = tf.pad(val, [[0, batch_diff]] + [[0, 0]] * (val.shape.ndims - 1))
    val.set_shape([batch_size] + val.shape.as_list()[1:])
    padded_data[key] = val
  # Mask is 1.0 for real rows, 0.0 for padding rows.
  masks = tf.pad(tf.ones([curr_batch_size], dtype=tf.float32),
                 [[0, batch_diff]])
  masks.set_shape([batch_size])
  return padded_data, masks


def input_fn(params):
  """For `TPUEstimator`."""
  # The pickle holds (train, valid, test, _, _) token-id arrays.
  with gfile.GFile(params.data_path, 'rb') as finp:
    x_train, x_valid, x_test, _, _ = pickle.load(finp)
    tf.logging.info('-' * 80)
    tf.logging.info('train_size: {0}'.format(np.size(x_train)))
    tf.logging.info('valid_size: {0}'.format(np.size(x_valid)))
    tf.logging.info(' test_size: {0}'.format(np.size(x_test)))

  def _build_dataset(data, batch_size, bptt_steps):
    """Create LM dataset from a `data` tensor."""
    # Truncate to a multiple of batch_size, reshape to [batch, time] and
    # transpose so that consecutive dataset elements are consecutive
    # time steps (standard truncated-BPTT layout).
    num_batches = np.size(data) // batch_size
    data = np.reshape(data[:batch_size*num_batches],
                      [batch_size, num_batches])
    data = np.transpose(data)
    # Language-model targets: y is x shifted by one time step.
    dataset = tf.data.Dataset.from_tensor_slices({'x': data[:-1],
                                                  'y': data[1:]})
    dataset = dataset.repeat()
    dataset = dataset.batch(batch_size=bptt_steps, drop_remainder=True)

    def pad_to_batch(data):
      padded_data, masks = _pad_to_batch(bptt_steps, data)
      return padded_data, masks
    dataset = dataset.map(map_func=pad_to_batch)
    dataset = dataset.prefetch(2)  # Prefetch overlaps in-feed with training
    return dataset

  if params.task_mode == 'train':
    return _build_dataset(x_train, params.train_batch_size, params.bptt_steps)
  elif params.task_mode == 'valid':
    return _build_dataset(x_valid, params.eval_batch_size, params.bptt_steps)
  elif params.task_mode == 'test':
    return _build_dataset(x_test, params.eval_batch_size, params.bptt_steps)
  else:
    raise ValueError('Unknown task_mode {0}'.format(params.task_mode))
apache-2.0
5,911,740,309,309,060,000
36.240964
79
0.664833
false
3.250263
false
false
false