import logging
import json
from webob import Response
import time
from threading import Timer
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.controller import dpset
# from ryu.app.wsgi import ControllerBase, WSGIApplication
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import arp
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_0, ofproto_v1_3
from ryu.lib import dpid as dpid_lib
from ryu.lib.packet.lldp import LLDP_MAC_NEAREST_BRIDGE
from ryu.lib import hub
import os.path

OFP_HOST_SWITCHES_LIST = \
    './network-data/ofp_host_switches_list.db'


class HostTracker(app_manager.RyuApp):
    def __init__(self, *args, **kwargs):
        super(HostTracker, self).__init__(*args, **kwargs)
        self.hosts = {}
        self.routers = []
        self.IDLE_TIMEOUT = 300
        self.count = 0
        self.host_switch_file_update = hub.spawn(self._update)
        Timer(self.IDLE_TIMEOUT, self.expireHostEntries).start()

    def _update(self):
        # wait for around 10s until all the switches have connected to the controller
        self._update_host_switch_file()
        hub.sleep(2)
        while True:
            self._update_host_switch_file()
            hub.sleep(5)

    def _update_host_switch_file(self):
        # if os.path.exists(OFP_HOST_SWITCHES_LIST):
        #     print "**"*20
        with open(OFP_HOST_SWITCHES_LIST, 'w') as outp:
            for srcIP, val in self.hosts.items():
                # print srcIP, val['dpid']
                outp.write("%s %s\n" % (srcIP, val['dpid']))

    def expireHostEntries(self):
        expiredEntries = []
        for key, val in self.hosts.iteritems():
            if int(time.time()) > val['timestamp'] + self.IDLE_TIMEOUT:
                expiredEntries.append(key)
        for ip in expiredEntries:
            del self.hosts[ip]
        Timer(self.IDLE_TIMEOUT, self.expireHostEntries).start()

    # The hypothesis is that a router will be the srcMAC
    # for many IP addresses at the same time
    def isRouter(self, mac):
        if mac in self.routers:
            return True
        ip_list = []
        for key, val in self.hosts.iteritems():
            if val['mac'] == mac:
                ip_list.append(key)
        if len(ip_list) > 1:
            for ip in ip_list:
                del self.hosts[ip]
            self.routers.append(mac)
            return True
        return False

    def updateHostTable(self, srcIP, dpid, port):
        self.hosts[srcIP]['timestamp'] = int(time.time())
        if 'dpid' not in self.hosts[srcIP]:
            self.hosts[srcIP]['dpid'] = dpid
        elif self.hosts[srcIP]['dpid'] != dpid:
            pass
        self.hosts[srcIP]['port'] = port

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        dst = eth.dst
        if dst != LLDP_MAC_NEAREST_BRIDGE:
            # print "LLDP Packet:"
            # self.logger.info("packet in %s %s %s %s", datapath.id, eth.src, eth.dst, in_port)
            if eth.ethertype == ether.ETH_TYPE_ARP:
                arp_pkt = pkt.get_protocols(arp.arp)[0]
                srcMac = arp_pkt.src_mac
                srcIP = arp_pkt.src_ip
            elif eth.ethertype == ether.ETH_TYPE_IP:
                ip = pkt.get_protocols(ipv4.ipv4)[0]
                srcMac = eth.src
                srcIP = ip.src
            else:
                return
            if self.isRouter(srcMac):
                return
            if srcIP not in self.hosts:
                self.hosts[srcIP] = {}
            # Always update MAC and switch-port location, just in case
            # DHCP reassigned the IP or the host moved
            self.hosts[srcIP]['mac'] = srcMac
            self.updateHostTable(srcIP, dpid_lib.dpid_to_str(datapath.id), in_port)
            # print "host:", self.hosts
            # self._update_host_switch_file()
            # self.count += 1
            # print "router:", self.routers
{ "content_hash": "341b48eb7d3a1310c5875c4c8b8b7dbb", "timestamp": "", "source": "github", "line_count": 136, "max_line_length": 99, "avg_line_length": 32.5514705882353, "alnum_prop": 0.5873051728032528, "repo_name": "umkcdcrg01/ryu_openflow", "id": "bdb3b85725935049a0fef68b07f447a5e7d9392a", "size": "4893", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ryu/app/host_tracker.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "7031" }, { "name": "CSS", "bytes": "6619" }, { "name": "Erlang", "bytes": "871862" }, { "name": "HTML", "bytes": "29323" }, { "name": "JavaScript", "bytes": "70185" }, { "name": "Makefile", "bytes": "1213" }, { "name": "Python", "bytes": "6641501" }, { "name": "Shell", "bytes": "33195" } ], "symlink_target": "" }
from swgpy.object import *

def create(kernel):
	result = Intangible()

	result.template = "object/draft_schematic/furniture/shared_furniture_armoire_modern.iff"
	result.attribute_template_id = -1
	result.stfName("string_id_table","")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result
{ "content_hash": "71de1fa94a38228290752319a48a6027", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 89, "avg_line_length": 24.46153846153846, "alnum_prop": 0.7044025157232704, "repo_name": "obi-two/Rebelion", "id": "d0a2193c425b707be51f51187e992b361c71a6b8", "size": "463", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "data/scripts/templates/object/draft_schematic/furniture/shared_furniture_armoire_modern.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11818" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2293610" }, { "name": "CMake", "bytes": "39727" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7499185" }, { "name": "SQLPL", "bytes": "41864" } ], "symlink_target": "" }
from ctypes import CDLL
import itertools as it
import os
import platform
import shlex
import tempfile

from appdirs import user_cache_dir
from pytools.prefork import call_capture_output

from pyfr.ctypesutil import platform_libname
from pyfr.nputil import npdtype_to_ctypestype
from pyfr.util import digest, lazyprop, mv, rm


class SourceModule(object):
    _dir_seq = it.count()

    def __init__(self, src, cfg):
        # Find GCC (or a compatible alternative)
        self.cc = cfg.getpath('backend-openmp', 'cc', 'cc')

        # User specified compiler flags
        self.cflags = shlex.split(cfg.get('backend-openmp', 'cflags', ''))

        # Get the processor string
        proc = platform.processor()

        # Get the compiler version string
        version = call_capture_output([self.cc, '-v'])

        # Get the base compiler command string
        cmd = self.cc_cmd(None, None)

        # Compute a digest of the current processor, compiler, and source
        self.digest = digest(proc, version, cmd, src)

        # Attempt to load the library from the cache
        self.mod = self._cache_loadlib()

        # Otherwise, we need to compile the kernel
        if not self.mod:
            # Create a scratch directory
            tmpidx = next(self._dir_seq)
            tmpdir = tempfile.mkdtemp(prefix='pyfr-{0}-'.format(tmpidx))

            try:
                # Compile and link the source into a shared library
                cname, lname = 'tmp.c', platform_libname('tmp')

                # Write the source code out
                with open(os.path.join(tmpdir, cname), 'w') as f:
                    f.write(src)

                # Invoke the compiler
                call_capture_output(self.cc_cmd(cname, lname), cwd=tmpdir)

                # Determine the fully qualified library name
                lpath = os.path.join(tmpdir, lname)

                # Add it to the cache and load
                self.mod = self._cache_set_and_loadlib(lpath)
            finally:
                # Unless we're debugging delete the scratch directory
                if 'PYFR_DEBUG_OMP_KEEP_LIBS' not in os.environ:
                    rm(tmpdir)

    def cc_cmd(self, srcname, libname):
        cmd = [
            self.cc,                 # Compiler name
            '-shared',               # Create a shared library
            '-std=c99',              # Enable C99 support
            '-Ofast',                # Optimise, incl. -ffast-math
            '-march=native',         # Use CPU-specific instructions
            '-fopenmp',              # Enable OpenMP support
            '-fPIC',                 # Generate position-independent code
            '-o', libname, srcname,  # Library and source file names
            '-lm'                    # Link against libm
        ]

        # Append any user-provided arguments and return
        return cmd + self.cflags

    @lazyprop
    def cachedir(self):
        return os.environ.get('PYFR_OMP_CACHE_DIR',
                              user_cache_dir('pyfr', 'pyfr'))

    def _cache_loadlib(self):
        # If caching is disabled then return
        if 'PYFR_DEBUG_OMP_DISABLE_CACHE' in os.environ:
            return
        # Otherwise, check the cache
        else:
            # Determine the cached library name
            clname = platform_libname(self.digest)

            # Attempt to load the library
            try:
                return CDLL(os.path.join(self.cachedir, clname))
            except OSError:
                return

    def _cache_set_and_loadlib(self, lpath):
        # If caching is disabled then just load the library as-is
        if 'PYFR_DEBUG_OMP_DISABLE_CACHE' in os.environ:
            return CDLL(lpath)
        # Otherwise, move the library into the cache and load
        else:
            # Determine the cached library name and path
            clname = platform_libname(self.digest)
            clpath = os.path.join(self.cachedir, clname)

            try:
                # Ensure the cache directory exists
                os.makedirs(self.cachedir, exist_ok=True)

                # Attempt to move the library to cache dir
                mv(lpath, clpath)
            # If an exception is raised, load from the original path
            except OSError:
                return CDLL(lpath)
            # Otherwise, load from the cache dir
            else:
                return CDLL(clpath)

    def function(self, name, restype, argtypes):
        # Get the function
        fn = getattr(self.mod, name)
        fn.restype = npdtype_to_ctypestype(restype)
        fn.argtypes = [npdtype_to_ctypestype(a) for a in argtypes]

        return fn
{ "content_hash": "c062541110d8a8c0317b5274193de5a9", "timestamp": "", "source": "github", "line_count": 133, "max_line_length": 74, "avg_line_length": 35.1203007518797, "alnum_prop": 0.5626204238921002, "repo_name": "BrianVermeire/PyFR", "id": "0f364c5dc55cc78cee04d6013306efaa717d501d", "size": "4696", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "pyfr/backends/openmp/compiler.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Mako", "bytes": "84519" }, { "name": "Python", "bytes": "422730" } ], "symlink_target": "" }
import base64
import time
import uuid
import importlib

from flask import Flask
from flask import render_template, request, make_response, redirect, g
from Crypto.Cipher import DES
from Crypto.Util.Padding import pad, unpad
import jwt

BLOCK_SIZE = 32  # Bytes
cookies_max_age = 7 * 24 * 60 * 60  # 1 week

app = Flask(__name__)
app.config.from_object('nginxauthdaemon.config.DefaultConfig')
app.config.from_envvar('DAEMON_SETTINGS', True)

custom_auth_url_prefix = app.config['AUTH_URL_PREFIX']


def get_authenticator():
    auth = getattr(g, '_authenticator', None)
    if auth is None:
        class_name = app.config['AUTHENTICATOR']
        parts = class_name.split('.')
        module = importlib.import_module(".".join(parts[:-1]))
        cls = getattr(module, parts[-1])
        auth = g._authenticator = cls(app.config)
    return auth


def decode_basic(auth_value):
    if auth_value is None or auth_value == '':
        return None
    try:
        auth_decoded = base64.b64decode(auth_value).decode("utf-8")
        return auth_decoded.split(':', 2)
    except TypeError as e:
        app.logger.warn("Parsing decoded basic value %s failed. Value ignored" % auth_value, e)
        return None


def parse_authorization(original_value):
    """Parse 'Authorization' header and return (user, password) tuple for success and None for failure."""
    if original_value is None:
        return None
    if original_value.startswith("Basic "):
        return decode_basic(original_value[6:])
    app.logger.warn("Ignored unsupported authorization header %s" % original_value)
    return None


def create_session_cookie(username):
    """Create session cookie. Returns string"""
    des = DES.new(bytes(app.config['DES_KEY'], encoding="raw_unicode_escape"), DES.MODE_ECB)
    clear_text = username + app.config['SESSION_SALT']
    return base64.encodestring(des.encrypt(pad(clear_text.encode('utf-8'), BLOCK_SIZE)))


def create_access_token_cookie(username):
    """Create access token. Returns string"""
    jwtPrivateKey = app.config['JWT_PRIVATE_KEY']
    now = int(time.time())
    expiresAt = now + cookies_max_age  # seconds
    payload = {'jti': str(uuid.uuid4()), 'iat': now, 'nbf': 0, 'iss': 'realm://crowd-ldap',
               'real-issuer': 'crowd-ldap', 'exp': expiresAt, 'realm_access': {'roles': []},
               'user_id': username, 'typ': 'Bearer'}
    return jwt.encode(payload, jwtPrivateKey, algorithm='RS256')


def decode_session_cookie(cookie):
    """Decode session cookie and return user name"""
    try:
        encrypted = base64.decodestring(bytes(cookie, 'utf-8'))
        des = DES.new(bytes(app.config['DES_KEY'], encoding="raw_unicode_escape"), DES.MODE_ECB)
        decrypted = unpad(des.decrypt(encrypted).rstrip(), BLOCK_SIZE)
        session_salt = app.config['SESSION_SALT']
        if decrypted[-len(session_salt):].decode("utf-8") == session_salt:
            return decrypted[:-len(session_salt)]
        return None
    except:
        return None


@app.route(custom_auth_url_prefix + '/login', methods=['GET', 'POST'])
def show_login():
    if request.method == 'GET':
        target = request.headers.get(app.config['TARGET_HEADER'])
        return render_template('login.html', realm=app.config['REALM_NAME'], target=target)
    else:
        # check user name and password
        username = request.form.get('user')
        password = request.form.get('pass')
        target = request.form.get('target')
        if username is not None and get_authenticator().authenticate(username, password):
            resp = redirect(target)
            if target == custom_auth_url_prefix + '/login':
                resp = redirect("/")
            resp.set_cookie(app.config['SESSION_COOKIE'], create_session_cookie(username), max_age=cookies_max_age)
            resp.set_cookie(app.config['ACCESS_TOKEN_COOKIE'], create_access_token_cookie(username), max_age=cookies_max_age)
            return resp
        else:
            return render_template('login.html', realm=app.config['REALM_NAME'],
                                   error="Please check user name and password"), 401


@app.route(custom_auth_url_prefix + '/validate', methods=['GET'])
def validate():
    # check session
    session_cookie = request.cookies.get(app.config['SESSION_COOKIE'])
    if session_cookie is not None:
        username = decode_session_cookie(session_cookie)
        if username is not None:
            # seems username is right
            return "Session verified"

    # check Authorization header
    user_and_password = request.headers.get('Authorization', type=parse_authorization)
    if user_and_password is None:
        # neither header nor cookie fits, return 401
        resp = make_response('Unauthorized', 401)
        resp.headers['WWW-Authenticate'] = 'Basic realm=' + app.config['REALM_NAME']
        resp.headers['Cache-Control'] = 'no-cache'
        return resp

    if get_authenticator().authenticate(user_and_password[0], user_and_password[1]):
        resp = make_response("Username/password verified")
        username = user_and_password[0]
        resp.set_cookie(app.config['SESSION_COOKIE'], create_session_cookie(username), max_age=cookies_max_age)
        resp.set_cookie(app.config['ACCESS_TOKEN_COOKIE'], create_access_token_cookie(username), max_age=cookies_max_age)
        return resp
    else:
        resp = make_response("Username/password failed", 401)
        resp.headers['WWW-Authenticate'] = 'Basic realm=' + app.config['REALM_NAME']
        resp.headers['Cache-Control'] = 'no-cache'
        return resp


if __name__ == '__main__':
    app.run('localhost', 5000, debug=True)
{ "content_hash": "79a35d0c6e227ec66e0cea35ab5b713f", "timestamp": "", "source": "github", "line_count": 136, "max_line_length": 207, "avg_line_length": 41.0735294117647, "alnum_prop": 0.6577157178660938, "repo_name": "akurdyukov/nginxauthdaemon", "id": "bf342e5a8f8110e9955c054121307e54f4529202", "size": "5586", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nginxauthdaemon/nginxauthdaemon.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "3234" }, { "name": "Python", "bytes": "7738" } ], "symlink_target": "" }
from .adapt_service import AdaptService, AdaptIntent
from .base import IntentMatch
from .fallback_service import FallbackService
from .padatious_service import PadatiousService, PadatiousMatcher
{ "content_hash": "b9e7bd7c4324d97c60643e9bd570f156", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 65, "avg_line_length": 48.75, "alnum_prop": 0.8615384615384616, "repo_name": "forslund/mycroft-core", "id": "8603aad314d4602a94bd3b7e3f89104f4244ab9a", "size": "195", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "mycroft/skills/intent_services/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "3791" }, { "name": "Python", "bytes": "1371642" }, { "name": "QML", "bytes": "18805" }, { "name": "Shell", "bytes": "83796" } ], "symlink_target": "" }
"""Computational environment validation script for the 2014 SciPy Conference Tutorial: Reproducible Research: Walking the Walk. https://github.com/reproducible-research/scipy-tutorial-2014 """ import sys import subprocess return_value = 0 required_packages = ['numpy', 'scipy', 'matplotlib', 'SimpleITK'] for package in required_packages: print('Importing ' + package + ' ...') try: __import__(package, globals(), locals(), [], 0) except ImportError: print('Error: could not import ' + package) return_value += 1 print('') required_executables = ['git', 'dexy', 'ipython', 'latex', 'nosetests', 'mplayer'] for executable in required_executables: print('Executing ' + executable + ' ...') try: process = subprocess.Popen([executable, '--help'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE) process.wait() except OSError: print('Error: could not execute ' + executable) return_value += 1 if return_value is 0: print('\nSuccess.') else: print('\nA defect was found in your environment, please see the messages ' + 'above.') sys.exit(return_value)
{ "content_hash": "8330aec4ce010a923bdacbd2fdd85fea", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 86, "avg_line_length": 26.3125, "alnum_prop": 0.6033254156769596, "repo_name": "cpcloud/scipy-tutorial-2014", "id": "451e755ab4158ae5a6e9ccc728e498feb709e1f3", "size": "1286", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "environment/check_env.py", "mode": "33261", "license": "apache-2.0", "language": [], "symlink_target": "" }
import os import praw import time import sqlite3 import traceback from random import randint from random import choice print ("\n\nOpening database...") #SQL database setup/ignition. sql = sqlite3.connect('sql.db') cur = sql.cursor() cur.execute('CREATE TABLE IF NOT EXISTS cid_storage(ID TEXT)') cur.execute('CREATE TABLE IF NOT EXISTS ignore_authors(ID TEXT)') sql.commit() #User's username and password. Username = "" #: Enter your own username here. Password = "" #: Enter your own password here. #What reddit sees from the bot's requests. user_agent = "" #: Enter your own user_agent here. r = praw.Reddit(user_agent = user_agent) print("\n\nLogging in...\n\n") r.login(Username, Password) #List of gold facts. goldFactsList = [] #: Shh, secret. Amount of gold facts are hidden for . . . well, to keep private. #Set of variables for program. print ("Arranging variables...\n\n") commentNum = 0 ignore_requests_string = "ignore-/u/goldfact" obey_requests_string = "obey-/u/goldfact" #Message/link variables. ignore_message = "https://www.reddit.com/message/compose/?to=GoldFact&subject=Ignore-/u/GoldFact.&message=ignore-/u/goldfact" obey_message = "https://www.reddit.com/message/compose/?to=GoldFact&subject=Obey-/u/GoldFact.&message=obey-/u/goldfact" source_link = "https://github.com/Saroekin/GoldFact" pm_link = "https://www.reddit.com/message/compose/?to=Saroekin&subject=/u/GoldFact" info_post = "http://redd.it/339ec5" #Templates for messages and comments (and variables). MENTION_TEMPLATE_FACT = """ It looks as though I've been summoned! Here's the gold fact as you've requested: >%s --- ^I ^am ^a ^bot. ^If ^you ^have ^any ^questions ^or ^requests, ^please ^contact ^my ^[[creator]({pm_link})]. ^If ^you ^would ^like ^to ^read ^or ^learn ^more ^about ^my ^functionalities, ^please ^head ^over ^to ^this ^[[post]({info_post})]. = ^| ^[[Ignore]({ignore_message})] ^| ^[[Obey]({obey_message})] ^| ^[[Source]({source_link})] ^| """.format(pm_link = pm_link, info_post=info_post, ignore_message=ignore_message, obey_message=obey_message, source_link=source_link) COMMENT_TEMPLATE_FACT = """ Hello there ol' chap! It seems to me that you've been gilded, therefore congratulations! Here's a gold fact to celebrate: >%s --- ^I ^am ^a ^bot. ^If ^you ^have ^any ^questions ^or ^requests, ^please ^contact ^my ^[[creator]({pm_link})]. ^If ^you ^would ^like ^to ^read ^or ^learn ^more ^about ^my ^functionalities, ^please ^head ^over ^to ^this ^[[post]({info_post})]. = ^| ^[[Ignore]({ignore_message})] ^| ^[[Obey]({obey_message})] ^| ^[[Source]({source_link})] ^| """.format(pm_link = pm_link, info_post=info_post, ignore_message=ignore_message, obey_message=obey_message, source_link=source_link) COULD_NOT_REPLY = """ /u/GoldFact couldn't respond towards your message because you have ignored him. If you think this is a mistake, then look upon one of /u/GoldFact's post/comments, and click the "Obey" button formatted near the bottom. --- Tip: If you\'d like to use /u/GoldFact's name without him reacting, then use the command: >n-/u/GoldFact """.format() mentionreply = MENTION_TEMPLATE_FACT commentsubmit = COMMENT_TEMPLATE_FACT notreply = COULD_NOT_REPLY #Function for running (is defining) bot. #In this definition, we are collecting user ignore requests. def ignore_requests(): for message in r.get_unread(): message_text = message.body.lower() if ignore_requests_string not in message_text: continue mauth = message.author.name messageignore = "You have successfully ignored /u/GoldFact." #Checking throught SQL database. 
cur.execute('SELECT * FROM ignore_authors WHERE ID=?', [mauth]) if not cur.fetchone(): if message.subject in ['username mention', 'comment reply'] and type(message) == praw.objects.Comment: r.send_message(mauth, "Ignored /u/GoldFact.", messageignore) elif message.subject == "Ignore-/u/GoldFact." and type(message) == praw.objects.Message: callback = message.reply(messageignore) #Adding authors that wish to be ignored into a database. cur.execute('INSERT INTO ignore_authors VALUES(?)', [mauth]) sql.commit() message.mark_as_read() else: if message.subject in ['username mention', 'comment reply'] and type(message) == praw.objects.Comment: r.send_message(mauth, "Ignored /u/GoldFact.", messageignore) elif message.subject == "Ignore-/u/GoldFact." and type(message) == praw.objects.Message: callback = message.reply(messageignore) message.mark_as_read() #Function for running (is defining) bot. #In this definition, we are reapplying /u/GoldFact towards users who request acknowledgement. def obey_requests(): for message in r.get_unread(): message_text = message.body.lower() if obey_requests_string not in message_text: continue mauth = message.author.name messageobey = "You have successfully stopped ignoring /u/GoldFact." #Checking throught SQL database. cur.execute('SELECT * FROM ignore_authors WHERE ID=?', [mauth]) if cur.fetchone(): if message.subject in ['username mention', 'comment reply'] and type(message) == praw.objects.Comment: r.send_message(mauth, "Acknowledged /u/GoldFact.", messageobey) elif message.subject == "Obey-/u/GoldFact." and type(message) == praw.objects.Message: callback = message.reply(messageobey) #Removing authors that have been entered into the ignored database. cur.execute('DELETE FROM ignore_authors WHERE ID=?', [mauth]) sql.commit() message.mark_as_read() else: if message.subject in ['username mention', 'comment reply'] and type(message) == praw.objects.Comment: r.send_message(mauth, "Acknowledged /u/GoldFact.", messageobey) elif message.subject == "Obey-/u/GoldFact." and type(message) == praw.objects.Message: callback = message.reply(messageobey) message.mark_as_read() #Function for running (is defining) bot. #In this definition, the bot is replying to messages (both username mentions and comment replies). def run_bot_messages(): for message in r.get_unread(): message_text = message.body.lower() try: mauth = message.author.name except AttributeError: message.mark_as_read() continue #Checking throught SQL database. cur.execute('SELECT * FROM ignore_authors WHERE ID=?', [mauth]) if not cur.fetchone(): if message.subject in ['username mention', 'comment reply'] and type(message) == praw.objects.Comment and "n-/u/goldfact" in message_text: message.mark_as_read() elif message.subject == "username mention" and type(message) == praw.objects.Comment: callback = message.reply(mentionreply % choice(goldFactsList)) #Selecting a random gold fact. message.mark_as_read() elif message.subject == "comment reply" and type(message) == praw.objects.Comment and "/u/goldfact" in message_text: callback = message.reply(mentionreply % choice(goldFactsList)) message.mark_as_read() else: if message.subject == "username mention" and type(message) == praw.objects.Comment: r.send_message(mauth, "Error.", notreply) message.mark_as_read() elif message.subject == "comment reply" and type(message) == praw.objects.Comment and "/u/goldfact" in message_text: r.send_message(mauth, "Error.", notreply) message.mark_as_read() #Function for running (is defining) bot. 
#In this definition, the bot is posting/commenting to gilded comments from /r/lounge (and maybe submissions later on). def run_bot_comments_lounge(): subreddit = r.get_subreddit("lounge") comments = subreddit.get_comments(gilded_only=True, limit=100) for comment in comments: cid = comment.id cauth = comment.author.name #Checking throught SQL database. cur.execute('SELECT * FROM cid_storage WHERE ID=?', [cid]) if cur.fetchone(): continue cur.execute('SELECT * FROM ignore_authors WHERE ID=?', [cauth]) if cur.fetchone(): continue try: comment_text = comment.body.lower() if str(comment.author) != Username: commentNum = randint(0,13) if commentNum == 2: comment.reply(commentsubmit % choice(goldFactsList)) except AttributeError: pass #Adding comment id into SQL database. cur.execute('INSERT INTO cid_storage VALUES(?)', [cid]) sql.commit() #Function for running (is defining) bot. #In this definition, the bot is posting/commenting to gilded comments from /r/all (and maybe submissions later on). def run_bot_comments_all(): subreddit = r.get_subreddit("all") comments = subreddit.get_comments(gilded_only=True, limit=100) for comment in comments: cid = comment.id cauth = comment.author.name #Checking throught SQL database. cur.execute('SELECT * FROM cid_storage WHERE ID=?', [cid]) if cur.fetchone(): continue cur.execute('SELECT * FROM ignore_authors WHERE ID=?', [cauth]) if cur.fetchone(): continue try: comment_text = comment.body.lower() if str(comment.author) != Username: commentNum = randint(0,32) if commentNum == 2: comment.reply(commentsubmit % choice(goldFactsList)) except AttributeError: pass #Adding comment id into SQL database. cur.execute('INSERT INTO cid_storage VALUES(?)', [cid]) sql.commit() #Where bot begins (continues) to run. print("/u/GoldFact (bot) is running...\n") while True: try: ignore_requests() obey_requests() run_bot_messages() run_bot_comments_lounge() run_bot_comments_all() except Exception as e: traceback.print_exc() time.sleep(30)
{ "content_hash": "45652297c04e60833e73a8f14233a1de", "timestamp": "", "source": "github", "line_count": 223, "max_line_length": 218, "avg_line_length": 46.56502242152467, "alnum_prop": 0.6434899845916795, "repo_name": "Saroekin/GoldFact", "id": "7d1e94d96a93a95cb0e5a54d57fe363a9de8e105", "size": "10533", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "GoldFact.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "10533" } ], "symlink_target": "" }
from kraken import plugins
from kraken.core.objects.locator import Locator
from kraken.core.objects.operators.kl_operator import KLOperator
from kraken.core.traverser.traverser import Traverser

locInA = Locator("locatorInA")
locInB = Locator("locatorInB")
locOutA = Locator("locatorOutA")
locOutB = Locator("locatorOutB")

operator = KLOperator("IK", "MultiPoseConstraintSolver", "Kraken")
operator.resizeInput('constrainers', 2)
operator.resizeOutput('constrainees', 2)
operator.setInput("constrainers", locInA, 0)
operator.setInput("constrainers", locInB, 1)
operator.setOutput("constrainees", locOutA, 0)
operator.setOutput("constrainees", locOutB, 1)

trav = Traverser()
trav.addRootItem(locOutA)
trav.addRootItem(locOutB)

def callback(**args):
    item = args.get('item', None)
    print 'Visited ' + item.getDecoratedPath()

trav.traverse(itemCallback=callback)
{ "content_hash": "587964e1da1d1d459a17babb2de2f3ee", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 66, "avg_line_length": 32.22222222222222, "alnum_prop": 0.7793103448275862, "repo_name": "oculusstorystudio/kraken", "id": "04fc4fb1fcdba34f290ce04d269d6a4434bdbdcb", "size": "870", "binary": false, "copies": "2", "ref": "refs/heads/develop_OSS", "path": "tests/Traverser/operators.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "AMPL", "bytes": "136" }, { "name": "Batchfile", "bytes": "2584" }, { "name": "CSS", "bytes": "21033" }, { "name": "MAXScript", "bytes": "521" }, { "name": "Mathematica", "bytes": "4442959" }, { "name": "Python", "bytes": "2841362" }, { "name": "Shell", "bytes": "2689" } ], "symlink_target": "" }
from swgpy.object import *

def create(kernel):
	result = Tangible()

	result.template = "object/tangible/furniture/city/shared_streetlamp_large_02.iff"
	result.attribute_template_id = 6
	result.stfName("frn_n","streetlamp_2")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result
{ "content_hash": "91307fe90245abad2645bd41fde264bf", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 82, "avg_line_length": 23.846153846153847, "alnum_prop": 0.7, "repo_name": "anhstudios/swganh", "id": "60c5b03fb30c46726bbd2dd7ec95f655feeba201", "size": "455", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "data/scripts/templates/object/tangible/furniture/city/shared_streetlamp_large_02.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11887" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2357839" }, { "name": "CMake", "bytes": "41264" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7503510" }, { "name": "SQLPL", "bytes": "42770" } ], "symlink_target": "" }
"""Imports to tensorflow_quantum.core.* level.""" # Import getters for constructing ops. from tensorflow_quantum.core.ops import (get_expectation_op, get_sampled_expectation_op, get_sampling_op, get_state_op, get_unitary_op) # Import regular ops. from tensorflow_quantum.core.ops import (append_circuit, padded_to_ragged, padded_to_ragged2d, resolve_parameters) # Import math ops. from tensorflow_quantum.core.ops import math_ops # Import noise ops. from tensorflow_quantum.core.ops import noise
{ "content_hash": "a470ba4b534ec29cdac7ba2d475daa89", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 80, "avg_line_length": 47.214285714285715, "alnum_prop": 0.5915279878971256, "repo_name": "tensorflow/quantum", "id": "fb24ad4a8c1f98f75937814b000c9f8167ecefaa", "size": "1358", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow_quantum/core/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "472630" }, { "name": "Python", "bytes": "1158441" }, { "name": "Shell", "bytes": "23877" }, { "name": "Starlark", "bytes": "54143" } ], "symlink_target": "" }
import os
from urllib.request import urlretrieve

import pandas as pd

FREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'


def get_fremont_data(filename='Fremont.csv', url=FREMONT_URL, force_download=False):
    """Download and cache the fremont data

    Parameters
    ----------
    filename : string (optional)
        location to save the data
    url: string (optional)
        web location of the data
    force_download : bool (optional)
        if true, force redownload of data

    Returns
    -------
    data : pandas.DataFrame
        The fremont bridge data
    """
    if not os.path.exists(filename):
        urlretrieve(FREMONT_URL, 'Fremont.csv')
    data = pd.read_csv('Fremont.csv', index_col='Date')
    try:
        data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')
    except TypeError:
        data.index = pd.to_datetime(data.index)
    data.columns = ['west', 'east']
    data['Total'] = data.sum(axis=1)
    return data
{ "content_hash": "ba12f6c762f8de4bccb1b5a232ccb8a3", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 89, "avg_line_length": 28.82857142857143, "alnum_prop": 0.6382556987115956, "repo_name": "muxuezi/jupyterworkflow", "id": "79e4a3d575d3663d01c19f7cef18cc22c6603932", "size": "1009", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "103bestpractice/jupyterworkflow/data.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "1324530" }, { "name": "Makefile", "bytes": "34" }, { "name": "Python", "bytes": "1314" } ], "symlink_target": "" }
import os
import sys
import yaml
import numpy as np

featuresDir = 'logs/features'
features = [yaml.load(''.join([l for l in open(os.path.join(featuresDir, logFile)) if not l.startswith('[')])) for logFile in sorted(os.listdir(featuresDir))]

fisher_vectorsDir = 'logs/fisher_vectors'
fisher_vectors = [yaml.load(open(os.path.join(fisher_vectorsDir, logFile)).read()) for logFile in sorted(os.listdir(fisher_vectorsDir))]

classification = yaml.load(open('data/classification.txt').read())

print 'Average frame count: %d' % np.mean([y['Frame count'] for y in features])
print 'Average frame size: %dx%d' % tuple(map(lambda x: np.mean([float(y['Original frame size'].split('x')[x]) for y in features]), range(2)))
print 'Average descriptor count: %d' % np.mean([y['Calls.ComputeDescriptor'] for y in features])
print ''
print 'All fps are reported without taking file reading and writing into account, however, video decoding is included.'
print ''

print 'Features (%s enabled):' % ', '.join([k for k, v in features[0]['Enabled descriptors'].items() if v])
print '  Average total fps: %.2f' % np.mean([y['Fps'] for y in features])
print '  Average HOG fps: %.2f' % np.mean([y['Frame count'] / float(0.01 + y['Reading (sec)'] + y['Interp (sec)']['HOG'] + y['IntHist (sec)']['HOG'] + y['Desc (sec)']['HOG']) for y in features])
print '  Average HOF fps: %.2f' % np.mean([y['Frame count'] / float(0.01 + y['Reading (sec)'] + y['Interp (sec)']['HOFMBH'] + y['IntHist (sec)']['HOF'] + y['Desc (sec)']['HOF']) for y in features])
print '  Average MBH fps: %.2f' % np.mean([y['Frame count'] / float(0.01 + y['Reading (sec)'] + y['Interp (sec)']['HOFMBH'] + y['IntHist (sec)']['MBH'] + y['Desc (sec)']['MBH']) for y in features])
print ''

print 'Fisher vectors (components: %d, s-t grids enabled: %s, knn: %s, second order enabled: %s, FLANN trees: %s, FLANN comparisons: %s):' % (fisher_vectors[0]['10-105']['k'], fisher_vectors[0]['Enable spatio-temporal grids (1x1x1, 1x3x1, 1x1x2)'], fisher_vectors[0].get('K_nn', 'N/A'), fisher_vectors[0]['Enable second order'], fisher_vectors[0].get('FLANN trees', -1), fisher_vectors[0].get('FLANN checks', -1))
print '  Average total fps: %.2lf' % np.mean([y['Frame count'] / float(z.get('Copying (sec)', 0) + z.get('Flann (sec)', 0) + z.get('Assigning (sec)', 0)) for y, z in zip(features, fisher_vectors)])
print ''

print 'Classification:'
for k in sorted(set(classification) - set(['mean'])):
    print '  %-15s\t%.4f' % (k, classification[k])
print ''
print '  mean: %.4f' % classification['mean']
{ "content_hash": "681885545a415269bc7bbf3f835f7ff8", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 413, "avg_line_length": 74.55882352941177, "alnum_prop": 0.6568047337278107, "repo_name": "vadimkantorov/cvpr2014", "id": "33570f54a0fe52b1deaf015e07981c831400f529", "size": "2559", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "repro/report.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "8060" }, { "name": "C++", "bytes": "41039" }, { "name": "HTML", "bytes": "8961" }, { "name": "Makefile", "bytes": "7452" }, { "name": "Matlab", "bytes": "4416" }, { "name": "Python", "bytes": "17905" }, { "name": "Shell", "bytes": "2147" }, { "name": "TeX", "bytes": "167249" } ], "symlink_target": "" }
"""Definition of the audio service backends base classes. These classes can be used to create an Audioservice plugin extending Mycroft's media playback options. """ from abc import ABCMeta, abstractmethod class AudioBackend(metaclass=ABCMeta): """Base class for all audio backend implementations. Args: config (dict): configuration dict for the instance bus (MessageBusClient): Mycroft messagebus emitter """ def __init__(self, config, bus): self._track_start_callback = None self.supports_mime_hints = False self.config = config self.bus = bus @abstractmethod def supported_uris(self): """List of supported uri types. Returns: list: Supported uri's """ @abstractmethod def clear_list(self): """Clear playlist.""" @abstractmethod def add_list(self, tracks): """Add tracks to backend's playlist. Args: tracks (list): list of tracks. """ @abstractmethod def play(self, repeat=False): """Start playback. Starts playing the first track in the playlist and will contiune until all tracks have been played. Args: repeat (bool): Repeat playlist, defaults to False """ @abstractmethod def stop(self): """Stop playback. Stops the current playback. Returns: bool: True if playback was stopped, otherwise False """ def set_track_start_callback(self, callback_func): """Register callback on track start. This method should be called as each track in a playlist is started. """ self._track_start_callback = callback_func def pause(self): """Pause playback. Stops playback but may be resumed at the exact position the pause occured. """ def resume(self): """Resume paused playback. Resumes playback after being paused. """ def next(self): """Skip to next track in playlist.""" def previous(self): """Skip to previous track in playlist.""" def lower_volume(self): """Lower volume. This method is used to implement audio ducking. It will be called when Mycroft is listening or speaking to make sure the media playing isn't interfering. """ def restore_volume(self): """Restore normal volume. Called when to restore the playback volume to previous level after Mycroft has lowered it using lower_volume(). """ def seek_forward(self, seconds=1): """Skip X seconds. Args: seconds (int): number of seconds to seek, if negative rewind """ def seek_backward(self, seconds=1): """Rewind X seconds. Args: seconds (int): number of seconds to seek, if negative jump forward. """ def track_info(self): """Get info about current playing track. Returns: dict: Track info containing atleast the keys artist and album. """ ret = {} ret['artist'] = '' ret['album'] = '' return ret def shutdown(self): """Perform clean shutdown. Implements any audio backend specific shutdown procedures. """ self.stop() class RemoteAudioBackend(AudioBackend): """Base class for remote audio backends. RemoteAudioBackends will always be checked after the normal AudioBackends to make playback start locally by default. An example of a RemoteAudioBackend would be things like Chromecasts, mopidy servers, etc. """
{ "content_hash": "e040c2ef4d158953c49a7405181d9108", "timestamp": "", "source": "github", "line_count": 146, "max_line_length": 79, "avg_line_length": 25.328767123287673, "alnum_prop": 0.607355327203894, "repo_name": "MycroftAI/mycroft-core", "id": "9a387dc7da0f179d8279dcddf7200c5faa3d29b8", "size": "4278", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "mycroft/audio/services/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "3791" }, { "name": "Python", "bytes": "1370285" }, { "name": "QML", "bytes": "18805" }, { "name": "Shell", "bytes": "85326" } ], "symlink_target": "" }
"""Extract reference documentation from the NumPy source tree. """ import inspect import textwrap import re import pydoc from warnings import warn, catch_warnings # Try Python 2 first, otherwise load from Python 3 try: from StringIO import StringIO except: from io import StringIO class Reader(object): """A line-based string reader. """ def __init__(self, data): """ Parameters ---------- data : str String with lines separated by '\n'. """ if isinstance(data, list): self._str = data else: self._str = data.split('\n') # store string as list of lines self.reset() def __getitem__(self, n): return self._str[n] def reset(self): self._l = 0 # current line nr def read(self): if not self.eof(): out = self[self._l] self._l += 1 return out else: return '' def seek_next_non_empty_line(self): for l in self[self._l:]: if l.strip(): break else: self._l += 1 def eof(self): return self._l >= len(self._str) def read_to_condition(self, condition_func): start = self._l for line in self[start:]: if condition_func(line): return self[start:self._l] self._l += 1 if self.eof(): return self[start:self._l + 1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() def is_empty(line): return not line.strip() return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): def is_unindented(line): return (line.strip() and (len(line.lstrip()) == len(line))) return self.read_to_condition(is_unindented) def peek(self, n=0): if self._l + n < len(self._str): return self[self._l + n] else: return '' def is_empty(self): return not ''.join(self._str).strip() class NumpyDocString(object): def __init__(self, docstring, config={}): docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) self._parsed_data = { 'Signature': '', 'Summary': [''], 'Extended Summary': [], 'Parameters': [], 'Returns': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], 'Attributes': [], 'Methods': [], 'See Also': [], 'Notes': [], 'Warnings': [], 'References': '', 'Examples': '', 'index': {} } self._parse() def __getitem__(self, key): return self._parsed_data[key] def __setitem__(self, key, val): if key not in self._parsed_data: warn("Unknown section %s" % key) else: self._parsed_data[key] = val def _is_at_section(self): self._doc.seek_next_non_empty_line() if self._doc.eof(): return False l1 = self._doc.peek().strip() # e.g. Parameters if l1.startswith('.. 
index::'): return True l2 = self._doc.peek(1).strip() # ---------- or ========== return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1)) def _strip(self, doc): i = 0 j = 0 for i, line in enumerate(doc): if line.strip(): break for j, line in enumerate(doc[::-1]): if line.strip(): break return doc[i:len(doc) - j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() while not self._is_at_section() and not self._doc.eof(): if not self._doc.peek(-1).strip(): # previous line was empty section += [''] section += self._doc.read_to_next_empty_line() return section def _read_sections(self): while not self._doc.eof(): data = self._read_to_next_section() name = data[0].strip() if name.startswith('..'): # index section yield name, data[1:] elif len(data) < 2: yield StopIteration else: yield name, self._strip(data[2:]) def _parse_param_list(self, content): r = Reader(content) params = [] while not r.eof(): header = r.read().strip() if ' : ' in header: arg_name, arg_type = header.split(' : ')[:2] else: arg_name, arg_type = header, '' desc = r.read_to_next_unindented_line() desc = dedent_lines(desc) params.append((arg_name, arg_type, desc)) return params _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|" r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X) def _parse_see_also(self, content): """ func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, :meth:`func_name`, func_name3 """ items = [] def parse_item_name(text): """Match ':role:`name`' or 'name'""" m = self._name_rgx.match(text) if m: g = m.groups() if g[1] is None: return g[3], None else: return g[2], g[1] raise ValueError("%s is not a item name" % text) def push_item(name, rest): if not name: return name, role = parse_item_name(name) items.append((name, list(rest), role)) del rest[:] current_func = None rest = [] for line in content: if not line.strip(): continue m = self._name_rgx.match(line) if m and line[m.end():].strip().startswith(':'): push_item(current_func, rest) current_func, line = line[:m.end()], line[m.end():] rest = [line.split(':', 1)[1].strip()] if not rest[0]: rest = [] elif not line.startswith(' '): push_item(current_func, rest) current_func = None if ',' in line: for func in line.split(','): push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: rest.append(line.strip()) push_item(current_func, rest) return items def _parse_index(self, section, content): """ .. 
index: default :refguide: something, else, and more """ def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if len(section) > 1: out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if len(line) > 2: out[line[1]] = strip_each_in(line[2].split(',')) return out def _parse_summary(self): """Grab signature (if given) and summary""" if self._is_at_section(): return summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): self['Signature'] = summary_str if not self._is_at_section(): self['Summary'] = self._doc.read_to_next_empty_line() else: self['Summary'] = summary if not self._is_at_section(): self['Extended Summary'] = self._read_to_next_section() def _parse(self): self._doc.reset() self._parse_summary() for (section, content) in self._read_sections(): if not section.startswith('..'): section = ' '.join([s.capitalize() for s in section.split(' ')]) if section in ('Parameters', 'Attributes', 'Methods', 'Returns', 'Raises', 'Warns'): self[section] = self._parse_param_list(content) elif section.startswith('.. index::'): self['index'] = self._parse_index(section, content) elif section == 'See Also': self['See Also'] = self._parse_see_also(content) else: self[section] = content # string conversion routines def _str_header(self, name, symbol='-'): return [name, len(name) * symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' ' * indent + line] return out def _str_signature(self): if self['Signature']: return [self['Signature'].replace('*', '\*')] + [''] else: return [''] def _str_summary(self): if self['Summary']: return self['Summary'] + [''] else: return [] def _str_extended_summary(self): if self['Extended Summary']: return self['Extended Summary'] + [''] else: return [] def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) for param, param_type, desc in self[name]: out += ['%s : %s' % (param, param_type)] out += self._str_indent(desc) out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += self[name] out += [''] return out def _str_see_also(self, func_role): if not self['See Also']: return [] out = [] out += self._str_header("See Also") last_had_desc = True for func, desc, role in self['See Also']: if role: link = ':%s:`%s`' % (role, func) elif func_role: link = ':%s:`%s`' % (func_role, func) else: link = "`%s`_" % func if desc or last_had_desc: out += [''] out += [link] else: out[-1] += ", %s" % link if desc: out += self._str_indent([' '.join(desc)]) last_had_desc = True else: last_had_desc = False out += [''] return out def _str_index(self): idx = self['index'] out = [] out += ['.. 
index:: %s' % idx.get('default', '')] for section, references in idx.iteritems(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] return out def __str__(self, func_role=''): out = [] out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Raises'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) for s in ('Notes', 'References', 'Examples'): out += self._str_section(s) for param_list in ('Attributes', 'Methods'): out += self._str_param_list(param_list) out += self._str_index() return '\n'.join(out) def indent(str, indent=4): indent_str = ' ' * indent if str is None: return indent_str lines = str.split('\n') return '\n'.join(indent_str + l for l in lines) def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") def header(text, style='-'): return text + '\n' + style * len(text) + '\n' class FunctionDoc(NumpyDocString): def __init__(self, func, role='func', doc=None, config={}): self._f = func self._role = role # e.g. "func" or "meth" if doc is None: if func is None: raise ValueError("No function or docstring given") doc = inspect.getdoc(func) or '' NumpyDocString.__init__(self, doc) if not self['Signature'] and func is not None: func, func_name = self.get_func() try: # try to read signature with catch_warnings(record=True): argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) argspec = argspec.replace('*', '\*') signature = '%s%s' % (func_name, argspec) except TypeError as e: signature = '%s()' % func_name self['Signature'] = signature def get_func(self): func_name = getattr(self._f, '__name__', self.__class__.__name__) if inspect.isclass(self._f): func = getattr(self._f, '__call__', self._f.__init__) else: func = self._f return func, func_name def __str__(self): out = '' func, func_name = self.get_func() signature = self['Signature'].replace('*', '\*') roles = {'func': 'function', 'meth': 'method'} if self._role: if not roles.has_key(self._role): print("Warning: invalid role %s" % self._role) out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) return out class ClassDoc(NumpyDocString): def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, config=None): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls if modulename and not modulename.endswith('.'): modulename += '.' self._mod = modulename if doc is None: if cls is None: raise ValueError("No class or documentation string given") doc = pydoc.getdoc(cls) NumpyDocString.__init__(self, doc) if config is not None and config.get('show_class_members', True): if not self['Methods']: self['Methods'] = [(name, '', '') for name in sorted(self.methods)] if not self['Attributes']: self['Attributes'] = [(name, '', '') for name in sorted(self.properties)] @property def methods(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) if not name.startswith('_') and callable(func)] @property def properties(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) if not name.startswith('_') and func is None]
{ "content_hash": "6986930fc340ae757ba9b6d610f6a8a2", "timestamp": "", "source": "github", "line_count": 512, "max_line_length": 78, "avg_line_length": 30.294921875, "alnum_prop": 0.47495325897749985, "repo_name": "wronk/mne-python", "id": "5e01fd82be9284676090d3782b856cbf4ccf460b", "size": "15511", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "doc/sphinxext/numpy_ext/docscrape.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "3769" }, { "name": "PowerShell", "bytes": "2988" }, { "name": "Python", "bytes": "5079143" }, { "name": "Shell", "bytes": "936" } ], "symlink_target": "" }
import dota2api
import greetings
import o1
import o2
import os

##############################################
################GLOBAL VARIABLES################
fileDirectory = '../txtResults/'
apiKey = os.environ['dota2_api_key']
################################################

def main():
    myAccountID = greetings.login()
    while (1):
        option = greetings.greetings()
        if (option == 0):
            break
        elif (option == 1):
            o1.optionOne(api, myAccountID, fileDirectory)
        elif (option == 2):
            o2.optionTwo(myAccountID)
        elif (option != 0 or option != 1 or option != 2):
            print("Wrong option, try again\n")

##########################RUNNER##########################
api = dota2api.Initialise(apiKey)
main()
##########################################################
{ "content_hash": "54acab143844a425bcbab20f509c14cf", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 58, "avg_line_length": 26.586206896551722, "alnum_prop": 0.47470817120622566, "repo_name": "divinoob/DotaScene", "id": "f4a23bf40392aa85a94da11539341f6d70f4fd68", "size": "1007", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Program/dotaStats.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "10535" } ], "symlink_target": "" }
import requests

url = "https://maps.googleapis.com/maps/api/geocode/json?latlng=0%2C0&key=YOUR_API_KEY"

payload = {}
headers = {}

response = requests.request("GET", url, headers=headers, data=payload)

print(response.text)

# [END maps_http_geocode_zero_results]
{ "content_hash": "8a7a15c0ab33e13acc198164c8bfb1df", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 87, "avg_line_length": 21.916666666666668, "alnum_prop": 0.7376425855513308, "repo_name": "googlemaps/openapi-specification", "id": "fb4f6e645d3c1d0c0f4a577ffb3f8d0742d5405c", "size": "304", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "dist/snippets/maps_http_geocode_zero_results/maps_http_geocode_zero_results.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Starlark", "bytes": "11394" }, { "name": "TypeScript", "bytes": "71469" } ], "symlink_target": "" }
from elasticsearch_dsl import Date, Boolean, Text, Integer, Byte, Float, Keyword
from {{appname}}.models.elastic.dsl_basemodel import ElasticDSLBaseModel
from {{appname}}.lib.powlib import relation
from elasticsearch_dsl import DocType
from {{appname}}.database.elasticdblib import dbname
from datetime import datetime


@relation.setup_elastic_dsl_schema()
class {{model_class_name}}(ElasticBaseModel):
    #
    # Use the cerberus schema style
    # which offer you an ElasticDSL schema and
    # immediate validation with cerberus
    #
    class Meta:
        index = dbname

    schema = {
        'title': {
            'type': 'string',
            "elastic": {
                "analyzer": "snowball",
                "fields": {'raw': Keyword()}
            }
        },
        'body': {
            'type': 'string',
            'maxlength': 235,
            "elastic": {
                "analyzer": "snowball"
            }
        },
        'tags': {
            'type': 'list',
            "elastic": {
                "index": "not_analyzed"
            }
        },
        'published_from': {
            "type": 'date'
        },
        'lines': {
            "type": 'integer'
        }
    }

    #
    # your model's methods down here
    # (the two below are just examples from the elasticsearch_dsl py documentation)
    #
    def save(self, **kwargs):
        self.lines = len(self.body.split())
        self.upsert()

    def is_published(self):
        return datetime.now() < self.published_from
{ "content_hash": "d29c5a5744b0ee1052cf77d59b9fdb7a", "timestamp": "", "source": "github", "line_count": 54, "max_line_length": 83, "avg_line_length": 27.87037037037037, "alnum_prop": 0.5375415282392026, "repo_name": "pythononwheels/pow_devel", "id": "44f0bf125a266af13c9d3bf525b68b9560119718", "size": "1539", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pythononwheels/start/stubs/elasticdsl_model_template.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3448946" }, { "name": "Dockerfile", "bytes": "3363" }, { "name": "HTML", "bytes": "1128" }, { "name": "JavaScript", "bytes": "3002089" }, { "name": "Mako", "bytes": "493" }, { "name": "PLSQL", "bytes": "10276" }, { "name": "Python", "bytes": "414691" } ], "symlink_target": "" }
from __future__ import absolute_import import logging import mock import re import unittest2 import prompt_toolkit from prompt_toolkit.document import Document from six.moves import StringIO from st2client.utils import interactive import six LOG = logging.getLogger(__name__) class TestInteractive(unittest2.TestCase): def assertPromptMessage(self, prompt_mock, message, msg=None): self.assertEqual(prompt_mock.call_args[0], (message,), msg) def assertPromptDescription(self, prompt_mock, message, msg=None): toolbar_factory = prompt_mock.call_args[1]["get_bottom_toolbar_tokens"] self.assertEqual(toolbar_factory(None)[0][1], message, msg) def assertPromptValidate(self, prompt_mock, value): validator = prompt_mock.call_args[1]["validator"] validator.validate(Document(text=six.text_type(value))) def assertPromptPassword(self, prompt_mock, value, msg=None): self.assertEqual(prompt_mock.call_args[1]["is_password"], value, msg) def test_interactive_form(self): reader = mock.MagicMock() Reader = mock.MagicMock(return_value=reader) Reader.condition = mock.MagicMock(return_value=True) schema = {"string": {"type": "string"}} with mock.patch.object(interactive.InteractiveForm, "readers", [Reader]): interactive.InteractiveForm(schema).initiate_dialog() Reader.condition.assert_called_once_with(schema["string"]) reader.read.assert_called_once_with() def test_interactive_form_no_match(self): reader = mock.MagicMock() Reader = mock.MagicMock(return_value=reader) Reader.condition = mock.MagicMock(return_value=False) schema = {"string": {"type": "string"}} with mock.patch.object(interactive.InteractiveForm, "readers", [Reader]): interactive.InteractiveForm(schema).initiate_dialog() Reader.condition.assert_called_once_with(schema["string"]) reader.read.assert_not_called() @mock.patch("sys.stdout", new_callable=StringIO) def test_interactive_form_interrupted(self, stdout_mock): reader = mock.MagicMock() Reader = mock.MagicMock(return_value=reader) Reader.condition = mock.MagicMock(return_value=True) reader.read = mock.MagicMock(side_effect=KeyboardInterrupt) schema = {"string": {"type": "string"}} with mock.patch.object(interactive.InteractiveForm, "readers", [Reader]): interactive.InteractiveForm(schema).initiate_dialog() self.assertEqual(stdout_mock.getvalue(), "Dialog interrupted.\n") def test_interactive_form_interrupted_reraised(self): reader = mock.MagicMock() Reader = mock.MagicMock(return_value=reader) Reader.condition = mock.MagicMock(return_value=True) reader.read = mock.MagicMock(side_effect=KeyboardInterrupt) schema = {"string": {"type": "string"}} with mock.patch.object(interactive.InteractiveForm, "readers", [Reader]): self.assertRaises( interactive.DialogInterrupted, interactive.InteractiveForm(schema, reraise=True).initiate_dialog, ) @mock.patch.object(interactive, "prompt") def test_stringreader(self, prompt_mock): spec = {"description": "some description", "default": "hey"} Reader = interactive.StringReader("some", spec) prompt_mock.return_value = "stuff" result = Reader.read() self.assertEqual(result, "stuff") self.assertPromptMessage(prompt_mock, "some [hey]: ") self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "stuff") prompt_mock.return_value = "" result = Reader.read() self.assertEqual(result, "hey") self.assertPromptValidate(prompt_mock, "") @mock.patch.object(interactive, "prompt") def test_booleanreader(self, prompt_mock): spec = {"description": "some description", "default": False} Reader = interactive.BooleanReader("some", spec) 
prompt_mock.return_value = "y" result = Reader.read() self.assertEqual(result, True) self.assertPromptMessage(prompt_mock, "some (boolean) [n]: ") self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "y") self.assertRaises( prompt_toolkit.validation.ValidationError, self.assertPromptValidate, prompt_mock, "some", ) prompt_mock.return_value = "" result = Reader.read() self.assertEqual(result, False) self.assertPromptValidate(prompt_mock, "") @mock.patch.object(interactive, "prompt") def test_numberreader(self, prompt_mock): spec = {"description": "some description", "default": 3.2} Reader = interactive.NumberReader("some", spec) prompt_mock.return_value = "5.3" result = Reader.read() self.assertEqual(result, 5.3) self.assertPromptMessage(prompt_mock, "some (float) [3.2]: ") self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "5.3") self.assertRaises( prompt_toolkit.validation.ValidationError, self.assertPromptValidate, prompt_mock, "some", ) prompt_mock.return_value = "" result = Reader.read() self.assertEqual(result, 3.2) self.assertPromptValidate(prompt_mock, "") @mock.patch.object(interactive, "prompt") def test_integerreader(self, prompt_mock): spec = {"description": "some description", "default": 3} Reader = interactive.IntegerReader("some", spec) prompt_mock.return_value = "5" result = Reader.read() self.assertEqual(result, 5) self.assertPromptMessage(prompt_mock, "some (integer) [3]: ") self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "5") self.assertRaises( prompt_toolkit.validation.ValidationError, self.assertPromptValidate, prompt_mock, "5.3", ) prompt_mock.return_value = "" result = Reader.read() self.assertEqual(result, 3) self.assertPromptValidate(prompt_mock, "") @mock.patch.object(interactive, "prompt") def test_secretstringreader(self, prompt_mock): spec = {"description": "some description", "default": "hey"} Reader = interactive.SecretStringReader("some", spec) prompt_mock.return_value = "stuff" result = Reader.read() self.assertEqual(result, "stuff") self.assertPromptMessage(prompt_mock, "some (secret) [hey]: ") self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "stuff") self.assertPromptPassword(prompt_mock, True) prompt_mock.return_value = "" result = Reader.read() self.assertEqual(result, "hey") self.assertPromptValidate(prompt_mock, "") @mock.patch.object(interactive, "prompt") def test_enumreader(self, prompt_mock): spec = { "enum": ["some", "thing", "else"], "description": "some description", "default": "thing", } Reader = interactive.EnumReader("some", spec) prompt_mock.return_value = "2" result = Reader.read() self.assertEqual(result, "else") message = "some: \n 0 - some\n 1 - thing\n 2 - else\nChoose from 0, 1, 2 [1]: " self.assertPromptMessage(prompt_mock, message) self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "0") self.assertRaises( prompt_toolkit.validation.ValidationError, self.assertPromptValidate, prompt_mock, "some", ) self.assertRaises( prompt_toolkit.validation.ValidationError, self.assertPromptValidate, prompt_mock, "5", ) prompt_mock.return_value = "" result = Reader.read() self.assertEqual(result, "thing") self.assertPromptValidate(prompt_mock, "") @mock.patch.object(interactive, "prompt") def test_arrayreader(self, prompt_mock): spec = {"description": "some description", "default": ["a", "b"]} Reader = 
interactive.ArrayReader("some", spec) prompt_mock.return_value = "some,thing,else" result = Reader.read() self.assertEqual(result, ["some", "thing", "else"]) self.assertPromptMessage(prompt_mock, "some (comma-separated list) [a,b]: ") self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "some,thing,else") prompt_mock.return_value = "" result = Reader.read() self.assertEqual(result, ["a", "b"]) self.assertPromptValidate(prompt_mock, "") @mock.patch.object(interactive, "prompt") def test_arrayreader_ends_with_comma(self, prompt_mock): spec = {"description": "some description", "default": ["a", "b"]} Reader = interactive.ArrayReader("some", spec) prompt_mock.return_value = "some,thing,else," result = Reader.read() self.assertEqual(result, ["some", "thing", "else", ""]) self.assertPromptMessage(prompt_mock, "some (comma-separated list) [a,b]: ") self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "some,thing,else,") @mock.patch.object(interactive, "prompt") def test_arrayenumreader(self, prompt_mock): spec = { "items": {"enum": ["a", "b", "c", "d", "e"]}, "description": "some description", "default": ["a", "b"], } Reader = interactive.ArrayEnumReader("some", spec) prompt_mock.return_value = "0,2,4" result = Reader.read() self.assertEqual(result, ["a", "c", "e"]) message = "some: \n 0 - a\n 1 - b\n 2 - c\n 3 - d\n 4 - e\nChoose from 0, 1, 2... [0, 1]: " self.assertPromptMessage(prompt_mock, message) self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "0,2,4") prompt_mock.return_value = "" result = Reader.read() self.assertEqual(result, ["a", "b"]) self.assertPromptValidate(prompt_mock, "") @mock.patch.object(interactive, "prompt") def test_arrayenumreader_ends_with_comma(self, prompt_mock): spec = { "items": {"enum": ["a", "b", "c", "d", "e"]}, "description": "some description", "default": ["a", "b"], } Reader = interactive.ArrayEnumReader("some", spec) prompt_mock.return_value = "0,2,4," result = Reader.read() self.assertEqual(result, ["a", "c", "e"]) message = "some: \n 0 - a\n 1 - b\n 2 - c\n 3 - d\n 4 - e\nChoose from 0, 1, 2... [0, 1]: " self.assertPromptMessage(prompt_mock, message) self.assertPromptDescription(prompt_mock, "some description") self.assertPromptValidate(prompt_mock, "0,2,4,") @mock.patch.object(interactive, "prompt") def test_arrayobjectreader(self, prompt_mock): spec = { "items": { "type": "object", "properties": { "foo": { "type": "string", "description": "some description", }, "bar": { "type": "string", "description": "some description", }, }, }, "description": "some description", } Reader = interactive.ArrayObjectReader("some", spec) # To emulate continuing setting, this flag variable is needed self.is_continued = False def side_effect(msg, **kwargs): if re.match(r"^~~~ Would you like to add another item to.*", msg): # prompt requires the input to judge continuing setting, or not if not self.is_continued: # continuing the configuration only once self.is_continued = True return "" else: # finishing to configuration return "n" else: # prompt requires the input of property value in the object return "value" prompt_mock.side_effect = side_effect results = Reader.read() self.assertEqual(len(results), 2) self.assertTrue(all([len(list(x.keys())) == 2 for x in results])) self.assertTrue(all(["foo" in x and "bar" in x for x in results]))
{ "content_hash": "c0e628f6d5350f5f1d411ff8c1658d17", "timestamp": "", "source": "github", "line_count": 354, "max_line_length": 99, "avg_line_length": 36.85310734463277, "alnum_prop": 0.6037099494097807, "repo_name": "Plexxi/st2", "id": "dce4c6748ddfa2605b0b1deb096d8c03f7c143ab", "size": "13674", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "st2client/tests/unit/test_interactive.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "198" }, { "name": "JavaScript", "bytes": "444" }, { "name": "Jinja", "bytes": "174532" }, { "name": "Makefile", "bytes": "75242" }, { "name": "PowerShell", "bytes": "856" }, { "name": "Python", "bytes": "6453910" }, { "name": "Shell", "bytes": "93607" }, { "name": "Starlark", "bytes": "7236" } ], "symlink_target": "" }
"""Mix provider which read config from different other providers. .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com> """ from compass.config_management.providers import config_provider from compass.utils import setting_wrapper as setting class MixProvider(config_provider.ConfigProvider): """mix provider which read config from different other providers.""" NAME = 'mix' def __init__(self): self.global_provider_ = config_provider.get_provider_by_name( setting.GLOBAL_CONFIG_PROVIDER) self.cluster_provider_ = config_provider.get_provider_by_name( setting.CLUSTER_CONFIG_PROVIDER) self.host_provider_ = config_provider.get_provider_by_name( setting.HOST_CONFIG_PROVIDER) def get_global_config(self): """get global config.""" return self.global_provider_.get_global_config() def get_cluster_config(self, clusterid): """get cluster config.""" return self.cluster_provider_.get_cluster_config(clusterid) def get_host_config(self, hostid): """get host config.""" return self.host_provider_.get_host_config(hostid) def update_global_config(self, config): """update global config.""" self.global_provider_.update_global_config(config) def update_cluster_config(self, clusterid, config): """update cluster config.""" self.cluster_provider_.update_cluster_config( clusterid, config) def update_host_config(self, hostid, config): """update host config.""" self.host_provider_.update_host_config(hostid, config) config_provider.register_provider(MixProvider)
{ "content_hash": "9726256dd942356b77707ddfaaa44e6c", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 72, "avg_line_length": 35.638297872340424, "alnum_prop": 0.6770149253731343, "repo_name": "SysCompass/compass-core", "id": "b858ea9d4588da361c33de738f6fd01979266133", "size": "1675", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "compass/config_management/providers/plugins/mix_config_provider.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "396968" }, { "name": "Shell", "bytes": "28585" } ], "symlink_target": "" }
""" find_only_repeat.py Find the only element in an array that only occurs once. Author: Corwin Brown <blakfeld@gmail.com> """ from __future__ import print_function import sys from collections import defaultdict def find_only_repeat(list_to_search): """ Search a list for a single repeating int. Args: list_to_search (list): The list to search. Returns: int """ count = defaultdict(int) for item in list_to_search: count[item] += 1 return min(count, key=count.get) def main(): """ Main. """ list_to_search = [0, 1, 5, 0, 1, 8, 7, 8, 7] print(find_only_repeat(list_to_search)) if __name__ == '__main__': sys.exit(main())
{ "content_hash": "41dc59aa627ef41e365d6cf915dcb594", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 60, "avg_line_length": 18.333333333333332, "alnum_prop": 0.6013986013986014, "repo_name": "blakfeld/Data-Structures-and-Algoritms-Practice", "id": "2b15d10ec5527a8ccf4af518b5f1da3152ca82f9", "size": "737", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Python/general/find_only_repeat.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "15663" }, { "name": "Java", "bytes": "30495" }, { "name": "Python", "bytes": "22386" } ], "symlink_target": "" }
from __future__ import division, absolute_import, print_function import collections import operator import re import sys import warnings import numpy as np import numpy.core.numeric as _nx from numpy.core import linspace, atleast_1d, atleast_2d, transpose from numpy.core.numeric import ( ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, empty_like, ndarray, around, floor, ceil, take, dot, where, intp, integer, isscalar, absolute, AxisError ) from numpy.core.umath import ( pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, mod, exp, log10 ) from numpy.core.fromnumeric import ( ravel, nonzero, sort, partition, mean, any, sum ) from numpy.core.numerictypes import typecodes, number from numpy.lib.twodim_base import diag from .utils import deprecate from numpy.core.multiarray import ( _insert, add_docstring, digitize, bincount, normalize_axis_index, interp as compiled_interp, interp_complex as compiled_interp_complex ) from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc from numpy.compat import long from numpy.compat.py3k import basestring if sys.version_info[0] < 3: # Force range to be a generator, for np.delete's usage. range = xrange import __builtin__ as builtins else: import builtins __all__ = [ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' ] def rot90(m, k=1, axes=(0,1)): """ Rotate an array by 90 degrees in the plane specified by axes. Rotation direction is from the first towards the second axis. .. versionadded:: 1.12.0 Parameters ---------- m : array_like Array of two or more dimensions. k : integer Number of times the array is rotated by 90 degrees. axes: (2,) array_like The array is rotated in the plane defined by the axes. Axes must be different. Returns ------- y : ndarray A rotated view of `m`. See Also -------- flip : Reverse the order of elements in an array along the given axis. fliplr : Flip an array horizontally. flipud : Flip an array vertically. Notes ----- rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) Examples -------- >>> m = np.array([[1,2],[3,4]], int) >>> m array([[1, 2], [3, 4]]) >>> np.rot90(m) array([[2, 4], [1, 3]]) >>> np.rot90(m, 2) array([[4, 3], [2, 1]]) >>> m = np.arange(8).reshape((2,2,2)) >>> np.rot90(m, 1, (1,2)) array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]]) """ axes = tuple(axes) if len(axes) != 2: raise ValueError("len(axes) must be 2.") m = asanyarray(m) if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: raise ValueError("Axes must be different.") if (axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim): raise ValueError("Axes={} out of range for array of ndim={}." 
.format(axes, m.ndim)) k %= 4 if k == 0: return m[:] if k == 2: return flip(flip(m, axes[0]), axes[1]) axes_list = arange(0, m.ndim) (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]]) if k == 1: return transpose(flip(m,axes[1]), axes_list) else: # k == 3 return flip(transpose(m, axes_list), axes[1]) def flip(m, axis): """ Reverse the order of elements in an array along the given axis. The shape of the array is preserved, but the elements are reordered. .. versionadded:: 1.12.0 Parameters ---------- m : array_like Input array. axis : integer Axis in array, which entries are reversed. Returns ------- out : array_like A view of `m` with the entries of axis reversed. Since a view is returned, this operation is done in constant time. See Also -------- flipud : Flip an array vertically (axis=0). fliplr : Flip an array horizontally (axis=1). Notes ----- flip(m, 0) is equivalent to flipud(m). flip(m, 1) is equivalent to fliplr(m). flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. Examples -------- >>> A = np.arange(8).reshape((2,2,2)) >>> A array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> flip(A, 0) array([[[4, 5], [6, 7]], [[0, 1], [2, 3]]]) >>> flip(A, 1) array([[[2, 3], [0, 1]], [[6, 7], [4, 5]]]) >>> A = np.random.randn(3,4,5) >>> np.all(flip(A,2) == A[:,:,::-1,...]) True """ if not hasattr(m, 'ndim'): m = asarray(m) indexer = [slice(None)] * m.ndim try: indexer[axis] = slice(None, None, -1) except IndexError: raise ValueError("axis=%i is invalid for the %i-dimensional input array" % (axis, m.ndim)) return m[tuple(indexer)] def iterable(y): """ Check whether or not an object can be iterated over. Parameters ---------- y : object Input object. Returns ------- b : bool Return ``True`` if the object has an iterator method or is a sequence and ``False`` otherwise. Examples -------- >>> np.iterable([1, 2, 3]) True >>> np.iterable(2) False """ try: iter(y) except TypeError: return False return True def _hist_bin_sqrt(x): """ Square root histogram bin estimator. Bin width is inversely proportional to the data size. Used by many programs for its simplicity. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / np.sqrt(x.size) def _hist_bin_sturges(x): """ Sturges histogram bin estimator. A very simplistic estimator based on the assumption of normality of the data. This estimator has poor performance for non-normal data, which becomes especially obvious for large data sets. The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / (np.log2(x.size) + 1.0) def _hist_bin_rice(x): """ Rice histogram bin estimator. Another simple estimator with no normality assumption. It has better performance for large data than Sturges, but tends to overestimate the number of bins. The number of bins is proportional to the cube root of data size (asymptotically optimal). The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / (2.0 * x.size ** (1.0 / 3)) def _hist_bin_scott(x): """ Scott histogram bin estimator. 
The binwidth is proportional to the standard deviation of the data and inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) def _hist_bin_doane(x): """ Doane's histogram bin estimator. Improved version of Sturges' formula which works better for non-normal data. See stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ if x.size > 2: sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) sigma = np.std(x) if sigma > 0.0: # These three operations add up to # g1 = np.mean(((x - np.mean(x)) / sigma)**3) # but use only one temp array instead of three temp = x - np.mean(x) np.true_divide(temp, sigma, temp) np.power(temp, 3, temp) g1 = np.mean(temp) return x.ptp() / (1.0 + np.log2(x.size) + np.log2(1.0 + np.absolute(g1) / sg1)) return 0.0 def _hist_bin_fd(x): """ The Freedman-Diaconis histogram bin estimator. The Freedman-Diaconis rule uses interquartile range (IQR) to estimate binwidth. It is considered a variation of the Scott rule with more robustness as the IQR is less affected by outliers than the standard deviation. However, the IQR depends on fewer points than the standard deviation, so it is less accurate, especially for long tailed distributions. If the IQR is 0, this function returns 1 for the number of bins. Binwidth is inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ iqr = np.subtract(*np.percentile(x, [75, 25])) return 2.0 * iqr * x.size ** (-1.0 / 3.0) def _hist_bin_auto(x): """ Histogram bin estimator that uses the minimum width of the Freedman-Diaconis and Sturges estimators. The FD estimator is usually the most robust method, but its width estimate tends to be too large for small `x`. The Sturges estimator is quite good for small (<1000) datasets and is the default in the R language. This method gives good off the shelf behaviour. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. See Also -------- _hist_bin_fd, _hist_bin_sturges """ # There is no need to check for zero here. If ptp is, so is IQR and # vice versa. Either both are zero or neither one is. return min(_hist_bin_fd(x), _hist_bin_sturges(x)) # Private dict initialized at module load time _hist_bin_selectors = {'auto': _hist_bin_auto, 'doane': _hist_bin_doane, 'fd': _hist_bin_fd, 'rice': _hist_bin_rice, 'scott': _hist_bin_scott, 'sqrt': _hist_bin_sqrt, 'sturges': _hist_bin_sturges} def histogram(a, bins=10, range=None, normed=False, weights=None, density=None): r""" Compute the histogram of a set of data. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). 
If `bins` is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string from the list below, `histogram` will use the method chosen to calculate the optimal bin width and consequently the number of bins (see `Notes` for more detail on the estimators) from the data that falls within the requested range. While the bin width will be optimal for the actual data in the range, the number of bins will be computed to fill the entire range, including the empty portions. For visualisation, using the 'auto' option is suggested. Weighted data is not supported for automated bin size selection. 'auto' Maximum of the 'sturges' and 'fd' estimators. Provides good all around performance. 'fd' (Freedman Diaconis Estimator) Robust (resilient to outliers) estimator that takes into account data variability and data size. 'doane' An improved version of Sturges' estimator that works better with non-normal datasets. 'scott' Less robust estimator that that takes into account data variability and data size. 'rice' Estimator does not take variability into account, only data size. Commonly overestimates number of bins required. 'sturges' R's default method, only accounts for data size. Only optimal for gaussian data and underestimates number of bins for large non-gaussian datasets. 'sqrt' Square root (of data size) estimator, used by Excel and other programs for its speed and simplicity. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. The first element of the range must be less than or equal to the second. `range` affects the automatic bin computation as well. While bin width is computed to be optimal based on the actual data within `range`, the bin count will fill the entire range including portions containing no data. normed : bool, optional This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy behavior. It will be removed in NumPy 2.0.0. Use the ``density`` keyword instead. If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that this latter behavior is known to be buggy with unequal bin widths; use ``density`` instead. weights : array_like, optional An array of weights, of the same shape as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `density` is True, the weights are normalized, so that the integral of the density over the range remains 1. density : bool, optional If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the ``normed`` keyword if given. Returns ------- hist : array The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. See Also -------- histogramdd, bincount, searchsorted, digitize Notes ----- All but the last (righthand-most) bin is half-open. 
In other words, if `bins` is:: [1, 2, 3, 4] then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* 4. .. versionadded:: 1.11.0 The methods to estimate the optimal number of bins are well founded in literature, and are inspired by the choices R provides for histogram visualisation. Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal, which is why it appears in most estimators. These are simply plug-in methods that give good starting points for number of bins. In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins. All estimators that compute bin counts are recast to bin width using the `ptp` of the data. The final bin count is obtained from ``np.round(np.ceil(range / h))`. 'Auto' (maximum of the 'Sturges' and 'FD' estimators) A compromise to get a good value. For small datasets the Sturges value will usually be chosen, while larger datasets will usually default to FD. Avoids the overly conservative behaviour of FD and Sturges for small and large datasets respectively. Switchover point is usually :math:`a.size \approx 1000`. 'FD' (Freedman Diaconis Estimator) .. math:: h = 2 \frac{IQR}{n^{1/3}} The binwidth is proportional to the interquartile range (IQR) and inversely proportional to cube root of a.size. Can be too conservative for small datasets, but is quite good for large datasets. The IQR is very robust to outliers. 'Scott' .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}} The binwidth is proportional to the standard deviation of the data and inversely proportional to cube root of ``x.size``. Can be too conservative for small datasets, but is quite good for large datasets. The standard deviation is not very robust to outliers. Values are very similar to the Freedman-Diaconis estimator in the absence of outliers. 'Rice' .. math:: n_h = 2n^{1/3} The number of bins is only proportional to cube root of ``a.size``. It tends to overestimate the number of bins and it does not take into account data variability. 'Sturges' .. math:: n_h = \log _{2}n+1 The number of bins is the base 2 log of ``a.size``. This estimator assumes normality of data and is too conservative for larger, non-normal datasets. This is the default method in R's ``hist`` method. 'Doane' .. math:: n_h = 1 + \log_{2}(n) + \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}}) g_1 = mean[(\frac{x - \mu}{\sigma})^3] \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} An improved version of Sturges' formula that produces better estimates for non-normal datasets. This estimator attempts to account for the skew of the data. 'Sqrt' .. math:: n_h = \sqrt n The simplest and fastest estimator. Only takes into account the data size. Examples -------- >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) (array([1, 4, 1]), array([0, 1, 2, 3])) >>> a = np.arange(5) >>> hist, bin_edges = np.histogram(a, density=True) >>> hist array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) >>> hist.sum() 2.4999999999999996 >>> np.sum(hist*np.diff(bin_edges)) 1.0 .. 
versionadded:: 1.11.0 Automated Bin Selection Methods example, using 2 peak random data with 2000 points: >>> import matplotlib.pyplot as plt >>> rng = np.random.RandomState(10) # deterministic random data >>> a = np.hstack((rng.normal(size=1000), ... rng.normal(loc=5, scale=2, size=1000))) >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram >>> plt.title("Histogram with 'auto' bins") >>> plt.show() """ a = asarray(a) if weights is not None: weights = asarray(weights) if np.any(weights.shape != a.shape): raise ValueError( 'weights should have the same shape as a.') weights = weights.ravel() a = a.ravel() # Do not modify the original value of range so we can check for `None` if range is None: if a.size == 0: # handle empty arrays. Can't determine range, so use 0-1. mn, mx = 0.0, 1.0 else: mn, mx = a.min() + 0.0, a.max() + 0.0 else: mn, mx = [mi + 0.0 for mi in range] if mn > mx: raise ValueError( 'max must be larger than min in range parameter.') if not np.all(np.isfinite([mn, mx])): raise ValueError( 'range parameter must be finite.') if mn == mx: mn -= 0.5 mx += 0.5 if isinstance(bins, basestring): # if `bins` is a string for an automatic method, # this will replace it with the number of bins calculated if bins not in _hist_bin_selectors: raise ValueError("{0} not a valid estimator for bins".format(bins)) if weights is not None: raise TypeError("Automated estimation of the number of " "bins is not supported for weighted data") # Make a reference to `a` b = a # Update the reference if the range needs truncation if range is not None: keep = (a >= mn) keep &= (a <= mx) if not np.logical_and.reduce(keep): b = a[keep] if b.size == 0: bins = 1 else: # Do not call selectors on empty arrays width = _hist_bin_selectors[bins](b) if width: bins = int(np.ceil((mx - mn) / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. bins = 1 # Histogram is an integer or a float array depending on the weights. if weights is None: ntype = np.dtype(np.intp) else: ntype = weights.dtype # We set a block size, as this allows us to iterate over chunks when # computing histograms, to minimize memory usage. BLOCK = 65536 if not iterable(bins): if np.isscalar(bins) and bins < 1: raise ValueError( '`bins` should be a positive integer.') # At this point, if the weights are not integer, floating point, or # complex, we have to use the slow algorithm. if weights is not None and not (np.can_cast(weights.dtype, np.double) or np.can_cast(weights.dtype, np.complex)): bins = linspace(mn, mx, bins + 1, endpoint=True) if not iterable(bins): # We now convert values of a to bin indices, under the assumption of # equal bin widths (which is valid here). # Initialize empty histogram n = np.zeros(bins, ntype) # Pre-compute histogram scaling factor norm = bins / (mx - mn) # Compute the bin edges for potential correction. bin_edges = linspace(mn, mx, bins + 1, endpoint=True) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it # is 2x as fast) and it results in a memory footprint 3x lower in the # limit of large arrays. 
for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] if weights is None: tmp_w = None else: tmp_w = weights[i:i + BLOCK] # Only include values in the right range keep = (tmp_a >= mn) keep &= (tmp_a <= mx) if not np.logical_and.reduce(keep): tmp_a = tmp_a[keep] if tmp_w is not None: tmp_w = tmp_w[keep] tmp_a_data = tmp_a.astype(float) tmp_a = tmp_a_data - mn tmp_a *= norm # Compute the bin indices, and for values that lie exactly on mx we # need to subtract one indices = tmp_a.astype(np.intp) indices[indices == bins] -= 1 # The index computation is not guaranteed to give exactly # consistent results within ~1 ULP of the bin edges. decrement = tmp_a_data < bin_edges[indices] indices[decrement] -= 1 # The last bin includes the right edge. The other bins do not. increment = ((tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1)) indices[increment] += 1 # We now compute the histogram using bincount if ntype.kind == 'c': n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins) n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins) else: n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype) # Rename the bin edges for return. bins = bin_edges else: bins = asarray(bins) if (np.diff(bins) < 0).any(): raise ValueError( 'bins must increase monotonically.') # Initialize empty histogram n = np.zeros(bins.shape, ntype) if weights is None: for i in arange(0, len(a), BLOCK): sa = sort(a[i:i+BLOCK]) n += np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] else: zero = array(0, dtype=ntype) for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] tmp_w = weights[i:i+BLOCK] sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] cw = np.concatenate(([zero, ], sw.cumsum())) bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] n += cw[bin_index] n = np.diff(n) if density is not None: if density: db = array(np.diff(bins), float) return n/db/n.sum(), bins else: return n, bins else: # deprecated, buggy behavior. Remove for NumPy 2.0.0 if normed: db = array(np.diff(bins), float) return n/(n*db).sum(), bins else: return n, bins def histogramdd(sample, bins=10, range=None, normed=False, weights=None): """ Compute the multidimensional histogram of some data. Parameters ---------- sample : array_like The data to be histogrammed. It must be an (N,D) array or data that can be converted to such. The rows of the resulting array are the coordinates of points in a D dimensional polytope. bins : sequence or int, optional The bin specification: * A sequence of arrays describing the bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... =bins) * The number of bins for all dimensions (nx=ny=...=bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitly in `bins`. Defaults to the minimum and maximum values along each dimension. normed : bool, optional If False, returns the number of samples in each bin. If True, returns the bin density ``bin_count / sample_count / bin_volume``. weights : (N,) array_like, optional An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. Weights are normalized to 1 if normed is True. If normed is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray The multidimensional histogram of sample x. See normed and weights for the different possible semantics. 
edges : list A list of D arrays describing the bin edges for each dimension. See Also -------- histogram: 1-D histogram histogram2d: 2-D histogram Examples -------- >>> r = np.random.randn(100,3) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) """ try: # Sample is an ND-array. N, D = sample.shape except (AttributeError, ValueError): # Sample is a sequence of 1D arrays. sample = atleast_2d(sample).T N, D = sample.shape nbin = empty(D, int) edges = D*[None] dedges = D*[None] if weights is not None: weights = asarray(weights) try: M = len(bins) if M != D: raise ValueError( 'The dimension of bins must be equal to the dimension of the ' ' sample x.') except TypeError: # bins is an integer bins = D*[bins] # Select range for each dimension # Used only if number of bins is given. if range is None: # Handle empty input. Range can't be determined in that case, use 0-1. if N == 0: smin = zeros(D) smax = ones(D) else: smin = atleast_1d(array(sample.min(0), float)) smax = atleast_1d(array(sample.max(0), float)) else: if not np.all(np.isfinite(range)): raise ValueError( 'range parameter must be finite.') smin = zeros(D) smax = zeros(D) for i in arange(D): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in arange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # avoid rounding issues for comparisons when dealing with inexact types if np.issubdtype(sample.dtype, np.inexact): edge_dt = sample.dtype else: edge_dt = float # Create edge arrays for i in arange(D): if isscalar(bins[i]): if bins[i] < 1: raise ValueError( "Element at index %s in `bins` should be a positive " "integer." % i) nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) else: edges[i] = asarray(bins[i], edge_dt) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = diff(edges[i]) if np.any(np.asarray(dedges[i]) <= 0): raise ValueError( "Found bin edge of size <= 0. Did you specify `bins` with" "non-monotonic sequence?") nbin = asarray(nbin) # Handle empty input. if N == 0: return np.zeros(nbin-2), edges # Compute the bin number each sample falls into. Ncount = {} for i in arange(D): Ncount[i] = digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right edge to be # counted in the last bin, and not as an outlier. for i in arange(D): # Rounding precision mindiff = dedges[i].min() if not np.isinf(mindiff): decimal = int(-log10(mindiff)) + 6 # Find which points are on the rightmost edge. not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal)) # Shift these points one bin to the left. Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 # Flattened histogram matrix (1D) # Reshape is used so that overlarge arrays # will raise an error. hist = zeros(nbin, float).reshape(-1) # Compute the sample indices in the flattened histogram matrix. ni = nbin.argsort() xy = zeros(N, int) for i in arange(0, D-1): xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() xy += Ncount[ni[-1]] # Compute the number of repetitions in xy and assign it to the # flattened histmat. 
if len(xy) == 0: return zeros(nbin-2, int), edges flatcount = bincount(xy, weights) a = arange(len(flatcount)) hist[a] = flatcount # Shape into a proper matrix hist = hist.reshape(sort(nbin)) for i in arange(nbin.size): j = ni.argsort()[i] hist = hist.swapaxes(i, j) ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). core = D*[slice(1, -1)] hist = hist[core] # Normalize if normed is True if normed: s = hist.sum() for i in arange(D): shape = ones(D, int) shape[i] = nbin[i] - 2 hist = hist / dedges[i].reshape(shape) hist /= s if (hist.shape != nbin - 2).any(): raise RuntimeError( "Internal Shape Error") return hist, edges def average(a, axis=None, weights=None, returned=False): """ Compute the weighted average along the specified axis. Parameters ---------- a : array_like Array containing data to be averaged. If `a` is not an array, a conversion is attempted. axis : None or int or tuple of ints, optional Axis or axes along which to average `a`. The default, axis=None, will average over all of the elements of the input array. If axis is negative it counts from the last to the first axis. .. versionadded:: 1.7.0 If axis is a tuple of ints, averaging is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the average according to its associated weight. The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If `weights=None`, then all data in `a` are assumed to have a weight equal to one. returned : bool, optional Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) is returned, otherwise only the average is returned. If `weights=None`, `sum_of_weights` is equivalent to the number of elements over which the average is taken. Returns ------- average, [sum_of_weights] : array_type or double Return the average along the specified axis. When returned is `True`, return a tuple with the average as the first element and the sum of the weights as the second element. The return type is `Float` if `a` is of integer type, otherwise it is of the same type as `a`. `sum_of_weights` is of the same type as `average`. Raises ------ ZeroDivisionError When all weights along axis are zero. See `numpy.ma.average` for a version robust to this type of error. TypeError When the length of 1D `weights` is not the same as the shape of `a` along axis. See Also -------- mean ma.average : average for masked arrays -- useful if your data contains "missing" values Examples -------- >>> data = range(1,5) >>> data [1, 2, 3, 4] >>> np.average(data) 2.5 >>> np.average(range(1,11), weights=range(10,0,-1)) 4.0 >>> data = np.arange(6).reshape((3,2)) >>> data array([[0, 1], [2, 3], [4, 5]]) >>> np.average(data, axis=1, weights=[1./4, 3./4]) array([ 0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) Traceback (most recent call last): ... TypeError: Axis must be specified when shapes of a and weights differ. 
""" a = np.asanyarray(a) if weights is None: avg = a.mean(axis) scl = avg.dtype.type(a.size/avg.size) else: wgt = np.asanyarray(weights) if issubclass(a.dtype.type, (np.integer, np.bool_)): result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') else: result_dtype = np.result_type(a.dtype, wgt.dtype) # Sanity checks if a.shape != wgt.shape: if axis is None: raise TypeError( "Axis must be specified when shapes of a and weights " "differ.") if wgt.ndim != 1: raise TypeError( "1D weights expected when shapes of a and weights differ.") if wgt.shape[0] != a.shape[axis]: raise ValueError( "Length of weights not compatible with specified axis.") # setup wgt to broadcast along axis wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) wgt = wgt.swapaxes(-1, axis) scl = wgt.sum(axis=axis, dtype=result_dtype) if np.any(scl == 0.0): raise ZeroDivisionError( "Weights sum to zero, can't be normalized") avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl if returned: if scl.shape != avg.shape: scl = np.broadcast_to(scl, avg.shape).copy() return avg, scl else: return avg def asarray_chkfinite(a, dtype=None, order=None): """Convert the input to an array, checking for NaNs or Infs. Parameters ---------- a : array_like Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. Success requires no NaNs or Infs. dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major (C-style) or column-major (Fortran-style) memory representation. Defaults to 'C'. Returns ------- out : ndarray Array interpretation of `a`. No copy is performed if the input is already an ndarray. If `a` is a subclass of ndarray, a base class ndarray is returned. Raises ------ ValueError Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). See Also -------- asarray : Create and array. asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. asfarray : Convert input to a floating point ndarray. asfortranarray : Convert input to an ndarray with column-major memory order. fromiter : Create an array from an iterator. fromfunction : Construct an array by executing a function on grid positions. Examples -------- Convert a list into an array. If all elements are finite ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] >>> np.asarray_chkfinite(a, dtype=float) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. >>> a = [1, 2, np.inf] >>> try: ... np.asarray_chkfinite(a) ... except ValueError: ... print('ValueError') ... ValueError """ a = asarray(a, dtype=dtype, order=order) if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): raise ValueError( "array must not contain infs or NaNs") return a def piecewise(x, condlist, funclist, *args, **kw): """ Evaluate a piecewise-defined function. Given a set of conditions and corresponding functions, evaluate each function on the input data wherever its condition is true. Parameters ---------- x : ndarray or scalar The input domain. condlist : list of bool arrays or bool scalars Each boolean array corresponds to a function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as the output value. Each boolean array in `condlist` selects a piece of `x`, and should therefore be of the same shape as `x`. The length of `condlist` must correspond to that of `funclist`. 
If one extra function is given, i.e. if ``len(funclist) - len(condlist) == 1``, then that extra function is the default value, used wherever all conditions are false. funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding condition is True. It should take an array as input and give an array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. args : tuple, optional Any further arguments given to `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is called as ``f(x, 1, 'a')``. kw : dict, optional Keyword arguments used in calling `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., alpha=1)``, then each function is called as ``f(x, alpha=1)``. Returns ------- out : ndarray The output is the same shape and type as x and is found by calling the functions in `funclist` on the appropriate portions of `x`, as defined by the boolean arrays in `condlist`. Portions not covered by any condition have a default value of 0. See Also -------- choose, select, where Notes ----- This is similar to choose or select, except that functions are evaluated on elements of `x` that satisfy the corresponding condition from `condlist`. The result is:: |-- |funclist[0](x[condlist[0]]) out = |funclist[1](x[condlist[1]]) |... |funclist[n2](x[condlist[n2]]) |-- Examples -------- Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) array([-1., -1., -1., 1., 1., 1.]) Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for ``x >= 0``. >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) Apply the same function to a scalar value. >>> y = -2 >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) array(2) """ x = asanyarray(x) n2 = len(funclist) if (isscalar(condlist) or not (isinstance(condlist[0], list) or isinstance(condlist[0], ndarray))): if not isscalar(condlist) and x.size == 1 and x.ndim == 0: condlist = [[c] for c in condlist] else: condlist = [condlist] condlist = array(condlist, dtype=bool) n = len(condlist) # This is a hack to work around problems with NumPy's # handling of 0-d arrays and boolean indexing with # numpy.bool_ scalars zerod = False if x.ndim == 0: x = x[None] zerod = True if n == n2 - 1: # compute the "otherwise" condition. totlist = np.logical_or.reduce(condlist, axis=0) # Only able to stack vertically if the array is 1d or less if x.ndim <= 1: condlist = np.vstack([condlist, ~totlist]) else: condlist = [asarray(c, dtype=bool) for c in condlist] totlist = condlist[0] for k in range(1, n): totlist |= condlist[k] condlist.append(~totlist) n += 1 y = zeros(x.shape, x.dtype) for k in range(n): item = funclist[k] if not isinstance(item, collections.Callable): y[condlist[k]] = item else: vals = x[condlist[k]] if vals.size > 0: y[condlist[k]] = item(vals, *args, **kw) if zerod: y = y.squeeze() return y def select(condlist, choicelist, default=0): """ Return an array drawn from elements in choicelist, depending on conditions. Parameters ---------- condlist : list of bool ndarrays The list of conditions which determine from which array in `choicelist` the output elements are taken. When multiple conditions are satisfied, the first one encountered in `condlist` is used. 
choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. default : scalar, optional The element inserted in `output` when all conditions evaluate to False. Returns ------- output : ndarray The output at position m is the m-th element of the array in `choicelist` where the m-th element of the corresponding array in `condlist` is True. See Also -------- where : Return elements from one of two arrays depending on condition. take, choose, compress, diag, diagonal Examples -------- >>> x = np.arange(10) >>> condlist = [x<3, x>5] >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) """ # Check the size of condlist and choicelist are the same, or abort. if len(condlist) != len(choicelist): raise ValueError( 'list of cases must be same length as list of conditions') # Now that the dtype is known, handle the deprecated select([], []) case if len(condlist) == 0: # 2014-02-24, 1.9 warnings.warn("select with an empty condition list is not possible" "and will be deprecated", DeprecationWarning, stacklevel=2) return np.asarray(default)[()] choicelist = [np.asarray(choice) for choice in choicelist] choicelist.append(np.asarray(default)) # need to get the result type before broadcasting for correct scalar # behaviour dtype = np.result_type(*choicelist) # Convert conditions to arrays and broadcast conditions and choices # as the shape is needed for the result. Doing it separately optimizes # for example when all choices are scalars. condlist = np.broadcast_arrays(*condlist) choicelist = np.broadcast_arrays(*choicelist) # If cond array is not an ndarray in boolean format or scalar bool, abort. deprecated_ints = False for i in range(len(condlist)): cond = condlist[i] if cond.dtype.type is not np.bool_: if np.issubdtype(cond.dtype, np.integer): # A previous implementation accepted int ndarrays accidentally. # Supported here deliberately, but deprecated. condlist[i] = condlist[i].astype(bool) deprecated_ints = True else: raise ValueError( 'invalid entry in choicelist: should be boolean ndarray') if deprecated_ints: # 2014-02-24, 1.9 msg = "select condlists containing integer ndarrays is deprecated " \ "and will be removed in the future. Use `.astype(bool)` to " \ "convert to bools." warnings.warn(msg, DeprecationWarning, stacklevel=2) if choicelist[0].ndim == 0: # This may be common, so avoid the call. result_shape = condlist[0].shape else: result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape result = np.full(result_shape, choicelist[-1], dtype) # Use np.copyto to burn each choicelist array onto result, using the # corresponding condlist as a boolean mask. This is done in reverse # order since the first choice should take precedence. choicelist = choicelist[-2::-1] condlist = condlist[::-1] for choice, cond in zip(choicelist, condlist): np.copyto(result, choice, where=cond) return result def copy(a, order='K'): """ Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:`ndarray.copy` are very similar, but have different default values for their order= arguments.) Returns ------- arr : ndarray Array interpretation of `a`. 
Notes ----- This is equivalent to: >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False """ return array(a, order=order, copy=True) # Basic operations def gradient(f, *varargs, **kwargs): """ Return the gradient of an N-dimensional array. The gradient is computed using second order accurate central differences in the interior points and either first or second order accurate one-sides (forward or backwards) differences at the boundaries. The returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like An N-dimensional array containing samples of a scalar function. varargs : list of scalar or array, optional Spacing between f values. Default unitary spacing for all dimensions. Spacing can be specified using: 1. single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... 3. N arrays to specify the coordinates of the values along each dimension of F. The length of the array must match the size of the corresponding dimension 4. Any combination of N scalars/arrays with the meaning of 2. and 3. If `axis` is given, the number of varargs must equal the number of axes. Default: 1. edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. .. versionadded:: 1.9.1 axis : None or int or tuple of ints, optional Gradient is calculated only along the given axis or axes The default (axis = None) is to calculate the gradient for all the axes of the input array. axis may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.11.0 Returns ------- gradient : ndarray or list of ndarray A set of ndarrays (or a single ndarray if there is only one dimension) corresponding to the derivatives of f with respect to each dimension. Each derivative has the same shape as f. Examples -------- >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) >>> np.gradient(f) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) Spacing can be also specified with an array that represents the coordinates of the values F along the dimensions. For instance a uniform spacing: >>> x = np.arange(f.size) >>> np.gradient(f, x) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) Or a non uniform one: >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float) >>> np.gradient(f, x) array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) For two dimensional arrays, the return will be two arrays ordered by axis. In this example the first array stands for the gradient in rows and the second one in columns direction: >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] In this example the spacing is also specified: uniform for axis=0 and non uniform for axis=1 >>> dx = 2. >>> y = [1., 1.5, 3.5] >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y) [array([[ 1. , 1. , -0.5], [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], [ 2. 
, 1.7, 0.5]])] It is possible to specify how boundaries are treated using `edge_order` >>> x = np.array([0, 1, 2, 3, 4]) >>> f = x**2 >>> np.gradient(f, edge_order=1) array([ 1., 2., 4., 6., 7.]) >>> np.gradient(f, edge_order=2) array([-0., 2., 4., 6., 8.]) The `axis` keyword can be used to specify a subset of axes of which the gradient is calculated >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) Notes ----- Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continous derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the spacing the finite difference coefficients are computed by minimising the consistency error :math:`\\eta_{i}`: .. math:: \\eta_{i} = f_{i}^{\\left(1\\right)} - \\left[ \\alpha f\\left(x_{i}\\right) + \\beta f\\left(x_{i} + h_{d}\\right) + \\gamma f\\left(x_{i}-h_{s}\\right) \\right] By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` with their Taylor series expansion, this translates into solving the following the linear system: .. math:: \\left\\{ \\begin{array}{r} \\alpha+\\beta+\\gamma=0 \\\\ -\\beta h_{d}+\\gamma h_{s}=1 \\\\ \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 \\end{array} \\right. The resulting approximation of :math:`f_{i}^{(1)}` is the following: .. math:: \\hat f_{i}^{(1)} = \\frac{ h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + h_{s}h_{d}^{2}}{h_{d} + h_{s}}\\right) It is worth noting that if :math:`h_{s}=h_{d}` (i.e., data are evenly spaced) we find the standard second order approximation: .. math:: \\hat f_{i}^{(1)}= \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + \\mathcal{O}\\left(h^{2}\\right) With a similar procedure the forward/backward approximations used for boundaries can be derived. References ---------- .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics (Texts in Applied Mathematics). New York: Springer. .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations in Geophysical Fluid Dynamics. New York: Springer. .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on Arbitrarily Spaced Grids, Mathematics of Computation 51, no. 184 : 699-706. `PDF <http://www.ams.org/journals/mcom/1988-51-184/ S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_. 
""" f = np.asanyarray(f) N = f.ndim # number of dimensions axes = kwargs.pop('axis', None) if axes is None: axes = tuple(range(N)) else: axes = _nx.normalize_axis_tuple(axes, N) len_axes = len(axes) n = len(varargs) if n == 0: dx = [1.0] * len_axes elif n == len_axes or (n == 1 and np.isscalar(varargs[0])): dx = list(varargs) for i, distances in enumerate(dx): if np.isscalar(distances): continue if len(distances) != f.shape[axes[i]]: raise ValueError("distances must be either scalars or match " "the length of the corresponding dimension") diffx = np.diff(dx[i]) # if distances are constant reduce to the scalar case # since it brings a consistent speedup if (diffx == diffx[0]).all(): diffx = diffx[0] dx[i] = diffx if len(dx) == 1: dx *= len_axes else: raise TypeError("invalid number of arguments") edge_order = kwargs.pop('edge_order', 1) if kwargs: raise TypeError('"{}" are not valid keyword arguments.'.format( '", "'.join(kwargs.keys()))) if edge_order > 2: raise ValueError("'edge_order' greater than 2 not supported") # use central differences on interior and one-sided differences on the # endpoints. This preserves second order-accuracy over the full domain. outvals = [] # create slice objects --- initially all are [:, :, ..., :] slice1 = [slice(None)]*N slice2 = [slice(None)]*N slice3 = [slice(None)]*N slice4 = [slice(None)]*N otype = f.dtype.char if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: otype = 'd' # Difference of datetime64 elements results in timedelta64 if otype == 'M': # Need to use the full dtype name because it contains unit information otype = f.dtype.name.replace('datetime', 'timedelta') elif otype == 'm': # Needs to keep the specific units, can't be a general unit otype = f.dtype # Convert datetime64 data into ints. Make dummy variable `y` # that is a view of ints if the data is datetime64, otherwise # just set y equal to the array `f`. if f.dtype.char in ["M", "m"]: y = f.view('int64') else: y = f for i, axis in enumerate(axes): if y.shape[axis] < edge_order + 1: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least (edge_order + 1) elements are required.") # result allocation out = np.empty_like(y, dtype=otype) uniform_spacing = np.isscalar(dx[i]) # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) slice2[axis] = slice(None, -2) slice3[axis] = slice(1, -1) slice4[axis] = slice(2, None) if uniform_spacing: out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i]) else: dx1 = dx[i][0:-1] dx2 = dx[i][1:] a = -(dx2)/(dx1 * (dx1 + dx2)) b = (dx2 - dx1) / (dx1 * dx2) c = dx1 / (dx2 * (dx1 + dx2)) # fix the shape for broadcasting shape = np.ones(N, dtype=int) shape[axis] = -1 a.shape = b.shape = c.shape = shape # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] # Numerical differentiation: 1st order edges if edge_order == 1: slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 dx_0 = dx[i] if uniform_spacing else dx[i][0] # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0]) out[slice1] = (y[slice2] - y[slice3]) / dx_0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 dx_n = dx[i] if uniform_spacing else dx[i][-1] # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2]) out[slice1] = (y[slice2] - y[slice3]) / dx_n # Numerical differentiation: 2nd order edges else: slice1[axis] = 0 slice2[axis] = 0 slice3[axis] = 1 slice4[axis] = 2 if uniform_spacing: a = -1.5 / dx[i] b = 2. 
/ dx[i] c = -0.5 / dx[i] else: dx1 = dx[i][0] dx2 = dx[i][1] a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) b = (dx1 + dx2) / (dx1 * dx2) c = - dx1 / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2] out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] slice1[axis] = -1 slice2[axis] = -3 slice3[axis] = -2 slice4[axis] = -1 if uniform_spacing: a = 0.5 / dx[i] b = -2. / dx[i] c = 1.5 / dx[i] else: dx1 = dx[i][-2] dx2 = dx[i][-1] a = (dx2) / (dx1 * (dx1 + dx2)) b = - (dx2 + dx1) / (dx1 * dx2) c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] outvals.append(out) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) slice2[axis] = slice(None) slice3[axis] = slice(None) slice4[axis] = slice(None) if len_axes == 1: return outvals[0] else: return outvals def diff(a, n=1, axis=-1): """ Calculate the n-th discrete difference along given axis. The first difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher differences are calculated by using `diff` recursively. Parameters ---------- a : array_like Input array n : int, optional The number of times values are differenced. axis : int, optional The axis along which the difference is taken, default is the last axis. Returns ------- diff : ndarray The n-th differences. The shape of the output is the same as `a` except along `axis` where the dimension is smaller by `n`. The type of the output is the same as that of the input. See Also -------- gradient, ediff1d, cumsum Notes ----- For boolean arrays, the preservation of type means that the result will contain `False` when consecutive elements are the same and `True` when they differ. Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) >>> np.diff(x, n=2) array([ 1, 1, -10]) >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], [5, 1, 2]]) >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) """ if n == 0: return a if n < 0: raise ValueError( "order must be non-negative but got " + repr(n)) a = asanyarray(a) nd = a.ndim slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) if n > 1: return diff(a[slice1]-a[slice2], n-1, axis=axis) else: return a[slice1]-a[slice2] def interp(x, xp, fp, left=None, right=None, period=None): """ One-dimensional linear interpolation. Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data-points. Parameters ---------- x : array_like The x-coordinates of the interpolated values. xp : 1-D sequence of floats The x-coordinates of the data points, must be increasing if argument `period` is not specified. Otherwise, `xp` is internally sorted after normalizing the periodic boundaries with ``xp = xp % period``. fp : 1-D sequence of float or complex The y-coordinates of the data points, same length as `xp`. left : optional float or complex corresponding to fp Value to return for `x < xp[0]`, default is `fp[0]`. right : optional float or complex corresponding to fp Value to return for `x > xp[-1]`, default is `fp[-1]`. period : None or float, optional A period for the x-coordinates. This parameter allows the proper interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. .. 
versionadded:: 1.10.0 Returns ------- y : float or complex (corresponding to fp) or ndarray The interpolated values, same shape as `x`. Raises ------ ValueError If `xp` and `fp` have different length If `xp` or `fp` are not 1-D sequences If `period == 0` Notes ----- Does not check that the x-coordinate sequence `xp` is increasing. If `xp` is not increasing, the results are nonsense. A simple check for increasing is:: np.all(np.diff(xp) > 0) Examples -------- >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) 1.0 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) array([ 3. , 3. , 2.5 , 0.56, 0. ]) >>> UNDEF = -99.0 >>> np.interp(3.14, xp, fp, right=UNDEF) -99.0 Plot an interpolant to the sine function: >>> x = np.linspace(0, 2*np.pi, 10) >>> y = np.sin(x) >>> xvals = np.linspace(0, 2*np.pi, 50) >>> yinterp = np.interp(xvals, x, y) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(xvals, yinterp, '-x') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.show() Interpolation with periodic x-coordinates: >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] >>> xp = [190, -190, 350, -350] >>> fp = [5, 10, 3, 4] >>> np.interp(x, xp, fp, period=360) array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) Complex interpolation >>> x = [1.5, 4.0] >>> xp = [2,3,5] >>> fp = [1.0j, 0, 2+3j] >>> np.interp(x, xp, fp) array([ 0.+1.j , 1.+1.5j]) """ fp = np.asarray(fp) if np.iscomplexobj(fp): interp_func = compiled_interp_complex input_dtype = np.complex128 else: interp_func = compiled_interp input_dtype = np.float64 if period is None: if isinstance(x, (float, int, number)): return interp_func([x], xp, fp, left, right).item() elif isinstance(x, np.ndarray) and x.ndim == 0: return interp_func([x], xp, fp, left, right).item() else: return interp_func(x, xp, fp, left, right) else: if period == 0: raise ValueError("period must be a non-zero value") period = abs(period) left = None right = None return_array = True if isinstance(x, (float, int, number)): return_array = False x = [x] x = np.asarray(x, dtype=np.float64) xp = np.asarray(xp, dtype=np.float64) fp = np.asarray(fp, dtype=input_dtype) if xp.ndim != 1 or fp.ndim != 1: raise ValueError("Data points must be 1-D sequences") if xp.shape[0] != fp.shape[0]: raise ValueError("fp and xp are not of the same length") # normalizing periodic boundaries x = x % period xp = xp % period asort_xp = np.argsort(xp) xp = xp[asort_xp] fp = fp[asort_xp] xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) fp = np.concatenate((fp[-1:], fp, fp[0:1])) if return_array: return interp_func(x, xp, fp, left, right) else: return interp_func(x, xp, fp, left, right).item() def angle(z, deg=0): """ Return the angle of the complex argument. Parameters ---------- z : array_like A complex number or sequence of complex numbers. deg : bool, optional Return angle in degrees if True, radians if False (default). Returns ------- angle : ndarray or scalar The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. See Also -------- arctan2 absolute Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. 
, 1.57079633, 0.78539816]) >>> np.angle(1+1j, deg=True) # in degrees 45.0 """ if deg: fact = 180/pi else: fact = 1.0 z = asarray(z) if (issubclass(z.dtype.type, _nx.complexfloating)): zimag = z.imag zreal = z.real else: zimag = 0 zreal = z return arctan2(zimag, zreal) * fact def unwrap(p, discont=pi, axis=-1): """ Unwrap by changing deltas between values to 2*pi complement. Unwrap radian phase `p` by changing absolute jumps greater than `discont` to their 2*pi complement along the given axis. Parameters ---------- p : array_like Input array. discont : float, optional Maximum discontinuity between values, default is ``pi``. axis : int, optional Axis along which unwrap will operate, default is the last axis. Returns ------- out : ndarray Output array. See Also -------- rad2deg, deg2rad Notes ----- If the discontinuity in `p` is smaller than ``pi``, but larger than `discont`, no unwrapping is done because taking the 2*pi complement would only make the discontinuity larger. Examples -------- >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) """ p = asarray(p) nd = p.ndim dd = diff(p, axis=axis) slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) ddmod = mod(dd + pi, 2*pi) - pi _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up def sort_complex(a): """ Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> np.sort_complex([5, 3, 6, 2, 1]) array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) b.sort() if not issubclass(b.dtype.type, _nx.complexfloating): if b.dtype.char in 'bhBH': return b.astype('F') elif b.dtype.char == 'g': return b.astype('G') else: return b.astype('D') else: return b def trim_zeros(filt, trim='fb'): """ Trim the leading and/or trailing zeros from a 1-D array or sequence. Parameters ---------- filt : 1-D array or sequence Input array. trim : str, optional A string with 'f' representing trim from front and 'b' to trim from back. Default is 'fb', trim zeros from both front and back of the array. Returns ------- trimmed : 1-D array or sequence The result of trimming the input. The input data type is preserved. Examples -------- >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) >>> np.trim_zeros(a, 'b') array([0, 0, 0, 1, 2, 3, 0, 2, 1]) The input data type is preserved, list/tuple in means list/tuple out. >>> np.trim_zeros([0, 1, 2, 0]) [1, 2] """ first = 0 trim = trim.upper() if 'F' in trim: for i in filt: if i != 0.: break else: first = first + 1 last = len(filt) if 'B' in trim: for i in filt[::-1]: if i != 0.: break else: last = last - 1 return filt[first:last] @deprecate def unique(x): """ This function is deprecated. Use numpy.lib.arraysetops.unique() instead. 
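A minimal sketch of the phase correction performed by `unwrap` above, using the default ``discont = pi``; the phase data are the same toy example as the docstring, and the intermediate names are arbitrary.

>>> import numpy as np
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> dd = np.diff(phase)
>>> ddmod = np.mod(dd + np.pi, 2*np.pi) - np.pi
>>> ddmod[(ddmod == -np.pi) & (dd > 0)] = np.pi
>>> correction = np.where(np.abs(dd) < np.pi, 0, ddmod - dd)
>>> up = phase.copy()
>>> up[1:] += correction.cumsum()
>>> np.allclose(up, np.unwrap(phase))
True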
""" try: tmp = x.flatten() if tmp.size == 0: return tmp tmp.sort() idx = concatenate(([True], tmp[1:] != tmp[:-1])) return tmp[idx] except AttributeError: items = sorted(set(x)) return asarray(items) def extract(condition, arr): """ Return the elements of an array that satisfy some condition. This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. Note that `place` does the exact opposite of `extract`. Parameters ---------- condition : array_like An array whose nonzero or True entries indicate the elements of `arr` to extract. arr : array_like Input array of the same size as `condition`. Returns ------- extract : ndarray Rank 1 array of values from `arr` where `condition` is True. See Also -------- take, put, copyto, compress, place Examples -------- >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> condition = np.mod(arr, 3)==0 >>> condition array([[ True, False, False, True], [False, False, True, False], [False, True, False, False]], dtype=bool) >>> np.extract(condition, arr) array([0, 3, 6, 9]) If `condition` is boolean: >>> arr[condition] array([0, 3, 6, 9]) """ return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) def place(arr, mask, vals): """ Change elements of an array based on conditional and input values. Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that `place` uses the first N elements of `vals`, where N is the number of True values in `mask`, while `copyto` uses the elements where `mask` is True. Note that `extract` does the exact opposite of `place`. Parameters ---------- arr : ndarray Array to put data into. mask : array_like Boolean mask array. Must have the same size as `a`. vals : 1-D sequence Values to put into `a`. Only the first N elements are used, where N is the number of True values in `mask`. If `vals` is smaller than N, it will be repeated, and if elements of `a` are to be masked, this sequence must be non-empty. See Also -------- copyto, put, take, extract Examples -------- >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr array([[ 0, 1, 2], [44, 55, 44]]) """ if not isinstance(arr, np.ndarray): raise TypeError("argument 1 must be numpy.ndarray, " "not {name}".format(name=type(arr).__name__)) return _insert(arr, mask, vals) def disp(mesg, device=None, linefeed=True): """ Display a message on a device. Parameters ---------- mesg : str Message to display. device : object Device to write message. If None, defaults to ``sys.stdout`` which is very similar to ``print``. `device` needs to have ``write()`` and ``flush()`` methods. linefeed : bool, optional Option whether to print a line feed or not. Defaults to True. Raises ------ AttributeError If `device` does not have a ``write()`` or ``flush()`` method. 
Examples -------- Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: >>> from StringIO import StringIO >>> buf = StringIO() >>> np.disp('"Display" in a file', device=buf) >>> buf.getvalue() '"Display" in a file\\n' """ if device is None: device = sys.stdout if linefeed: device.write('%s\n' % mesg) else: device.write('%s' % mesg) device.flush() return # See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html _DIMENSION_NAME = r'\w+' _CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) _ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) _ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) _SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) def _parse_gufunc_signature(signature): """ Parse string signatures for a generalized universal function. Arguments --------- signature : string Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` for ``np.matmul``. Returns ------- Tuple of input and output core dimensions parsed from the signature, each of the form List[Tuple[str, ...]]. """ if not re.match(_SIGNATURE, signature): raise ValueError( 'not a valid gufunc signature: {}'.format(signature)) return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) for arg in re.findall(_ARGUMENT, arg_list)] for arg_list in signature.split('->')) def _update_dim_sizes(dim_sizes, arg, core_dims): """ Incrementally check and update core dimension sizes for a single argument. Arguments --------- dim_sizes : Dict[str, int] Sizes of existing core dimensions. Will be updated in-place. arg : ndarray Argument to examine. core_dims : Tuple[str, ...] Core dimensions for this argument. """ if not core_dims: return num_core_dims = len(core_dims) if arg.ndim < num_core_dims: raise ValueError( '%d-dimensional argument does not have enough ' 'dimensions for all core dimensions %r' % (arg.ndim, core_dims)) core_shape = arg.shape[-num_core_dims:] for dim, size in zip(core_dims, core_shape): if dim in dim_sizes: if size != dim_sizes[dim]: raise ValueError( 'inconsistent size for core dimension %r: %r vs %r' % (dim, size, dim_sizes[dim])) else: dim_sizes[dim] = size def _parse_input_dimensions(args, input_core_dims): """ Parse broadcast and core dimensions for vectorize with a signature. Arguments --------- args : Tuple[ndarray, ...] Tuple of input arguments to examine. input_core_dims : List[Tuple[str, ...]] List of core dimensions corresponding to each input. Returns ------- broadcast_shape : Tuple[int, ...] Common shape to broadcast all non-core dimensions to. dim_sizes : Dict[str, int] Common sizes for named core dimensions. 
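A minimal sketch of the signature grammar used above, re-applying the same regular expressions outside the module; ``dim``, ``arg`` and ``sig`` are arbitrary local names, not part of this module.

>>> import re
>>> dim = r'\w+'
>>> arg = r'\((?:{0}(?:,{0})*)?\)'.format(dim)
>>> sig = '(m,n),(n,p)->(m,p)'
>>> [tuple(re.findall(dim, a)) for a in re.findall(arg, sig.split('->')[0])]
[('m', 'n'), ('n', 'p')]
>>> [tuple(re.findall(dim, a)) for a in re.findall(arg, sig.split('->')[1])]
[('m', 'p')]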
""" broadcast_args = [] dim_sizes = {} for arg, core_dims in zip(args, input_core_dims): _update_dim_sizes(dim_sizes, arg, core_dims) ndim = arg.ndim - len(core_dims) dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) broadcast_args.append(dummy_array) broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) return broadcast_shape, dim_sizes def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): """Helper for calculating broadcast shapes with core dimensions.""" return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) for core_dims in list_of_core_dims] def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes): """Helper for creating output arrays in vectorize.""" shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) arrays = tuple(np.empty(shape, dtype=dtype) for shape, dtype in zip(shapes, dtypes)) return arrays class vectorize(object): """ vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None) Generalized function class. Define a vectorized function which takes a nested sequence of objects or numpy arrays as inputs and returns an single or tuple of numpy array as output. The vectorized function evaluates `pyfunc` over successive tuples of the input arrays like the python map function, except it uses the broadcasting rules of numpy. The data type of the output of `vectorized` is determined by calling the function with the first element of the input. This can be avoided by specifying the `otypes` argument. Parameters ---------- pyfunc : callable A python function or method. otypes : str or list of dtypes, optional The output data type. It must be specified as either a string of typecode characters or a list of data type specifiers. There should be one data type specifier for each output. doc : str, optional The docstring for the function. If `None`, the docstring will be the ``pyfunc.__doc__``. excluded : set, optional Set of strings or integers representing the positional or keyword arguments for which the function will not be vectorized. These will be passed directly to `pyfunc` unmodified. .. versionadded:: 1.7.0 cache : bool, optional If `True`, then cache the first function call that determines the number of outputs if `otypes` is not provided. .. versionadded:: 1.7.0 signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for vectorized matrix-vector multiplication. If provided, ``pyfunc`` will be called with (and expected to return) arrays with shapes given by the size of corresponding core dimensions. By default, ``pyfunc`` is assumed to take scalars as input and output. .. versionadded:: 1.12.0 Returns ------- vectorized : callable Vectorized function. Examples -------- >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... if a > b: ... return a - b ... else: ... 
return a + b >>> vfunc = np.vectorize(myfunc) >>> vfunc([1, 2, 3, 4], 2) array([3, 4, 1, 2]) The docstring is taken from the input function to `vectorize` unless it is specified: >>> vfunc.__doc__ 'Return a-b if a>b, otherwise return a+b' >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') >>> vfunc.__doc__ 'Vectorized `myfunc`' The output type is determined by evaluating the first element of the input, unless it is specified: >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.int32'> >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.float64'> The `excluded` argument can be used to prevent vectorizing over certain arguments. This can be useful for array-like arguments of a fixed length such as the coefficients for a polynomial as in `polyval`: >>> def mypolyval(p, x): ... _p = list(p) ... res = _p.pop(0) ... while _p: ... res = res*x + _p.pop(0) ... return res >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) >>> vpolyval(p=[1, 2, 3], x=[0, 1]) array([3, 6]) Positional arguments may also be excluded by specifying their position: >>> vpolyval.excluded.add(0) >>> vpolyval([1, 2, 3], x=[0, 1]) array([3, 6]) The `signature` argument allows for vectorizing functions that act on non-scalar arrays of fixed length. For example, you can use it for a vectorized calculation of Pearson correlation coefficient and its p-value: >>> import scipy.stats >>> pearsonr = np.vectorize(scipy.stats.pearsonr, ... signature='(n),(n)->(),()') >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) (array([ 1., -1.]), array([ 0., 0.])) Or for a vectorized convolution: >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') >>> convolve(np.eye(4), [1, 2, 1]) array([[ 1., 2., 1., 0., 0., 0.], [ 0., 1., 2., 1., 0., 0.], [ 0., 0., 1., 2., 1., 0.], [ 0., 0., 0., 1., 2., 1.]]) See Also -------- frompyfunc : Takes an arbitrary Python function and returns a ufunc Notes ----- The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. If `otypes` is not specified, then a call to the function with the first argument will be used to determine the number of outputs. The results of this call will be cached if `cache` is `True` to prevent calling the function twice. However, to implement the cache, the original function must be wrapped which will slow down subsequent calls, so only do this if your function is expensive. The new keyword argument interface and `excluded` argument support further degrades performance. References ---------- .. [1] NumPy Reference, section `Generalized Universal Function API <http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_. 
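A minimal sketch of the `signature` behaviour summarised above, using an arbitrary reducing function: each call receives one length-``n`` row and returns two scalars, so the result is a pair of arrays over the broadcast shape.

>>> import numpy as np
>>> minmax = np.vectorize(lambda v: (v.min(), v.max()), signature='(n)->(),()')
>>> lo, hi = minmax(np.array([[1, 5, 3], [7, 2, 9]]))
>>> lo
array([1, 2])
>>> hi
array([5, 9])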
""" def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None): self.pyfunc = pyfunc self.cache = cache self.signature = signature self._ufunc = None # Caching to improve default performance if doc is None: self.__doc__ = pyfunc.__doc__ else: self.__doc__ = doc if isinstance(otypes, str): for char in otypes: if char not in typecodes['All']: raise ValueError("Invalid otype specified: %s" % (char,)) elif iterable(otypes): otypes = ''.join([_nx.dtype(x).char for x in otypes]) elif otypes is not None: raise ValueError("Invalid otype specification") self.otypes = otypes # Excluded variable support if excluded is None: excluded = set() self.excluded = set(excluded) if signature is not None: self._in_and_out_core_dims = _parse_gufunc_signature(signature) else: self._in_and_out_core_dims = None def __call__(self, *args, **kwargs): """ Return arrays with the results of `pyfunc` broadcast (vectorized) over `args` and `kwargs` not in `excluded`. """ excluded = self.excluded if not kwargs and not excluded: func = self.pyfunc vargs = args else: # The wrapper accepts only positional arguments: we use `names` and # `inds` to mutate `the_args` and `kwargs` to pass to the original # function. nargs = len(args) names = [_n for _n in kwargs if _n not in excluded] inds = [_i for _i in range(nargs) if _i not in excluded] the_args = list(args) def func(*vargs): for _n, _i in enumerate(inds): the_args[_i] = vargs[_n] kwargs.update(zip(names, vargs[len(inds):])) return self.pyfunc(*the_args, **kwargs) vargs = [args[_i] for _i in inds] vargs.extend([kwargs[_n] for _n in names]) return self._vectorize_call(func=func, args=vargs) def _get_ufunc_and_otypes(self, func, args): """Return (ufunc, otypes).""" # frompyfunc will fail if args is empty if not args: raise ValueError('args can not be empty') if self.otypes is not None: otypes = self.otypes nout = len(otypes) # Note logic here: We only *use* self._ufunc if func is self.pyfunc # even though we set self._ufunc regardless. if func is self.pyfunc and self._ufunc is not None: ufunc = self._ufunc else: ufunc = self._ufunc = frompyfunc(func, len(args), nout) else: # Get number of outputs and output types by calling the function on # the first entries of args. We also cache the result to prevent # the subsequent call when the ufunc is evaluated. # Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) args = [asarray(arg) for arg in args] if builtins.any(arg.size == 0 for arg in args): raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') inputs = [arg.flat[0] for arg in args] outputs = func(*inputs) # Performance note: profiling indicates that -- for simple # functions at least -- this wrapping can almost double the # execution time. # Hence we make it optional. if self.cache: _cache = [outputs] def _func(*vargs): if _cache: return _cache.pop() else: return func(*vargs) else: _func = func if isinstance(outputs, tuple): nout = len(outputs) else: nout = 1 outputs = (outputs,) otypes = ''.join([asarray(outputs[_k]).dtype.char for _k in range(nout)]) # Performance note: profiling indicates that creating the ufunc is # not a significant cost compared with wrapping so it seems not # worth trying to cache this. 
ufunc = frompyfunc(_func, len(args), nout) return ufunc, otypes def _vectorize_call(self, func, args): """Vectorized call to `func` over positional `args`.""" if self.signature is not None: res = self._vectorize_call_with_signature(func, args) elif not args: res = func() else: ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) # Convert args to object arrays first inputs = [array(a, copy=False, subok=True, dtype=object) for a in args] outputs = ufunc(*inputs) if ufunc.nout == 1: res = array(outputs, copy=False, subok=True, dtype=otypes[0]) else: res = tuple([array(x, copy=False, subok=True, dtype=t) for x, t in zip(outputs, otypes)]) return res def _vectorize_call_with_signature(self, func, args): """Vectorized call over positional arguments with a signature.""" input_core_dims, output_core_dims = self._in_and_out_core_dims if len(args) != len(input_core_dims): raise TypeError('wrong number of positional arguments: ' 'expected %r, got %r' % (len(input_core_dims), len(args))) args = tuple(asanyarray(arg) for arg in args) broadcast_shape, dim_sizes = _parse_input_dimensions( args, input_core_dims) input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, input_core_dims) args = [np.broadcast_to(arg, shape, subok=True) for arg, shape in zip(args, input_shapes)] outputs = None otypes = self.otypes nout = len(output_core_dims) for index in np.ndindex(*broadcast_shape): results = func(*(arg[index] for arg in args)) n_results = len(results) if isinstance(results, tuple) else 1 if nout != n_results: raise ValueError( 'wrong number of outputs from pyfunc: expected %r, got %r' % (nout, n_results)) if nout == 1: results = (results,) if outputs is None: for result, core_dims in zip(results, output_core_dims): _update_dim_sizes(dim_sizes, result, core_dims) if otypes is None: otypes = [asarray(result).dtype for result in results] outputs = _create_arrays(broadcast_shape, dim_sizes, output_core_dims, otypes) for output, result in zip(outputs, results): output[index] = result if outputs is None: # did not call the function even once if otypes is None: raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') if builtins.any(dim not in dim_sizes for dims in output_core_dims for dim in dims): raise ValueError('cannot call `vectorize` with a signature ' 'including new output dimensions on size 0 ' 'inputs') outputs = _create_arrays(broadcast_shape, dim_sizes, output_core_dims, otypes) return outputs[0] if nout == 1 else outputs def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None): """ Estimate a covariance matrix, given data and weights. Covariance indicates the level to which two variables vary together. If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i`. See the notes for an outline of the algorithm. Parameters ---------- m : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `m` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same form as that of `m`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. 
Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : bool, optional Default normalization (False) is by ``(N - 1)``, where ``N`` is the number of observations given (unbiased estimate). If `bias` is True, then normalization is by ``N``. These values can be overridden by using the keyword ``ddof`` in numpy versions >= 1.5. ddof : int, optional If not ``None`` the default value implied by `bias` is overridden. Note that ``ddof=1`` will return the unbiased estimate, even if both `fweights` and `aweights` are specified, and ``ddof=0`` will return the simple average. See the notes for the details. The default value is ``None``. .. versionadded:: 1.5 fweights : array_like, int, optional 1-D array of integer freguency weights; the number of times each observation vector should be repeated. .. versionadded:: 1.10 aweights : array_like, optional 1-D array of observation vector weights. These relative weights are typically large for observations considered "important" and smaller for observations considered less "important". If ``ddof=0`` the array of weights can be used to assign probabilities to observation vectors. .. versionadded:: 1.10 Returns ------- out : ndarray The covariance matrix of the variables. See Also -------- corrcoef : Normalized covariance matrix Notes ----- Assume that the observations are in the columns of the observation array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The steps to compute the weighted covariance are as follows:: >>> w = f * a >>> v1 = np.sum(w) >>> v2 = np.sum(w * a) >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1 >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) Note that when ``a == 1``, the normalization factor ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` as it should. Examples -------- Consider two variables, :math:`x_0` and :math:`x_1`, which correlate perfectly, but in opposite directions: >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T >>> x array([[0, 1, 2], [2, 1, 0]]) Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance matrix shows this clearly: >>> np.cov(x) array([[ 1., -1.], [-1., 1.]]) Note that element :math:`C_{0,1}`, which shows the correlation between :math:`x_0` and :math:`x_1`, is negative. 
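A minimal sketch of the unweighted case of the recipe in the Notes (no ``fweights``/``aweights``, default ``ddof = 1``), using the same toy data as the example above; ``centered`` and ``manual`` are arbitrary local names.

>>> import numpy as np
>>> x = np.array([[0., 1., 2.], [2., 1., 0.]])
>>> centered = x - x.mean(axis=1, keepdims=True)
>>> manual = np.dot(centered, centered.T) / (x.shape[1] - 1)
>>> np.allclose(manual, np.cov(x))
True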
Further, note how `x` and `y` are combined: >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.vstack((x,y)) >>> print(np.cov(X)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x, y)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x)) 11.71 """ # Check inputs if ddof is not None and ddof != int(ddof): raise ValueError( "ddof must be integer") # Handles complex arrays too m = np.asarray(m) if m.ndim > 2: raise ValueError("m has more than 2 dimensions") if y is None: dtype = np.result_type(m, np.float64) else: y = np.asarray(y) if y.ndim > 2: raise ValueError("y has more than 2 dimensions") dtype = np.result_type(m, y, np.float64) X = array(m, ndmin=2, dtype=dtype) if not rowvar and X.shape[0] != 1: X = X.T if X.shape[0] == 0: return np.array([]).reshape(0, 0) if y is not None: y = array(y, copy=False, ndmin=2, dtype=dtype) if not rowvar and y.shape[0] != 1: y = y.T X = np.vstack((X, y)) if ddof is None: if bias == 0: ddof = 1 else: ddof = 0 # Get the product of frequencies and weights w = None if fweights is not None: fweights = np.asarray(fweights, dtype=np.float) if not np.all(fweights == np.around(fweights)): raise TypeError( "fweights must be integer") if fweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional fweights") if fweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and fweights") if any(fweights < 0): raise ValueError( "fweights cannot be negative") w = fweights if aweights is not None: aweights = np.asarray(aweights, dtype=np.float) if aweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional aweights") if aweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and aweights") if any(aweights < 0): raise ValueError( "aweights cannot be negative") if w is None: w = aweights else: w *= aweights avg, w_sum = average(X, axis=1, weights=w, returned=True) w_sum = w_sum[0] # Determine the normalization if w is None: fact = X.shape[1] - ddof elif ddof == 0: fact = w_sum elif aweights is None: fact = w_sum - ddof else: fact = w_sum - ddof*sum(w*aweights)/w_sum if fact <= 0: warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2) fact = 0.0 X -= avg[:, None] if w is None: X_T = X.T else: X_T = (X*w).T c = dot(X, X_T.conj()) c *= 1. / np.float64(fact) return c.squeeze() def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue): """ Return Pearson product-moment correlation coefficients. Please refer to the documentation for `cov` for more detail. The relationship between the correlation coefficient matrix, `R`, and the covariance matrix, `C`, is .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } The values of `R` are between -1 and 1, inclusive. Parameters ---------- x : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `x` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same shape as `x`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 ddof : _NoValue, optional Has no effect, do not use. .. 
deprecated:: 1.10.0 Returns ------- R : ndarray The correlation coefficient matrix of the variables. See Also -------- cov : Covariance matrix Notes ----- Due to floating point rounding the resulting array may not be Hermitian, the diagonal elements may not be 1, and the elements may not satisfy the inequality abs(a) <= 1. The real and imaginary parts are clipped to the interval [-1, 1] in an attempt to improve on that situation but is not much help in the complex case. This function accepts but discards arguments `bias` and `ddof`. This is for backwards compatibility with previous versions of this function. These arguments had no effect on the return values of the function and can be safely ignored in this and previous versions of numpy. """ if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn('bias and ddof have no effect and are deprecated', DeprecationWarning, stacklevel=2) c = cov(x, y, rowvar) try: d = diag(c) except ValueError: # scalar covariance # nan if incorrect value (nan, inf, 0), 1 otherwise return c / c stddev = sqrt(d.real) c /= stddev[:, None] c /= stddev[None, :] # Clip real and imaginary parts to [-1, 1]. This does not guarantee # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without # excessive work. np.clip(c.real, -1, 1, out=c.real) if np.iscomplexobj(c): np.clip(c.imag, -1, 1, out=c.imag) return c def blackman(M): """ Return the Blackman window. The Blackman window is a taper formed by using the first three terms of a summation of cosines. It was designed to have close to the minimal leakage possible. It is close to optimal, only slightly worse than a Kaiser window. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, hamming, hanning, kaiser Notes ----- The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) Most references to the Blackman window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) as the kaiser window. References ---------- Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
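A minimal sketch checking the three-term cosine sum above against this module's public `blackman`; note that the implementation that follows divides by ``M - 1``. The window length is arbitrary.

>>> import numpy as np
>>> M = 12
>>> n = np.arange(M)
>>> w = 0.42 - 0.5*np.cos(2*np.pi*n/(M - 1)) + 0.08*np.cos(4*np.pi*n/(M - 1))
>>> np.allclose(w, np.blackman(M))
True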
Examples -------- >>> np.blackman(12) array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.blackman(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) def bartlett(M): """ Return the Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : array The triangular window, with the maximum value normalized to one (the value one appears only if the number of samples is odd), with the first and last samples equal to zero. See Also -------- blackman, hamming, hanning, kaiser Notes ----- The Bartlett window is defined as .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means"removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The fourier transform of the Bartlett is the product of two sinc functions. Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. 
]) Plot the window and its frequency response (requires SciPy and matplotlib): >>> from numpy.fft import fft, fftshift >>> window = np.bartlett(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) def hanning(M): """ Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). See Also -------- bartlett, blackman, hamming, kaiser Notes ----- The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hanning was named for Julius von Hann, an Austrian meteorologist. It is also known as the Cosine Bell. Some authors prefer that it be called a Hann window, to help avoid confusion with the very similar Hamming window. Most references to the Hanning window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- >>> np.hanning(12) array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, 0.07937323, 0. 
]) Plot the window and its frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hanning(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of the Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) def hamming(M): """ Return the Hamming window. The Hamming window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hanning, kaiser Notes ----- The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and is described in Blackman and Tukey. It was recommended for smoothing the truncated autocovariance function in the time domain. Most references to the Hamming window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. 
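A minimal sketch relating the two definitions above: Hanning and Hamming are both raised cosines ``alpha - (1 - alpha)*cos(2*pi*n/(M - 1))`` with ``alpha`` equal to 0.5 and 0.54 respectively; ``raised_cosine`` is an arbitrary helper name and the window length is arbitrary.

>>> import numpy as np
>>> def raised_cosine(M, alpha):
...     n = np.arange(M)
...     return alpha - (1.0 - alpha)*np.cos(2.0*np.pi*n/(M - 1))
>>> np.allclose(raised_cosine(12, 0.5), np.hanning(12))
True
>>> np.allclose(raised_cosine(12, 0.54), np.hamming(12))
True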
Examples -------- >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 0.15302337, 0.08 ]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hamming(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) ## Code from cephes for i0 _i0A = [ -4.41534164647933937950E-18, 3.33079451882223809783E-17, -2.43127984654795469359E-16, 1.71539128555513303061E-15, -1.16853328779934516808E-14, 7.67618549860493561688E-14, -4.85644678311192946090E-13, 2.95505266312963983461E-12, -1.72682629144155570723E-11, 9.67580903537323691224E-11, -5.18979560163526290666E-10, 2.65982372468238665035E-9, -1.30002500998624804212E-8, 6.04699502254191894932E-8, -2.67079385394061173391E-7, 1.11738753912010371815E-6, -4.41673835845875056359E-6, 1.64484480707288970893E-5, -5.75419501008210370398E-5, 1.88502885095841655729E-4, -5.76375574538582365885E-4, 1.63947561694133579842E-3, -4.32430999505057594430E-3, 1.05464603945949983183E-2, -2.37374148058994688156E-2, 4.93052842396707084878E-2, -9.49010970480476444210E-2, 1.71620901522208775349E-1, -3.04682672343198398683E-1, 6.76795274409476084995E-1 ] _i0B = [ -7.23318048787475395456E-18, -4.83050448594418207126E-18, 4.46562142029675999901E-17, 3.46122286769746109310E-17, -2.82762398051658348494E-16, -3.42548561967721913462E-16, 1.77256013305652638360E-15, 3.81168066935262242075E-15, -9.55484669882830764870E-15, -4.15056934728722208663E-14, 1.54008621752140982691E-14, 3.85277838274214270114E-13, 7.18012445138366623367E-13, -1.79417853150680611778E-12, -1.32158118404477131188E-11, -3.14991652796324136454E-11, 1.18891471078464383424E-11, 4.94060238822496958910E-10, 3.39623202570838634515E-9, 2.26666899049817806459E-8, 2.04891858946906374183E-7, 2.89137052083475648297E-6, 6.88975834691682398426E-5, 3.36911647825569408990E-3, 8.04490411014108831608E-1 ] def _chbevl(x, vals): b0 = vals[0] b1 = 0.0 for i in range(1, len(vals)): b2 = b1 b1 = b0 b0 = x*b1 - b2 + vals[i] return 0.5*(b0 - b2) def _i0_1(x): return exp(x) * _chbevl(x/2.0-2, _i0A) def _i0_2(x): return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) def i0(x): """ Modified Bessel function of the first kind, order 0. Usually denoted :math:`I_0`. This function does broadcast, but will *not* "up-cast" int dtype arguments unless accompanied by at least one float or complex dtype argument (see Raises below). Parameters ---------- x : array_like, dtype float or complex Argument of the Bessel function. 
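A minimal sketch cross-checking the Chebyshev-based evaluation above against the defining power series ``I_0(x) = sum_k (x**2/4)**k / (k!)**2``, truncated at an arbitrary 30 terms and restricted to small real arguments; ``i0_series`` is an arbitrary helper name.

>>> import numpy as np
>>> from math import factorial
>>> def i0_series(x, terms=30):
...     return sum((x*x/4.0)**k / factorial(k)**2 for k in range(terms))
>>> xs = np.linspace(0., 5., 11)
>>> np.allclose([i0_series(v) for v in xs], np.i0(xs))
True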
Returns ------- out : ndarray, shape = x.shape, dtype = x.dtype The modified Bessel function evaluated at each of the elements of `x`. Raises ------ TypeError: array cannot be safely cast to required type If argument consists exclusively of int dtypes. See Also -------- scipy.special.iv, scipy.special.ive Notes ----- We use the algorithm published by Clenshaw [1]_ and referenced by Abramowitz and Stegun [2]_, for which the function domain is partitioned into the two intervals [0,8] and (8,inf), and Chebyshev polynomial expansions are employed in each interval. Relative error on the domain [0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). References ---------- .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in *National Physical Laboratory Mathematical Tables*, vol. 5, London: Her Majesty's Stationery Office, 1962. .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 379. http://www.math.sfu.ca/~cbm/aands/page_379.htm .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html Examples -------- >>> np.i0([0.]) array(1.0) >>> np.i0([0., 1. + 2j]) array([ 1.00000000+0.j , 0.18785373+0.64616944j]) """ x = atleast_1d(x).copy() y = empty_like(x) ind = (x < 0) x[ind] = -x[ind] ind = (x <= 8.0) y[ind] = _i0_1(x[ind]) ind2 = ~ind y[ind2] = _i0_2(x[ind2]) return y.squeeze() ## End of cephes code for i0 def kaiser(M, beta): """ Return the Kaiser window. The Kaiser window is a taper formed by using a Bessel function. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. beta : float Shape parameter for window. Returns ------- out : array The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hamming, hanning Notes ----- The Kaiser window is defined as .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} \\right)/I_0(\\beta) with .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, where :math:`I_0` is the modified zeroth-order Bessel function. The Kaiser was named for Jim Kaiser, who discovered a simple approximation to the DPSS window based on Bessel functions. The Kaiser window is a very good approximation to the Digital Prolate Spheroidal Sequence, or Slepian window, which is the transform which maximizes the energy in the main lobe of the window relative to total energy. The Kaiser can approximate many other windows by varying the beta parameter. ==== ======================= beta Window shape ==== ======================= 0 Rectangular 5 Similar to a Hamming 6 Similar to a Hanning 8.6 Similar to a Blackman ==== ======================= A beta value of 14 is probably a good starting point. Note that as beta gets large, the window narrows, and so the number of samples needs to be large enough to sample the increasingly narrow spike, otherwise NaNs will get returned. Most references to the Kaiser window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 
John Wiley and Sons, New York, (1966). .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 177-178. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function Examples -------- >>> np.kaiser(12, 14) array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.kaiser(51, 14) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ from numpy.dual import i0 if M == 1: return np.array([1.]) n = arange(0, M) alpha = (M-1)/2.0 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) def sinc(x): """ Return the sinc function. The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. Parameters ---------- x : ndarray Array (possibly multi-dimensional) of values for which to to calculate ``sinc(x)``. Returns ------- out : ndarray ``sinc(x)``, which has the same shape as the input. Notes ----- ``sinc(0)`` is the limit value 1. The name sinc is short for "sine cardinal" or "sinus cardinalis". The sinc function is used in various signal processing applications, including in anti-aliasing, in the construction of a Lanczos resampling filter, and in interpolation. For bandlimited interpolation of discrete-time signals, the ideal interpolation kernel is proportional to the sinc function. References ---------- .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/SincFunction.html .. 
[2] Wikipedia, "Sinc function", http://en.wikipedia.org/wiki/Sinc_function Examples -------- >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, -4.92362781e-02, -3.89804309e-17]) >>> plt.plot(x, np.sinc(x)) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Sinc Function") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("X") <matplotlib.text.Text object at 0x...> >>> plt.show() It works in 2-D as well: >>> x = np.linspace(-4, 4, 401) >>> xx = np.outer(x, x) >>> plt.imshow(np.sinc(xx)) <matplotlib.image.AxesImage object at 0x...> """ x = np.asanyarray(x) y = pi * where(x == 0, 1.0e-20, x) return sin(y)/y def msort(a): """ Return a copy of an array sorted along the first axis. Parameters ---------- a : array_like Array to be sorted. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- sort Notes ----- ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. """ b = array(a, subok=True, copy=True) b.sort(0) return b def _ureduce(a, func, **kwargs): """ Internal Function. Call `func` with `a` as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input array or object that can be converted to an array. func : callable Reduction function capable of receiving a single axis argument. It is is called with `a` as first argument followed by `kwargs`. kwargs : keyword arguments additional keyword arguments to pass to `func`. Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce. """ a = np.asanyarray(a) axis = kwargs.get('axis', None) if axis is not None: keepdim = list(a.shape) nd = a.ndim axis = _nx.normalize_axis_tuple(axis, nd) for ax in axis: keepdim[ax] = 1 if len(axis) == 1: kwargs['axis'] = axis[0] else: keep = set(range(nd)) - set(axis) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): a = a.swapaxes(i, s) # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 else: keepdim = [1] * a.ndim r = func(a, **kwargs) return r, keepdim def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): """ Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : {int, sequence of int, None}, optional Axis or axes along which the medians are computed. The default is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. 
It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to `median`. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. If `overwrite_input` is ``True`` and `a` is not already an `ndarray`, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.9.0 Returns ------- median : ndarray A new array holding the result. If the input contains integers or floats smaller than ``float64``, then the output data-type is ``np.float64``. Otherwise, the data-type of the output is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, percentile Notes ----- Given a vector ``V`` of length ``N``, the median of ``V`` is the middle value of a sorted copy of ``V``, ``V_sorted`` - i e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the two middle values of ``V_sorted`` when ``N`` is even. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.median(a) 3.5 >>> np.median(a, axis=0) array([ 6.5, 4.5, 2.5]) >>> np.median(a, axis=1) array([ 7., 2.]) >>> m = np.median(a, axis=0) >>> out = np.zeros_like(m) >>> np.median(a, axis=0, out=m) array([ 6.5, 4.5, 2.5]) >>> m array([ 6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.median(b, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.median(b, axis=None, overwrite_input=True) 3.5 >>> assert not np.all(a==b) """ r, k = _ureduce(a, func=_median, axis=axis, out=out, overwrite_input=overwrite_input) if keepdims: return r.reshape(k) else: return r def _median(a, axis=None, out=None, overwrite_input=False): # can't be reasonably be implemented in terms of percentile as we have to # call mean to not break astropy a = np.asanyarray(a) # Set the partition indexes if axis is None: sz = a.size else: sz = a.shape[axis] if sz % 2 == 0: szh = sz // 2 kth = [szh - 1, szh] else: kth = [(sz - 1) // 2] # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): kth.append(-1) if overwrite_input: if axis is None: part = a.ravel() part.partition(kth) else: a.partition(kth, axis=axis) part = a else: part = partition(a, kth, axis=axis) if part.shape == (): # make 0-D arrays work return part.item() if axis is None: axis = 0 indexer = [slice(None)] * part.ndim index = part.shape[axis] // 2 if part.shape[axis] % 2 == 1: # index with slice to allow mean (below) to work indexer[axis] = slice(index, index+1) else: indexer[axis] = slice(index-1, index+1) # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact) and sz > 0: # warn and return nans like mean would rout = mean(part[indexer], axis=axis, out=out) return np.lib.utils._median_nancheck(part, rout, axis, out) else: # if there are no nans # Use mean in odd and even case to coerce data type # and check, use out array. 
return mean(part[indexer], axis=axis, out=out) def percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): """ Compute the qth percentile of the data along the specified axis. Returns the qth percentile(s) of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) Percentile to compute, which must be between 0 and 100 inclusive. axis : {int, sequence of int, None}, optional Axis or axes along which the percentiles are computed. The default is to compute the percentile(s) along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` calculations. The input array will be modified by the call to `percentile`. This will save memory when you do not need to preserve the contents of the input array. In this case you should not make any assumptions about the contents of the input `a` after this function completes -- treat it as undefined. Default is False. If `a` is not already an array, this parameter will have no effect as `a` will be converted to an array internally regardless of the value of this parameter. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: * linear: ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. * lower: ``i``. * higher: ``j``. * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. .. versionadded:: 1.9.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. .. versionadded:: 1.9.0 Returns ------- percentile : scalar or ndarray If `q` is a single percentile and `axis=None`, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the percentiles. The other axes are the axes that remain after the reduction of `a`. If the input contains integers or floats smaller than ``float64``, the output data-type is ``float64``. Otherwise, the output data-type is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, median, nanpercentile Notes ----- Given a vector ``V`` of length ``N``, the ``q``-th percentile of ``V`` is the value ``q/100`` of the way from the minimum to the maximum in a sorted copy of ``V``. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of ``q`` exactly. This function is the same as the median if ``q=50``, the same as the minimum if ``q=0`` and the same as the maximum if ``q=100``. 
Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.percentile(a, 50) 3.5 >>> np.percentile(a, 50, axis=0) array([[ 6.5, 4.5, 2.5]]) >>> np.percentile(a, 50, axis=1) array([ 7., 2.]) >>> np.percentile(a, 50, axis=1, keepdims=True) array([[ 7.], [ 2.]]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=out) array([[ 6.5, 4.5, 2.5]]) >>> m array([[ 6.5, 4.5, 2.5]]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a == b) """ q = array(q, dtype=np.float64, copy=True) r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) if keepdims: if q.ndim == 0: return r.reshape(k) else: return r.reshape([len(q)] + k) else: return r def _percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): a = asarray(a) if q.ndim == 0: # Do not allow 0-d arrays because following code fails for scalar zerod = True q = q[None] else: zerod = False # avoid expensive reductions, relevant for arrays with < O(1000) elements if q.size < 10: for i in range(q.size): if q[i] < 0. or q[i] > 100.: raise ValueError("Percentiles must be in the range [0,100]") q[i] /= 100. else: # faster than any() if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): raise ValueError("Percentiles must be in the range [0,100]") q /= 100. # prepare a for partioning if overwrite_input: if axis is None: ap = a.ravel() else: ap = a else: if axis is None: ap = a.flatten() else: ap = a.copy() if axis is None: axis = 0 Nx = ap.shape[axis] indices = q * (Nx - 1) # round fractional indices according to interpolation method if interpolation == 'lower': indices = floor(indices).astype(intp) elif interpolation == 'higher': indices = ceil(indices).astype(intp) elif interpolation == 'midpoint': indices = 0.5 * (floor(indices) + ceil(indices)) elif interpolation == 'nearest': indices = around(indices).astype(intp) elif interpolation == 'linear': pass # keep index as fraction and interpolate else: raise ValueError( "interpolation can only be 'linear', 'lower' 'higher', " "'midpoint', or 'nearest'") n = np.array(False, dtype=bool) # check for nan's flag if indices.dtype == intp: # take the points along axis # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = concatenate((indices, [-1])) ap.partition(indices, axis=axis) # ensure axis with qth is first ap = np.rollaxis(ap, axis, 0) axis = 0 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = indices[:-1] n = np.isnan(ap[-1:, ...]) if zerod: indices = indices[0] r = take(ap, indices, axis=axis, out=out) else: # weight the points above and below the indices indices_below = floor(indices).astype(intp) indices_above = indices_below + 1 indices_above[indices_above > Nx - 1] = Nx - 1 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = concatenate((indices_above, [-1])) weights_above = indices - indices_below weights_below = 1.0 - weights_above weights_shape = [1, ] * ap.ndim weights_shape[axis] = len(indices) weights_below.shape = weights_shape weights_above.shape = weights_shape ap.partition(concatenate((indices_below, indices_above)), axis=axis) # ensure axis with qth is first ap = np.rollaxis(ap, axis, 0) weights_below = np.rollaxis(weights_below, axis, 0) weights_above = np.rollaxis(weights_above, axis, 0) axis = 0 # Check if 
the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = indices_above[:-1] n = np.isnan(ap[-1:, ...]) x1 = take(ap, indices_below, axis=axis) * weights_below x2 = take(ap, indices_above, axis=axis) * weights_above # ensure axis with qth is first x1 = np.rollaxis(x1, axis, 0) x2 = np.rollaxis(x2, axis, 0) if zerod: x1 = x1.squeeze(0) x2 = x2.squeeze(0) if out is not None: r = add(x1, x2, out=out) else: r = add(x1, x2) if np.any(n): warnings.warn("Invalid value encountered in percentile", RuntimeWarning, stacklevel=3) if zerod: if ap.ndim == 1: if out is not None: out[...] = a.dtype.type(np.nan) r = out else: r = a.dtype.type(np.nan) else: r[..., n.squeeze(0)] = a.dtype.type(np.nan) else: if r.ndim == 1: r[:] = a.dtype.type(np.nan) else: r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan) return r def trapz(y, x=None, dx=1.0, axis=-1): """ Integrate along the given axis using the composite trapezoidal rule. Integrate `y` (`x`) along given axis. Parameters ---------- y : array_like Input array to integrate. x : array_like, optional The sample points corresponding to the `y` values. If `x` is None, the sample points are assumed to be evenly spaced `dx` apart. The default is None. dx : scalar, optional The spacing between sample points when `x` is None. The default is 1. axis : int, optional The axis along which to integrate. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule. See Also -------- sum, cumsum Notes ----- Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will be taken from `y` array, by default x-axis distances between points will be 1.0, alternatively they can be provided with `x` array or with `dx` scalar. Return value will be equal to combined area under the red lines. References ---------- .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png Examples -------- >>> np.trapz([1,2,3]) 4.0 >>> np.trapz([1,2,3], x=[4,6,8]) 8.0 >>> np.trapz([1,2,3], dx=2) 8.0 >>> a = np.arange(6).reshape(2, 3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> np.trapz(a, axis=0) array([ 1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) array([ 2., 8.]) """ y = asanyarray(y) if x is None: d = dx else: x = asanyarray(x) if x.ndim == 1: d = diff(x) # reshape to correct shape shape = [1]*y.ndim shape[axis] = d.shape[0] d = d.reshape(shape) else: d = diff(x, axis=axis) nd = y.ndim slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) try: ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) except ValueError: # Operations didn't work, cast to ndarray d = np.asarray(d) y = np.asarray(y) ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) return ret #always succeed def add_newdoc(place, obj, doc): """ Adds documentation to obj which is in module place. If doc is a string add it to obj as a docstring If doc is a tuple, then the first element is interpreted as an attribute of obj and the second as the docstring (method, docstring) If doc is a list, then each element of the list should be a sequence of length two --> [(method1, docstring1), (method2, docstring2), ...] This routine never raises an error. This routine cannot modify read-only docstrings, as appear in new-style classes or built-in functions. Because this routine never raises an error the caller must check manually that the docstrings were changed. 
""" try: new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): add_docstring(new, doc.strip()) elif isinstance(doc, tuple): add_docstring(getattr(new, doc[0]), doc[1].strip()) elif isinstance(doc, list): for val in doc: add_docstring(getattr(new, val[0]), val[1].strip()) except: pass # Based on scitools meshgrid def meshgrid(*xi, **kwargs): """ Return coordinate matrices from coordinate vectors. Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. .. versionchanged:: 1.9 1-D and 0-D cases are allowed. Parameters ---------- x1, x2,..., xn : array_like 1-D arrays representing the coordinates of a grid. indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. .. versionadded:: 1.7.0 sparse : bool, optional If True a sparse grid is returned in order to conserve memory. Default is False. .. versionadded:: 1.7.0 copy : bool, optional If False, a view into the original arrays are returned in order to conserve memory. Default is True. Please note that ``sparse=False, copy=False`` will likely return non-contiguous arrays. Furthermore, more than one element of a broadcast array may refer to a single memory location. If you need to write to the arrays, make copies first. .. versionadded:: 1.7.0 Returns ------- X1, X2,..., XN : ndarray For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' with the elements of `xi` repeated to fill the matrix along the first dimension for `x1`, the second for `x2` and so on. Notes ----- This function supports both indexing conventions through the indexing keyword argument. Giving the string 'ij' returns a meshgrid with matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case with inputs of length M and N, the outputs are of shape (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is illustrated by the following code snippet:: xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') for i in range(nx): for j in range(ny): # treat xv[i,j], yv[i,j] xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy') for i in range(nx): for j in range(ny): # treat xv[j,i], yv[j,i] In the 1-D and 0-D case, the indexing and sparse keywords have no effect. See Also -------- index_tricks.mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" using indexing notation. Examples -------- >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) >>> xv, yv = np.meshgrid(x, y) >>> xv array([[ 0. , 0.5, 1. ], [ 0. , 0.5, 1. ]]) >>> yv array([[ 0., 0., 0.], [ 1., 1., 1.]]) >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv array([[ 0. , 0.5, 1. ]]) >>> yv array([[ 0.], [ 1.]]) `meshgrid` is very useful to evaluate functions on a grid. 
>>> x = np.arange(-5, 5, 0.1) >>> y = np.arange(-5, 5, 0.1) >>> xx, yy = np.meshgrid(x, y, sparse=True) >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) >>> h = plt.contourf(x,y,z) """ ndim = len(xi) copy_ = kwargs.pop('copy', True) sparse = kwargs.pop('sparse', False) indexing = kwargs.pop('indexing', 'xy') if kwargs: raise TypeError("meshgrid() got an unexpected keyword argument '%s'" % (list(kwargs)[0],)) if indexing not in ['xy', 'ij']: raise ValueError( "Valid values for `indexing` are 'xy' and 'ij'.") s0 = (1,) * ndim output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) for i, x in enumerate(xi)] if indexing == 'xy' and ndim > 1: # switch first and second axis output[0].shape = (1, -1) + s0[2:] output[1].shape = (-1, 1) + s0[2:] if not sparse: # Return the full N-D matrix (not only the 1-D vector) output = np.broadcast_arrays(*output, subok=True) if copy_: output = [x.copy() for x in output] return output def delete(arr, obj, axis=None): """ Return a new array with sub-arrays along an axis deleted. For a one dimensional array, this returns those entries not returned by `arr[obj]`. Parameters ---------- arr : array_like Input array. obj : slice, int or array of ints Indicate which sub-arrays to remove. axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. Returns ------- out : ndarray A copy of `arr` with the elements specified by `obj` removed. Note that `delete` does not occur in-place. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. append : Append elements at the end of an array. Notes ----- Often it is preferable to use a boolean mask. For example: >>> mask = np.ones(len(arr), dtype=bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further use of `mask`. 
Examples -------- >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], [ 5, 6, 7, 8], [ 9, 10, 11, 12]]) >>> np.delete(arr, 1, 0) array([[ 1, 2, 3, 4], [ 9, 10, 11, 12]]) >>> np.delete(arr, np.s_[::2], 1) array([[ 2, 4], [ 6, 8], [10, 12]]) >>> np.delete(arr, [1,3,5], None) array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = -1 if ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from delete and raise an error", DeprecationWarning, stacklevel=2) if wrap: return wrap(arr) else: return arr.copy(order=arrorder) axis = normalize_axis_index(axis, ndim) slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): start, stop, step = obj.indices(N) xr = range(start, stop, step) numtodel = len(xr) if numtodel <= 0: if wrap: return wrap(arr.copy(order=arrorder)) else: return arr.copy(order=arrorder) # Invert if step is negative: if step < 0: step = -step start = xr[-1] stop = xr[0] + 1 newshape[axis] -= numtodel new = empty(newshape, arr.dtype, arrorder) # copy initial chunk if start == 0: pass else: slobj[axis] = slice(None, start) new[slobj] = arr[slobj] # copy end chunck if stop == N: pass else: slobj[axis] = slice(stop-numtodel, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(stop, None) new[slobj] = arr[slobj2] # copy middle pieces if step == 1: pass else: # use array indexing. keep = ones(stop-start, dtype=bool) keep[:stop-start:step] = False slobj[axis] = slice(start, stop-numtodel) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(start, stop) arr = arr[slobj2] slobj2[axis] = keep new[slobj] = arr[slobj2] if wrap: return wrap(new) else: return new _obj = obj obj = np.asarray(obj) # After removing the special handling of booleans and out of # bounds values, the conversion to the array can be removed. if obj.dtype == bool: warnings.warn("in the future insert will treat boolean arrays and " "array-likes as boolean index instead of casting it " "to integer", FutureWarning, stacklevel=2) obj = obj.astype(intp) if isinstance(_obj, (int, long, integer)): # optimization for a single value obj = obj.item() if (obj < -N or obj >= N): raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (obj < 0): obj += N newshape[axis] -= 1 new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, obj) new[slobj] = arr[slobj] slobj[axis] = slice(obj, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(obj+1, None) new[slobj] = arr[slobj2] else: if obj.size == 0 and not isinstance(_obj, np.ndarray): obj = obj.astype(intp) if not np.can_cast(obj, intp, 'same_kind'): # obj.size = 1 special case always failed and would just # give superfluous warnings. 
# 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in delete will result in an " "error in the future", DeprecationWarning, stacklevel=2) obj = obj.astype(intp) keep = ones(N, dtype=bool) # Test if there are out of bound indices, this is deprecated inside_bounds = (obj < N) & (obj >= -N) if not inside_bounds.all(): # 2013-09-24, 1.9 warnings.warn( "in the future out of bounds indices will raise an error " "instead of being ignored by `numpy.delete`.", DeprecationWarning, stacklevel=2) obj = obj[inside_bounds] positive_indices = obj >= 0 if not positive_indices.all(): warnings.warn( "in the future negative indices will not be ignored by " "`numpy.delete`.", FutureWarning, stacklevel=2) obj = obj[positive_indices] keep[obj, ] = False slobj[axis] = keep new = arr[slobj] if wrap: return wrap(new) else: return new def insert(arr, obj, values, axis=None): """ Insert values along the given axis before the given indices. Parameters ---------- arr : array_like Input array. obj : int, slice or sequence of ints Object that defines the index or indices before which `values` is inserted. .. versionadded:: 1.8.0 Support for multiple insertions when `obj` is a single scalar or a sequence with one element (similar to calling insert multiple times). values : array_like Values to insert into `arr`. If the type of `values` is different from that of `arr`, `values` is converted to the type of `arr`. `values` should be shaped so that ``arr[...,obj,...] = values`` is legal. axis : int, optional Axis along which to insert `values`. If `axis` is None then `arr` is flattened first. Returns ------- out : ndarray A copy of `arr` with `values` inserted. Note that `insert` does not occur in-place: a new array is returned. If `axis` is None, `out` is a flattened array. See Also -------- append : Append elements at the end of an array. concatenate : Join a sequence of arrays along an existing axis. delete : Delete elements from an array. Notes ----- Note that for higher dimensional inserts `obj=0` behaves very different from `obj=[0]` just like `arr[:,0,:] = values` is different from `arr[:,[0],:] = values`. Examples -------- >>> a = np.array([[1, 1], [2, 2], [3, 3]]) >>> a array([[1, 1], [2, 2], [3, 3]]) >>> np.insert(a, 1, 5) array([1, 5, 1, 2, 2, 3, 3]) >>> np.insert(a, 1, 5, axis=1) array([[1, 5, 1], [2, 5, 2], [3, 5, 3]]) Difference between sequence and scalars: >>> np.insert(a, [1], [[1],[2],[3]], axis=1) array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), ... np.insert(a, [1], [[1],[2],[3]], axis=1)) True >>> b = a.flatten() >>> b array([1, 1, 2, 2, 3, 3]) >>> np.insert(b, [2, 2], [5, 6]) array([1, 1, 5, 6, 2, 2, 3, 3]) >>> np.insert(b, slice(2, 4), [5, 6]) array([1, 1, 5, 2, 6, 2, 3, 3]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting array([1, 1, 7, 0, 2, 2, 3, 3]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) >>> np.insert(x, idx, 999, axis=1) array([[ 0, 999, 1, 2, 999, 3], [ 4, 999, 5, 6, 999, 7]]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 elif ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from insert and raise an error", DeprecationWarning, stacklevel=2) arr = arr.copy(order=arrorder) arr[...] 
= values if wrap: return wrap(arr) else: return arr else: axis = normalize_axis_index(axis, ndim) slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): # turn it into a range object indices = arange(*obj.indices(N), **{'dtype': intp}) else: # need to copy obj, because indices will be changed in-place indices = np.array(obj) if indices.dtype == bool: # See also delete warnings.warn( "in the future insert will treat boolean arrays and " "array-likes as a boolean index instead of casting it to " "integer", FutureWarning, stacklevel=2) indices = indices.astype(intp) # Code after warning period: #if obj.ndim != 1: # raise ValueError('boolean array argument obj to insert ' # 'must be one dimensional') #indices = np.flatnonzero(obj) elif indices.ndim > 1: raise ValueError( "index array argument obj to insert must be one dimensional " "or scalar") if indices.size == 1: index = indices.item() if index < -N or index > N: raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (index < 0): index += N # There are some object array corner cases here, but we cannot avoid # that: values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) if indices.ndim == 0: # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that # it works likes the second case. (here a[:,0:1,:]) values = np.rollaxis(values, 0, (axis % values.ndim) + 1) numnew = values.shape[axis] newshape[axis] += numnew new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, index) new[slobj] = arr[slobj] slobj[axis] = slice(index, index+numnew) new[slobj] = values slobj[axis] = slice(index+numnew, None) slobj2 = [slice(None)] * ndim slobj2[axis] = slice(index, None) new[slobj] = arr[slobj2] if wrap: return wrap(new) return new elif indices.size == 0 and not isinstance(obj, np.ndarray): # Can safely cast the empty list to intp indices = indices.astype(intp) if not np.can_cast(indices, intp, 'same_kind'): # 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in insert will result in an " "error in the future", DeprecationWarning, stacklevel=2) indices = indices.astype(intp) indices[indices < 0] += N numnew = len(indices) order = indices.argsort(kind='mergesort') # stable sort indices[order] += np.arange(numnew) newshape[axis] += numnew old_mask = ones(newshape[axis], dtype=bool) old_mask[indices] = False new = empty(newshape, arr.dtype, arrorder) slobj2 = [slice(None)]*ndim slobj[axis] = indices slobj2[axis] = old_mask new[slobj] = values new[slobj2] = arr if wrap: return wrap(new) return new def append(arr, values, axis=None): """ Append values to the end of an array. Parameters ---------- arr : array_like Values are appended to a copy of this array. values : array_like These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be flattened before use. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. Returns ------- append : ndarray A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. delete : Delete elements from an array. 
    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])

    When `axis` is specified, `values` must have the correct shape.

    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
    Traceback (most recent call last):
    ...
    ValueError: arrays must have same number of dimensions

    """
    arr = asanyarray(arr)
    if axis is None:
        if arr.ndim != 1:
            arr = arr.ravel()
        values = ravel(values)
        axis = arr.ndim-1
    return concatenate((arr, values), axis=axis)
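# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the NumPy source above): a minimal, hedged
# example of two idioms documented in this section -- the boolean-mask
# alternative recommended in the `delete` Notes, and the fact that `append`
# reduces to a `concatenate` call.  The helper name and array values below
# are made up for illustration only.
def _example_mask_delete_and_append():
    import numpy as np
    arr = np.arange(10)
    mask = np.ones(arr.shape[0], dtype=bool)
    mask[[0, 2, 4]] = False
    kept = arr[mask]                     # same elements as np.delete(arr, [0, 2, 4])
    grown = np.append(arr, [99, 100])    # equivalent to np.concatenate((arr, [99, 100]))
    return kept, grown
# ---------------------------------------------------------------------------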
{ "content_hash": "228d69aa138e4818f3f1097bd5798571", "timestamp": "", "source": "github", "line_count": 5130, "max_line_length": 88, "avg_line_length": 32.910526315789475, "alnum_prop": 0.5687877226338764, "repo_name": "chatcannon/numpy", "id": "4a07815e84acb8e0e44ebecaecc3982f1555eca9", "size": "168831", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "numpy/lib/function_base.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "8085747" }, { "name": "C++", "bytes": "164478" }, { "name": "Fortran", "bytes": "10042" }, { "name": "Makefile", "bytes": "2574" }, { "name": "Objective-C", "bytes": "567" }, { "name": "Python", "bytes": "6545821" } ], "symlink_target": "" }
""" Django settings for oopsallbaddies project. Generated by 'django-admin startproject' using Django 1.11.2. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '-s*$i(n!f*zkl8-a&k5(#!out1^zwrs+p1=c282gf_lagfqhzg' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ['127.0.0.1', 'localhost', 'oopsallbaddies.cfapps.io', 'oopsallbaddies.com'] ADMIN_HONEYPOT_EMAIL_ADMINS = True ADMIN_URL = r'^blessedbethyname/' # Application definition INSTALLED_APPS = [ 'recruitment.apps.RecruitmentConfig', 'roster.apps.RosterConfig', 'api.apps.ApiConfig', 'area51.apps.Area51Config', 'home.apps.HomeConfig', 'imagekit', 'admin_honeypot', 'jet', 'django.contrib.humanize', 'django.contrib.admindocs', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'oopsallbaddies.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'oopsallbaddies.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_URL = '/static/' STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. # os.path.join(BASE_DIR, "static"), )
{ "content_hash": "66bc1dbc2a2f875f1c89cde99db1d93e", "timestamp": "", "source": "github", "line_count": 146, "max_line_length": 92, "avg_line_length": 26.753424657534246, "alnum_prop": 0.6853558627752177, "repo_name": "MotherGinger/oopsallbaddies", "id": "e9c7f2f24f8dd3d3b3e90b2c513548c3e72a0945", "size": "3906", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "oopsallbaddies/oopsallbaddies/settings.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "5440" }, { "name": "Python", "bytes": "31929" } ], "symlink_target": "" }
import numpy as np

from pystruct.models.base import StructuredModel
from sklearn.utils.extmath import safe_sparse_dot

from label import Label


def _validate_params(unary_potentials, pairwise_params, edges):
    n_states = unary_potentials.shape[-1]
    if pairwise_params.shape == (n_states, n_states):
        # only one matrix given
        pairwise_potentials = np.repeat(pairwise_params[np.newaxis, :, :],
                                        edges.shape[0], axis=0)
    else:
        if pairwise_params.shape != (edges.shape[0], n_states, n_states):
            raise ValueError("Expected pairwise_params either to "
                             "be of shape n_states x n_states "
                             "or n_edges x n_states x n_states, but"
                             " got shape %s. n_states=%d, n_edge=%d."
                             % (repr(pairwise_params.shape), n_states,
                                edges.shape[0]))
        pairwise_potentials = pairwise_params
    return n_states, pairwise_potentials


def inference_ad3(unary_potentials, pairwise_potentials, edges, relaxed=False,
                  verbose=0, return_energy=False, branch_and_bound=False,
                  n_iterations=4000):
    """Inference with AD3 dual decomposition subgradient solver.

    Parameters
    ----------
    unary_potentials : nd-array
        Unary potentials of energy function.

    pairwise_potentials : nd-array
        Pairwise potentials of energy function.

    edges : nd-array
        Edges of energy function.

    relaxed : bool (default=False)
        Whether to return the relaxed solution (``True``) or round
        to the next integer solution (``False``).

    verbose : int (default=0)
        Degree of verbosity for solver.

    return_energy : bool (default=False)
        Additionally return the energy of the returned solution (according to
        the solver).  If relaxed=False, this is the energy of the relaxed,
        not the rounded solution.

    branch_and_bound : bool (default=False)
        Whether to attempt to produce an integral solution using
        branch-and-bound.

    Returns
    -------
    labels : nd-array
        Approximate (usually) MAP variable assignment.
        If relaxed=True, this is a tuple of unary and edge 'marginals'.
""" import ad3 n_states, pairwise_potentials = \ _validate_params(unary_potentials, pairwise_potentials, edges) unaries = unary_potentials.reshape(-1, n_states) res = ad3.general_graph(unaries, edges, pairwise_potentials, verbose=1, n_iterations=n_iterations, exact=branch_and_bound) unary_marginals, pairwise_marginals, energy, solver_status = res if verbose: print solver_status[0], if solver_status in ["fractional", "unsolved"] and relaxed: unary_marginals = unary_marginals.reshape(unary_potentials.shape) y = (unary_marginals, pairwise_marginals) else: y = np.argmax(unary_marginals, axis=-1) if return_energy: return y, -energy return y def inference_gco(unary_potentials, pairwise_potentials, edges, label_costs=None, **kwargs): from pygco import cut_from_graph_gen_potts shape_org = unary_potentials.shape[:-1] n_states = unary_potentials.shape[-1] pairwise_cost = {} count = 0 for i in xrange(0, pairwise_potentials.shape[0]): count += np.sum(np.diag(pairwise_potentials[i, :]) < 0) pairwise_cost[(edges[i, 0], edges[i, 1])] = list(np.maximum( np.diag(pairwise_potentials[i, :]), 0)) unary_potentials *= -1 if 'n_iter' in kwargs: y = cut_from_graph_gen_potts(unary_potentials, pairwise_cost, label_cost=label_costs, n_iter=kwargs['n_iter']) else: y = cut_from_graph_gen_potts(unary_potentials, pairwise_cost, label_cost=label_costs) if 'return_energy' in kwargs and kwargs['return_energy']: return y[0].reshape(shape_org), y[1] else: return y[0].reshape(shape_org) class HCRF(StructuredModel): def __init__(self, n_states=2, n_features=None, n_edge_features=1, inference_method='gco', n_iter=5, alpha=1): self.all_states = set(range(0, n_states)) self.n_edge_features = n_edge_features self.n_states = n_states self.n_features = n_features self.inference_method = inference_method self.inference_calls = 0 self.alpha = alpha self.n_iter = n_iter self.size_joint_feature = (self.n_states * self.n_features + self.n_states * self.n_edge_features) def _check_size_x(self, x): features = self._get_features(x) if features.shape[1] != self.n_features: raise ValueError("Unary evidence should have %d feature per node," " got %s instead." % (self.n_features, features.shape[1])) def __repr__(self): return ("%s(n_states: %d, inference_method: %s, n_features: %d, " "n_edge_features: %d)" % (type(self).__name__, self.n_states, self.inference_method, self.n_features, self.n_edge_features)) def _get_edges(self, x): return x[1] def _get_features(self, x): return x[0] def _get_edge_features(self, x): return x[2] def latent(self, x, y, w): if self.inference_method != 'gco': raise NotImplementedError if y.full_labeled: return y unary_potentials = self._get_unary_potentials(x, w) # forbid h that is incompoatible with y # by modifying unary potentials other_states = list(self.all_states - set(y.weak)) unary_potentials[:, other_states] = -1000000 pairwise_potentials = self._get_pairwise_potentials(x, w) edges = self._get_edges(x) h = inference_gco(unary_potentials, pairwise_potentials, edges, n_iter=self.n_iter) # for l in np.unique(h): assert(l in y.weak) # return Label(h, y.weak, y.weights, False) def _get_pairwise_potentials(self, x, w): """Computes pairwise potentials for x and w. Parameters ---------- x : tuple Instance Representation. w : ndarray, shape=(size_joint_feature,) Weight vector for CRF instance. Returns ------- pairwise : ndarray, shape=(n_edges, n_states, n_states) Pairwise weights. 
""" self._check_size_w(w) self._check_size_x(x) edge_features = self._get_edge_features(x) pairwise = np.asarray(w[self.n_states * self.n_features:]) pairwise = pairwise.reshape(self.n_edge_features, -1) pairwise = np.dot(edge_features, pairwise) res = np.zeros((edge_features.shape[0], self.n_states, self.n_states)) for i in range(edge_features.shape[0]): res[i, :, :] = np.diag(pairwise[i, :]) return res def _get_unary_potentials(self, x, w): """Computes unary potentials for x and w. Parameters ---------- x : tuple Instance Representation. w : ndarray, shape=(size_joint_feature,) Weight vector for CRF instance. Returns ------- unary : ndarray, shape=(n_nodes, n_states) Unary weights. """ self._check_size_w(w) self._check_size_x(x) features = self._get_features(x) unary_params = w[:self.n_states * self.n_features].reshape( self.n_states, self.n_features) result = safe_sparse_dot(features, unary_params.T, dense_output=True) return result def joint_feature(self, x, y): self._check_size_x(x) features, edges = self._get_features(x), self._get_edges(x) edge_features = self._get_edge_features(x) n_nodes = features.shape[0] full_labeled = y.full_labeled y = y.full if isinstance(y, tuple): unary_marginals, pw = y unary_marginals = unary_marginals.reshape(n_nodes, self.n_states) pw_new = np.zeros((pw.shape[0], self.n_states)) for i in xrange(self.n_states): pw_new[:, i] = pw[:, self.n_states * i + i] pw = np.dot(edge_features.T, pw_new) else: y = y.reshape(n_nodes) gx = np.ogrid[:n_nodes] unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.float64) gx = np.ogrid[:n_nodes] unary_marginals[gx, y] = 1 pw = np.zeros((self.n_edge_features, self.n_states)) for label in xrange(self.n_states): mask = (y[edges[:, 0]] == label) & (y[edges[:, 1]] == label) pw[:, label] = np.sum(edge_features[mask], axis=0) unaries_acc = safe_sparse_dot(unary_marginals.T, features, dense_output=True) joint_feature_vector = np.hstack([unaries_acc.ravel(), pw.ravel()]) if not full_labeled: joint_feature_vector *= self.alpha return joint_feature_vector def loss(self, y, y_hat): if y.full_labeled: if isinstance(y_hat.full, tuple): w = y.weights y, y_hat = y.full, y_hat.full[0] gx = np.indices(y.shape) return np.sum(w * (1 - y_hat[gx, y])) return np.sum(y.weights * (y.full != y_hat.full)) else: # should use Kappa here if isinstance(y_hat.full, tuple): loss = 0 c = np.sum(y.weights) / float(self.n_states) yy = y_hat.full[0] for label in xrange(0, self.n_states): if label in y.weak and not np.any(yy[:,label] > 0): loss += c elif label not in y.weak: loss += np.sum(y.weights * (yy[:,label] > 0)) return loss * self.alpha loss = 0 c = np.sum(y.weights) / float(self.n_states) for label in xrange(0, self.n_states): if label in y.weak and not np.any(y_hat.full == label): loss += c elif label not in y.weak: loss += np.sum(y.weights * (y_hat.full == label)) return loss * self.alpha def max_loss(self, y): return np.sum(y.weights) def _kappa(self, y, y_hat): # not true kappa, use this to debug loss = 0 c = np.sum(y.weights) / float(self.n_states) for label in xrange(0, self.n_states): if label in y.weak and np.any(y_hat.full == label): loss -= c elif label not in y.weak: loss += np.sum(y.weights * (y_hat.full == label)) return loss * self.alpha def loss_augmented_inference(self, x, y, w, relaxed=False, return_energy=False): self.inference_calls += 1 self._check_size_w(w) unary_potentials = self._get_unary_potentials(x, w) pairwise_potentials = self._get_pairwise_potentials(x, w) edges = self._get_edges(x) if y.full_labeled: # loss 
augment unaries for label in xrange(self.n_states): mask = y.full != label unary_potentials[mask, label] += y.weights[mask] if self.inference_method == 'gco': h = inference_gco(unary_potentials, pairwise_potentials, edges, n_iter=self.n_iter, return_energy=True) y_ret = Label(h[0], None, y.weights, True) elif self.inference_method == 'ad3': h = inference_ad3(unary_potentials, pairwise_potentials, edges, relaxed=relaxed, return_energy=False, n_iterations=self.n_iter) y_ret = Label(h, None, y.weights, True, relaxed) elif self.inference_method == 'trw': from trw import trw h = trw(-unary_potentials, edges, -pairwise_potentials, max_iter=self.n_iter, relaxed=relaxed) y_ret = Label(h, None, y.weights, True, relaxed) # count = h[2] # energy = np.dot(w, self.joint_feature(x, y_ret)) + self.loss(y, y_ret) # # if count == 0 and np.abs(energy + h[1]) > 1e-4: # print 'FULL: energy does not match: %f, %f, difference=%f' % (energy, -h[1], # energy + h[1]) if return_energy: return y_ret, h[1] return y_ret else: if self.inference_method != 'gco': # only gco inference_method supported: we need label costs raise NotImplementedError # this is weak labeled example # use pygco with label costs label_costs = np.zeros(self.n_states) c = np.sum(y.weights) / float(self.n_states) for label in y.weak: label_costs[label] = c for label in xrange(0, self.n_states): if label not in y.weak: unary_potentials[:, label] += y.weights h = inference_gco(unary_potentials, pairwise_potentials, edges, label_costs, n_iter=self.n_iter, return_energy=True) y_ret = Label(h[0], None, y.weights, False) # energy = np.dot(w, self.joint_feature(x, y_ret)) + self._kappa(y, y_ret) # if h[2] == 0 and np.abs(energy + h[1]) > 1e-4: # print 'energy does not match: %f, %f, difference=%f' % (energy, -h[1], energy + h[1]) return y_ret def inference(self, x, w, relaxed=False, return_energy=False, invert=False): """Inference for x using parameters w. Finds (approximately) argmin_y np.dot(w, joint_feature(x, y)) using self.inference_method. Parameters ---------- x : tuple Instance of a graph with unary & pairwise potentials. x=(unaries, edges, pairwise) unaries are an nd-array of shape (n_nodes, n_states), edges are an nd-array of shape (n_edges, 2) pairwise are an nd-array of shape (n_edges, n_states, n_states) w : ndarray, shape=(size_joint_feature,) Parameters for the CRF energy function. relaxed : bool, default=False We do not support it yet. return_energy : bool, default=False Whether to return the energy of the solution (x, y) that was found. Returns ------- y_pred : ndarray By default an integer ndarray of shape=(width, height) of variable assignments for x is returned. """ self._check_size_w(w) self.inference_calls += 1 unary_potentials = self._get_unary_potentials(x, w) pairwise_potentials = self._get_pairwise_potentials(x, w) edges = self._get_edges(x) if invert: unary_potentials = -unary_potentials pairwise_potentials = -pairwise_potentials if self.inference_method == 'gco': h = inference_gco(unary_potentials, pairwise_potentials, edges, n_iter=self.n_iter) y_ret = Label(h, None, None, True) elif self.inference_method == 'ad3': h = inference_ad3(unary_potentials, pairwise_potentials, edges, relaxed=relaxed, return_energy=False, n_iterations=self.n_iter) y_ret = Label(h, None, None, True, relaxed) elif self.inference_method == 'trw': from trw import trw h = trw(-unary_potentials, edges, -pairwise_potentials, max_iter=self.n_iter) y_ret = Label(h, None, None, True) return y_ret
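# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): builds a tiny
# three-node chain and runs plain MAP inference with a random weight vector.
# The sizes and values below are made up for illustration, and running it
# requires the optional inference backend named in `inference_method`
# (pygco for 'gco') to be installed.
def _hcrf_toy_example():
    n_states, n_features, n_edge_features = 2, 4, 1
    model = HCRF(n_states=n_states, n_features=n_features,
                 n_edge_features=n_edge_features, inference_method='gco')
    features = np.random.rand(3, n_features)        # one feature row per node
    edges = np.array([[0, 1], [1, 2]])              # a simple chain 0-1-2
    edge_features = np.ones((2, n_edge_features))   # one feature per edge
    x = (features, edges, edge_features)
    w = np.random.rand(model.size_joint_feature)    # random CRF parameters
    return model.inference(x, w)                    # Label with one state per node
# ---------------------------------------------------------------------------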
{ "content_hash": "6a4257057573a125b94b11f3bfed6c8e", "timestamp": "", "source": "github", "line_count": 428, "max_line_length": 110, "avg_line_length": 37.80607476635514, "alnum_prop": 0.5474321735368642, "repo_name": "kondra/latent_ssvm", "id": "244649998d3df1f3b59ac36377fb3811d0309920", "size": "16311", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "heterogenous_crf.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "264104" } ], "symlink_target": "" }
import os # try/except added for compatibility with python < 3.8 try: from unittest import mock from unittest.mock import AsyncMock # pragma: NO COVER except ImportError: # pragma: NO COVER import mock import math from google.api_core import ( future, gapic_v1, grpc_helpers, grpc_helpers_async, operation, operations_v1, path_template, ) from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import operation_async # type: ignore import google.auth from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.rpc import status_pb2 # type: ignore import grpc from grpc.experimental import aio from proto.marshal.rules import wrappers from proto.marshal.rules.dates import DurationRule, TimestampRule import pytest from google.cloud.documentai_v1beta2.services.document_understanding_service import ( DocumentUnderstandingServiceAsyncClient, DocumentUnderstandingServiceClient, transports, ) from google.cloud.documentai_v1beta2.types import ( document, document_understanding, geometry, ) def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert DocumentUnderstandingServiceClient._get_default_mtls_endpoint(None) is None assert ( DocumentUnderstandingServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint ) assert ( DocumentUnderstandingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( DocumentUnderstandingServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( DocumentUnderstandingServiceClient._get_default_mtls_endpoint( sandbox_mtls_endpoint ) == sandbox_mtls_endpoint ) assert ( DocumentUnderstandingServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi ) @pytest.mark.parametrize( "client_class,transport_name", [ (DocumentUnderstandingServiceClient, "grpc"), (DocumentUnderstandingServiceAsyncClient, "grpc_asyncio"), ], ) def test_document_understanding_service_client_from_service_account_info( client_class, transport_name ): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("documentai.googleapis.com:443") @pytest.mark.parametrize( "transport_class,transport_name", [ (transports.DocumentUnderstandingServiceGrpcTransport, "grpc"), (transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_document_understanding_service_client_service_account_always_use_jwt( 
transport_class, transport_name ): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize( "client_class,transport_name", [ (DocumentUnderstandingServiceClient, "grpc"), (DocumentUnderstandingServiceAsyncClient, "grpc_asyncio"), ], ) def test_document_understanding_service_client_from_service_account_file( client_class, transport_name ): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("documentai.googleapis.com:443") def test_document_understanding_service_client_get_transport_class(): transport = DocumentUnderstandingServiceClient.get_transport_class() available_transports = [ transports.DocumentUnderstandingServiceGrpcTransport, ] assert transport in available_transports transport = DocumentUnderstandingServiceClient.get_transport_class("grpc") assert transport == transports.DocumentUnderstandingServiceGrpcTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ ( DocumentUnderstandingServiceClient, transports.DocumentUnderstandingServiceGrpcTransport, "grpc", ), ( DocumentUnderstandingServiceAsyncClient, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, "grpc_asyncio", ), ], ) @mock.patch.object( DocumentUnderstandingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DocumentUnderstandingServiceClient), ) @mock.patch.object( DocumentUnderstandingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DocumentUnderstandingServiceAsyncClient), ) def test_document_understanding_service_client_client_options( client_class, transport_class, transport_name ): # Check that if channel is provided we won't create a new one. with mock.patch.object( DocumentUnderstandingServiceClient, "get_transport_class" ) as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object( DocumentUnderstandingServiceClient, "get_transport_class" ) as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. 
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) # Check the case api_endpoint is provided options = client_options.ClientOptions( api_audience="https://language.googleapis.com" ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience="https://language.googleapis.com", ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ ( DocumentUnderstandingServiceClient, transports.DocumentUnderstandingServiceGrpcTransport, "grpc", "true", ), ( DocumentUnderstandingServiceAsyncClient, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, "grpc_asyncio", "true", ), ( DocumentUnderstandingServiceClient, transports.DocumentUnderstandingServiceGrpcTransport, "grpc", "false", ), ( DocumentUnderstandingServiceAsyncClient, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ], ) @mock.patch.object( DocumentUnderstandingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DocumentUnderstandingServiceClient), ) @mock.patch.object( DocumentUnderstandingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DocumentUnderstandingServiceAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_document_understanding_service_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) @pytest.mark.parametrize( "client_class", [DocumentUnderstandingServiceClient, DocumentUnderstandingServiceAsyncClient], ) @mock.patch.object( DocumentUnderstandingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DocumentUnderstandingServiceClient), ) @mock.patch.object( DocumentUnderstandingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DocumentUnderstandingServiceAsyncClient), ) def test_document_understanding_service_client_get_mtls_endpoint_and_cert_source( client_class, ): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source == mock_client_cert_source # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): mock_client_cert_source = mock.Mock() mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): ( api_endpoint, cert_source, ) = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ ( DocumentUnderstandingServiceClient, transports.DocumentUnderstandingServiceGrpcTransport, "grpc", ), ( DocumentUnderstandingServiceAsyncClient, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, "grpc_asyncio", ), ], ) def test_document_understanding_service_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. 
options = client_options.ClientOptions( scopes=["1", "2"], ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ ( DocumentUnderstandingServiceClient, transports.DocumentUnderstandingServiceGrpcTransport, "grpc", grpc_helpers, ), ( DocumentUnderstandingServiceAsyncClient, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_document_understanding_service_client_client_options_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) def test_document_understanding_service_client_client_options_from_dict(): with mock.patch( "google.cloud.documentai_v1beta2.services.document_understanding_service.transports.DocumentUnderstandingServiceGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None client = DocumentUnderstandingServiceClient( client_options={"api_endpoint": "squid.clam.whelk"} ) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ ( DocumentUnderstandingServiceClient, transports.DocumentUnderstandingServiceGrpcTransport, "grpc", grpc_helpers, ), ( DocumentUnderstandingServiceAsyncClient, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_document_understanding_service_client_create_channel_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, ) # test that the credentials from file are saved and used as the credentials. 
with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: creds = ga_credentials.AnonymousCredentials() file_creds = ga_credentials.AnonymousCredentials() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = client_class(client_options=options, transport=transport_name) create_channel.assert_called_with( "documentai.googleapis.com:443", credentials=file_creds, credentials_file=None, quota_project_id=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), scopes=None, default_host="documentai.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "request_type", [ document_understanding.BatchProcessDocumentsRequest, dict, ], ) def test_batch_process_documents(request_type, transport: str = "grpc"): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.batch_process_documents), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.batch_process_documents(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == document_understanding.BatchProcessDocumentsRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_batch_process_documents_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.batch_process_documents), "__call__" ) as call: client.batch_process_documents() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == document_understanding.BatchProcessDocumentsRequest() @pytest.mark.asyncio async def test_batch_process_documents_async( transport: str = "grpc_asyncio", request_type=document_understanding.BatchProcessDocumentsRequest, ): client = DocumentUnderstandingServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.batch_process_documents), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.batch_process_documents(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == document_understanding.BatchProcessDocumentsRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_batch_process_documents_async_from_dict(): await test_batch_process_documents_async(request_type=dict) def test_batch_process_documents_field_headers(): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = document_understanding.BatchProcessDocumentsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.batch_process_documents), "__call__" ) as call: call.return_value = operations_pb2.Operation(name="operations/op") client.batch_process_documents(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_batch_process_documents_field_headers_async(): client = DocumentUnderstandingServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = document_understanding.BatchProcessDocumentsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.batch_process_documents), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.batch_process_documents(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] def test_batch_process_documents_flattened(): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.batch_process_documents), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.batch_process_documents( requests=[ document_understanding.ProcessDocumentRequest(parent="parent_value") ], ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].requests mock_val = [ document_understanding.ProcessDocumentRequest(parent="parent_value") ] assert arg == mock_val def test_batch_process_documents_flattened_error(): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): client.batch_process_documents( document_understanding.BatchProcessDocumentsRequest(), requests=[ document_understanding.ProcessDocumentRequest(parent="parent_value") ], ) @pytest.mark.asyncio async def test_batch_process_documents_flattened_async(): client = DocumentUnderstandingServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.batch_process_documents), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.batch_process_documents( requests=[ document_understanding.ProcessDocumentRequest(parent="parent_value") ], ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].requests mock_val = [ document_understanding.ProcessDocumentRequest(parent="parent_value") ] assert arg == mock_val @pytest.mark.asyncio async def test_batch_process_documents_flattened_error_async(): client = DocumentUnderstandingServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.batch_process_documents( document_understanding.BatchProcessDocumentsRequest(), requests=[ document_understanding.ProcessDocumentRequest(parent="parent_value") ], ) @pytest.mark.parametrize( "request_type", [ document_understanding.ProcessDocumentRequest, dict, ], ) def test_process_document(request_type, transport: str = "grpc"): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.process_document), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = document.Document( mime_type="mime_type_value", text="text_value", uri="uri_value", ) response = client.process_document(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == document_understanding.ProcessDocumentRequest() # Establish that the response is the type that we expect. assert isinstance(response, document.Document) assert response.mime_type == "mime_type_value" assert response.text == "text_value" def test_process_document_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.process_document), "__call__") as call: client.process_document() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == document_understanding.ProcessDocumentRequest() @pytest.mark.asyncio async def test_process_document_async( transport: str = "grpc_asyncio", request_type=document_understanding.ProcessDocumentRequest, ): client = DocumentUnderstandingServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.process_document), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( document.Document( mime_type="mime_type_value", text="text_value", ) ) response = await client.process_document(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == document_understanding.ProcessDocumentRequest() # Establish that the response is the type that we expect. assert isinstance(response, document.Document) assert response.mime_type == "mime_type_value" assert response.text == "text_value" @pytest.mark.asyncio async def test_process_document_async_from_dict(): await test_process_document_async(request_type=dict) def test_process_document_field_headers(): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = document_understanding.ProcessDocumentRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.process_document), "__call__") as call: call.return_value = document.Document() client.process_document(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_process_document_field_headers_async(): client = DocumentUnderstandingServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = document_understanding.ProcessDocumentRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.process_document), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(document.Document()) await client.process_document(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. 
transport = transports.DocumentUnderstandingServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.DocumentUnderstandingServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = DocumentUnderstandingServiceClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide an api_key and a transport instance. transport = transports.DocumentUnderstandingServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = DocumentUnderstandingServiceClient( client_options=options, transport=transport, ) # It is an error to provide an api_key and a credential. options = mock.Mock() options.api_key = "api_key" with pytest.raises(ValueError): client = DocumentUnderstandingServiceClient( client_options=options, credentials=ga_credentials.AnonymousCredentials() ) # It is an error to provide scopes and a transport instance. transport = transports.DocumentUnderstandingServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = DocumentUnderstandingServiceClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.DocumentUnderstandingServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = DocumentUnderstandingServiceClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.DocumentUnderstandingServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize( "transport_class", [ transports.DocumentUnderstandingServiceGrpcTransport, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, ], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() @pytest.mark.parametrize( "transport_name", [ "grpc", ], ) def test_transport_kind(transport_name): transport = DocumentUnderstandingServiceClient.get_transport_class(transport_name)( credentials=ga_credentials.AnonymousCredentials(), ) assert transport.kind == transport_name def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance( client.transport, transports.DocumentUnderstandingServiceGrpcTransport, ) def test_document_understanding_service_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.DocumentUnderstandingServiceTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_document_understanding_service_base_transport(): # Instantiate the base transport. with mock.patch( "google.cloud.documentai_v1beta2.services.document_understanding_service.transports.DocumentUnderstandingServiceTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.DocumentUnderstandingServiceTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. methods = ( "batch_process_documents", "process_document", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): transport.operations_client # Catch all for all remaining methods and properties remainder = [ "kind", ] for r in remainder: with pytest.raises(NotImplementedError): getattr(transport, r)() def test_document_understanding_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.documentai_v1beta2.services.document_understanding_service.transports.DocumentUnderstandingServiceTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.DocumentUnderstandingServiceTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) def test_document_understanding_service_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.documentai_v1beta2.services.document_understanding_service.transports.DocumentUnderstandingServiceTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.DocumentUnderstandingServiceTransport() adc.assert_called_once() def test_document_understanding_service_auth_adc(): # If no credentials are provided, we should use ADC credentials. 
with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) DocumentUnderstandingServiceClient() adc.assert_called_once_with( scopes=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [ transports.DocumentUnderstandingServiceGrpcTransport, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, ], ) def test_document_understanding_service_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class", [ transports.DocumentUnderstandingServiceGrpcTransport, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, ], ) def test_document_understanding_service_transport_auth_gdch_credentials( transport_class, ): host = "https://language.com" api_audience_tests = [None, "https://language2.com"] api_audience_expect = [host, "https://language2.com"] for t, e in zip(api_audience_tests, api_audience_expect): with mock.patch.object(google.auth, "default", autospec=True) as adc: gdch_mock = mock.MagicMock() type(gdch_mock).with_gdch_audience = mock.PropertyMock( return_value=gdch_mock ) adc.return_value = (gdch_mock, None) transport_class(host=host, api_audience=t) gdch_mock.with_gdch_audience.assert_called_once_with(e) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.DocumentUnderstandingServiceGrpcTransport, grpc_helpers), ( transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, grpc_helpers_async, ), ], ) def test_document_understanding_service_transport_create_channel( transport_class, grpc_helpers ): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) create_channel.assert_called_with( "documentai.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=("https://www.googleapis.com/auth/cloud-platform",), scopes=["1", "2"], default_host="documentai.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "transport_class", [ transports.DocumentUnderstandingServiceGrpcTransport, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, ], ) def test_document_understanding_service_grpc_transport_client_cert_source_for_mtls( transport_class, ): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. 
with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds, ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback, ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_document_understanding_service_host_no_port(transport_name): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="documentai.googleapis.com" ), transport=transport_name, ) assert client.transport._host == ("documentai.googleapis.com:443") @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_document_understanding_service_host_with_port(transport_name): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="documentai.googleapis.com:8000" ), transport=transport_name, ) assert client.transport._host == ("documentai.googleapis.com:8000") def test_document_understanding_service_grpc_transport_channel(): channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.DocumentUnderstandingServiceGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_document_understanding_service_grpc_asyncio_transport_channel(): channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize( "transport_class", [ transports.DocumentUnderstandingServiceGrpcTransport, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, ], ) def test_document_understanding_service_transport_channel_mtls_with_client_cert_source( transport_class, ): with mock.patch( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ transports.DocumentUnderstandingServiceGrpcTransport, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, ], ) def test_document_understanding_service_transport_channel_mtls_with_adc( transport_class, ): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_document_understanding_service_grpc_lro_client(): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) transport = client.transport # Ensure that we have a api-core operations client. assert isinstance( transport.operations_client, operations_v1.OperationsClient, ) # Ensure that subsequent calls to the property send the exact same object. assert transport.operations_client is transport.operations_client def test_document_understanding_service_grpc_lro_async_client(): client = DocumentUnderstandingServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) transport = client.transport # Ensure that we have a api-core operations client. 
assert isinstance( transport.operations_client, operations_v1.OperationsAsyncClient, ) # Ensure that subsequent calls to the property send the exact same object. assert transport.operations_client is transport.operations_client def test_common_billing_account_path(): billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = DocumentUnderstandingServiceClient.common_billing_account_path( billing_account ) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "clam", } path = DocumentUnderstandingServiceClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = DocumentUnderstandingServiceClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "whelk" expected = "folders/{folder}".format( folder=folder, ) actual = DocumentUnderstandingServiceClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "octopus", } path = DocumentUnderstandingServiceClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = DocumentUnderstandingServiceClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "oyster" expected = "organizations/{organization}".format( organization=organization, ) actual = DocumentUnderstandingServiceClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nudibranch", } path = DocumentUnderstandingServiceClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = DocumentUnderstandingServiceClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "cuttlefish" expected = "projects/{project}".format( project=project, ) actual = DocumentUnderstandingServiceClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "mussel", } path = DocumentUnderstandingServiceClient.common_project_path(**expected) # Check that the path construction is reversible. actual = DocumentUnderstandingServiceClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "winkle" location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = DocumentUnderstandingServiceClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "scallop", "location": "abalone", } path = DocumentUnderstandingServiceClient.common_location_path(**expected) # Check that the path construction is reversible. 
actual = DocumentUnderstandingServiceClient.parse_common_location_path(path) assert expected == actual def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.DocumentUnderstandingServiceTransport, "_prep_wrapped_messages" ) as prep: client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.DocumentUnderstandingServiceTransport, "_prep_wrapped_messages" ) as prep: transport_class = DocumentUnderstandingServiceClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @pytest.mark.asyncio async def test_transport_close_async(): client = DocumentUnderstandingServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() def test_transport_close(): transports = { "grpc": "_grpc_channel", } for transport, close_name in transports.items(): client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" ) as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ "grpc", ] for transport in transports: client = DocumentUnderstandingServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called() @pytest.mark.parametrize( "client_class,transport_class", [ ( DocumentUnderstandingServiceClient, transports.DocumentUnderstandingServiceGrpcTransport, ), ( DocumentUnderstandingServiceAsyncClient, transports.DocumentUnderstandingServiceGrpcAsyncIOTransport, ), ], ) def test_api_key_credentials(client_class, transport_class): with mock.patch.object( google.auth._default, "get_api_key_credentials", create=True ) as get_api_key_credentials: mock_cred = mock.Mock() get_api_key_credentials.return_value = mock_cred options = client_options.ClientOptions() options.api_key = "api_key" with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, )
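# --- Illustrative note (not part of the generated test module) ---
# Every test above builds its client from AnonymousCredentials plus a mocked
# transport. Outside of tests, a minimal sketch of real usage would rely on
# Application Default Credentials instead; the import path below is the same
# one used at the top of this module:
#
#   from google.cloud.documentai_v1beta2.services.document_understanding_service import (
#       DocumentUnderstandingServiceClient,
#   )
#
#   # With no arguments the client picks up ADC and the default endpoint
#   # documentai.googleapis.com:443, as asserted in the host tests above.
#   client = DocumentUnderstandingServiceClient()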
{ "content_hash": "b47cb304c5b73e2188c9c6ee12b6765d", "timestamp": "", "source": "github", "line_count": 1864, "max_line_length": 153, "avg_line_length": 36.737124463519315, "alnum_prop": 0.6552761470837349, "repo_name": "googleapis/python-documentai", "id": "782f9b608fc04208a9270e247840b2cda4e97aa9", "size": "69078", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/unit/gapic/documentai_v1beta2/test_document_understanding_service.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "1819136" }, { "name": "Shell", "bytes": "30672" } ], "symlink_target": "" }
import click from arrow.cli import pass_context, json_loads from arrow.decorators import custom_exception, dict_output @click.command('delete_organism') @click.argument("organism_id", type=str) @pass_context @custom_exception @dict_output def cli(ctx, organism_id): """Remove an organism completely. Output: a dictionary with information about the deleted organism """ return ctx.gi.remote.delete_organism(organism_id)
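# --- Illustrative usage (not part of the original command module) ---
# A hedged sketch of how this subcommand is typically invoked from the shell
# once the arrow CLI is installed and configured; the organism id "42" is a
# hypothetical placeholder:
#
#   arrow remote delete_organism 42
#
# On success the command emits the dictionary describing the deleted
# organism, as noted in the docstring above.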
{ "content_hash": "0ea1e1747b1f1d4004e8be495cff2f36", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 60, "avg_line_length": 24.38888888888889, "alnum_prop": 0.7562642369020501, "repo_name": "galaxy-genome-annotation/python-apollo", "id": "6b3dfdda71de4f0bb39266ae85732594478a07f7", "size": "439", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "arrow/commands/remote/delete_organism.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "131" }, { "name": "Python", "bytes": "210325" }, { "name": "Shell", "bytes": "2584" } ], "symlink_target": "" }
""" Test Gemini Astroquery module. For information on how/why this test is built the way it is, see the astroquery documentation at: https://astroquery.readthedocs.io/en/latest/testing.html """ from datetime import date import json import os import pytest import requests from astropy import units from astropy.coordinates import SkyCoord from astropy.table import Table from astroquery import gemini from astroquery.gemini.urlhelper import URLHelper DATA_FILES = {"m101": "m101.json"} class MockResponse: def __init__(self, text): self.text = text def json(self): return json.loads(self.text) @pytest.fixture def patch_get(request): """ mock get requests so they return our canned JSON to mimic Gemini's archive website """ mp = request.getfixturevalue("monkeypatch") mp.setattr(requests.Session, 'request', get_mockreturn) return mp # to inspect behavior, updated when the mock get call is made saved_request = None def get_mockreturn(url, *args, **kwargs): """ generate the actual mock textual data from our included datafile with json results """ global saved_request saved_request = {'url': url, 'args': args, 'kwargs': kwargs} filename = data_path(DATA_FILES['m101']) f = open(filename, 'r') text = f.read() retval = MockResponse(text) f.close() return retval def data_path(filename): """ determine the path to our sample data file """ data_dir = os.path.join(os.path.dirname(__file__), 'data') return os.path.join(data_dir, filename) """ Coordinates to use for testing """ coords = SkyCoord(210.80242917, 54.34875, unit="deg") def test_observations_query_region(patch_get): """ test query against a region of the sky """ result = gemini.Observations.query_region(coords, radius=0.3 * units.deg) assert isinstance(result, Table) assert len(result) > 0 def test_observations_query_criteria(patch_get): """ test query against an instrument/program via criteria """ result = gemini.Observations.query_criteria(instrument='GMOS-N', program_id='GN-CAL20191122', observation_type='BIAS', utc_date=(date(2019, 10, 1), date(2019, 11, 25))) assert isinstance(result, Table) assert len(result) > 0 def test_observations_query_criteria_radius_defaults(patch_get): """ test query against an instrument/program via criteria """ result = gemini.Observations.query_criteria(instrument='GMOS-N', program_id='GN-CAL20191122', observation_type='BIAS') global saved_request assert(saved_request is not None and 'args' in saved_request and len(saved_request['args']) >= 2) assert('/sr=' not in saved_request['args'][1]) saved_request = None result = gemini.Observations.query_criteria(instrument='GMOS-N', program_id='GN-2016A-Q-9', observation_type='BIAS', coordinates=coords) assert len(result) > 0 assert(saved_request is not None and 'args' in saved_request and len(saved_request['args']) >= 2) assert('/sr=0.300000d' in saved_request['args'][1]) saved_request = None gemini.Observations.query_criteria(instrument='GMOS-N', program_id='GN-2016A-Q-9', observation_type='BIAS', objectname='m101') assert(saved_request is not None and 'args' in saved_request and len(saved_request['args']) >= 2) assert('/sr=0.300000d' in saved_request['args'][1]) def test_observations_query_raw(patch_get): """ test querying raw """ result = gemini.Observations.query_raw('GMOS-N', 'BIAS', progid='GN-CAL20191122') assert isinstance(result, Table) assert len(result) > 0 def test_url_helper_arg(): """ test the urlhelper logic """ urlh = URLHelper() args = ["foo"] kwargs = {} url = urlh.build_url(*args, **kwargs) assert url == 
"https://archive.gemini.edu/jsonsummary/notengineering/NotFail/foo" def test_url_helper_kwarg(): """ test the urlhelper logic """ urlh = URLHelper() args = [] kwargs = {"foo": "bar"} url = urlh.build_url(*args, **kwargs) assert url == "https://archive.gemini.edu/jsonsummary/notengineering/NotFail/foo=bar" def test_url_helper_radius(): """ test the urlhelper logic """ urlh = URLHelper() args = [] kwargs = {"radius": "0.4d"} url = urlh.build_url(*args, **kwargs) assert url == "https://archive.gemini.edu/jsonsummary/notengineering/NotFail/sr=0.400000d" def test_url_helper_coordinates(): """ test the urlhelper logic """ urlh = URLHelper() args = [] kwargs = {"coordinates": "210.80242917 54.348753"} url = urlh.build_url(*args, **kwargs) assert url == "https://archive.gemini.edu/jsonsummary/notengineering/NotFail/ra=210.802429/dec=54.348753" # send arg, should it have notengineering?, should it have NotFail? eng_fail_tests = [ ('notengineering', True, True), ('engineering', False, True), ('includeengineering', False, True), ('NotFail', True, True), ('AnyQA', True, False), ('Pass', True, False), ('Lucky', True, False), ('Win', True, False), ('Usable', True, False), ('Undefind', True, False), ('Fail', True, False), ] @pytest.mark.parametrize("test_arg", eng_fail_tests) def test_url_helper_eng_fail(test_arg): """ test the urlhelper logic around engineering/fail requests/defaults """ urlh = URLHelper() args = [test_arg[0]] should_have_noteng = test_arg[1] should_have_notfail = test_arg[2] kwargs = {} url = urlh.build_url(*args, **kwargs) urlsplit = url.split('/') assert(('notengineering' in urlsplit) == should_have_noteng) assert(('NotFail' in urlsplit) == should_have_notfail)
{ "content_hash": "0cb2060244e36085544b4fda9721c3ae", "timestamp": "", "source": "github", "line_count": 173, "max_line_length": 109, "avg_line_length": 33.774566473988436, "alnum_prop": 0.6482971076501797, "repo_name": "imbasimba/astroquery", "id": "494751350315edb40781bc338ee65787eca45107", "size": "5843", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "astroquery/gemini/tests/test_gemini.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "493404" }, { "name": "Python", "bytes": "2852847" } ], "symlink_target": "" }
import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='cta_event.proto', package='cta_event', serialized_pb=_b('\n\x0f\x63ta_event.proto\x12\tcta_event\"2\n\x08\x43TAEvent\x12\x14\n\x0ctelescope_id\x18\x01 \x02(\r\x12\x10\n\x04\x64\x61ta\x18\x04 \x03(\x02\x42\x02\x10\x01') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _CTAEVENT = _descriptor.Descriptor( name='CTAEvent', full_name='cta_event.CTAEvent', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='telescope_id', full_name='cta_event.CTAEvent.telescope_id', index=0, number=1, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='data', full_name='cta_event.CTAEvent.data', index=1, number=4, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], oneofs=[ ], serialized_start=30, serialized_end=80, ) DESCRIPTOR.message_types_by_name['CTAEvent'] = _CTAEVENT CTAEvent = _reflection.GeneratedProtocolMessageType('CTAEvent', (_message.Message,), dict( DESCRIPTOR = _CTAEVENT, __module__ = 'cta_event_pb2' # @@protoc_insertion_point(class_scope:cta_event.CTAEvent) )) _sym_db.RegisterMessage(CTAEvent) _CTAEVENT.fields_by_name['data'].has_options = True _CTAEVENT.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) # @@protoc_insertion_point(module_scope)
{ "content_hash": "ccdc4e277c3b1192b45ccb9b0aa7729f", "timestamp": "", "source": "github", "line_count": 73, "max_line_length": 181, "avg_line_length": 32.15068493150685, "alnum_prop": 0.7115466553046442, "repo_name": "MaxNoe/cta_event_viewer", "id": "44fd4c705f27800ca8fba540aa01e28d1b4804f8", "size": "2433", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "protobuf/cta_event_pb2.py", "mode": "33188", "license": "mit", "language": [ { "name": "Protocol Buffer", "bytes": "118" }, { "name": "Python", "bytes": "9260" } ], "symlink_target": "" }
from django.db import models

from mongo_storage.fields import MongoFileField

__all__ = (
    'DeleteFileMixin',
    'MongoDeleteFileModel',
    'MongoFileModel',
)


class DeleteFileMixin(object):
    """
    Deletes the stored file from the database when the model instance is
    deleted. The file field MUST be named 'content'.

    This can lead to a couple of issues, so make sure you're comfortable
    with them before using it:
     - the delete could succeed on the storage backend but fail in Django,
       leaving a model row whose file is already gone.
     - if multiple objects point to the same file, deleting one of them
       removes the file and leaves the others pointing at nothing.
    """

    def delete(self, using=None):
        self.content.delete()
        super(DeleteFileMixin, self).delete(using=using)


class MongoFileModel(models.Model):
    content = MongoFileField(upload_to="files")

    class Meta(object):
        abstract = True


class MongoDeleteFileModel(DeleteFileMixin, MongoFileModel):

    class Meta(object):
        abstract = True
{ "content_hash": "2704c389e2dcfacb5c33c0832b93cc10", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 69, "avg_line_length": 27.76923076923077, "alnum_prop": 0.7026777469990766, "repo_name": "madisona/django-mongo-storage", "id": "2c7b514ebdfef6e26bcbf14be9f961ed18b08c1f", "size": "1084", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mongo_storage/models.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "20876" } ], "symlink_target": "" }
''' codegen.codegen -- shortdesc codegen.codegen is a description It defines classes_and_methods @author: OnApp @copyright: 2013 organization_name. All rights reserved. @license: license @contact: user_email @deffield updated: Updated ''' # TODO - Need to add actions and designators for mixins and links import sys import os import glob from argparse import ArgumentParser from argparse import RawDescriptionHelpFormatter # Local imports import logging import OCCI.Parser from stat import S_ISDIR, S_ISREG __all__ = [] __version__ = "0.1.0" __date__ = '2013-03-28' __updated__ = '2013-04-04' # TODO handle and check schema version's from meta class CLIError(Exception): '''Generic exception to raise and log different fatal errors.''' def __init__(self, msg): super(CLIError).__init__(type(self)) self.msg = "E: %s" % msg def __str__(self): return self.msg def __unicode__(self): return self.msg def get_class( kls ): parts = kls.split('.') module = ".".join(parts[:-1]) m = __import__( module ) for comp in parts[1:]: m = getattr(m, comp) return m def main(argv=None): # IGNORE:C0111 '''Command line options.''' if argv is None: argv = sys.argv else: sys.argv.extend(argv) program_name = os.path.basename(sys.argv[0]) program_version = "v%s" % __version__ program_build_date = str(__updated__) program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date) program_shortdesc = __import__('__main__').__doc__.split("\n")[1] program_license = '''%s Created by user_name on %s. Copyright 2013 organization_name. All rights reserved. Licensed under the Apache License 2.0 http://www.apache.org/licenses/LICENSE-2.0 Distributed on an "AS IS" basis without warranties or conditions of any kind, either express or implied. USAGE ''' % (program_shortdesc, str(__date__)) try: # Process arguments parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter) parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Select to enable info output") parser.add_argument("-t", "--type", dest="outputType", required=False, help="Choose output type") parser.add_argument('-V', '--version', action='version', version=program_version_message) parser.add_argument("-o", "--output", dest="outputDst", required=True, help="Choose output directory") parser.add_argument("-f", "--file", dest="file", help="Process a single specific category/file") parser.add_argument(dest="paths", help="paths to folder(s) with source file(s) [default: %(default)s]", metavar="path", nargs='+') args = parser.parse_args() # Configure logging if (args.verbose == True): logging.basicConfig(format='%(message)s', level=logging.DEBUG) else: logging.basicConfig(format='%(message)s') # Check the output directory and get it in canonical form if not os.path.exists(args.outputDst): logging.error("Error: Output directory does not exist.") sys.exit(1) if not os.path.isdir(args.outputDst): logging.error("Error: Output is not a directory.") sys.exit(1) # Get directory in canonical form to help code down the line. 
args.outputDst = os.path.abspath(args.outputDst) + "/" # Get all input files and pass to parser files = [] for inpath in args.paths: # Get all files in this path if(os.path.exists(os.path.abspath(inpath))): mode = os.stat(inpath).st_mode # TODO consider directory recursion # Add all files in dir or specific file dependant on type if S_ISDIR(mode): files.extend(glob.glob(inpath+"/*")) if S_ISREG(mode): files.append(inpath) else: logging.warn("Ignoring (non-existent) input path " + inpath) op = OCCI.Parser.Parser(files) # Do the parse models = op.parse() # If a file/category has been specified, check it exists. if (args.file != None): found = False for model in models.list.values(): for cat in model.list.values(): if (cat.getOutputFilename() == args.file): found = True break if not found: logging.error("Error: File argument '" + args.file + "' is not valid.") sys.exit(1) # Get output class type and check sensible if args.outputType is not None: try: output_class = get_class("OCCI."+args.outputType+"."+args.outputType) # Note issubclass can raise an exception hence wrap in try and exception raised # if issubclass is False if not issubclass(output_class, OCCI.Output.Output): raise Exception() except: logging.error("Error: Output type "+args.outputType+" is not valid") sys.exit(1) # Instantiate the output class generator = output_class(models, args) # Generate output generator.go() return 0 except KeyboardInterrupt: ### handle keyboard interrupt ### return 0 # except Exception, e: # indent = len(program_name) * " " # sys.stderr.write(program_name + ": " + repr(e) + "\n") # sys.stderr.write(indent + " for help use --help\n") # return 2 if __name__ == "__main__": sys.exit(main())
{ "content_hash": "aa299a75f4c934a4e23959967b5dbf5f", "timestamp": "", "source": "github", "line_count": 174, "max_line_length": 138, "avg_line_length": 34.367816091954026, "alnum_prop": 0.5759197324414715, "repo_name": "ow2-compatibleone/accords-platform", "id": "d2342ab02d1aa3a2426334d829d74501a135339b", "size": "6058", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/codegen/codegen.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "3871076" }, { "name": "C++", "bytes": "165729" }, { "name": "Java", "bytes": "158712" }, { "name": "Objective-C", "bytes": "425" }, { "name": "PHP", "bytes": "96003" }, { "name": "Perl", "bytes": "182" }, { "name": "Python", "bytes": "180008" }, { "name": "Shell", "bytes": "128402" } ], "symlink_target": "" }
""" Copyright 2015 Logvinenko Maksim Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ class OperationRequest: def __init__(self, op_code=None, params=None): self.op_code = op_code self.params = params def __str__(self): return "OperationRequest {}: {}".format(self.op_code, self.params) class OperationResponse: def __init__(self, op_code=None, return_code=None, debug_message=None, params=None): self.op_code = op_code self.return_code = return_code self.debug_message = debug_message self.params = params def __str__(self): return "OperationResponse {}: ReturnCode: {} ({}). Parameters: {}" \ .format(self.op_code, self.return_code, self.debug_message, self.params) class EventData: def __init__(self, code=None, params=None): self.code = code self.params = params def __str__(self): return "Event {}: {}".format(self.code, self.params)
{ "content_hash": "ac58587881b0b6b5544d69d271b2d23b", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 88, "avg_line_length": 32.95454545454545, "alnum_prop": 0.6751724137931034, "repo_name": "logarithm/photon-python", "id": "d7cc9a785a2427702e69f3da36e71cd4ead7fe58", "size": "1450", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "photon/operations.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "47965" } ], "symlink_target": "" }
from .development import * ######################################### ## GENERIC ######################################### DEBUG = True ADMINS = ( ("mritd", "mritd1234@gmail.com"), ) SECRET_KEY = "DuEitHBCmausDXQpQJqY7QnJ" DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'taiga', 'USER': 'taiga', 'PASSWORD': 'taiga', 'HOST': '172.16.0.30', 'PORT': '5432', } } #SITES = { # "api": { # "scheme": "http", # "domain": "0.0.0.0:8000", # "name": "api" # }, # "front": { # "scheme": "http", # "domain": "0.0.0.0:9001", # "name": "front" # }, #} SITE_ID = "api" MEDIA_URL = "https://kb.mritd.me/media/" STATIC_URL = "https://kb.mritd.me/static/" MEDIA_ROOT = "/data/taiga/media" STATIC_ROOT = "/data/taiga/static" EVENTS_PUSH_BACKEND = "taiga.events.backends.rabbitmq.EventsPushBackend" EVENTS_PUSH_BACKEND_OPTIONS = {"url": "amqp://taiga:taiga@172.16.0.30:5672/taiga"} ######################################### ## THROTTLING ######################################### #REST_FRAMEWORK["DEFAULT_THROTTLE_RATES"] = { # "anon-write": "20/min", # "user-write": None, # "anon-read": None, # "user-read": None, # "import-mode": None, # "import-dump-mode": "1/minute", # "create-memberships": None, # "login-fail": None, # "register-success": None, # "user-detail": None, # "user-update": None, #} # This list should containt: # - Tiga users IDs # - Valid clients IP addresses (X-Forwarded-For header) #REST_FRAMEWORK["DEFAULT_THROTTLE_WHITELIST"] = [] ######################################### ## MAIL SYSTEM SETTINGS ######################################### #DEFAULT_FROM_EMAIL = "john@doe.com" #CHANGE_NOTIFICATIONS_MIN_INTERVAL = 300 #seconds # EMAIL SETTINGS EXAMPLE #EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' #EMAIL_USE_TLS = False #EMAIL_USE_SSL = False # You cannot use both (TLS and SSL) at the same time! 
#EMAIL_HOST = 'localhost' #EMAIL_PORT = 25 #EMAIL_HOST_USER = 'user' #EMAIL_HOST_PASSWORD = 'password' # GMAIL SETTINGS EXAMPLE #EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' #EMAIL_USE_TLS = True #EMAIL_HOST = 'smtp.gmail.com' #EMAIL_PORT = 587 #EMAIL_HOST_USER = 'youremail@gmail.com' #EMAIL_HOST_PASSWORD = 'yourpassword' ######################################### ## REGISTRATION ######################################### PUBLIC_REGISTER_ENABLED = True # LIMIT ALLOWED DOMAINS FOR REGISTER AND INVITE # None or [] values in USER_EMAIL_ALLOWED_DOMAINS means allow any domain #USER_EMAIL_ALLOWED_DOMAINS = None # PUCLIC OR PRIVATE NUMBER OF PROJECT PER USER #MAX_PRIVATE_PROJECTS_PER_USER = None # None == no limit #MAX_PUBLIC_PROJECTS_PER_USER = None # None == no limit #MAX_MEMBERSHIPS_PRIVATE_PROJECTS = None # None == no limit #MAX_MEMBERSHIPS_PUBLIC_PROJECTS = None # None == no limit # GITHUB SETTINGS #GITHUB_URL = "https://github.com/" #GITHUB_API_URL = "https://api.github.com/" #GITHUB_API_CLIENT_ID = "yourgithubclientid" #GITHUB_API_CLIENT_SECRET = "yourgithubclientsecret" ######################################### ## SITEMAP ######################################### # If is True /front/sitemap.xml show a valid sitemap of taiga-front client #FRONT_SITEMAP_ENABLED = False #FRONT_SITEMAP_CACHE_TIMEOUT = 24*60*60 # In second ######################################### ## FEEDBACK ######################################### # Note: See config in taiga-front too #FEEDBACK_ENABLED = True #FEEDBACK_EMAIL = "support@taiga.io" ######################################### ## STATS ######################################### #STATS_ENABLED = False #FRONT_SITEMAP_CACHE_TIMEOUT = 60*60 # In second ######################################### ## CELERY ######################################### # Set to True to enable celery and work in async mode or False # to disable it and work in sync mode. You can find the celery # settings in settings/celery.py and settings/celery-local.py #CELERY_ENABLED = True ######################################### ## IMPORTERS ######################################### # Configuration for the GitHub importer # Remember to enable it in the front client too. #IMPORTERS["github"] = { # "active": True, # Enable or disable the importer # "client_id": "XXXXXX_get_a_valid_client_id_from_github_XXXXXX", # "client_secret": "XXXXXX_get_a_valid_client_secret_from_github_XXXXXX" #} # Configuration for the Trello importer # Remember to enable it in the front client too. #IMPORTERS["trello"] = { # "active": True, # Enable or disable the importer # "api_key": "XXXXXX_get_a_valid_api_key_from_trello_XXXXXX", # "secret_key": "XXXXXX_get_a_valid_secret_key_from_trello_XXXXXX" #} # Configuration for the Jira importer # Remember to enable it in the front client too. #IMPORTERS["jira"] = { # "active": True, # Enable or disable the importer # "consumer_key": "XXXXXX_get_a_valid_consumer_key_from_jira_XXXXXX", # "cert": "XXXXXX_get_a_valid_cert_from_jira_XXXXXX", # "pub_cert": "XXXXXX_get_a_valid_pub_cert_from_jira_XXXXXX" #} # Configuration for the Asane importer # Remember to enable it in the front client too. #IMPORTERS["asana"] = { # "active": True, # Enable or disable the importer # "callback_url": "{}://{}/project/new/import/asana".format(SITES["front"]["scheme"], # SITES["front"]["domain"]), # "app_id": "XXXXXX_get_a_valid_app_id_from_asana_XXXXXX", # "app_secret": "XXXXXX_get_a_valid_app_secret_from_asana_XXXXXX" #}
{ "content_hash": "b2fa46b43b1d445dd06304d5a7b9326e", "timestamp": "", "source": "github", "line_count": 195, "max_line_length": 89, "avg_line_length": 28.81025641025641, "alnum_prop": 0.5717337130651478, "repo_name": "mritd/docker-compose", "id": "92322c1b9de58fdad04c82cd9a4c3c56561913c2", "size": "6554", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "taiga/conf/local.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "6554" }, { "name": "Shell", "bytes": "580" }, { "name": "Smarty", "bytes": "8026" } ], "symlink_target": "" }
__doc__=""" Doubles single-unit distances. """ thisFont = Glyphs.font # frontmost font selectedLayers = thisFont.selectedLayers # active layers of selected glyphs def process( thisLayer ): for thisPath in thisLayer.paths: for thisNode in thisPath.nodes: prevNode = thisNode.prevNode if prevNode.type != OFFCURVE and thisNode.type != OFFCURVE: xDistance = thisNode.x-prevNode.x yDistance = thisNode.y-prevNode.y if abs(xDistance) < 1.0 and abs(yDistance) < 1.0: thisNode.x = prevNode.x + xDistance * 2 thisNode.y = prevNode.y + yDistance * 2 thisFont.disableUpdateInterface() # suppresses UI updates in Font View for thisLayer in selectedLayers: thisGlyph = thisLayer.parent print "Processing %s" % thisGlyph.name thisGlyph.beginUndo() # begin undo grouping process( thisLayer ) thisGlyph.endUndo() # end undo grouping thisFont.enableUpdateInterface() # re-enables UI updates in Font View
{ "content_hash": "8d7aec906bb7faf7e074e9a8b0c9c186", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 75, "avg_line_length": 33.25, "alnum_prop": 0.7379162191192267, "repo_name": "schriftgestalt/Mekka-Scripts", "id": "936d121833313397e9faddc1aff3cf91f65ed52d", "size": "990", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Paths/Enlarge Single-Unit Segments.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "526540" } ], "symlink_target": "" }
"""Support for Dyson Pure Cool Link Sensors.""" import logging from libpurecool.dyson_pure_cool import DysonPureCool from libpurecool.dyson_pure_cool_link import DysonPureCoolLink from homeassistant.const import PERCENTAGE, STATE_OFF, TEMP_CELSIUS, TIME_HOURS from homeassistant.helpers.entity import Entity from . import DYSON_DEVICES SENSOR_UNITS = { "air_quality": None, "dust": None, "filter_life": TIME_HOURS, "humidity": PERCENTAGE, } SENSOR_ICONS = { "air_quality": "mdi:fan", "dust": "mdi:cloud", "filter_life": "mdi:filter-outline", "humidity": "mdi:water-percent", "temperature": "mdi:thermometer", } DYSON_SENSOR_DEVICES = "dyson_sensor_devices" _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Dyson Sensors.""" if discovery_info is None: return hass.data.setdefault(DYSON_SENSOR_DEVICES, []) unit = hass.config.units.temperature_unit devices = hass.data[DYSON_SENSOR_DEVICES] # Get Dyson Devices from parent component device_ids = [device.unique_id for device in hass.data[DYSON_SENSOR_DEVICES]] new_entities = [] for device in hass.data[DYSON_DEVICES]: if isinstance(device, DysonPureCool): if f"{device.serial}-temperature" not in device_ids: new_entities.append(DysonTemperatureSensor(device, unit)) if f"{device.serial}-humidity" not in device_ids: new_entities.append(DysonHumiditySensor(device)) elif isinstance(device, DysonPureCoolLink): new_entities.append(DysonFilterLifeSensor(device)) new_entities.append(DysonDustSensor(device)) new_entities.append(DysonHumiditySensor(device)) new_entities.append(DysonTemperatureSensor(device, unit)) new_entities.append(DysonAirQualitySensor(device)) if not new_entities: return devices.extend(new_entities) add_entities(devices) class DysonSensor(Entity): """Representation of a generic Dyson sensor.""" def __init__(self, device, sensor_type): """Create a new generic Dyson sensor.""" self._device = device self._old_value = None self._name = None self._sensor_type = sensor_type async def async_added_to_hass(self): """Call when entity is added to hass.""" self.hass.async_add_executor_job( self._device.add_message_listener, self.on_message ) def on_message(self, message): """Handle new messages which are received from the fan.""" # Prevent refreshing if not needed if self._old_value is None or self._old_value != self.state: _LOGGER.debug("Message received for %s device: %s", self.name, message) self._old_value = self.state self.schedule_update_ha_state() @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return the name of the Dyson sensor name.""" return self._name @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return SENSOR_UNITS[self._sensor_type] @property def icon(self): """Return the icon for this sensor.""" return SENSOR_ICONS[self._sensor_type] @property def unique_id(self): """Return the sensor's unique id.""" return f"{self._device.serial}-{self._sensor_type}" class DysonFilterLifeSensor(DysonSensor): """Representation of Dyson Filter Life sensor (in hours).""" def __init__(self, device): """Create a new Dyson Filter Life sensor.""" super().__init__(device, "filter_life") self._name = f"{self._device.name} Filter Life" @property def state(self): """Return filter life in hours.""" if self._device.state: return int(self._device.state.filter_life) return None class DysonDustSensor(DysonSensor): """Representation of Dyson Dust sensor (lower is better).""" def __init__(self, device): """Create 
a new Dyson Dust sensor.""" super().__init__(device, "dust") self._name = f"{self._device.name} Dust" @property def state(self): """Return Dust value.""" if self._device.environmental_state: return self._device.environmental_state.dust return None class DysonHumiditySensor(DysonSensor): """Representation of Dyson Humidity sensor.""" def __init__(self, device): """Create a new Dyson Humidity sensor.""" super().__init__(device, "humidity") self._name = f"{self._device.name} Humidity" @property def state(self): """Return Humidity value.""" if self._device.environmental_state: if self._device.environmental_state.humidity == 0: return STATE_OFF return self._device.environmental_state.humidity return None class DysonTemperatureSensor(DysonSensor): """Representation of Dyson Temperature sensor.""" def __init__(self, device, unit): """Create a new Dyson Temperature sensor.""" super().__init__(device, "temperature") self._name = f"{self._device.name} Temperature" self._unit = unit @property def state(self): """Return Temperature value.""" if self._device.environmental_state: temperature_kelvin = self._device.environmental_state.temperature if temperature_kelvin == 0: return STATE_OFF if self._unit == TEMP_CELSIUS: return float(f"{(temperature_kelvin - 273.15):.1f}") return float(f"{(temperature_kelvin * 9 / 5 - 459.67):.1f}") return None @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._unit class DysonAirQualitySensor(DysonSensor): """Representation of Dyson Air Quality sensor (lower is better).""" def __init__(self, device): """Create a new Dyson Air Quality sensor.""" super().__init__(device, "air_quality") self._name = f"{self._device.name} AQI" @property def state(self): """Return Air Quality value.""" if self._device.environmental_state: return int(self._device.environmental_state.volatil_organic_compounds) return None
{ "content_hash": "f2bdd6d560d859c06e9c71d6e097c3f7", "timestamp": "", "source": "github", "line_count": 205, "max_line_length": 83, "avg_line_length": 31.770731707317072, "alnum_prop": 0.6262858897589436, "repo_name": "tchellomello/home-assistant", "id": "98c69a7a7db1c91b9b702d0f0d0005ab8c55a13a", "size": "6513", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/components/dyson/sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1488" }, { "name": "Python", "bytes": "26713364" }, { "name": "Shell", "bytes": "4528" } ], "symlink_target": "" }
import os import sys import random import math import time class BadInputError(Exception): pass class Player(): def __init__(self, name): self.id = None self.name = name self.type = 'Human' self.hand = Hand() self.legalCards = [] self.wildCards = [] self.valueChangeCards = [] self.zeroCards = [] self.canSkip = False self.canReverse = False self.canDrawTwo = False self.canDrawFour = False self.canValueChange = False self.drew = False self.scrollMax = 0 self.points = 0 self.forceDraw = 0 def addCard(self, card): self.drew = True if self.forceDraw > 0: self.forceDraw -= 1 self.drew = False self.hand.addCard(card) def beginTurn(self): self.drew = False def didDraw(self): return self.drew def getLegalCards(self, color, value, zeroChange=False): self.canSkip = False self.canReverse = False self.canDrawTwo = False self.canDrawFour = False self.canValueChange = False self.canZeroChange = False self.legalCards = [] self.wildCards = [] self.valueChangeCards = [] self.zeroCards = [] plusFours = [] for card in self.hand: if card.isWild(): if card.getValue() == '+4': plusFours.append(card) else: self.wildCards.append(card) elif zeroChange and card.isZero(): self.canZero = True self.zeroCards.append(card) elif card.getColor() == color or card.getValue() == value: if card.getColor() != color: self.canValueChange = True self.valueChangeCards.append(card) if card.getValue() == "+2": self.canDrawTwo = True elif card.getValue() == 'R': self.canReverse = True elif card.getValue() == 'X': self.canSkip = True self.legalCards.append(card) if len(self.legalCards) == 0 and len(plusFours) > 0: self.canDrawFour = True self.wildCards += plusFours def getValidCards(self): return self.legalCards def getAllValidCards(self): return self.legalCards + self.wildCards + self.zeroCards def hasLegalCard(self): return len(self.legalCards) > 0 def addPoints(self, amount): if (self.points + amount) <= 999999999999999999999: self.points += amount def removeCard(self, index): return self.hand.removeCard(index) def assignID(self, identity): self.id = identity def getName(self): return self.name def getID(self): return self.id def getPoints(self): return self.points def getType(self): return self.type def getCardNum(self): return len(self.hand) def getHand(self, scrollNum=0, hide=False): return self.hand.show(scrollNum, hide) def getForceDraws(self): return self.forceDraw def addForceDraw(self, num): self.forceDraw += num def decreaseForceDraw(self): self.forceDraw -= 1 def removeForceDraw(self): self.forceDraw = 0 def checkCard(self, index): return self.hand.getCard(int(index)) def discardHand(self): self.hand.discard() def __str__(self): return self.name def __repr__(self): return '({},{})'.format(self.name, self.points) class Hand(): ''''deck' (Deck) : Card's Color (rgby) 'numberOfCards' (int) : Card's Value (0-9, R, X, W, +2, +4)''' def __init__(self, deck=None,numberOfCards=0): self.hand = [] if deck != None: self.draw(deck,numberOfCards) def __iter__(self): return iter(self.hand) def __len__(self): return len(self.hand) def __getitem__(self, item): try: return self.hand[item] except: return '' def addCard(self, card): self.hand.append(card) def removeCard(self, index): index = int(index) if (0 <= index < len(self)): return self.hand.pop(index) def discard(self): self.hand = [] def show(self, scrollNum=0, hide=False): if scrollNum == -1: scrollNum = 0 output = '' num = 0 header, footer, upper, lower = '', '', '', '' header += ('\033[97m\u2666--\u2666\033[0m ') upper += ('\033[97m|<-|\033[0m ') lower += ('\033[97m|<-|\033[0m ') 
footer += ('\033[97m\u2666--\u2666\033[0m ') for i in range(10): indexNum = i+(10*scrollNum) if indexNum < len(self): header += (self[indexNum].getRow(0,hide)+' ') upper += (self[indexNum].getRow(1,hide)+' ') lower += (self[indexNum].getRow(2,hide)+' ') footer += (self[indexNum].getRow(3,hide)+' ') num += 1 for j in range(10-num): j #unused header += (' ') footer += (' ') upper += (' ') lower += (' ') header += ('\033[97m\u2666--\u2666\033[0m ') upper += ('\033[97m|->|\033[0m ') lower += ('\033[97m|->|\033[0m ') footer += ('\033[97m\u2666--\u2666\033[0m ') output += (' '+header+'\n '+upper+'\n '+lower+'\n '+footer+'\n\033[97m|-(<)--') for k in range(num): output += '({})'.format(k) output += '--' for l in range(10-num): l #unused output += '-----' output += '(>)--|\033[0m\n' return output def getCard(self, index): return self.hand[index] def indexCard(self, card): return self.hand.index(card) class GameSettings(): playerIdentities = ('play1','play2','play3','play4') computerNames = ('Watson','SkyNet','Hal','Metal Gear') def __init__(self): self.playerStaging = [] # Where Player Objs Are Stored Before Game Starts self.players = {} # ID : Player Obj self.numPlayers = 0 self.useColor = True self.displayEffects = True self.hideComputerHands = True self.zeroChange = False self.computerSimulation = False self.mainMenuError = '' self.computerSpeed = 'normal' def canAddPlayer(self): return (self.numPlayers < 4) def canRemovePlayer(self): return (self.numPlayers > 0) def canBegin(self): return (self.numPlayers > 1) def addPlayer(self, player): self.playerStaging.append(player) self.numPlayers += 1 def removePlayer(self, number): number -= 1 del self.playerStaging[number] self.numPlayers -= 1 def clearStaging(self): self.numPlayers = 0 self.playerStaging = [] def finalizePlayers(self): self.players.clear() identity = 0 for player in self.playerStaging: playerID = self.playerIdentities[identity] player.assignID(playerID) self.players[playerID] = player identity += 1 def getPlayerNum(self): return self.numPlayers def getComputerName(self): complete = False index = self.numPlayers while not complete: name = self.computerNames[index] complete = True for player in self.playerStaging: if player.getName() == name: index += 1 if index >= len(self.computerNames): index = 0 complete = False return self.computerNames[index] def getRandomIdentity(self): '''For Getting a Random Player for First Turn.''' return random.choice(self.players.keys()) def compileMainMenuElements(self): def getBlankSpace(word, total): return " "*(total-len(word)) def getPlayerBox(playerNum, rowNum): if rowNum == 1: name = self.playerStaging[playerNum-1].getName() return '{}{}'.format(name, getBlankSpace(name, 29)) elif rowNum == 2: points = self.playerStaging[playerNum-1].getPoints() return 'Points: {}{}'.format(points, getBlankSpace(str(points), 21)) self.mainMenuElements= {'play1row1':'No Player ','play1row2':' ', 'play2row1':'No Player ', 'play2row2':' ', 'play3row1':'No Player ','play3row2':' ', 'play4row1':'No Player ', 'play4row2':' ', 'play1box':'\033[90m','play2box':'\033[90m','play3box':'\033[90m','play4box':'\033[90m', 'beginBox':'\033[90m','addBox':'\033[97m','removeBox':'\033[90m' } playerBoxKey = 'play{}box' playerRowKey = 'play{}row{}' i = 1 for j in self.playerStaging: j colorCode = ['\033[91m','\033[94m','\033[92m','\033[93m'] key = playerBoxKey.format(i) self.mainMenuElements[key] = colorCode[i-1] self.mainMenuElements[playerRowKey.format(i,1)] = getPlayerBox(i, 1) self.mainMenuElements[playerRowKey.format(i,2)] = 
getPlayerBox(i, 2) i+=1 if self.canBegin(): self.mainMenuElements['beginBox'] = '\033[95m' if not self.canAddPlayer(): self.mainMenuElements['addBox'] = '\033[90m' if self.canRemovePlayer(): self.mainMenuElements['removeBox'] = '\033[97m' def changeComputerSpeed(self): if self.computerSpeed == 'slow': self.computerSpeed = 'normal' elif self.computerSpeed == 'normal': self.computerSpeed = 'fast' elif self.computerSpeed == 'fast': self.computerSpeed = 'slow' def getMainMenuElements(self): return self.mainMenuElements class Deck(): ''''shuffle' (bool) : shuffle deck.''' colors = ('red','yellow','green','blue') values = ('0','1','2','3','4','5','6','7','8','9','X','R','+2') def __init__(self, populate): '''Initializes proper deck of 108 Uno Cards.''' self.deck = [] if populate: self.populate(True) def __getitem__(self, index): return self.deck[index] def populate(self, shuffle=True): for color in self.colors: for value in self.values: self.deck.append(Card(color, value)) if value != '0': self.deck.append(Card(color, value)) for i in range(4): i #unused self.deck.append(Card('wild', '+4')) self.deck.append(Card('wild', 'W')) if shuffle: self.shuffle() def __iter__(self): return iter(self.deck) def __len__(self): return len(self.deck) def draw(self): return self.deck.pop() def place(self, card): return self.deck.append(card) def insert(self, card): self.deck.insert(0, card) def shuffle(self): random.shuffle(self.deck) class ComputerPlayer(Player): def __init__(self, name): super().__init__(name) self.type = 'Computer' self.begun = False self.colorsInHand = {'red':0, 'blue':0, 'green':0, 'yellow':0, 'wild':0} self.colorsOutHand = {} self.currentColor = "" def addCard(self, card): Player.addCard(self, card) color = card.getColor() self.colorsInHand[color] += 1 def indexCard(self, cardColor, cardValue): for card in self.hand: if card.getValue() == cardValue: if cardValue in ('+4', 'W'): return self.hand.indexCard(card) else: if card.getColor() == cardColor: return self.hand.indexCard(card) raise ValueError("Card Cannot Be Found") def think(self, match): card = None self.currentColor = match.currentColor currentValue = match.currentValue zeroChangeRule = match.zeroChange twoPlayers = False previousTurnID = match.getNextTurn(True) nextTurnID = match.getNextTurn(False) previousPlayer = match.getPlayer(previousTurnID) #nextPlayer = match.getPlayer(nextTurnID) if previousTurnID == nextTurnID: twoPlayers = True if self.canSkip == False and self.canReverse == True: self.canSkip = True self.canReverse = False self.getLegalCards(self.currentColor, currentValue, zeroChangeRule) ### DRAW CASE ### if len(self.legalCards) == 0 and len(self.wildCards) == 0: return "d" else: ### NO LEGAL CARD, USE WILD CARD ### if len(self.legalCards) == 0: if zeroChangeRule and self.canZeroChange: bestZeroColor = self.getBestColor(self.zeroCards) card = self.getCardByColor(self.zeroCards, bestZeroColor) else: if self.canDrawFour: card = self.getCardByValue(self.wildCards, "+4") print(card) else: card = random.choice(self.wildCards) else: ### HAS LEGAL CARD ### if twoPlayers and self.canSkip: #Always play a skip card in a two player game #print("Shed Skip Strategy") card = self.getCardByValue(self.legalCards,"R", "X") if self.canReverse and previousPlayer.didDraw(): #print("Reverse Strategy") reverseCards = self.getAllCardsByValue(self.legalCards, "R") for reverseCard in reverseCards: if reverseCard.getColor() == self.currentColor: card = reverseCard if self.canValueChange: # Computer Can Value Change, However, Should it? 
# Computer Checks to See if Value Change Color is Better Than Current currentColorNum = self.colorsInHand[self.currentColor] bestValueChangeColor = self.getBestColor(self.valueChangeCards) if self.colorsInHand[bestValueChangeColor] > currentColorNum or len(self.valueChangeCards) == len(self.legalCards): card = self.getCardByColor(self.valueChangeCards, bestValueChangeColor) if card == None: #print("Random Strategy") card = random.choice(list(set(self.legalCards) - set(self.valueChangeCards))) color = card.getColor() self.colorsInHand[color] -= 1 return str(self.indexCard(card.getColor(), card.getValue())) def getWildColor(self): maxKey = max(self.colorsInHand, key=self.colorsInHand.get) if maxKey == 'wild': return random.choice(('r','g','b','y')) else: return maxKey def getCardByValue(self, cardList, *values): for card in cardList: if card.getValue() in values: return card def getAllCardsByValue(self, cardList, *values): cards = [] for card in cardList: if card.getValue() in values: cards.append(card) return cards def getCardByColor(self, cardList, *colors): for card in cardList: if card.getColor() in colors: return card def getBestColor(self, cardList): bestColor = None bestColorNum = 0 for card in cardList: color = card.getColor() if self.colorsInHand[color] > bestColorNum: bestColor = color bestColorNum = self.colorsInHand[color] return bestColor class Card(): ''' 'suit' (string) : Card's Color (rgby) 'rank' (string) : Card's Value (0-9, R, X, W, +2, +4) ''' colors = { 'red' : '\033[91m', 'green' : '\033[92m', 'yellow' : '\033[93m', 'blue' : '\033[94m', 'purple' : '\033[95m', 'cyan' : '\033[96m', 'white' : '\033[97m', 'wild' : '', 'dwild' : '', 'dred' : '\033[31m', 'dgreen' : '\033[32m', 'dyellow' : '\033[33m', 'dblue' : '\033[34m', 'dpurple' : '\033[35m', 'dcyan' : '\033[36m', 'dwhite' : '\033[37m', } idMap = { 'red':'R','blue':'B','green':'G','yellow':'Y','wild':'W', '0':'0','1':'1','2':'2','3':'3','4':'4','5':'5','6':'6','7':'7','8':'8','9':'9', '+2':'+','R':'R','W':'W','+4':'$','X':'X' } bigNums = { "0" : [" .d888b. ","d88P Y88b","888 888","888 888","888 888","888 888","d88P Y88b"," \"Y888P\" "], "1" : [" d888 "," d8888 "," 888 "," 888 "," 888 "," 888 "," 888 "," 8888888 "], "2" : [".d8888b. ","d88P Y88","d8 888"," .d88P",".od888P\" ","d88P\" ","888\" ","888888888"], "3" : [" .d8888b.","d88P Y88"," .d88"," 8888\" "," \"Y8b","888 88","Y88b d88"," \"Y8888P\""], "4" : [" d88b "," d8P88 "," d8 88 "," d8 88 ","d8 88 ","888888888"," 88 "," 88 "], "5" : ["888888888","888 ","888 ","8888888b "," \"Y88b "," 888","Y88b d88P","\"Y8888P\" "], "6" : [" .d888b. ","d88P Y88b","888 ","888d888b ","888P \"Y8b","888 888","Y88b d88b"," \"Y888P\" "], "7" : ["888888888"," d8P"," d8P "," d8P "," 8888888 "," d8P "," d8P ","d8P "], "8" : [" .d888b. ","d8P Y8b","Y8b. d8P"," \"Y8888\" "," .dP\"Yb. ","888 888","Y88b d88P"," \"Y888P\" "], "9" : [" .d888b. ","d8P Y8b","88 88","Y8b. 
d88"," \"Y88P888"," 888","Y88b d88P"," \"Y888P\" "], "X" : ["Y8b d8P"," Y8b d8P "," Y8o8P "," Y8P "," d8b "," d888b "," d8P Y8b ","d8P Y8b"], "W" : ["88 88","88 88","88 o 88","88 d8b 88","88d888b88","88P Y88","8P Y8","P Y"], "+2" : [" db "," 88 ","C8888D "," 88 8888"," VP 8"," 8888"," 8 "," 8888"], "+4" : [" db "," 88 ","C8888D "," 88 d "," VP d8 "," d 8 "," d8888"," 8 "], "R9" : [" d88P "," d88P "," d88P "," d88P "," Y88b "," Y88b "," Y88b "," Y88b "], "R8" : [" d88P "," d88P "," d88P ","d88P ","Y88b "," Y88b "," Y88b "," Y88b "], "R7" : [" d88P Y"," d88P ","d88P ","88P ","88b ","Y88b "," Y88b "," Y88b d"], "R6" : [" d88P Y8","d88P Y","88P ","8P ","8b ","88b ","Y88b d"," Y88b d8"], "R5" : ["d88P Y88","88P Y8","8P Y","P ","b ","8b d","88b d8","Y88b d88"], "R4" : ["88P Y88b","8P Y88","P Y8"," Y"," d","b d8","8b d88","88b d88P"], "R3" : ["8P Y88b ","P Y88b"," Y88"," Y8"," d8"," d88","b d88P","8b d88P "], "R2" : ["P Y88b "," Y88b "," Y88b"," Y88"," d88"," d88P"," d88P ","b d88P "], "R1" : [" Y88b "," Y88b "," Y88b "," Y88b"," d88P"," d88P "," d88P "," d88P "], "R0" : [" Y88b "," Y88b "," Y88b "," Y88b "," d88P "," d88P "," d88P "," d88P "], } def __init__(self, color, value): '''Initializes Uno Card w/ Color and Value.''' self.wild = False #Is wild card? self.zero = False self.cardID = '{}{}'.format(self.idMap[color],self.idMap[value]) self.setColor(color) self.setValue(value) self.setPoints(value) ############################################# ### -\/- Retrieve Card Information -\/- ### def __repr__(self): return "{},{}".format(self.color, self.value) def getBigNum(self, reverse, reverseSeed=0): '''Returns list of strings to draw card's value on the pile.''' bigNums = [] colorCode = self.colorCode colorCodeDark = self.colorCodeDark value = self.value if value == 'R': if not reverse: value += str(reverseSeed) else: value += str(9-reverseSeed) for mid in self.bigNums[value]: bigNums += ['{}| |{}'.format(colorCode,colorCodeDark)+mid+'{}| |\033[0m\t'.format(colorCode)] return bigNums def getColor(self): '''Returns card's color.''' return self.color def getColorCode(self): '''Returns card's color code.''' return self.colorCode def getValue(self): '''Returns card's value.''' return self.value def getPoints(self): '''Returns card's point value.''' return self.points def getRow(self,rowNum,hide=False): value = self.value displaySpace = self.displaySpace if hide: colorCode = '\033[97m' value = '?' displaySpace = ' ' else: colorCode = self.colorCode if self.isWild(): if rowNum == 0: colorCode = '\033[91m' elif rowNum == 1: colorCode = '\033[93m' elif rowNum == 2: colorCode = '\033[92m' elif rowNum == 3: colorCode = '\033[94m' if rowNum == 0: return '{}\u2666--\u2666\033[0m'.format(colorCode) elif rowNum == 1: return '{}|{}{}|\033[0m'.format(colorCode, displaySpace, value) elif rowNum == 2: if hide: return '{}|? 
|\033[0m'.format(colorCode) else: return '{}| |\033[0m'.format(colorCode) elif rowNum == 3: return '{}\u2666--\u2666\033[0m'.format(colorCode) ############################################# ### -\/- Set Card Information -\/- ### def setColor(self, color): '''Sets Card's color and escape code.''' if color == 'blue': self.color = 'blue' self.colorCode = self.colors['blue'] self.colorCodeDark = self.colors['dblue'] elif color == 'red': self.color = 'red' self.colorCode = self.colors['red'] self.colorCodeDark = self.colors['dred'] elif color == 'yellow': self.color = 'yellow' self.colorCode = self.colors['yellow'] self.colorCodeDark = self.colors['dyellow'] elif color == 'green': self.color = 'green' self.colorCode = self.colors['green'] self.colorCodeDark = self.colors['dgreen'] elif color == 'wild': #No color modification self.wild = True self.color = 'wild' self.colorCodeDark = self.colors['dwild'] self.colorCode = self.colors['wild'] def setValue(self, value): if value in ('0','1','2','3','4','5','6','7','8','9','X','R','+2','+4','W'): self.value = value self.displaySpace = ' ' if len(value) == 2: self.displaySpace = '' if value == '0': self.zero = True def setPoints(self, value): if value in ('0','1','2','3','4','5','6','7','8','9'): self.points = int(value) elif value in ("W", "+4"): self.points = 50 else: self.points = 20 ############################################# ### -\/- Wild Card Methods -\/- ### def changeColor(self, color): '''Changes Card's Color, Intended for Wild Cards.''' self.setColor(color) def isWild(self): '''Returns if card is a wild card.''' return self.wild def isZero(self): return self.zero class Match(): elementsInit = { ### Names (final) ### 'P1Name':' ', 'P2Name':' ', 'P3Name':' ', 'P4Name':' ', ### Card Values ### 'P1Cards':' ', 'P2Cards':' ', 'P3Cards':' ', 'P4Cards':' ', ### Turn Colors / Hand### 'P1Turn':'', 'P2Turn':'', 'P3Turn':'', 'P4Turn':'', 'HName':'\t\t', 'HVisual':'' ,'Hand':'', ### Deck ### 'DNum':'', 'Deck':['','','','','','','','',''], 'PostDNum':'', ### Pile ### 'uHeader':'\t\t\t\t', 'uMiddle':' ', 'uLower':' ', 'oHeader':'\t\t\t', 'oMiddle':['\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t'], ### Messages ### 'Console':'', 'Error':'' } speeds = {'slow':2,'normal':1,'fast':0} def __init__(self, gs): ### Decks ### self.deck = Deck(True) self.pile = Deck(False) ### Player Information ### self.players = gs.players self.turnList = [] self.handTitles = {'play1':'','play2':'','play3':'','play4':''} ### Carry Information ### self.displayEffects = gs.displayEffects self.hideComputerHands = gs.hideComputerHands self.zeroChange = gs.zeroChange self.computerSpeed = self.speeds[gs.computerSpeed] self.simulation = gs.computerSimulation ### Data ### self.handPosition = 0 # For hand displays self.drawAmount = 0 # Used for force draws self.passes = 0 # Keep track of consecutive passes for emergency color change self.passMax = 0 # Max passes before color change self.turn = '' # Current turn self.event = '' # Wild, Reverse, Skip, etc self.wildColorChange = '' # Specifies color to change wild card to self.currentColor = '' # Current color self.currentValue = '' # Current value self.winnerID = '' # ID of Player who Won self.reverse = False # Is turn order reversed self.turnComplete = False # Is turn complete self.matchComplete = False # Is the Game over? self.matchAbort = False # Did the match conclude without a winner? 
self.forcedWild = False # Force change wild ### Initialize Names / Cards / Deck (Assuming New Game) ### self.elements = dict(self.elementsInit) keyStringName = 'P{}Name' keyStringCards = 'P{}Cards' for i in self.players: self.elements[keyStringName.format(i[-1])] = self.players[i].getName()+(' '*(11-len(self.players[i].getName()))) self.elements[keyStringCards.format(i[-1])] = ' '+(' '*(3-len(str(self.players[i].getCardNum()))))+str(self.players[i].getCardNum())+' Cards' self.elements['DNum'] = len(self.deck) if len(str(len(self.deck))) < 2: self.elements['PostDNum'] = '\t' j = 8 for i in range(int(math.ceil(len(self.deck)/12))): self.elements['Deck'][j] = '=' j -= 1 for key in GameSettings.playerIdentities: try: self.buildHandString(key) self.turnList += [key] except KeyError: pass self.passMax = len(self.turnList) def clearShell(self): os.system('cls' if os.name == 'nt' else 'clear') def begin(self): self.elements['Console'] = 'Beginning Game, Press Enter.' print(self.drawScreen()) self.enterBreak() self.eventDealCards() self.turn = random.choice(self.turnList) self.elements['Console'] = 'First turn will be {}. Press Enter.'.format(self.players[self.turn].getName()) print(self.drawScreen(True)) self.enterBreak() self.placeCard() self.elements['P{}Turn'.format(self.turn[-1])] = '\033[93m' if self.event == 'wild': self.eventWildCard() elif self.event == 'reverse': self.eventReverse() def end(self, gs): if not self.matchAbort: points = 0 self.elements['P{}Turn'.format(self.turn[-1])] = '' self.elements['Console'] = '{} Wins! Press Enter to Begin Point Tally'.format(self.players[self.winnerID].getName()) print(self.drawScreen()) self.enterBreak() for identity in self.turnList: if identity != self.winnerID: self.turn = identity self.elements['HName'] = self.handTitles[self.turn] self.elements['P{}Turn'.format(self.turn[-1])] = '\033[93m' while self.players[identity].getCardNum() > 0: card = self.players[identity].removeCard(0) points += card.getPoints() self.elements['Console'] = '{} Won {} Points!'.format(self.players[self.winnerID].getName(),points) keyStringCards = 'P{}Cards' self.elements[keyStringCards.format(identity[-1])] = ' '+(' '*(3-len(str(self.players[identity].getCardNum()))))+str(self.players[identity].getCardNum())+' Cards' self.players[identity].maxScroll = math.ceil((self.players[identity].getCardNum() / 10)-1) if self.handPosition > self.players[identity].maxScroll: self.handPosition -= 1 self.buildHandVisual(identity) if self.displayEffects and not self.simulation: print(self.drawScreen()) time.sleep(.1) self.elements['P{}Turn'.format(self.turn[-1])] = '' self.players[self.winnerID].addPoints(points) self.elements['Console'] = '{} Won {} Points! 
Press Enter'.format(self.players[self.winnerID].getName(),points) print(self.drawScreen()) self.enterBreak() gs.clearStaging() for identity in self.turnList: self.players[identity].discardHand() gs.addPlayer(self.players[identity]) return gs def adjustCardAmount(self, playerID): keyStringCards = 'P{}Cards' self.elements[keyStringCards.format(playerID[-1])] = ' '+(' '*(3-len(str(self.players[playerID].getCardNum()))))+str(self.players[playerID].getCardNum())+' Cards' self.players[playerID].maxScroll = math.ceil((self.players[playerID].getCardNum() / 10)-1) if self.handPosition > self.players[playerID].maxScroll: self.handPosition -= 1 self.buildHandVisual(playerID) def buildHandString(self, playerID): playerName = self.players[playerID].getName() if len(playerName) < 9: self.handTitles[playerID] = "{}'s Hand\t".format(self.players[playerID].getName()) else: self.handTitles[playerID] = "{}'s Hand".format(self.players[playerID].getName()) def buildHandVisual(self, playerID): string = '[' for i in range(self.players[playerID].maxScroll+1): if i == self.handPosition: string += '|' else: string += '-' string += ']' self.elements['HVisual'] = string def checkInput(self, playerInput): if playerInput == '': return {'valid':False,'entry':playerInput} if playerInput.isnumeric(): if int(playerInput)+(10*self.handPosition) < self.players[self.turn].getCardNum(): return {'valid':True,'entry':str(int(playerInput)+(10*self.handPosition)),'type':'card'} else: self.elements['Error'] = '{} is not a card.'.format(playerInput) return {'valid':False,'entry':playerInput} else: playerInput = playerInput.lower()[0] if playerInput in ['<','>','u','d','p','q','s']: return {'valid':True,'entry':playerInput} else: self.elements['Error'] = '{} is not a valid selection.'.format(playerInput) return {'valid':False,'entry':playerInput} def checkColorInput(self, playerInput): if playerInput == '': return {'valid':False,'entry':playerInput} playerInput = str(playerInput).lower()[0] if playerInput[0] == 'b': return {'valid':True,'entry':'blue'} elif playerInput[0] == 'r': return {'valid':True,'entry':'red'} elif playerInput[0] == 'g': return {'valid':True,'entry':'green'} elif playerInput[0] == 'y': return {'valid':True,'entry':'yellow'} return {'valid':False,'entry':playerInput} def eventDealCards(self): if self.displayEffects and not self.simulation: self.elements['Console'] = 'Dealing Cards...' for i in ('play1','play2','play3','play4'): if i in self.players: for j in range(7): j #unused self.dealCard(i) if self.displayEffects and not self.simulation: print(self.drawScreen(True)) time.sleep(.1) def eventReverse(self): if self.displayEffects and not self.simulation: hide = False if self.players[self.turn].getType() == "Computer": hide = self.hideComputerHands self.elements['Console'] = "Reverse Card Played! Reversing Turn Order.".format(self.players[self.turn].getName()) print(self.drawScreen(hide)) time.sleep(1) for i in range(10): cardBigNums = self.pile[0].getBigNum(self.reverse,i) self.elements['oMiddle'] = cardBigNums print(self.drawScreen(hide)) if self.displayEffects and not self.simulation: time.sleep(.1) cardBigNums = self.pile[0].getBigNum(self.reverse,9) self.elements['oMiddle'] = cardBigNums self.reverse = not self.reverse self.event = '' def eventSkip(self): if self.displayEffects and not self.simulation: hide = False if self.players[self.turn].getType() == "Computer": hide = self.hideComputerHands self.elements['Console'] = "Skip Card Placed! 
Skipping {}'s Turn.".format(self.players[self.turn].getName()) print(self.drawScreen(hide)) time.sleep(1) for i in range(2): i #unused self.elements['P{}Turn'.format(self.turn[-1])] = '\033[91m' print(self.drawScreen(hide)) time.sleep(.3) self.elements['P{}Turn'.format(self.turn[-1])] = '' print(self.drawScreen(hide)) time.sleep(.3) self.turnComplete = True self.event = '' def eventWildCard(self): hide = False if not self.forcedWild: if self.players[self.turn].getType() == 'Human': self.elements['Console'] = 'Wild Card! Specifiy a Color: (B)lue, (R)ed, (G)reen, (Y)ellow' self.elements['Error'] = 'Specifiy A Color' print(self.drawScreen()) playerInput = str(input("Color Change: ")) checked = self.checkColorInput(playerInput) while not checked['valid']: if checked['entry'] == '<': self.handPosition -= 1 if self.handPosition == -1: self.handPosition = self.players[self.turn].maxScroll self.buildHandVisual(self.turn) elif checked['entry'] == '>': self.handPosition += 1 if self.handPosition > self.players[self.turn].maxScroll: self.handPosition = 0 self.buildHandVisual(self.turn) print(self.drawScreen()) playerInput = str(input("Color Change: ")) checked = self.checkColorInput(playerInput) else: hide = self.hideComputerHands checked = self.checkColorInput(self.players[self.turn].getWildColor()) self.wildColorChange = checked['entry'] else: self.wildColorChange = self.checkColorInput(random.choice(('r','b','g','y')))['entry'] self.forcedWild = False self.currentColor = self.wildColorChange self.elements['Error'] = "" if self.displayEffects and not self.simulation: self.elements['Console'] = 'Wild Card! Changing Color.' seed = 1 for i in range(10): i #unused if seed > 4: seed = 1 print(self.drawScreen(hide,wildSeed=seed)) time.sleep(.1) seed += 1 self.pile[0].changeColor(self.wildColorChange) self.wildColorChange = '' cardBigNums = self.pile[0].getBigNum(self.reverse) self.elements['oHeader'] = '{}\u2666\u2666\u2666=========\u2666\u2666\u2666\033[0m\t'.format(self.pile[0].getColorCode()) self.elements['oMiddle'] = cardBigNums self.event = '' def eventDraw(self): self.players[self.turn].addForceDraw(self.drawAmount) self.drawAmount = 0 self.event = '' def dealCard(self, playerID): card = self.deck.draw() self.players[playerID].addCard(card) ### Adjust Hand Visual ### self.players[playerID].maxScroll = math.ceil((self.players[playerID].getCardNum() / 10)-1) self.handPosition = self.players[playerID].maxScroll self.buildHandVisual(playerID) ### Adjust Player Tile ### keyStringCards = 'P{}Cards' self.elements[keyStringCards.format(playerID[-1])] = ' '+(' '*(3-len(str(self.players[playerID].getCardNum()))))+str(self.players[playerID].getCardNum())+' Cards' ### Adjust Deck ### self.elements['DNum'] = len(self.deck) if len(str(len(self.deck))) < 2: self.elements['PostDNum'] = '\t' j = 8 self.elements['Deck'] = [' ',' ',' ',' ',' ',' ',' ',' ', ' '] for i in range(math.ceil(len(self.deck)/12)): i #unused self.elements['Deck'][j] = '=' j -= 1 def placeCard(self, card=None): if card == None: ### Used At Beginning For First Card ### card = self.deck.draw() self.elements['DNum'] = len(self.deck) cardColor = card.getColorCode() cardBigNums = card.getBigNum(self.reverse) self.currentColor = card.getColor() self.currentValue = card.getValue() self.pile.insert(card) self.elements['oHeader'] = '{}\u2666\u2666\u2666=========\u2666\u2666\u2666\033[0m\t'.format(cardColor) self.elements['oMiddle'] = cardBigNums if len(self.pile) > 1: previousCard = self.pile[1] previousCardColor = previousCard.getColorCode() 
self.elements['uHeader'] = '{} \u2666\u2666\u2666=========\u2666\u2666\u2666\033[0m\t\t'.format(previousCardColor) self.elements['uMiddle'] = '{}| |\033[0m'.format(previousCardColor) self.elements['uLower'] = '{}\u2666\u2666\u2666\033[0m'.format(previousCardColor) if self.currentColor == 'wild': self.event = 'wild' if self.currentValue == 'X': self.event = 'skip' elif self.currentValue == 'R': if len(self.players) > 2: self.event = 'reverse' else: self.event = 'skip' elif self.currentValue == '+4': self.drawAmount = 4 elif self.currentValue == '+2': self.drawAmount = 2 self.passes = 0 def extractCard(self, playerID, index): card = self.players[playerID].removeCard(index) if self.players[playerID].getCardNum() == 0: self.matchComplete = True self.winnerID = self.turn self.adjustCardAmount(playerID) return card def enterBreak(self): if not self.simulation: str(input()) return def nextTurn(self): self.turnComplete = False self.handPosition = 0 turnType = self.players[self.turn].getType() self.players[self.turn].beginTurn() ### Prepare Hand Visuals ### self.elements['HName'] = self.handTitles[self.turn] self.buildHandVisual(self.turn) if self.event == 'skip': self.eventSkip() elif self.drawAmount > 0: self.eventDraw() while not self.turnComplete: if turnType == 'Human': self.players[self.turn].getLegalCards(self.currentColor, self.currentValue, self.zeroChange) if len(self.deck) > 0: self.elements['Console'] = 'Select a card, (D)raw, or (P)ause.' else: self.players[self.turn].removeForceDraw() self.elements['Console'] = 'Select a card, (D)raw, (P)ause, or Pas(s).' if self.players[self.turn].getForceDraws() > 0: self.elements['Error'] = 'Draw Card Played! Draw {} cards.'.format(self.players[self.turn].getForceDraws()) print(self.drawScreen()) playerInput = str(input("\033[97mSelection: \033[92m")) checked = self.checkInput(playerInput) while not checked['valid']: print(self.drawScreen()) playerInput = str(input("\033[97mSelection: \033[92m")) checked = self.checkInput(playerInput) playerInput = checked['entry'] if playerInput == '<': self.handPosition -= 1 if self.handPosition == -1: self.handPosition = self.players[self.turn].maxScroll self.buildHandVisual(self.turn) elif playerInput == '>': self.handPosition += 1 if self.handPosition > self.players[self.turn].maxScroll: self.handPosition = 0 self.buildHandVisual(self.turn) elif playerInput == 'd': if len(self.deck) > 0: self.elements['Error'] = '' self.dealCard(self.turn) else: self.elements['Error'] = "Cannot Draw. Deck is Empty" elif playerInput == 'p': pauseOutput = self.pauseScreen() if pauseOutput == 'quit': self.matchComplete = True self.turnComplete = True self.winnerID = 'play1' self.matchAbort = True elif playerInput == 's': if len(self.deck) > 0: self.elements['Error'] = "Cannot pass until Deck is empty." elif len(self.players[self.turn].getAllValidCards()) > 0: self.elements['Error'] = "Cannot pass while having playable cards." 
else: self.turnComplete = True self.passes += 1 if self.passes == self.passMax: self.forcedWild = True self.event = 'wild' self.passes = 0 elif playerInput.isnumeric(): if self.players[self.turn].getForceDraws() == 0: cardCheck = self.players[self.turn].checkCard(playerInput) if cardCheck in self.players[self.turn].getAllValidCards(): card = self.extractCard(self.turn, playerInput) self.placeCard(card) self.elements['Error'] = "" self.turnComplete = True else: self.elements['Error'] = "Card Doesn't Match The Color {} or Value {}!".format(self.currentColor, self.currentValue) else: pass elif turnType == 'Computer': self.elements['Console'] = '{}\'s Turn'.format(self.players[self.turn].getName()) print(self.drawScreen(self.hideComputerHands)) if not self.simulation: time.sleep(self.computerSpeed) #str(input()) while (True): if self.displayEffects and not self.simulation: time.sleep(.2) if self.players[self.turn].getForceDraws() > 0 and len(self.deck) > 0: cardIndex = 'd' else: cardIndex = self.players[self.turn].think(self) if cardIndex.isnumeric(): card = self.extractCard(self.turn, int(cardIndex)) if card.getColor() != self.currentColor: self.resetDrawBool() self.placeCard(card) self.turnComplete = True break else: if cardIndex == 'd': if len(self.deck) > 0: self.dealCard(self.turn) print(self.drawScreen(self.hideComputerHands)) else: self.turnComplete = True self.players[self.turn].removeForceDraw() self.passes += 1 if self.passes == self.passMax: self.forcedWild = True self.event = 'wild' self.passes = 0 break ### DECODE INPUT ### if self.event == 'reverse': self.eventReverse() elif self.event == 'wild': self.eventWildCard() # Clear Current Turn self.elements['P{}Turn'.format(self.turn[-1])] = '' # Prepare Next Turn self.turn = self.getNextTurn() self.elements['P{}Turn'.format(self.turn[-1])] = '\033[93m' def drawScreen(self, hide=False, wildSeed=0): if self.simulation: return '' colorCombos = { 1 : ['\033[91m','\033[93m','\033[92m','\033[94m'], 2 : ['\033[94m','\033[91m','\033[93m','\033[92m'], 3 : ['\033[92m','\033[94m','\033[91m','\033[93m'], 4 : ['\033[93m','\033[92m','\033[94m','\033[91m'] } currentTurn = self.turn if currentTurn == '': currentTurn = self.turnList[-1] hide = True if wildSeed != 0: colorMod = colorCombos[wildSeed] else: colorMod = ['','','',''] self.clearShell() screenout = '' screenout += '\t\t\033[94m || ||\033[92m ||\ || \033[91m// \\\\\n\033[0m' screenout += '\t\t\033[94m || ||\033[92m ||\\\|| \033[91m(( ))\n\033[0m' screenout += '\t\t\033[94m \\\ //\033[92m || \|| \033[91m \\\ //\n\033[0m' screenout += '\033[97m===============================================================\n' screenout += '\033[93m{}\033[0m\n'.format(self.elements['Console']) screenout += '\033[97m===============================================================\n' screenout += '\t\t\t\t\t\t' + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P1Turn']) screenout += '\033[97mDeck:\t\t' + '{}'.format(self.elements['uHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P1Turn'],self.elements['P1Name']) screenout += '\033[97m{} Cards'.format(self.elements['DNum']) + '{}'.format(self.elements['PostDNum'])+'\t' + '{}'.format(self.elements['uHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P1Turn'],self.elements['P1Cards']) screenout += '\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[0],self.elements['oHeader']) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P1Turn']) screenout += '\033[97m _+_ \t\t ' + 
'{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[1],self.elements['oHeader']) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P2Turn']) screenout += '\033[97m | ' + '\033[92m{}\033[0m'.format(self.elements['Deck'][0]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[2],self.elements['oMiddle'][0]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P2Turn'],self.elements['P2Name']) screenout += '\033[97m | ' + '\033[92m{}\033[0m'.format(self.elements['Deck'][1]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[3],self.elements['oMiddle'][1]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P2Turn'],self.elements['P2Cards']) screenout += '\033[97m | ' + '\033[92m{}\033[0m'.format(self.elements['Deck'][2]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[0],self.elements['oMiddle'][2]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P2Turn']) screenout += '\033[97m | ' + '\033[93m{}\033[0m'.format(self.elements['Deck'][3]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[1],self.elements['oMiddle'][3]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P3Turn']) screenout += '\033[97m | ' + '\033[93m{}\033[0m'.format(self.elements['Deck'][4]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[2],self.elements['oMiddle'][4]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P3Turn'],self.elements['P3Name']) screenout += '\033[97m | ' + '\033[93m{}\033[0m'.format(self.elements['Deck'][5]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[3],self.elements['oMiddle'][5]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P3Turn'],self.elements['P3Cards']) screenout += '\033[97m | ' + '\033[91m{}\033[0m'.format(self.elements['Deck'][6]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uLower']) + '\033[97m{}{}'.format(colorMod[0],self.elements['oMiddle'][6]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P3Turn']) screenout += '\033[97m | ' + '\033[91m{}\033[0m'.format(self.elements['Deck'][7]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uLower']) + '\033[97m{}{}'.format(colorMod[1],self.elements['oMiddle'][7]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P4Turn']) screenout += '\033[97m |_' + '\033[91m{}\033[0m'.format(self.elements['Deck'][8]) + '\033[97m_|\t\t ' + '\033[97m{}{}'.format(colorMod[2],self.elements['oHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P4Turn'],self.elements['P4Name']) screenout += '\033[97m\t\t ' + '\033[97m{}{}'.format(colorMod[3],self.elements['oHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P4Turn'],self.elements['P4Cards']) screenout += '\t\t\t\t\t\t' + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P4Turn']) screenout += "\033[97m{}".format(self.elements['HName']) + "\t\t\t\t {}\n".format(self.elements['HVisual']) screenout += '\033[97m===============================================================\n' screenout += self.players[currentTurn].getHand(self.handPosition,hide) screenout += '\033[91m{}\033[0m'.format(self.elements['Error']) return screenout def pauseScreen(self): while True: self.clearShell() print('\n\t\t\tPause') print('\n\t\t1. Resume') print('\t\t2. 
Quit') selection = str(input('\nSelection: ')).upper() while selection not in ['1', '2']: print('\nSelection Invalid') selection = str(input('\nSelection: ')).upper() if selection == '1' or "": return "" elif selection == '2': return "quit" def isComplete(self): return self.matchComplete def next(self): self.turn = self.getNextTurn() def getNextTurn(self, forceReverse=False): if forceReverse: reverse = not self.reverse else: reverse = self.reverse currentIndex = self.turnList.index(self.turn) if not reverse: if (currentIndex + 1) == len(self.turnList): return self.turnList[0] else: return self.turnList[currentIndex+1] else: if currentIndex == 0: return self.turnList[len(self.turnList) - 1] else: return self.turnList[currentIndex-1] def getPlayer(self, playerID): return self.players[playerID] def resetDrawBool(self): for identity in self.players: self.players[identity].drew = False def Uno(debugging=False): ###MENUS### def clearShell(): os.system('cls' if os.name == 'nt' else 'clear') def mainMenu(): sys.stdout.write("\x1b[8;32;63t") sys.stdout.flush() gs = GameSettings() while True: print(drawMainMenu(gs)) selection = str(input('\033[97mSelection: \033[92m')) while selection not in ['1', '2', '3', '4', '5']: gs.mainMenuError = "Invalid Selection" print(drawMainMenu(gs)) selection = str(input('\033[97mSelection: \033[92m')) if selection == '1': if gs.canBegin(): gs.mainMenuError = "" gs.finalizePlayers() gs = playMatch(gs) else: gs.mainMenuError = "Two Players Required to Begin" elif selection == '2': if gs.canAddPlayer(): gs.mainMenuError = "" gs = addPlayer(gs) else: gs.mainMenuError = "Max Number of Players Reached" elif selection == '3': if gs.canAddPlayer(): gs.mainMenuError = "" gs = addComputer(gs) else: gs.mainMenuError = "Max Number of Players Reached" elif selection == '4': if gs.canRemovePlayer(): gs.mainMenuError = "" gs = removePlayer(gs) else: gs.mainMenuError = "No Players to Remove" elif selection == '5': gs.mainMenuError = "" gs = settingsMenu(gs) else: raise BadInputError('Data Provided Has No Function') def playMatch(gs): for i in range(1): i m = Match(gs) m.begin() while (not m.isComplete()): m.nextTurn() gs = m.end(gs) return gs def addPlayer(gs): colors = ['\033[91m','\033[94m', '\033[92m', '\033[93m'] nameOkay = False playerNum = gs.getPlayerNum() + 1 colorIndex = playerNum - 1 message = "\033[97mPlease Enter Player {}'s Name: {}".format(playerNum, colors[colorIndex]) while not nameOkay: print(drawMainMenu(gs)) name = str(input(message)).title() if len(name) > 11: gs.mainMenuError = "Name Must Be 11 Characters or Less!" elif len(name) == 0: gs.mainMenuError = "" return gs else: nameOkay = True for player in gs.playerStaging: if player.getName() == name: nameOkay = False if nameOkay == False or name in GameSettings.computerNames: gs.mainMenuError = "Name Cannot Match Another Player's Name!" p = Player(name) gs.addPlayer(p) gs.mainMenuError = "" return gs def addComputer(gs): name = gs.getComputerName() c = ComputerPlayer(name) gs.addPlayer(c) return gs def removePlayer(gs): sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=32, cols=63)) sys.stdout.flush() clearShell() complete = False playerNum = gs.getPlayerNum() message = "\033[97mPlease Enter Player Number to Remove: \033[91m".format(playerNum) while (not complete): print(drawMainMenu(gs)) number = str(input(message)) if len(number) == 0: gs.mainMenuError = "" return gs try: number = int(number) if 0 < number <= playerNum: complete = True else: gs.mainMenuError = "Invalid Player Number!" 
except: gs.mainMenuError = "Please Enter the Player Number, not Name!" gs.mainMenuError = "" gs.removePlayer(number) return gs def settingsMenu(gs): while True: sys.stdout.write("\x1b[8;32;63t") sys.stdout.flush() clearShell() print('\n\t\tSettings') print('\n\t1. Draw Effects\t\t\t{}'.format(gs.displayEffects)) print('\t2. Hide Computer Hands\t\t{}'.format(gs.hideComputerHands)) print('\t3. Computer Speed\t\t{}'.format(gs.computerSpeed.title())) #print('\t4. Zero Card Changes Color\t{}'.format(gs.zeroChange)) #print('\t5. Run Simulations\t\t{}'.format(gs.computerSimulation)) print('\n\tA. Exit') selection = str(input('\nSelection: ')).upper() while selection not in ('1', '2', '3', '4', '5', 'A', ''): print('\nSelection Invalid') selection = str(input('\nSelection: ')).upper() if selection == '1': gs.displayEffects = not gs.displayEffects elif selection == '2': gs.hideComputerHands = not gs.hideComputerHands elif selection == '3': gs.changeComputerSpeed() ''' elif selection == '4': gs.zeroChange = not gs.zeroChange elif selection == '5': gs.computerSimulation = not gs.computerSimulation ''' elif selection == 'A' or selection == '' or selection in ('4','5'): return gs def drawMainMenu(gs): clearShell() gs.compileMainMenuElements() menuElements = gs.getMainMenuElements() screenout = '' screenout += '\t\t\033[94m || ||\033[92m ||\ || \033[91m// \\\\\n\033[0m' screenout += '\t\t\033[94m || ||\033[92m ||\\\|| \033[91m(( ))\n\033[0m' screenout += '\t\t\033[94m \\\ //\033[92m || \|| \033[91m \\\ //\n\033[0m' screenout += '\033[97m===============================================================\033[0m\n' screenout += "{}1-----------------------------1\033[0m {}2-----------------------------2\033[0m\n".format(menuElements['play1box'],menuElements['play2box']) screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play1box'],menuElements['play1row1'],menuElements['play2box'],menuElements['play2row1']) screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play1box'],menuElements['play1row2'],menuElements['play2box'],menuElements['play2row2']) screenout += "{}1-----------------------------1\033[0m {}2-----------------------------2\033[0m\n".format(menuElements['play1box'],menuElements['play2box']) screenout += "{}3-----------------------------3\033[0m {}4-----------------------------4\033[0m\n".format(menuElements['play3box'],menuElements['play4box']) screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play3box'],menuElements['play3row1'],menuElements['play4box'],menuElements['play4row1']) screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play3box'],menuElements['play3row2'],menuElements['play4box'],menuElements['play4row2']) screenout += "{}3-----------------------------3\033[0m {}4-----------------------------4\033[0m\n".format(menuElements['play3box'],menuElements['play4box']) screenout += "\033[97m===============================================================\033[0m\n" screenout += " {}\u2666---------------------------\u2666\033[0m \u2666===========================\u2666\n".format(menuElements['beginBox']) screenout += " {}|1. Begin Match |\033[0m | High Scores |\n".format(menuElements['beginBox']) screenout += " {}\u2666---------------------------\u2666\033[0m \u2666---------------------------\u2666\n".format(menuElements['beginBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox']) screenout += " {}|2. 
Add Player |\033[0m | |\n".format(menuElements['addBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox']) screenout += " {}|3. Add Computer |\033[0m | |\n".format(menuElements['addBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['removeBox']) screenout += " {}|4. Remove Player |\033[0m | |\n".format(menuElements['removeBox']) screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['removeBox']) screenout += " \033[97m\u2666---------------------------\u2666\033[0m | |\n" screenout += " \033[97m|5. Settings |\033[0m | |\n" screenout += " \033[97m\u2666---------------------------\u2666\033[0m \u2666===========================\u2666\n" screenout += "\033[97m===============================================================\033[0m\n" screenout += '\033[91m{}\033[0m'.format(gs.mainMenuError) return screenout mainMenu() if __name__ == "__main__": Uno()
{ "content_hash": "a913bdd3b824728c38e0c2f172ef8e5d", "timestamp": "", "source": "github", "line_count": 1535, "max_line_length": 439, "avg_line_length": 43.715309446254075, "alnum_prop": 0.47483719058760415, "repo_name": "ActiveState/code", "id": "04cccec53e500fdd1bf45b3d570ec76b31c5ca98", "size": "67103", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "recipes/Python/580811_Uno_TextBased/recipe-580811.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "35894" }, { "name": "C", "bytes": "56048" }, { "name": "C++", "bytes": "90880" }, { "name": "HTML", "bytes": "11656" }, { "name": "Java", "bytes": "57468" }, { "name": "JavaScript", "bytes": "181218" }, { "name": "PHP", "bytes": "250144" }, { "name": "Perl", "bytes": "37296" }, { "name": "Perl 6", "bytes": "9914" }, { "name": "Python", "bytes": "17387779" }, { "name": "Ruby", "bytes": "40233" }, { "name": "Shell", "bytes": "190732" }, { "name": "Tcl", "bytes": "674650" } ], "symlink_target": "" }
"""Support for Netatmo Smart thermostats.""" from __future__ import annotations import logging import pyatmo import voluptuous as vol from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( CURRENT_HVAC_HEAT, CURRENT_HVAC_IDLE, DEFAULT_MIN_TEMP, HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF, PRESET_AWAY, PRESET_BOOST, SUPPORT_PRESET_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ( ATTR_BATTERY_LEVEL, ATTR_TEMPERATURE, PRECISION_HALVES, STATE_OFF, TEMP_CELSIUS, ) from homeassistant.core import callback from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers import config_validation as cv, entity_platform from homeassistant.helpers.device_registry import async_get_registry from homeassistant.helpers.dispatcher import async_dispatcher_connect from .const import ( ATTR_HEATING_POWER_REQUEST, ATTR_SCHEDULE_NAME, ATTR_SELECTED_SCHEDULE, DATA_DEVICE_IDS, DATA_HANDLER, DATA_HOMES, DATA_SCHEDULES, DOMAIN, EVENT_TYPE_CANCEL_SET_POINT, EVENT_TYPE_SCHEDULE, EVENT_TYPE_SET_POINT, EVENT_TYPE_THERM_MODE, MANUFACTURER, SERVICE_SET_SCHEDULE, SIGNAL_NAME, ) from .data_handler import HOMEDATA_DATA_CLASS_NAME, HOMESTATUS_DATA_CLASS_NAME from .netatmo_entity_base import NetatmoBase _LOGGER = logging.getLogger(__name__) PRESET_FROST_GUARD = "Frost Guard" PRESET_SCHEDULE = "Schedule" PRESET_MANUAL = "Manual" SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE SUPPORT_HVAC = [HVAC_MODE_HEAT, HVAC_MODE_AUTO, HVAC_MODE_OFF] SUPPORT_PRESET = [PRESET_AWAY, PRESET_BOOST, PRESET_FROST_GUARD, PRESET_SCHEDULE] STATE_NETATMO_SCHEDULE = "schedule" STATE_NETATMO_HG = "hg" STATE_NETATMO_MAX = "max" STATE_NETATMO_AWAY = PRESET_AWAY STATE_NETATMO_OFF = STATE_OFF STATE_NETATMO_MANUAL = "manual" STATE_NETATMO_HOME = "home" PRESET_MAP_NETATMO = { PRESET_FROST_GUARD: STATE_NETATMO_HG, PRESET_BOOST: STATE_NETATMO_MAX, PRESET_SCHEDULE: STATE_NETATMO_SCHEDULE, PRESET_AWAY: STATE_NETATMO_AWAY, STATE_NETATMO_OFF: STATE_NETATMO_OFF, } NETATMO_MAP_PRESET = { STATE_NETATMO_HG: PRESET_FROST_GUARD, STATE_NETATMO_MAX: PRESET_BOOST, STATE_NETATMO_SCHEDULE: PRESET_SCHEDULE, STATE_NETATMO_AWAY: PRESET_AWAY, STATE_NETATMO_OFF: STATE_NETATMO_OFF, STATE_NETATMO_MANUAL: STATE_NETATMO_MANUAL, STATE_NETATMO_HOME: PRESET_SCHEDULE, } HVAC_MAP_NETATMO = { PRESET_SCHEDULE: HVAC_MODE_AUTO, STATE_NETATMO_HG: HVAC_MODE_AUTO, PRESET_FROST_GUARD: HVAC_MODE_AUTO, PRESET_BOOST: HVAC_MODE_HEAT, STATE_NETATMO_OFF: HVAC_MODE_OFF, STATE_NETATMO_MANUAL: HVAC_MODE_AUTO, PRESET_MANUAL: HVAC_MODE_AUTO, STATE_NETATMO_AWAY: HVAC_MODE_AUTO, } CURRENT_HVAC_MAP_NETATMO = {True: CURRENT_HVAC_HEAT, False: CURRENT_HVAC_IDLE} DEFAULT_MAX_TEMP = 30 NA_THERM = "NATherm1" NA_VALVE = "NRV" async def async_setup_entry(hass, entry, async_add_entities): """Set up the Netatmo energy platform.""" data_handler = hass.data[DOMAIN][entry.entry_id][DATA_HANDLER] await data_handler.register_data_class( HOMEDATA_DATA_CLASS_NAME, HOMEDATA_DATA_CLASS_NAME, None ) home_data = data_handler.data.get(HOMEDATA_DATA_CLASS_NAME) if not home_data or home_data.raw_data == {}: raise PlatformNotReady if HOMEDATA_DATA_CLASS_NAME not in data_handler.data: raise PlatformNotReady entities = [] for home_id in get_all_home_ids(home_data): for room_id in home_data.rooms[home_id]: signal_name = f"{HOMESTATUS_DATA_CLASS_NAME}-{home_id}" await data_handler.register_data_class( HOMESTATUS_DATA_CLASS_NAME, signal_name, None, home_id=home_id ) home_status = data_handler.data.get(signal_name) if 
home_status and room_id in home_status.rooms: entities.append(NetatmoThermostat(data_handler, home_id, room_id)) hass.data[DOMAIN][DATA_SCHEDULES][home_id] = { schedule_id: schedule_data.get("name") for schedule_id, schedule_data in ( data_handler.data[HOMEDATA_DATA_CLASS_NAME].schedules[home_id].items() ) } hass.data[DOMAIN][DATA_HOMES] = { home_id: home_data.get("name") for home_id, home_data in ( data_handler.data[HOMEDATA_DATA_CLASS_NAME].homes.items() ) } _LOGGER.debug("Adding climate devices %s", entities) async_add_entities(entities, True) platform = entity_platform.async_get_current_platform() if home_data is not None: platform.async_register_entity_service( SERVICE_SET_SCHEDULE, {vol.Required(ATTR_SCHEDULE_NAME): cv.string}, "_async_service_set_schedule", ) class NetatmoThermostat(NetatmoBase, ClimateEntity): """Representation a Netatmo thermostat.""" def __init__(self, data_handler, home_id, room_id): """Initialize the sensor.""" ClimateEntity.__init__(self) super().__init__(data_handler) self._id = room_id self._home_id = home_id self._home_status_class = f"{HOMESTATUS_DATA_CLASS_NAME}-{self._home_id}" self._data_classes.extend( [ { "name": HOMEDATA_DATA_CLASS_NAME, SIGNAL_NAME: HOMEDATA_DATA_CLASS_NAME, }, { "name": HOMESTATUS_DATA_CLASS_NAME, "home_id": self._home_id, SIGNAL_NAME: self._home_status_class, }, ] ) self._home_status = self.data_handler.data[self._home_status_class] self._room_status = self._home_status.rooms[room_id] self._room_data = self._data.rooms[home_id][room_id] self._model = NA_VALVE for module in self._room_data.get("module_ids"): if self._home_status.thermostats.get(module): self._model = NA_THERM break self._device_name = self._data.rooms[home_id][room_id]["name"] self._name = f"{MANUFACTURER} {self._device_name}" self._current_temperature = None self._target_temperature = None self._preset = None self._away = None self._operation_list = [HVAC_MODE_AUTO, HVAC_MODE_HEAT] self._support_flags = SUPPORT_FLAGS self._hvac_mode = None self._battery_level = None self._connected = None self._away_temperature = None self._hg_temperature = None self._boilerstatus = None self._setpoint_duration = None self._selected_schedule = None if self._model == NA_THERM: self._operation_list.append(HVAC_MODE_OFF) self._unique_id = f"{self._id}-{self._model}" async def async_added_to_hass(self) -> None: """Entity created.""" await super().async_added_to_hass() for event_type in ( EVENT_TYPE_SET_POINT, EVENT_TYPE_THERM_MODE, EVENT_TYPE_CANCEL_SET_POINT, EVENT_TYPE_SCHEDULE, ): self._listeners.append( async_dispatcher_connect( self.hass, f"signal-{DOMAIN}-webhook-{event_type}", self.handle_event, ) ) registry = await async_get_registry(self.hass) device = registry.async_get_device({(DOMAIN, self._id)}, set()) self.hass.data[DOMAIN][DATA_DEVICE_IDS][self._home_id] = device.id async def handle_event(self, event): """Handle webhook events.""" data = event["data"] if self._home_id != data["home_id"]: return if data["event_type"] == EVENT_TYPE_SCHEDULE and "schedule_id" in data: self._selected_schedule = self.hass.data[DOMAIN][DATA_SCHEDULES][ self._home_id ].get(data["schedule_id"]) self.async_write_ha_state() self.data_handler.async_force_update(self._home_status_class) return home = data["home"] if self._home_id != home["id"]: return if data["event_type"] == EVENT_TYPE_THERM_MODE: self._preset = NETATMO_MAP_PRESET[home[EVENT_TYPE_THERM_MODE]] self._hvac_mode = HVAC_MAP_NETATMO[self._preset] if self._preset == PRESET_FROST_GUARD: self._target_temperature = self._hg_temperature elif 
self._preset == PRESET_AWAY: self._target_temperature = self._away_temperature elif self._preset == PRESET_SCHEDULE: self.async_update_callback() self.data_handler.async_force_update(self._home_status_class) self.async_write_ha_state() return for room in home.get("rooms", []): if data["event_type"] == EVENT_TYPE_SET_POINT and self._id == room["id"]: if room["therm_setpoint_mode"] == STATE_NETATMO_OFF: self._hvac_mode = HVAC_MODE_OFF self._preset = STATE_NETATMO_OFF self._target_temperature = 0 elif room["therm_setpoint_mode"] == STATE_NETATMO_MAX: self._hvac_mode = HVAC_MODE_HEAT self._preset = PRESET_MAP_NETATMO[PRESET_BOOST] self._target_temperature = DEFAULT_MAX_TEMP elif room["therm_setpoint_mode"] == STATE_NETATMO_MANUAL: self._hvac_mode = HVAC_MODE_HEAT self._target_temperature = room["therm_setpoint_temperature"] else: self._target_temperature = room["therm_setpoint_temperature"] if self._target_temperature == DEFAULT_MAX_TEMP: self._hvac_mode = HVAC_MODE_HEAT self.async_write_ha_state() return if ( data["event_type"] == EVENT_TYPE_CANCEL_SET_POINT and self._id == room["id"] ): self.async_update_callback() self.async_write_ha_state() return @property def supported_features(self): """Return the list of supported features.""" return self._support_flags @property def temperature_unit(self): """Return the unit of measurement.""" return TEMP_CELSIUS @property def current_temperature(self): """Return the current temperature.""" return self._current_temperature @property def target_temperature(self): """Return the temperature we try to reach.""" return self._target_temperature @property def target_temperature_step(self) -> float | None: """Return the supported step of target temperature.""" return PRECISION_HALVES @property def hvac_mode(self): """Return hvac operation ie. 
heat, cool mode.""" return self._hvac_mode @property def hvac_modes(self): """Return the list of available hvac operation modes.""" return self._operation_list @property def hvac_action(self) -> str | None: """Return the current running hvac operation if supported.""" if self._model == NA_THERM and self._boilerstatus is not None: return CURRENT_HVAC_MAP_NETATMO[self._boilerstatus] # Maybe it is a valve if self._room_status and self._room_status.get("heating_power_request", 0) > 0: return CURRENT_HVAC_HEAT return CURRENT_HVAC_IDLE async def async_set_hvac_mode(self, hvac_mode: str) -> None: """Set new target hvac mode.""" if hvac_mode == HVAC_MODE_OFF: await self.async_turn_off() elif hvac_mode == HVAC_MODE_AUTO: if self.hvac_mode == HVAC_MODE_OFF: await self.async_turn_on() await self.async_set_preset_mode(PRESET_SCHEDULE) elif hvac_mode == HVAC_MODE_HEAT: await self.async_set_preset_mode(PRESET_BOOST) async def async_set_preset_mode(self, preset_mode: str) -> None: """Set new preset mode.""" if self.hvac_mode == HVAC_MODE_OFF: await self.async_turn_on() if self.target_temperature == 0: await self._home_status.async_set_room_thermpoint( self._id, STATE_NETATMO_HOME, ) if ( preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX] and self._model == NA_VALVE and self.hvac_mode == HVAC_MODE_HEAT ): await self._home_status.async_set_room_thermpoint( self._id, STATE_NETATMO_HOME, ) elif ( preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX] and self._model == NA_VALVE ): await self._home_status.async_set_room_thermpoint( self._id, STATE_NETATMO_MANUAL, DEFAULT_MAX_TEMP, ) elif ( preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX] and self.hvac_mode == HVAC_MODE_HEAT ): await self._home_status.async_set_room_thermpoint( self._id, STATE_NETATMO_HOME ) elif preset_mode in [PRESET_BOOST, STATE_NETATMO_MAX]: await self._home_status.async_set_room_thermpoint( self._id, PRESET_MAP_NETATMO[preset_mode] ) elif preset_mode in [PRESET_SCHEDULE, PRESET_FROST_GUARD, PRESET_AWAY]: await self._home_status.async_set_thermmode(PRESET_MAP_NETATMO[preset_mode]) else: _LOGGER.error("Preset mode '%s' not available", preset_mode) self.async_write_ha_state() @property def preset_mode(self) -> str | None: """Return the current preset mode, e.g., home, away, temp.""" return self._preset @property def preset_modes(self) -> list[str] | None: """Return a list of available preset modes.""" return SUPPORT_PRESET async def async_set_temperature(self, **kwargs): """Set new target temperature for 2 hours.""" temp = kwargs.get(ATTR_TEMPERATURE) if temp is None: return await self._home_status.async_set_room_thermpoint( self._id, STATE_NETATMO_MANUAL, temp ) self.async_write_ha_state() @property def extra_state_attributes(self): """Return the state attributes of the thermostat.""" attr = {} if self._battery_level is not None: attr[ATTR_BATTERY_LEVEL] = self._battery_level if self._model == NA_VALVE: attr[ATTR_HEATING_POWER_REQUEST] = self._room_status.get( "heating_power_request", 0 ) if self._selected_schedule is not None: attr[ATTR_SELECTED_SCHEDULE] = self._selected_schedule return attr async def async_turn_off(self): """Turn the entity off.""" if self._model == NA_VALVE: await self._home_status.async_set_room_thermpoint( self._id, STATE_NETATMO_MANUAL, DEFAULT_MIN_TEMP, ) elif self.hvac_mode != HVAC_MODE_OFF: await self._home_status.async_set_room_thermpoint( self._id, STATE_NETATMO_OFF ) self.async_write_ha_state() async def async_turn_on(self): """Turn the entity on.""" await self._home_status.async_set_room_thermpoint(self._id, 
STATE_NETATMO_HOME) self.async_write_ha_state() @property def available(self) -> bool: """If the device hasn't been able to connect, mark as unavailable.""" return bool(self._connected) @callback def async_update_callback(self): """Update the entity's state.""" self._home_status = self.data_handler.data[self._home_status_class] if self._home_status is None: if self.available: self._connected = False return self._room_status = self._home_status.rooms.get(self._id) self._room_data = self._data.rooms.get(self._home_id, {}).get(self._id) if not self._room_status or not self._room_data: if self._connected: _LOGGER.info( "The thermostat in room %s seems to be out of reach", self._device_name, ) self._connected = False return roomstatus = {"roomID": self._room_status.get("id", {})} if self._room_status.get("reachable"): roomstatus.update(self._build_room_status()) self._away_temperature = self._data.get_away_temp(self._home_id) self._hg_temperature = self._data.get_hg_temp(self._home_id) self._setpoint_duration = self._data.setpoint_duration[self._home_id] self._selected_schedule = roomstatus.get("selected_schedule") if "current_temperature" not in roomstatus: return if self._model is None: self._model = roomstatus["module_type"] self._current_temperature = roomstatus["current_temperature"] self._target_temperature = roomstatus["target_temperature"] self._preset = NETATMO_MAP_PRESET[roomstatus["setpoint_mode"]] self._hvac_mode = HVAC_MAP_NETATMO[self._preset] self._battery_level = roomstatus.get("battery_state") self._connected = True self._away = self._hvac_mode == HVAC_MAP_NETATMO[STATE_NETATMO_AWAY] def _build_room_status(self): """Construct room status.""" try: roomstatus = { "roomname": self._room_data["name"], "target_temperature": self._room_status["therm_setpoint_temperature"], "setpoint_mode": self._room_status["therm_setpoint_mode"], "current_temperature": self._room_status["therm_measured_temperature"], "module_type": self._data.get_thermostat_type( home_id=self._home_id, room_id=self._id ), "module_id": None, "heating_status": None, "heating_power_request": None, "selected_schedule": self._data._get_selected_schedule( # pylint: disable=protected-access home_id=self._home_id ).get( "name" ), } batterylevel = None for module_id in self._room_data["module_ids"]: if ( self._data.modules[self._home_id][module_id]["type"] == NA_THERM or roomstatus["module_id"] is None ): roomstatus["module_id"] = module_id if roomstatus["module_type"] == NA_THERM: self._boilerstatus = self._home_status.boiler_status( roomstatus["module_id"] ) roomstatus["heating_status"] = self._boilerstatus batterylevel = self._home_status.thermostats[ roomstatus["module_id"] ].get("battery_state") elif roomstatus["module_type"] == NA_VALVE: roomstatus["heating_power_request"] = self._room_status[ "heating_power_request" ] roomstatus["heating_status"] = roomstatus["heating_power_request"] > 0 if self._boilerstatus is not None: roomstatus["heating_status"] = ( self._boilerstatus and roomstatus["heating_status"] ) batterylevel = self._home_status.valves[roomstatus["module_id"]].get( "battery_state" ) if batterylevel: roomstatus["battery_state"] = batterylevel return roomstatus except KeyError as err: _LOGGER.error("Update of room %s failed. 
Error: %s", self._id, err) return {} async def _async_service_set_schedule(self, **kwargs): schedule_name = kwargs.get(ATTR_SCHEDULE_NAME) schedule_id = None for sid, name in self.hass.data[DOMAIN][DATA_SCHEDULES][self._home_id].items(): if name == schedule_name: schedule_id = sid if not schedule_id: _LOGGER.error("%s is not a valid schedule", kwargs.get(ATTR_SCHEDULE_NAME)) return await self._data.async_switch_home_schedule( home_id=self._home_id, schedule_id=schedule_id ) _LOGGER.debug( "Setting %s schedule to %s (%s)", self._home_id, kwargs.get(ATTR_SCHEDULE_NAME), schedule_id, ) @property def device_info(self): """Return the device info for the thermostat.""" return {**super().device_info, "suggested_area": self._room_data["name"]} def get_all_home_ids(home_data: pyatmo.HomeData) -> list[str]: """Get all the home ids returned by NetAtmo API.""" if home_data is None: return [] return [ home_data.homes[home_id]["id"] for home_id in home_data.homes if "modules" in home_data.homes[home_id] ]
{ "content_hash": "9597807f92b9bbbce7a62bcaa49f57b1", "timestamp": "", "source": "github", "line_count": 610, "max_line_length": 107, "avg_line_length": 35.29672131147541, "alnum_prop": 0.5787933677023827, "repo_name": "kennedyshead/home-assistant", "id": "ce1eba11b70ab63569d1662570ba498059f00848", "size": "21531", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/components/netatmo/climate.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1795" }, { "name": "Python", "bytes": "33970989" }, { "name": "Shell", "bytes": "4900" } ], "symlink_target": "" }
from flask import session
from flask_admin.contrib.sqla import ModelView
from flask_admin.form import SecureForm


class TalkView(ModelView):
    list_columns = ['id', 'speaker_facebook_id', 'speaker', 'title', 'description', 'likes']
    form_base_class = SecureForm

    def is_accessible(self):
        if not session.get('logged'):
            return False
        else:
            return True
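# Illustrative sketch (added for clarity; not part of the original module). It shows one
# common way a flask_admin ModelView such as this one is attached to an Admin instance.
# The `admin`, `Talk` (SQLAlchemy model) and `db_session` names are assumptions passed in
# as parameters rather than objects defined in this repository.
def _example_register_talk_view(admin, Talk, db_session):
    """Hypothetical helper: attach TalkView to an existing flask_admin.Admin instance."""
    # ModelView(model, session, ...) is the standard flask_admin.contrib.sqla constructor.
    admin.add_view(TalkView(Talk, db_session, name='Talks'))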
{ "content_hash": "90cb9db7ba0bc6b8b0e79408c49e5c5d", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 92, "avg_line_length": 30.615384615384617, "alnum_prop": 0.6633165829145728, "repo_name": "Stark-Mountain/meetup-facebook-bot", "id": "adc458abe2e0e5d548e0a1042446c2ed4d3c4819", "size": "398", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "meetup_facebook_bot/views/TalkView.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1325" }, { "name": "Python", "bytes": "42414" } ], "symlink_target": "" }
from unittest import mock from unittest.mock import call from osc_lib import exceptions from osc_lib import utils from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes from openstackclient.volume.v1 import volume_transfer_request class TestTransfer(volume_fakes.TestVolumev1): def setUp(self): super().setUp() # Get a shortcut to the TransferManager Mock self.transfer_mock = self.app.client_manager.volume.transfers self.transfer_mock.reset_mock() # Get a shortcut to the VolumeManager Mock self.volumes_mock = self.app.client_manager.volume.volumes self.volumes_mock.reset_mock() class TestTransferAccept(TestTransfer): columns = ( 'id', 'name', 'volume_id', ) def setUp(self): super().setUp() self.volume_transfer = volume_fakes.create_one_transfer() self.data = ( self.volume_transfer.id, self.volume_transfer.name, self.volume_transfer.volume_id, ) self.transfer_mock.get.return_value = self.volume_transfer self.transfer_mock.accept.return_value = self.volume_transfer # Get the command object to test self.cmd = volume_transfer_request.AcceptTransferRequest( self.app, None) def test_transfer_accept(self): arglist = [ '--auth-key', 'key_value', self.volume_transfer.id, ] verifylist = [ ('transfer_request', self.volume_transfer.id), ('auth_key', 'key_value'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.transfer_mock.get.assert_called_once_with( self.volume_transfer.id, ) self.transfer_mock.accept.assert_called_once_with( self.volume_transfer.id, 'key_value', ) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_transfer_accept_no_option(self): arglist = [ self.volume_transfer.id, ] verifylist = [ ('transfer_request', self.volume_transfer.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.assertRaises( exceptions.CommandError, self.cmd.take_action, parsed_args, ) class TestTransferCreate(TestTransfer): volume = volume_fakes.create_one_volume() columns = ( 'auth_key', 'created_at', 'id', 'name', 'volume_id', ) def setUp(self): super().setUp() self.volume_transfer = volume_fakes.create_one_transfer( attrs={ 'volume_id': self.volume.id, 'auth_key': 'key', 'created_at': 'time', }, ) self.data = ( self.volume_transfer.auth_key, self.volume_transfer.created_at, self.volume_transfer.id, self.volume_transfer.name, self.volume_transfer.volume_id, ) self.transfer_mock.create.return_value = self.volume_transfer self.volumes_mock.get.return_value = self.volume # Get the command object to test self.cmd = volume_transfer_request.CreateTransferRequest( self.app, None) def test_transfer_create_without_name(self): arglist = [ self.volume.id, ] verifylist = [ ('volume', self.volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.transfer_mock.create.assert_called_once_with( self.volume.id, None) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) def test_transfer_create_with_name(self): arglist = [ '--name', self.volume_transfer.name, self.volume.id, ] verifylist = [ ('name', self.volume_transfer.name), ('volume', self.volume.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.transfer_mock.create.assert_called_once_with( self.volume.id, self.volume_transfer.name,) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data) class TestTransferDelete(TestTransfer): volume_transfers = 
volume_fakes.create_transfers(count=2) def setUp(self): super().setUp() self.transfer_mock.get = volume_fakes.get_transfers( self.volume_transfers, ) self.transfer_mock.delete.return_value = None # Get the command object to mock self.cmd = volume_transfer_request.DeleteTransferRequest( self.app, None) def test_transfer_delete(self): arglist = [ self.volume_transfers[0].id ] verifylist = [ ("transfer_request", [self.volume_transfers[0].id]) ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.transfer_mock.delete.assert_called_with( self.volume_transfers[0].id) self.assertIsNone(result) def test_delete_multiple_transfers(self): arglist = [] for v in self.volume_transfers: arglist.append(v.id) verifylist = [ ('transfer_request', arglist), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) calls = [] for v in self.volume_transfers: calls.append(call(v.id)) self.transfer_mock.delete.assert_has_calls(calls) self.assertIsNone(result) def test_delete_multiple_transfers_with_exception(self): arglist = [ self.volume_transfers[0].id, 'unexist_transfer', ] verifylist = [ ('transfer_request', arglist), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) find_mock_result = [self.volume_transfers[0], exceptions.CommandError] with mock.patch.object(utils, 'find_resource', side_effect=find_mock_result) as find_mock: try: self.cmd.take_action(parsed_args) self.fail('CommandError should be raised.') except exceptions.CommandError as e: self.assertEqual('1 of 2 volume transfer requests failed ' 'to delete', str(e)) find_mock.assert_any_call( self.transfer_mock, self.volume_transfers[0].id) find_mock.assert_any_call(self.transfer_mock, 'unexist_transfer') self.assertEqual(2, find_mock.call_count) self.transfer_mock.delete.assert_called_once_with( self.volume_transfers[0].id, ) class TestTransferList(TestTransfer): # The Transfers to be listed volume_transfers = volume_fakes.create_one_transfer() def setUp(self): super().setUp() self.transfer_mock.list.return_value = [self.volume_transfers] # Get the command object to test self.cmd = volume_transfer_request.ListTransferRequest(self.app, None) def test_transfer_list_without_argument(self): arglist = [] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) expected_columns = [ 'ID', 'Name', 'Volume', ] # confirming if all expected columns are present in the result. self.assertEqual(expected_columns, columns) datalist = (( self.volume_transfers.id, self.volume_transfers.name, self.volume_transfers.volume_id, ), ) # confirming if all expected values are present in the result. self.assertEqual(datalist, tuple(data)) # checking if proper call was made to list volume_transfers self.transfer_mock.list.assert_called_with( detailed=True, search_opts={'all_tenants': 0} ) def test_transfer_list_with_argument(self): arglist = [ "--all-projects" ] verifylist = [ ("all_projects", True) ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. 
columns, data = self.cmd.take_action(parsed_args) expected_columns = [ 'ID', 'Name', 'Volume', ] # confirming if all expected columns are present in the result. self.assertEqual(expected_columns, columns) datalist = (( self.volume_transfers.id, self.volume_transfers.name, self.volume_transfers.volume_id, ), ) # confirming if all expected values are present in the result. self.assertEqual(datalist, tuple(data)) # checking if proper call was made to list volume_transfers self.transfer_mock.list.assert_called_with( detailed=True, search_opts={'all_tenants': 1} ) class TestTransferShow(TestTransfer): columns = ( 'created_at', 'id', 'name', 'volume_id', ) def setUp(self): super().setUp() self.volume_transfer = volume_fakes.create_one_transfer( attrs={'created_at': 'time'} ) self.data = ( self.volume_transfer.created_at, self.volume_transfer.id, self.volume_transfer.name, self.volume_transfer.volume_id, ) self.transfer_mock.get.return_value = self.volume_transfer # Get the command object to test self.cmd = volume_transfer_request.ShowTransferRequest( self.app, None) def test_transfer_show(self): arglist = [ self.volume_transfer.id, ] verifylist = [ ('transfer_request', self.volume_transfer.id), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.transfer_mock.get.assert_called_once_with( self.volume_transfer.id) self.assertEqual(self.columns, columns) self.assertEqual(self.data, data)
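# For orientation (a comment added for clarity, grounded in the arglists exercised above):
# the command classes under test correspond to these `openstack` CLI invocations.
#
#   openstack volume transfer request accept --auth-key <key> <transfer-request-id>
#   openstack volume transfer request create [--name <name>] <volume>
#   openstack volume transfer request delete <transfer-request-id> [<transfer-request-id> ...]
#   openstack volume transfer request list [--all-projects]
#   openstack volume transfer request show <transfer-request-id>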
{ "content_hash": "998ee76e63382a7ea134c3800f3329c9", "timestamp": "", "source": "github", "line_count": 369, "max_line_length": 78, "avg_line_length": 30.02981029810298, "alnum_prop": 0.5842432993412147, "repo_name": "openstack/python-openstackclient", "id": "97700fbb7ffa3bd1ae758780be4c2e51e319f8a1", "size": "11649", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "openstackclient/tests/unit/volume/v1/test_transfer_request.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "923" }, { "name": "Python", "bytes": "5016301" }, { "name": "Shell", "bytes": "299" } ], "symlink_target": "" }
""" Aravind Veerappan BNFO 601 - Exam 2 Question 2. Protein BLAST """ import math from PAM import PAM class BLAST(object): FORWARD = 1 # These are class variables shared by all instances of the BLAST class BACKWARD = -1 ROW = (0, 1) COLUMN = (1, 0) def __init__(self, query=None, target=None, word_size=3, gap_open=-10, gap_extend=-4, threshold=10, PAM=None): self.query = query # This is the string corresponding to the query sequence self.target = target # This is the string corresponding to the target sequence self.word_size = word_size # Size of the seed word for initiating extensions self.word_score = None # something different required for PBLAST! self.gap_open = gap_open self.gap_extend = gap_extend self.querylen = len(query) self.targetlen = len(target) self.blast_table = {} # Our main dynamic programming table containing scores self.traceback_table = {} # A corresponding table for recording the tracebacks self.target_index = {} self.threshold = threshold # Neighborhood threshold value for scoring self.PAM = PAM # PAM table return def score(self): # This method performs BLAST scoring and returns a string describing the resulting alignment result_summary = [] # A list, for now, that will store results of the alignments if not self.target_index: # if this is the first time scoring we should index the target for i in xrange(len(self.target) - self.word_size + 1): word = self.target[i: i + self.word_size] if word in self.target_index: self.target_index[word].append(i) # A dict of lists is an efficient structure for this index. # The list items are word coordinates in the target. else: self.target_index[word] = [i] # print self.target_index ## First we must iterate through words in the query: query_position = 0 while query_position < self.querylen - self.word_size + 1: # print "Query position is", query_position query_word = self.query[query_position:query_position + self.word_size] # lookup scores for each AA pair from PAM table for target_word in self.target_index.keys(): score = 0 for i in range(len(target_word)): score += self.PAM[target_word[i], query_word[i]] # If the calculated score is higher than the neighborhood threshold value then extend the alignment # and set the starting word score equal to the calculated score if score > self.threshold: self.word_score = score for target_position in self.target_index[target_word]: print "Searching for seed", query_word, "at target position", target_position # print "Extending forward" forward_score, forward_extension_q, forward_extension_t = \ self._extend_alignment(query_position, target_position, self.FORWARD) # print "Extending backwards" backward_score, backward_extension_q, backward_extension_t = \ self._extend_alignment(query_position, target_position, self.BACKWARD) q_result = backward_extension_q[:-1] + query_word + forward_extension_q[1:] t_result = backward_extension_t[:-1] + query_word + forward_extension_t[1:] # Note that the last character of a backward extension, and the zeroth character of a forward # extension overlap with the query word and should therefore be discarded - thus the slice notation. score = forward_score + backward_score - self.word_score # We need to make sure that we don't double count the seed score! 
# calculate e-value # e_value = self.querylen * self.targetlen * math.e ** (math.log(1 / 4) * score) # calculate bit score # bit_score = (-math.log(1 / 4) * score - math.log(1)) / math.log(2) query_begin = query_position - len(backward_extension_q) + 2 target_begin = target_position - len(backward_extension_t) + 2 # result_summary.append((e_value, bit_score, score, q_result, t_result, query_begin, target_begin)) result_summary.append((score, q_result, t_result, query_begin, target_begin)) alignment_string = '\nAlignment had a score of ' + str(score) + ' and is:\n\nTarget:\t' + \ str(target_begin) + '\t' + str(t_result) + '\n\t\t\t' for k in xrange(len(t_result)): # t and q alignments should be the same length! if t_result[k] == q_result[k]: alignment_string += '|' # Only put a bar if the two characters are identical at this position else: alignment_string += ' ' # otherwise just insert a space alignment_string += '\nQuery:\t' + str(query_begin) + '\t' + str(q_result) + '\n' print alignment_string # The above statements just concatenate together a multi-line string that will correctly display # the best alignment when it is subsequently printed. query_position += 1 return result_summary def _extend_alignment(self, query_start, target_start, direction): """ This private method attempts to extend an alignment in the forward and backward direction depending on the value of the direction flag, which here takes the value 1 (for forward extension) or -1 for backward.For clarity these constants are defined by the class variables self.FORWARD and self.BACKWARD """ self.high_score = self.word_score # highest scores encountered so far will always initially be the word_score * match_reward self.high_q_pos = self.high_t_pos = 0 if direction == self.FORWARD: # We start with the 0,0 position representing the last character query_start += self.word_size - 1 # of the seed word for forward extensions. target_start += self.word_size - 1 # For backward extensions, leave it as it is (i.e. zeroth character) self.blast_table = dict() # The BLAST table is a dict of tuples. Each tuple represents a (query, target) position # this sparse representation will be much more efficient than using a 2D list self.blast_table[0, 0] = self.high_score # initialize the top left corner with the word score self.high_q_pos = 0 self.high_t_pos = 0 self.traceback_table[0, 0] = (1, 1) # There is no traceback path for the origin, but the program logic elsewhere dictates that we provide one cur_t_pos = 1 # we are going to score the edges first (top and left), which can *only* ever be gaps back # to the origin. i.e. the question of matching or not matching is completely irrelevant here. # We start by scoring the top edge, beginning with position 1.. 
cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open) # first one always a gap open while cur_score: # only keep going as long as we have non-zero values self.blast_table[(0, cur_t_pos)] = cur_score # only record non-zero values self.traceback_table[(0, cur_t_pos)] = (0, 1) # record a target gap in the traceback table cur_score = max(0, self.blast_table[(0, cur_t_pos)] + self.gap_extend) # any subsequent are extends cur_t_pos += 1 cur_t_pos = 0 # Now we do the same thing for the left edge as we just did for the top edge cur_q_pos = 1 cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open) # first one always a gap open while cur_score: # only keep going as long as we have non-zero values self.blast_table[(cur_q_pos, 0)] = cur_score # only record non-zero values self.traceback_table[(cur_q_pos, 0)] = (1, 0) # record a query gap in the traceback table cur_score = max(0, self.blast_table[(cur_q_pos, 0)] + self.gap_extend) cur_t_pos += 1 # print "blast table 0,0 is", self.blast_table[0, 0], "and high score is", self.high_score # alright, finished with edges. Note that high scores can NEVER occur in an edge so these were not considered. # Henceforth, however, we will need to think about this. cur_t_pos = 0 # Start at the first position cur_q_pos = 0 # Now we will score the table, proceeding according to the algorithm description: first incrementing along # the diagonal, then scoring the adjacent row, then the column below # Unlike Smith Waterman, the matrix is no longer of defined size, so we need to use while loops instead of for while True: # I think it's cleaner to affirmatively break out of this main loop. Too bad Python has no do-while cur_t_pos += 1 # Advance along the diagonal by incrementing cur_q_pos += 1 # Remember, these refer to coordinates in our table, not in the actual target or query # Probably we need to do some bounds checking here too with respect to absolute position in the query and # target similar to what is done in the _fill_in_row_or_column method # print "Beginning row starting at", cur_q_pos, cur_t_pos, "of the blast table" max_in_row = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start, target_start, direction, self.ROW) # print "Max in row was ", max_in_row # print "Beginning column starting at", cur_q_pos, cur_t_pos, "of the blast table" max_in_column = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start, target_start, direction, self.COLUMN) # print "Max in column was ", max_in_column if not max(max_in_row, max_in_column): break # If the maximum value we encounter in both the rows and columns is zero, we are done building # print "Finished building a matrix" best_q_alignment = [] # best partial alignment for the query sequence best_t_alignment = [] # best partial alignment for the target sequence ## Now we can go ahead and produce an output string corresponding to the best alignment cur_q_pos = self.high_q_pos # our approach is start at the high scoring box, and to trace our way back cur_t_pos = self.high_t_pos while cur_q_pos >= 0 and cur_t_pos >= 0 and self.blast_table.setdefault((cur_q_pos, cur_t_pos), 0): q_offset, t_offset = self.traceback_table[cur_q_pos, cur_t_pos] # unpack the offset tuples stored in the traceback table if q_offset: try: best_q_alignment.append(self.query[query_start + cur_q_pos * direction]) except IndexError: print "YO!", query_start, cur_q_pos, direction, query_start + cur_q_pos * direction print "Best_q_alignment", best_q_alignment quit() else: best_q_alignment.append('-') # if the value is a zero, we 
are gapping! if t_offset: best_t_alignment.append(self.target[target_start + cur_t_pos * direction]) else: best_t_alignment.append('-') # if the value is a zero, we are gapping, now the other way cur_q_pos -= q_offset # Note that we are subtracting positively valued offsets. cur_t_pos -= t_offset # This design choice makes later printing a traceback table a lot prettier. # Alternatively, we could have built our alignments by adding things at the beginning using statements like # best_t_alignment.insert(0,'-') etc. But in Python inserting items at the beginning of a list is much slower # than appending at the end. We are better off appending at the end, then reversing the whole mess when done. # print "Returning information about a partial alignment", self.high_score, best_q_alignment, best_t_alignment # flip 'em both once we are done, since we built them "end-to-beginning". Note that we don't need to flip # sequences corresponding to backwards extensions! if direction == self.FORWARD: best_q_alignment.reverse() best_t_alignment.reverse() return self.high_score, ''.join(best_q_alignment), ''.join(best_t_alignment) def _fill_in_row_or_column(self, cur_q_pos, cur_t_pos, query_start, target_start, direction, row_or_column): """This private method will fill in a row or column, depending on the tuple passed in the row_or_column argument Each row or column is filled in until a zero-valued result is obtained. """ # print "filling in a row or column" max_in_current_row_or_column = 0 q_add, t_add = row_or_column # These variables will control whether we fill in a row or a column. If the argument row_or_column = (0,1) # we will end filling in a row. If the argument is assigned (1,0) we will fill a column while True: query_position = query_start + cur_q_pos * direction # remember, direction here is either -1 or 1 target_position = target_start + cur_t_pos * direction # so is a positive or negative offset multiplier # query and target position variables here refer to the actual (absolute) position within the query # and target sequences respectively if (query_position < 0) or (target_position < 0): # print "Ran out of query or target sequence while attempting backwards extension" break # we can go no further if (query_position >= self.querylen) or (target_position >= self.targetlen): # print "Ran out of q or t while attempting forwards extension", query_position, target_position break # again, we can go no further q_char = self.query[query_position] t_char = self.target[target_position] # print "comparing", q_char, query_position, "to", t_char, target_position # use PAM table to find the increment increment = self.PAM[(q_char, t_char)] match_score = self.blast_table[(cur_q_pos - 1, cur_t_pos - 1)] + increment # improvement for later - decide whether to apply gap opening or gap extension penalties # for the moment just set gap increment to the gap_open value increment = self.gap_open # scores associated with gapping in either the target or query target_gap_score = self.blast_table.setdefault((cur_q_pos, cur_t_pos - 1), 0) + increment query_gap_score = self.blast_table.setdefault((cur_q_pos - 1, cur_t_pos), 0) + increment best_score = max( (0, (0, 0)), # a 0 score will never have a traceback (match_score, (1, 1)), # A match corresponds to a -1,-1 traceback (target_gap_score, (0, 1)), # A target gap corresponds to a 0, -1 traceback (query_gap_score, (1, 0)) # A query gap corresponds to a -1, 0 traceback ) if not best_score[0]: break self.blast_table[cur_q_pos, cur_t_pos] = best_score[0] # The first 
element in the tuple is the actual score to be recorded # print "Recording", best_score[0], "at position", cur_q_pos, cur_t_pos self.traceback_table[cur_q_pos, cur_t_pos] = best_score[1] # The traceback offsets associated with the score are in a tuple as described earlier if best_score[0] >= self.high_score: # This represents the "high road" approach. "low road" would simply be > self.high_score = best_score[0] # record the new high score self.high_q_pos = cur_q_pos # also record the i and j positions associated with that score self.high_t_pos = cur_t_pos if best_score[0] > max_in_current_row_or_column: max_in_current_row_or_column = best_score[0] # The maximum in a particular row or column is different from the overall high score! We actually # only care if this value is non-zero, as this will tell us that another iteration along the diagonal is # required. cur_t_pos += t_add # We end up adding either a zero or a one to these depending on cur_q_pos += q_add # whether we are filling in a row or a column, setting us up for the next iteration return max_in_current_row_or_column def __str__(self): """ This is a "special method attribute" overwriting the __str__ method defined in object. __str__ controls what the string representation of objects of the BLAST class will look like. It is invoked by print statements, which will print the return value. The bad news is that the routine here was more-or-less just lifted from the old Smith Waterman program. However, BLAST uses a fundamentally different sort of data structure for representing the blast and traceback tables. Can you fix this method so that it does something useful? """ lineout = 'Scoring table:\n\t' + '\t'.join(self.target) + '\n' # The above is just a fancy looking way to break the target string into tab-delimited individual characters for i in xrange(self.querylen): lineout += self.query[i] + "\t" for j in xrange(self.targetlen): lineout += str(self.blast_table[i, j]) + "\t" lineout += '\n' lineout += '\n\nTraceback table:\n\t' + '\t'.join(self.target) + '\n' for i in xrange(self.querylen): lineout += self.query[i] + "\t" for j in xrange(self.targetlen): lineout += ''.join([str(k) for k in self.traceback_table[i, j]]) + "\t" # just prettying up the traceback tuples lineout += '\n' return lineout # MAIN PROGRAM numbat = 'LVSMLESYVAAPDLILLDIMMPGMDGLELGGMDGGKPILT' quoll = 'DDMEVIGTAYNPDVLVLDIIMPHLDGLAVAAMEAGRPLIS' # calculate PAM120 matrix A = PAM(N=120) PAM1 = A.Build_PAMN() B = BLAST(numbat, quoll, PAM=PAM1) print B.score()
{ "content_hash": "47c2e2c9e43a5126ec3063605d4fb94e", "timestamp": "", "source": "github", "line_count": 402, "max_line_length": 120, "avg_line_length": 48.26865671641791, "alnum_prop": 0.5958049886621315, "repo_name": "aravindvrm/bnfo", "id": "233559cbbce20a6e666ce90f9a2459c195da1807", "size": "19404", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Veerappan_bnfo601_exam2/Veerappan_BLAST_prot.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "53038" } ], "symlink_target": "" }
""" List the name and path of every buildable project. """ from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function import re import operator import qibuild.parsers import qisys.parsers from qisys import ui def configure_parser(parser): """ Configure parser for this action. """ qisys.parsers.worktree_parser(parser) parser.add_argument("--names", action="store_true", dest="names", help="sort by names") parser.add_argument("--paths", action="store_false", dest="names", help="sort by path") parser.add_argument("pattern", metavar="PATTERN", nargs="?", help="pattern to be matched") parser.set_defaults(names=True) def do(args): """ Main method. """ build_worktree = qibuild.parsers.get_build_worktree(args) projects = build_worktree.build_projects if not projects: on_empty_worktree(build_worktree) return ui.info(ui.green, "qibuild projects in:", ui.blue, build_worktree.root) max_name = max(len(x.name) for x in projects) max_src = max(len(x.src) for x in projects) regex = args.pattern if args.pattern: regex = re.compile(regex) if args.names: projects = sorted(projects, key=operator.attrgetter("name")) else: projects = sorted(projects, key=operator.attrgetter("src")) for project in projects: if args.names: items = (project.name.ljust(max_name + 2), project.path) else: items = (project.src.ljust(max_src + 2), project.name) if not regex or regex.search(items[0]) or regex.search(items[1]): ui.info(ui.green, " * ", ui.blue, items[0], ui.reset, items[1]) def on_empty_worktree(worktree): """ On Empty Wortree """ mess = """The worktree in {worktree.root} does not contain any buildable project. Please use: * `qisrc init` to fetch some sources * `qisrc create` to create a new qibuild project from scratch * `qibuild convert` to convert an exixting CMake project to a qibuild project """ ui.warning(mess.format(worktree=worktree))
{ "content_hash": "632344a0448d121d13f20d937a9fb824", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 75, "avg_line_length": 34.666666666666664, "alnum_prop": 0.6433150183150184, "repo_name": "aldebaran/qibuild", "id": "8291df2280e12140ae62b554cab7b0d1380a3935", "size": "2381", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/qibuild/actions/list.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "6892" }, { "name": "C++", "bytes": "23130" }, { "name": "CMake", "bytes": "292637" }, { "name": "Makefile", "bytes": "755" }, { "name": "Nix", "bytes": "563" }, { "name": "Python", "bytes": "1581825" }, { "name": "SWIG", "bytes": "306" }, { "name": "Shell", "bytes": "888" } ], "symlink_target": "" }
import pika import logging import traceback import json import se.api from bs4 import BeautifulSoup class Dullu: RABBITMQ_QUEUE_URL_NAME = 'dullu_url_queue' RABBITMQ_QUEUE_ENTITY_NAME = 'dull_entity_queue' # Entity queue, up for change though. Other side not coded. JSON_KEY__ENTITYQ__ID = 'Id' JSON_KEY__ENTITYQ__TYPE = 'PostTypeId' JSON_KEY__ENTITYQ__BODY = 'Body' # URL queue JSON_KEY__URLQ__ENTITY_ID = 'entity_id' JSON_KEY__URLQ__ENTITY_TYPE = 'entity_type' JSON_KEY__URLQ__ATTEMPTS = 'attempts' JSON_KEY__URLQ__URL = 'url' JSON_KEY__URLQ__LAST_TEST_CODE = 'last_code' JSON_KEY__URLQ__LAST_TEST_STAMP = 'last_stamp' JSON_KEY__URLQ__LAST_CHECKBOT = 'last_checker' def __init__(self, broker_address='localhost'): self.broker_address = broker_address def callback_scan_entity_for_urls(self, ch, method, properties, body): logging.debug("Received entity from broker.") try: json_dict = json.loads(body.decode()) except json.decoder.JSONDecodeError as jsonde: logging.error("Oops! Couldn't decode that request. Caught {0}. Body = [{1}]. ".format(jsonde, body)) ch.basic_reject(delivery_tag=method.delivery_tag, requeue=False) return if not all(k in json_dict for k in (self.JSON_KEY__ENTITYQ__ID, self.JSON_KEY__ENTITYQ__TYPE, self.JSON_KEY__ENTITYQ__BODY)): logging.error("Rejecting request. Missing information ({k1}:{v1},{k2}:{v2},{k3}:{v3}).".format( k1=self.JSON_KEY__ENTITYQ__ID, k2=self.JSON_KEY__ENTITYQ__TYPE, k3=self.JSON_KEY__ENTITYQ__BODY, v1=self.JSON_KEY__ENTITYQ__ID in json_dict, v2=self.JSON_KEY__ENTITYQ__TYPE in json_dict, v3=self.JSON_KEY__ENTITYQ__BODY in json_dict)) ch.basic_reject(delivery_tag=method.delivery_tag, requeue=False) return """ We currently only process certain different types of entities. questions/answers (hopefully comments and docs) in the future. We may need special handling for them depending on how eridu ends up working. """ try: entity_type = se.api.PostType(json_dict[self.JSON_KEY__ENTITYQ__TYPE]) except ValueError as ve: logging.error("Received an entity we're unwilling to process [{0}]. ".format(json_dict[self.JSON_KEY__ENTITYQ__TYPE])) ch.basic_reject(delivery_tag=method.delivery_tag, requeue=False) return urls = get_all_urls(json_dict[self.JSON_KEY__ENTITYQ__BODY]) url_dict = {self.JSON_KEY__URLQ__ENTITY_ID: json_dict[self.JSON_KEY__ENTITYQ__ID], self.JSON_KEY__URLQ__ENTITY_TYPE: json_dict[self.JSON_KEY__ENTITYQ__TYPE]} for url in urls: url_dict[self.JSON_KEY__URLQ__URL] = url self.channel.basic_publish(exchange="", routing_key=Dullu.RABBITMQ_QUEUE_URL_NAME, body=json.dumps(url_dict)) ch.basic_ack(delivery_tag=method.delivery_tag) def run(self): """ This will set up the link bot and connect to the broker. If the channel doesn't yet exist, it will create it on the broker (shouldn't actually happen unless someone runs things in the wrong order). channel.start_consuming is a blocking function. 
:return: """ logging.info("Attempting to connect to broker at: {0}.".format(self.broker_address)) connection = pika.BlockingConnection(pika.ConnectionParameters(self.broker_address)) try: self.channel = connection.channel() # Connect to url queue self.channel.queue_declare(queue=self.RABBITMQ_QUEUE_URL_NAME, durable=True) # Connect to entity queue self.channel.queue_declare(queue=self.RABBITMQ_QUEUE_ENTITY_NAME, durable=True) self.channel.basic_consume(self.callback_scan_entity_for_urls, queue=self.RABBITMQ_QUEUE_ENTITY_NAME, no_ack=False) self.channel.basic_qos(prefetch_count=1) logging.info("Ready to start consuming") self.channel.start_consuming() except Exception as e: logging.warning("Exception detected.") logging.error(traceback.format_exc()) raise e finally: logging.info("Dullu instance terminating...") connection.close() logging.debug("Fin.") def get_all_urls(s): """ Find all the urls in the text and stick them in a list for later processing. :param s: Input string :return: List of links """ soup = BeautifulSoup(s, "html.parser") return [link.get('href') for link in soup.find_all('a')]
{ "content_hash": "e1c256ee5521c740de233f5cde4dfa6f", "timestamp": "", "source": "github", "line_count": 119, "max_line_length": 133, "avg_line_length": 40.61344537815126, "alnum_prop": 0.6184564452720878, "repo_name": "IntrepidBrit/dullu", "id": "d35f0427b25d42ca20f116b3df15b0a9ff9447d3", "size": "4857", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dullu/dullu.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "101" }, { "name": "HTML", "bytes": "497" }, { "name": "Makefile", "bytes": "2264" }, { "name": "Python", "bytes": "42370" } ], "symlink_target": "" }
from subtlenet import config from subtlenet.generators import gen as generator config.limit = 10 generator.truncate = 7
{ "content_hash": "89ec80718b71ffe3228ef800a5b2036c", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 49, "avg_line_length": 30, "alnum_prop": 0.825, "repo_name": "sidnarayanan/BAdNet", "id": "716314ab85d45e35aec2fa51c7cc29ec8856acb3", "size": "121", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "train/gen/adv/models/particles/v4_Adam_trunc7_limit10/setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "326584" }, { "name": "Shell", "bytes": "900" } ], "symlink_target": "" }
"""K-prototypes clustering""" # Author: Nico de Vos <njdevos@gmail.com> # License: MIT from collections import defaultdict import numpy as np from .KModes import KModes def euclidean_dissim(a, b): """Euclidean distance dissimilarity function""" return np.sum((a - b) ** 2, axis=1) def move_point_num(point, ipoint, to_clust, from_clust, cl_attr_sum, membership): """Move point between clusters, numerical attributes.""" membership[to_clust, ipoint] = 1 membership[from_clust, ipoint] = 0 # Update sum of attributes in cluster. for iattr, curattr in enumerate(point): cl_attr_sum[to_clust][iattr] += curattr cl_attr_sum[from_clust][iattr] -= curattr return cl_attr_sum, membership def _labels_cost(Xnum, Xcat, centroids, gamma): """Calculate labels and cost function given a matrix of points and a list of centroids for the k-prototypes algorithm. """ npoints = Xnum.shape[0] cost = 0. labels = np.empty(npoints, dtype='int64') for ipoint in range(npoints): # Numerical cost = sum of Euclidean distances num_costs = euclidean_dissim(centroids[0], Xnum[ipoint]) cat_costs = KModes.matching_dissim(centroids[1], Xcat[ipoint]) # Gamma relates the categorical cost to the numerical cost. tot_costs = num_costs + gamma * cat_costs clust = np.argmin(tot_costs) labels[ipoint] = clust cost += tot_costs[clust] return labels, cost def _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_attr_freq, membership, gamma): """Single iteration of the k-prototypes algorithm""" moves = 0 for ipoint in range(Xnum.shape[0]): clust = np.argmin( euclidean_dissim(centroids[0], Xnum[ipoint]) + gamma * KModes.matching_dissim(centroids[1], Xcat[ipoint])) if membership[clust, ipoint]: # Point is already in its right place. continue # Move point, and update old/new cluster frequencies and centroids. moves += 1 old_clust = np.argwhere(membership[:, ipoint])[0][0] cl_attr_sum, membership = move_point_num( Xnum[ipoint], ipoint, clust, old_clust, cl_attr_sum, membership) cl_attr_freq, membership = KModes.move_point_cat( Xcat[ipoint], ipoint, clust, old_clust, cl_attr_freq, membership) # Update new and old centroids by choosing mean for numerical # and mode for categorical attributes. for iattr in range(len(Xnum[ipoint])): for curc in (clust, old_clust): if sum(membership[curc, :]): centroids[0][curc, iattr] = \ cl_attr_sum[curc, iattr] / sum(membership[curc, :]) else: centroids[0][curc, iattr] = 0. for iattr in range(len(Xcat[ipoint])): for curc in (clust, old_clust): centroids[1][curc, iattr] = \ KModes.get_max_value_key(cl_attr_freq[curc][iattr]) # In case of an empty cluster, reinitialize with a random point # from largest cluster. if sum(membership[old_clust, :]) == 0: from_clust = membership.sum(axis=1).argmax() choices = \ [ii for ii, ch in enumerate(membership[from_clust, :]) if ch] rindx = np.random.choice(choices) cl_attr_freq, membership = move_point_num( Xnum[rindx], rindx, old_clust, from_clust, cl_attr_sum, membership) cl_attr_freq, membership = KModes.move_point_cat( Xcat[rindx], rindx, old_clust, from_clust, cl_attr_freq, membership) return centroids, moves def k_prototypes(X, n_clusters, gamma, init, n_init, max_iter, verbose): """k-prototypes algorithm""" assert len(X) == 2, "X should be a list of Xnum and Xcat arrays" # List where [0] = numerical part of centroid and # [1] = categorical part. Same for centroids. Xnum, Xcat = X # Convert to numpy arrays, if needed. 
Xnum = np.asanyarray(Xnum) Xcat = np.asanyarray(Xcat) nnumpoints, nnumattrs = Xnum.shape ncatpoints, ncatattrs = Xcat.shape assert nnumpoints == ncatpoints,\ "Uneven number of numerical and categorical points" npoints = nnumpoints assert n_clusters < npoints, "More clusters than data points?" # Estimate a good value for gamma, which determines the weighing of # categorical values in clusters (see Huang [1997]). if gamma is None: gamma = 0.5 * Xnum.std() all_centroids = [] all_labels = [] all_costs = [] for init_no in range(n_init): # For numerical part of initialization, we don't have a guarantee # that there is not an empty cluster, so we need to retry until # there is none. while True: # _____ INIT _____ if verbose: print("Init: initializing centroids") if init == 'Huang': centroids = KModes.init_huang(Xcat, n_clusters) elif init == 'Cao': centroids = KModes.init_cao(Xcat, n_clusters) elif init == 'random': seeds = np.random.choice(range(npoints), n_clusters) centroids = Xcat[seeds] elif hasattr(init, '__array__'): centroids = init else: raise NotImplementedError # Numerical is initialized by drawing from normal distribution, # categorical following the k-modes methods. meanX = np.mean(Xnum, axis=0) stdX = np.std(Xnum, axis=0) centroids = [meanX + np.random.randn(n_clusters, nnumattrs) * stdX, centroids] if verbose: print("Init: initializing clusters") membership = np.zeros((n_clusters, npoints), dtype='int64') # Keep track of the sum of attribute values per cluster so that we # can do k-means on the numerical attributes. cl_attr_sum = np.zeros((n_clusters, nnumattrs), dtype='float') # cl_attr_freq is a list of lists with dictionaries that contain # the frequencies of values per cluster and attribute. cl_attr_freq = [[defaultdict(int) for _ in range(ncatattrs)] for _ in range(n_clusters)] for ipoint in range(npoints): # Initial assignment to clusters clust = np.argmin( euclidean_dissim(centroids[0], Xnum[ipoint]) + gamma * KModes.matching_dissim(centroids[1], Xcat[ipoint])) membership[clust, ipoint] = 1 # Count attribute values per cluster. for iattr, curattr in enumerate(Xnum[ipoint]): cl_attr_sum[clust, iattr] += curattr for iattr, curattr in enumerate(Xcat[ipoint]): cl_attr_freq[clust][iattr][curattr] += 1 # If no empty clusters, then consider initialization finalized. if membership.sum(axis=1).min() > 0: break # Perform an initial centroid update. for ik in range(n_clusters): for iattr in range(nnumattrs): centroids[0][ik, iattr] = \ cl_attr_sum[ik, iattr] / sum(membership[ik, :]) for iattr in range(ncatattrs): centroids[1][ik, iattr] = \ KModes.get_max_value_key(cl_attr_freq[ik][iattr]) # _____ ITERATION _____ if verbose: print("Starting iterations...") itr = 0 converged = False cost = np.Inf while itr <= max_iter and not converged: itr += 1 centroids, moves = _k_prototypes_iter( Xnum, Xcat, centroids, cl_attr_sum, cl_attr_freq, membership, gamma) # All points seen in this iteration labels, ncost = \ _labels_cost(Xnum, Xcat, centroids, gamma) converged = (moves == 0) or (ncost >= cost) cost = ncost if verbose: print("Run: {}, iteration: {}/{}, moves: {}, ncost: {}" .format(init_no + 1, itr, max_iter, moves, ncost)) # Store results of current run. all_centroids.append(centroids) all_labels.append(labels) all_costs.append(cost) best = np.argmin(all_costs) if n_init > 1 and verbose: print("Best run was number {}".format(best + 1)) # Note: return gamma in case it was automatically determined. 
return all_centroids[best], all_labels[best], all_costs[best], gamma class KPrototypes(KModes): """k-protoypes clustering algorithm for mixed numerical/categorical data. Parameters ----------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. gamma : float, default: None Weighing factor that determines relative importance of numerical vs. categorical attributes (see discussion in Huang [1997]). By default, automatically calculated from data. max_iter : int, default: 300 Maximum number of iterations of the k-modes algorithm for a single run. n_init : int, default: 10 Number of time the k-modes algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of cost. init : {'Huang', 'Cao', 'random' or an ndarray} Method for initialization: 'Huang': Method in Huang [1997, 1998] 'Cao': Method in Cao et al. [2009] 'random': choose k observations (rows) at random from data for the initial centroids. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centroids. verbose : boolean, optional Verbosity mode. Attributes ---------- cluster_centroids_ : array, [n_clusters, n_features] Categories of cluster centroids labels_ : Labels of each point cost_ : float Clustering cost, defined as the sum distance of all points to their respective cluster centroids. Notes ----- See: Huang, Z.: Extensions to the k-modes algorithm for clustering large data sets with categorical values, Data Mining and Knowledge Discovery 2(3), 1998. """ def __init__(self, n_clusters=8, gamma=None, init='Huang', n_init=10, max_iter=100, verbose=0): super(KPrototypes, self).__init__(n_clusters, init, n_init, max_iter, verbose) self.gamma = gamma def fit(self, X): """Compute k-prototypes clustering. Parameters ---------- X : list of array-like, shape=[[n_num_samples, n_features], [n_cat_samples, n_features]] """ # If self.gamma is None, gamma will be automatically determined from # the data. The function below returns its value. self.cluster_centroids_, self.labels_, self.cost_, self.gamma = \ k_prototypes(X, self.n_clusters, self.gamma, self.init, self.n_init, self.max_iter, self.verbose) return self def predict(self, X): """Predict the closest cluster each sample in X belongs to. Parameters ---------- X : list of array-like, shape=[[n_num_samples, n_features], [n_cat_samples, n_features]] Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. """ assert hasattr(self, 'cluster_centroids_'), "Model not yet fitted." return _labels_cost(X[0], X[1], self.cluster_centroids_, self.gamma)[0]
{ "content_hash": "182d4526a7b13370b17ca146738e29a2", "timestamp": "", "source": "github", "line_count": 321, "max_line_length": 79, "avg_line_length": 37.850467289719624, "alnum_prop": 0.5761316872427984, "repo_name": "bejar/kemlglearn", "id": "3e71c6288384ff27bbed3688f266054c140c612a", "size": "12151", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "kemlglearn/cluster/KPrototypes.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "126378" } ], "symlink_target": "" }
""" sentry.search.django.constants ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import SORT_CLAUSES = { 'priority': 'sentry_groupedmessage.score', 'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)::int', 'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)::int', 'freq': 'sentry_groupedmessage.times_seen', } SQLITE_SORT_CLAUSES = SORT_CLAUSES.copy() SQLITE_SORT_CLAUSES.update({ 'date': "cast((julianday(sentry_groupedmessage.last_seen) - 2440587.5) * 86400.0 as INTEGER)", 'new': "cast((julianday(sentry_groupedmessage.first_seen) - 2440587.5) * 86400.0 as INTEGER)", }) MYSQL_SORT_CLAUSES = SORT_CLAUSES.copy() MYSQL_SORT_CLAUSES.update({ 'date': 'UNIX_TIMESTAMP(sentry_groupedmessage.last_seen)', 'new': 'UNIX_TIMESTAMP(sentry_groupedmessage.first_seen)', }) ORACLE_SORT_CLAUSES = SORT_CLAUSES.copy() ORACLE_SORT_CLAUSES.update({ 'date': "(cast(sentry_groupedmessage.last_seen as date)-TO_DATE('01/01/1970 00:00:00', 'MM-DD-YYYY HH24:MI:SS')) * 24 * 60 * 60", 'new': "(cast(sentry_groupedmessage.first_seen as date)-TO_DATE('01/01/1970 00:00:00', 'MM-DD-YYYY HH24:MI:SS')) * 24 * 60 * 60", }) MSSQL_SORT_CLAUSES = SORT_CLAUSES.copy() MSSQL_SORT_CLAUSES.update({ 'date': "DATEDIFF(s, '1970-01-01T00:00:00', sentry_groupedmessage.last_seen)", 'new': "DATEDIFF(s, '1970-01-01T00:00:00', sentry_groupedmessage.first_seen)", }) MSSQL_ENGINES = set(['django_pytds', 'sqlserver_ado', 'sql_server.pyodbc'])
{ "content_hash": "5d9f4ed46369889e537637c280986838", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 133, "avg_line_length": 38.61904761904762, "alnum_prop": 0.6794081381011098, "repo_name": "mitsuhiko/sentry", "id": "d07708d74a015bdd0bd4f0411ae69587e4b956d6", "size": "1622", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "src/sentry/search/django/constants.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "171113" }, { "name": "Python", "bytes": "877258" } ], "symlink_target": "" }
import elasticsearch_dsl as es from pyjobsweb.lib.elasticsearch_ import compute_index_name class Geocomplete(es.DocType): class Meta: index = 'geocomplete' doc_type = 'geoloc-entry' french_elision = es.token_filter( 'french_elision', type='elision', articles_case=True, articles=[ 'l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu', 'quoiqu', 'lorsqu', 'puisqu' ] ) geocompletion_ngram_filter = es.token_filter( 'geocompletion_ngram', type='edgeNGram', min_gram=1, max_gram=50, side='front' ) town_filter = es.token_filter( 'town_filter', type='pattern_replace', pattern=' ', replacement='-' ) geocompletion_index_tokenizer = es.tokenizer( 'geocompletion_index_tokenizer', type='pattern', pattern='@' ) geocompletion_index_analyzer = es.analyzer( 'geocompletion_index_analyzer', type='custom', tokenizer=geocompletion_index_tokenizer, filter=[ 'lowercase', 'asciifolding', french_elision, town_filter, geocompletion_ngram_filter ] ) geocompletion_search_analyzer = es.analyzer( 'geocompletion_search_analyzer', type='custom', tokenizer=geocompletion_index_tokenizer, filter=[ 'lowercase', 'asciifolding', town_filter, french_elision ] ) name = es.String( index='analyzed', analyzer=geocompletion_index_analyzer, search_analyzer=geocompletion_search_analyzer, fields=dict(raw=es.String(index='not_analyzed')) ) complement = es.String(index='not_analyzed') postal_code_ngram_filter = es.token_filter( 'postal_code_ngram', type='edgeNGram', min_gram=1, max_gram=5, side='front' ) postal_code_index_analyzer = es.analyzer( 'postal_code_index_analyzer', type='custom', tokenizer='standard', filter=[ postal_code_ngram_filter ] ) postal_code_search_analyzer = es.analyzer( 'postal_code_search_analyzer', type='custom', tokenizer='standard' ) postal_code = es.String( index='analyzed', analyzer=postal_code_index_analyzer, search_analyzer=postal_code_search_analyzer, fields=dict(raw=es.String(index='not_analyzed')) ) geolocation = es.GeoPoint() weight = es.Float() def __init__(self, meta=None, **kwargs): super(Geocomplete, self).__init__(meta, **kwargs) if self.index in compute_index_name(self.index): self._doc_type.index = compute_index_name(self.index) @property def index(self): return self._doc_type.index @property def doc_type(self): return self._doc_type.name
{ "content_hash": "b04c79eb37333522f676e96f3719fb81", "timestamp": "", "source": "github", "line_count": 123, "max_line_length": 65, "avg_line_length": 24.504065040650406, "alnum_prop": 0.559721300597213, "repo_name": "pyjobs/web", "id": "30dc64bbb6a95ef5d8ec283a235b1cca8e6523f7", "size": "3038", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyjobs_web/pyjobsweb/model/elasticsearch_model/geocomplete.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "39378" }, { "name": "JavaScript", "bytes": "1731" }, { "name": "Makefile", "bytes": "52181" }, { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "219209" }, { "name": "Shell", "bytes": "2815" } ], "symlink_target": "" }
from async import async import redis_wrap import urllib2 from cron import has_cron, add_cron @async(queue='clock') def bgrewriteaof(): """ 将redis的AOF文件压缩 """ redis = redis_wrap.get_redis() redis.bgrewriteaof() def set_bgrewriteaof(): # 自动定时压缩reids if not has_cron(bgrewriteaof): add_cron({'hour':1}, bgrewriteaof) @async(queue='urlopen') def async_urlopen(url, params=None, timeout=120): try: # 将unicode转换成utf8 urllib2.urlopen(url.encode('utf-8'), params, timeout=timeout) except IOError: raise IOError('Could not connected to %s' % url)
{ "content_hash": "2639b692d44080636afd46db03472c9d", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 69, "avg_line_length": 25.083333333333332, "alnum_prop": 0.6710963455149501, "repo_name": "everydo/ztq", "id": "cf1f79795dd263f0f748cfae221c97602da80acf", "size": "648", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "ztq_core/ztq_core/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "6856" }, { "name": "HTML", "bytes": "24310" }, { "name": "JavaScript", "bytes": "367" }, { "name": "Python", "bytes": "110075" }, { "name": "Visual Basic", "bytes": "646" } ], "symlink_target": "" }
import json from vnpy.event import Event from vnpy.rpc import RpcServer from vnpy.trader.vtFunction import getJsonPath from vnpy.trader.vtObject import VtLogData EVENT_RTDSERVICE_LOG = 'eRtdServiceLog' ######################################################################## class RtdServer(RpcServer): """RTD服务器,直接继承RPC服务""" #---------------------------------------------------------------------- def __init__(self, repAddress, pubAddress): """Constructor""" super(RtdServer, self).__init__(repAddress, pubAddress) self.useJson() #################################################5####################### class RtdEngine(object): """RTD引擎""" ENGINE_NAME = 'RTD' settingFileName = 'RTD_setting.json' settingfilePath = getJsonPath(settingFileName, __file__) #---------------------------------------------------------------------- def __init__(self, mainEngine, eventEngine): """Constructor""" self.mainEngine = mainEngine self.eventEngine = eventEngine self.server = None self.eventTypeDict = {} # key:事件类型,value:键 self.loadSetting() self.registerEvent() #---------------------------------------------------------------------- def loadSetting(self): """读取配置""" with open(self.settingfilePath) as f: d = json.load(f) repAddress = d['repAddress'] pubAddress = d['pubAddress'] self.server = RtdServer(repAddress, pubAddress) self.eventTypeDict = d['eventType'] #---------------------------------------------------------------------- def registerEvent(self): """注册事件监听""" for eventType in self.eventTypeDict.keys(): self.eventEngine.register(eventType, self.processDataEvent) #---------------------------------------------------------------------- def processDataEvent(self, event): """处理数据事件""" if not self.server: return data = event.dict_['data'] d = data.__dict__ keyname = self.eventTypeDict[event.type_] # 获取数据标识用的名称 d["key"] = d[keyname] self.server.publish(event.type_, d) self.writeLog(u'发布数据,类型%s,内容%s' %(event.type_, str(d))) #---------------------------------------------------------------------- def writeLog(self, content): """记录日志""" log = VtLogData() log.logContent = content log.gatewayName = self.ENGINE_NAME event = Event(EVENT_RTDSERVICE_LOG) event.dict_['data'] = log self.eventEngine.put(event) #---------------------------------------------------------------------- def stop(self): """停止""" pass
{ "content_hash": "269a863d05e1d4da26c745c9d9fc91aa", "timestamp": "", "source": "github", "line_count": 96, "max_line_length": 75, "avg_line_length": 30.375, "alnum_prop": 0.42421124828532236, "repo_name": "wisfern/vnpy", "id": "3d4463e924a3ece5be6bf72ca0635de4c80a5e9d", "size": "3059", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "vnpy/trader/app/rtdService/rtdEngine.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "341" }, { "name": "C", "bytes": "3151559" }, { "name": "C++", "bytes": "8866606" }, { "name": "CMake", "bytes": "44564" }, { "name": "HTML", "bytes": "807" }, { "name": "Makefile", "bytes": "99693" }, { "name": "Objective-C", "bytes": "22505" }, { "name": "PHP", "bytes": "4107" }, { "name": "Python", "bytes": "5367161" }, { "name": "Shell", "bytes": "3722" } ], "symlink_target": "" }
"""Implementation of the metadata abstraction for gRPC Asyncio Python.""" from typing import List, Tuple, Iterator, Any, Text, Union from collections import abc, OrderedDict MetadataKey = Text MetadataValue = Union[str, bytes] class Metadata(abc.Mapping): """Metadata abstraction for the asynchronous calls and interceptors. The metadata is a mapping from str -> List[str] Traits * Multiple entries are allowed for the same key * The order of the values by key is preserved * Getting by an element by key, retrieves the first mapped value * Supports an immutable view of the data * Allows partial mutation on the data without recreating the new object from scratch. """ def __init__(self, *args: Tuple[MetadataKey, MetadataValue]) -> None: self._metadata = OrderedDict() for md_key, md_value in args: self.add(md_key, md_value) def add(self, key: MetadataKey, value: MetadataValue) -> None: self._metadata.setdefault(key, []) self._metadata[key].append(value) def __len__(self) -> int: """Return the total number of elements that there are in the metadata, including multiple values for the same key. """ return sum(map(len, self._metadata.values())) def __getitem__(self, key: MetadataKey) -> MetadataValue: """When calling <metadata>[<key>], the first element of all those mapped for <key> is returned. """ try: return self._metadata[key][0] except (ValueError, IndexError) as e: raise KeyError("{0!r}".format(key)) from e def __setitem__(self, key: MetadataKey, value: MetadataValue) -> None: """Calling metadata[<key>] = <value> Maps <value> to the first instance of <key>. """ if key not in self: self._metadata[key] = [value] else: current_values = self.get_all(key) self._metadata[key] = [value, *current_values[1:]] def __delitem__(self, key: MetadataKey) -> None: """``del metadata[<key>]`` deletes the first mapping for <key>.""" current_values = self.get_all(key) if not current_values: raise KeyError(repr(key)) self._metadata[key] = current_values[1:] def delete_all(self, key: MetadataKey) -> None: """Delete all mappings for <key>.""" del self._metadata[key] def __iter__(self) -> Iterator[Tuple[MetadataKey, MetadataValue]]: for key, values in self._metadata.items(): for value in values: yield (key, value) def get_all(self, key: MetadataKey) -> List[MetadataValue]: """For compatibility with other Metadata abstraction objects (like in Java), this would return all items under the desired <key>. """ return self._metadata.get(key, []) def set_all(self, key: MetadataKey, values: List[MetadataValue]) -> None: self._metadata[key] = values def __contains__(self, key: MetadataKey) -> bool: return key in self._metadata def __eq__(self, other: Any) -> bool: if not isinstance(other, self.__class__): return NotImplemented # pytype: disable=bad-return-type return self._metadata == other._metadata def __repr__(self) -> str: view = tuple(self) return "{0}({1!r})".format(self.__class__.__name__, view)
{ "content_hash": "fea727779a1102a105ca5d5c0e3d7b63", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 93, "avg_line_length": 37.47826086956522, "alnum_prop": 0.6102088167053364, "repo_name": "jboeuf/grpc", "id": "ff970106748dc5c84df6729e8224d726260a030a", "size": "4025", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/python/grpcio/grpc/experimental/aio/_metadata.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "34391" }, { "name": "C", "bytes": "2506485" }, { "name": "C#", "bytes": "2056447" }, { "name": "C++", "bytes": "31737951" }, { "name": "CMake", "bytes": "678007" }, { "name": "CSS", "bytes": "1519" }, { "name": "DTrace", "bytes": "147" }, { "name": "Dockerfile", "bytes": "157798" }, { "name": "Go", "bytes": "34791" }, { "name": "HTML", "bytes": "14" }, { "name": "Java", "bytes": "6907" }, { "name": "JavaScript", "bytes": "61459" }, { "name": "M4", "bytes": "50995" }, { "name": "Makefile", "bytes": "1003022" }, { "name": "Mako", "bytes": "5629" }, { "name": "Objective-C", "bytes": "597466" }, { "name": "Objective-C++", "bytes": "77713" }, { "name": "PHP", "bytes": "474525" }, { "name": "PowerShell", "bytes": "621" }, { "name": "Python", "bytes": "2949502" }, { "name": "Ruby", "bytes": "1029888" }, { "name": "Shell", "bytes": "472145" }, { "name": "Swift", "bytes": "3516" }, { "name": "TSQL", "bytes": "4901" }, { "name": "XSLT", "bytes": "9673" } ], "symlink_target": "" }
from SNMApp import ObservableList, SnapshotBranch, Snapshot from CDPs import SphereCDP, BoxCDP, PlaneCDP, CapsuleCDP from RigidBody import RigidBody, ArticulatedRigidBody from ArticulatedFigure import ArticulatedFigure, Character from Joints import BallInSocketJoint, UniversalJoint, HingeJoint, StiffJoint from SimBiController import SimBiController, ControlParams, SimBiConState, ExternalForce, Trajectory, TrajectoryComponent, LinearBalanceFeedback, IKVMCController, DanceController, WalkController
{ "content_hash": "b1170aee1738fd2b5f02d32206bd9a22", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 194, "avg_line_length": 84.33333333333333, "alnum_prop": 0.8695652173913043, "repo_name": "Banbury/cartwheel-3d", "id": "f587d9353e985f91513c82a0a889b323479b0d17", "size": "506", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Python/App/Proxys/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "978245" }, { "name": "C++", "bytes": "6093969" }, { "name": "CSS", "bytes": "9835" }, { "name": "GLSL", "bytes": "76094" }, { "name": "HTML", "bytes": "955553" }, { "name": "Objective-C", "bytes": "69867" }, { "name": "Python", "bytes": "753174" }, { "name": "RenderScript", "bytes": "10262" } ], "symlink_target": "" }
import http.server import threading import socket import tdl import hunting.level.parser as parser import hunting.level.encoder as encoder from hunting.display.render import Renderer import hunting.sim.runner as runner import hunting.resources as resources UTF_8 = 'utf-8' def get_random_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 0)) port = s.getsockname()[1] s.close() return port def shutdown_server_from_new_thread(server): def kill_server(): server.shutdown() killer = threading.Thread(target=kill_server) killer.start() class HelloWorldHandler(http.server.BaseHTTPRequestHandler): def hello_world(self): self.send_response(200) self.send_header('content-type', 'text/plain') self.end_headers() payload = bytes('Hello World!', UTF_8) self.wfile.write(payload) def goodbye(self): self.send_response(200) self.end_headers() self.wfile.write(bytes('Shutting down!\n', UTF_8)) shutdown_server_from_new_thread(self.server) def what(self): self.send_response(200) self.end_headers() self.wfile.write(bytes("I don't know what that is!", UTF_8)) def test_vis(self, file_path): full_path = resources.get_full_path(file_path) level = parser.parse_level(full_path) runner.run_level(level) main_console = tdl.init(level.width, level.height, 'TDL Test') scratch_level = parser.parse_level(full_path) renderer = Renderer(main_console, level.width, level.height) renderer.render_all(level=scratch_level) for event in level.log.events: renderer.render_event(level=scratch_level, event=event) main_console.__del__() # Crude, but this whole thing is crude. self.send_response(200) self.end_headers() self.wfile.write(bytes(encoder.encode_level(level), UTF_8)) def run_file(self, file_path): full_path = resources.get_full_path(file_path) if full_path is not None: try: level = parser.parse_level(full_path) runner.run_level(level) self.send_response(200) self.send_header('content-type', 'application/json') self.end_headers() self.wfile.write(bytes(encoder.encode_level(level), UTF_8)) except ValueError as err: self.send_response(500) self.send_header('content-type', 'text/plain') self.end_headers() self.wfile.write(bytes('Error: {0}'.format(err), UTF_8)) else: self.send_response(404) self.send_header('content-type', 'text/plain') self.end_headers() self.wfile.write(bytes('No such file!', UTF_8)) def do_GET(self): if self.path == '/goodbye': self.goodbye() elif self.path == '/hello': self.hello_world() elif self.path.startswith('/test_vis/'): self.test_vis(self.path[10:]) elif self.path.startswith('/run/'): self.run_file(self.path[5:]) else: self.what() def new_server(port): return http.server.HTTPServer(("", port), HelloWorldHandler) def start_server(port=8888): print('starting on port', port) httpd = new_server(port) httpd.serve_forever() print('server shut down')
{ "content_hash": "1d3a355a6aba7d6c4f920465883b092e", "timestamp": "", "source": "github", "line_count": 122, "max_line_length": 75, "avg_line_length": 28.557377049180328, "alnum_prop": 0.6050516647531573, "repo_name": "MoyTW/RL_Arena_Experiment", "id": "107416085c0c3bb0226edf05dd52436a057c262a", "size": "3484", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hunting/server.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "68955" } ], "symlink_target": "" }
import Queue as queue import threading import collectd class BaseWriter(threading.Thread): """ Base class for all writers. :param formatter: Formatter instance. """ MAX_BUFFER_SIZE = 1000 """The maximum size of values in the output buffer.""" def __init__(self, formatter): collectd.debug("BaseWriter.__init__: formatter=%s, MAX_BUFFER_SIZE=%s" % (formatter, self.MAX_BUFFER_SIZE)) threading.Thread.__init__(self) self.buffer = queue.Queue(maxsize=self.MAX_BUFFER_SIZE) self.formatter = formatter def shutdown(self): """ `shutdown()` will be called by `run()`. This can be overridden by a derived class. """ pass def flush(self, message): """ `flush()` will be called by `run()` when the write buffer must be flushed. :param message: This must be overridden by a derived class. """ raise NotImplementedError def write(self, values_dict): collectd.debug('%s.write_callback: values_object=%s' % ('$NAME', values_dict)) try: self.buffer.put_nowait(values_dict) except queue.Full: collectd.notice("%s output buffer full" % (self)) def run(self): collectd.debug("BaseWriter.run") while True: try: values_dict = self.buffer.get(block=True, timeout=0.1) self.flush(values_dict) except queue.Empty: pass
{ "content_hash": "77a919ac389fbfd23e20b9354ce8d462", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 86, "avg_line_length": 23.484848484848484, "alnum_prop": 0.5670967741935484, "repo_name": "mjuenema/collectd-plugins", "id": "78073f80c2017fc28586e01a85e05afb432814ef", "size": "1563", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "include/_basewriter.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "529" }, { "name": "Python", "bytes": "46098" }, { "name": "Shell", "bytes": "2182" } ], "symlink_target": "" }
from random import random, uniform from math import inf from pygame import Color as PyGameColor from colormath.color_objects import sRGBColor, LabColor, HSVColor from colormath.color_conversions import convert_color from colormath.color_diff import delta_e_cie2000 class Color(sRGBColor): def __init__(self, r, g=None, b=None, is_upscaled=False, upscale=False): if isinstance(r, sRGBColor): r, b, g = r.get_value_tuple() elif isinstance(r, (str, int)) and g is None or b is None: if isinstance(r, str): s = r else: s = "#" + format(hex(r)[2:], "0>6") r = int(s[1:3], 16) g = int(s[3:5], 16) b = int(s[5:7], 16) elif hasattr(r, "__getitem__"): r, g, b = r if upscale: r, g, b = int(r*256), int(g*256), int(b*256) super().__init__(r, g, b, is_upscaled=is_upscaled) self.r, self.g, self.b = r, g, b def __len__(self): return 3 def __getitem__(self, item): if isinstance(item, slice): return tuple(map(int, self.get_value_tuple()[item])) return int(self.get_value_tuple()[item]) def norm(self): """From r, g, b values between 0-255 to 0.0-1.0""" return Color(self, is_upscaled=True) @classmethod def complementary(cls, *colors: "Color", decrement=0.01): """Return a random color that looks as different from the colors as possible The way it does this is creating a random color and checking the difference. If it is not different enough, check new color and decrement the required difference. This way it will not find the most different color, but something that is different enough for my use.""" len_colors = len(colors) tolerance = 70 lab_colours = [convert_color(color.norm(), LabColor) for color in colors] while True: new_hsv = HSVColor(uniform(0, 360), random(), random()) new_lab = convert_color(new_hsv, LabColor) min_d = inf for color in lab_colours: d = delta_e_cie2000(new_lab, color) min_d = min(min_d, d) if d < tolerance: tolerance -= decrement break else: # No break return Color(convert_color(new_lab, sRGBColor), upscale=True), min_d def contrasting(self): brightness = (self.r * 299 + self.g * 587 + self.b * 114) / 1000. return Color(0, 0, 0) if brightness > 128 else Color(255, 255, 255) def get_rgb_hex(self): return "#{0.r:02x}{0.g:02x}{0.b:02x}".format(self) BLACK = Color(0) WHITE = Color(0xffffff) DARK_GREY = Color(0x1a1a1a) LIGHT_GREY = Color(0x696969) RED = Color(0xff0000) GREEN = Color(0x00ff00) BLUE = Color(0x0000ff) YELLOW = Color(0xffff00) PINK = Color(0xff00ff) CYAN = Color(0x00ffff) LIGHT_BLUE = Color(0x1a63ce) DARK_RED = Color(0xaa0000) TRANSPARENT = PyGameColor(0, 0, 0, 0)
{ "content_hash": "6477a66ccb465dd722a5b9aad93f5a7a", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 84, "avg_line_length": 36.44186046511628, "alnum_prop": 0.5682833439693682, "repo_name": "thdb-theo/Zombie-Survival", "id": "2edf7706abce7e7ab4f262f2c141ad6bf85c8279", "size": "3134", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/color.py", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "4131" }, { "name": "Python", "bytes": "114499" } ], "symlink_target": "" }
""" @author: Sebi File: write_OME-XML_from_file.py Date: 11.05.2015 Version. 1.1 """ from __future__ import print_function import bftools as bf # use for BioFormtas <= 5.1.10 #urlnamespace = 'http://www.openmicroscopy.org/Schemas/OME/2015-01' # use for BioFormtas > 5.2.0 urlnamespace = 'http://www.openmicroscopy.org/Schemas/OME/2016-06' # specify bioformats_package.jar to use if required bfpackage = r'bfpackage/5.4.1/bioformats_package.jar' bf.set_bfpath(bfpackage) # INSERT THE FILES INSIDE THE LIST BELOW testfiles = [r'testdata/T=5_Z=3_CH=2_CZT_All_CH_per_Slice.czi'] bf.writeomexml(testfiles, method=1, writeczi_metadata=True)
{ "content_hash": "3520242ad0fa5a44c61e1b1b96aaa9f0", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 67, "avg_line_length": 25.68, "alnum_prop": 0.7383177570093458, "repo_name": "sebi06/BioFormatsRead", "id": "eb106f20b76ea8610418626e7580ef48c6a5a5c2", "size": "666", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "write_OME-XML_from_file.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "1564" }, { "name": "Python", "bytes": "112970" } ], "symlink_target": "" }
"""The Clang Compiler. @see: Cake Build System (http://sourceforge.net/projects/cake-build) @copyright: Copyright (c) 2010 Lewis Baker, Stuart McMahon. @license: Licensed under the MIT license. """ from cake.library import memoise from cake.target import getPaths, getPath from cake.library.compilers import Compiler, makeCommand, CompilerNotFoundError import cake.path import cake.filesys import os.path import subprocess def _getClangVersion(clangExe): """Returns the Clang version number given an executable. """ args = [getPath(clangExe), '--version'] try: p = subprocess.Popen( args=args, stdout=subprocess.PIPE, ) except EnvironmentError, e: raise EnvironmentError( "cake: failed to launch %s: %s\n" % (args[0], str(e)) ) stdoutText = p.stdout.readline() p.stdout.close() exitCode = p.wait() if exitCode != 0: raise EnvironmentError( "%s: failed with exit code %i\n" % (args[0], exitCode) ) # Parse through the line to get the version number. Examples: # Ubuntu clang version 3.6.2-svn238746-1~exp1 (branches/release_36) (based on LLVM 3.6.2) # clang version 3.5.0 (217039) versionText = "version " index = stdoutText.find(versionText) if index == -1: raise EnvironmentError( "%s: version format invalid: %s\n" % (args[0], stdoutText) ) versionString = stdoutText[index + len(versionText):] index = versionString.find('-') index2 = versionString.find(' ') if index != -1: if index2 != -1: index = min(index, index2) else: if index2 != -1: index = index2 versionString = versionString[:index].strip() return versionString def _makeVersionTuple(versionString): return tuple( int(n) for n in versionString.split(".") ) class ClangCompiler(Compiler): _name = 'clang' def __init__(self, configuration, clangExe, llvmArExe, binPaths): Compiler.__init__(self, configuration=configuration, binPaths=binPaths) self._clangExe = clangExe self._llvmArExe = llvmArExe self.version = _getClangVersion(clangExe) self.versionTuple = _makeVersionTuple(self.version) def _getLanguage(self, suffix, pch=False): language = self.language if language is None: if suffix in self.cSuffixes: language = 'c' elif suffix in self.cppSuffixes: language = 'c++' return language @memoise def _getCommonCompileArgs(self, suffix, shared=False, pch=False): args = [self._clangExe, '-c', '-MD'] language = self._getLanguage(suffix) if language: args.extend(['-x', language]) if self.debugSymbols: args.append('-g') if language == 'c++': args.extend(self.cppFlags) elif language == 'c': args.extend(self.cFlags) for d in self.getDefines(): args.extend(['-D', d]) for p in getPaths(self.getIncludePaths()): args.extend(['-I', p]) for p in getPaths(self.getForcedIncludes()): args.extend(['-include', p]) return args def getObjectCommands(self, target, source, pch, shared): depPath = self._generateDependencyFile(target) args = list(self._getCommonCompileArgs(cake.path.extension(source), shared)) args.extend([source, '-o', target]) # TODO: Add support for pch def compile(): dependencies = self._runProcess(args + ['-MF', depPath], target) dependencies.extend(self._scanDependencyFile(depPath, target)) return dependencies canBeCached = True return compile, args, canBeCached @memoise def _getCommonLibraryArgs(self): args = [self._llvmArExe, 'qcs'] args.extend(self.libraryFlags) return args def getLibraryCommand(self, target, sources): args = list(self._getCommonLibraryArgs()) args.append(target) args.extend(getPaths(sources)) @makeCommand("lib-scan") def scan(): return [target], [args[0]] + sources @makeCommand(args) def archive(): 
cake.filesys.remove(self.configuration.abspath(target)) self._runProcess(args, target) return archive, scan def getProgramCommands(self, target, sources): return self._getLinkCommands(target, sources, dll=False) def getModuleCommands(self, target, sources, importLibrary, installName): return self._getLinkCommands(target, sources, importLibrary, installName, dll=True) @memoise def _getCommonLinkArgs(self, dll): args = [self._clangExe] if dll: args.append('--shared') args.extend(self.moduleFlags) else: args.extend(self.programFlags) return args def _getLinkCommands(self, target, sources, importLibrary=None, installName=None, dll=False): objects, libraries = self._resolveObjects() args = list(self._getCommonLinkArgs(dll)) for path in getPaths(self.getLibraryPaths()): args.append('-L' + path) args.extend(['-o', target]) args.extend(sources) args.extend(objects) for lib in libraries: if cake.path.baseName(lib) == lib: args.append('-l' + lib) else: args.append(lib) @makeCommand(args) def link(): self._runProcess(args, target) @makeCommand("link-scan") def scan(): targets = [target] if dll and importLibrary: targets.append(importLibrary) dependencies = [args[0]] dependencies += sources dependencies += objects dependencies += self._scanForLibraries(libraries) return targets, dependencies return link, scan
{ "content_hash": "3066d698338aae5ac1b6eb08d99e2bb3", "timestamp": "", "source": "github", "line_count": 215, "max_line_length": 95, "avg_line_length": 26.511627906976745, "alnum_prop": 0.6368421052631579, "repo_name": "lewissbaker/cake", "id": "353e63a0f24769aec209588ebcc63596a54df8cd", "size": "5700", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/cake/library/compilers/clang.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "228" }, { "name": "C#", "bytes": "2856" }, { "name": "C++", "bytes": "1787" }, { "name": "CoffeeScript", "bytes": "2315" }, { "name": "Python", "bytes": "552156" } ], "symlink_target": "" }
from pandac.PandaModules import * from direct.gui.DirectGui import * from toontown.toonbase import ToontownGlobals, TTLocalizer from toontown.cogdominium import CogdoBarrelRoomConsts class CogdoBarrelRoomRewardPanel(DirectFrame): def __init__(self): DirectFrame.__init__(self, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=TTLocalizer.RPdirectFrame, pos=(0, 0, 0.587)) self.initialiseoptions(CogdoBarrelRoomRewardPanel) self.avNameLabel = DirectLabel(parent=self, relief=None, pos=(0, 0, 0.3), text='Toon Ups', text_scale=0.08) self.rewardLines = [] for i in xrange(CogdoBarrelRoomConsts.MaxToons): rewardLine = {} rewardLine['frame'] = DirectFrame(parent=self, relief=None, frameSize=(-0.5, 0.5, -0.045, 0.042), pos=(0, 0, 0.1 + -0.09 * i)) rewardLine['name'] = DirectLabel(parent=rewardLine['frame'], relief=None, text='', text_scale=TTLocalizer.RPtrackLabels, text_align=TextNode.ALeft, pos=(-0.4, 0, 0), text_pos=(0, -0.02)) rewardLine['laff'] = DirectLabel(parent=rewardLine['frame'], relief=None, text='', text_scale=0.05, text_align=TextNode.ARight, pos=(0.4, 0, 0), text_pos=(0, -0.02)) self.rewardLines.append(rewardLine) return def setRewards(self, results): for p in xrange(len(results[0])): doId = results[0][p] laff = results[1][p] if doId > 0 and base.cr.doId2do.has_key(doId): toon = base.cr.doId2do[doId] self.rewardLines[p]['name'].setProp('text', toon.getName()) self.rewardLines[p]['laff'].setProp('text', str(laff)) if doId == base.localAvatar.getDoId(): self.rewardLines[p]['frame'].setProp('relief', DGG.RIDGE) self.rewardLines[p]['frame'].setProp('borderWidth', (0.01, 0.01)) self.rewardLines[p]['frame'].setProp('frameColor', (1, 1, 1, 0.5))
{ "content_hash": "70622de87afda08477782dc780fd5fa3", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 198, "avg_line_length": 56.94444444444444, "alnum_prop": 0.6170731707317073, "repo_name": "ToonTownInfiniteRepo/ToontownInfinite", "id": "d96ab630a7a1ebbc224e715f1a36569d266a60b4", "size": "2050", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "toontown/cogdominium/CogdoBarrelRoomRewardPanel.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1703277" }, { "name": "C#", "bytes": "9892" }, { "name": "C++", "bytes": "5468044" }, { "name": "Emacs Lisp", "bytes": "210083" }, { "name": "F#", "bytes": "4611" }, { "name": "JavaScript", "bytes": "7003" }, { "name": "Objective-C", "bytes": "23212" }, { "name": "Puppet", "bytes": "5245" }, { "name": "Python", "bytes": "34010215" }, { "name": "Shell", "bytes": "11192" }, { "name": "Tcl", "bytes": "1981257" } ], "symlink_target": "" }
"""Package metadata for enn. This is kept in a separate module so that it can be imported from setup.py, at a time when enn's dependencies may not have been installed yet. """ __version__ = '0.1.0'
{ "content_hash": "8fb51746d223d1027b403b31e9987e0a", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 78, "avg_line_length": 28.571428571428573, "alnum_prop": 0.715, "repo_name": "deepmind/enn", "id": "a73e776481bf5aa9c81d72f04651bf8fd450540a", "size": "930", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "enn/_metadata.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jupyter Notebook", "bytes": "22244" }, { "name": "Python", "bytes": "420256" }, { "name": "Shell", "bytes": "1677" } ], "symlink_target": "" }
import contextlib import sys import mitmproxy.master import mitmproxy.options from mitmproxy import addonmanager from mitmproxy import command from mitmproxy import eventsequence from mitmproxy.addons import script class TestAddons(addonmanager.AddonManager): def __init__(self, master): super().__init__(master) def trigger(self, event, *args, **kwargs): if event == "log": self.master.logs.append(args[0]) elif event == "tick" and not args and not kwargs: pass else: self.master.events.append((event, args, kwargs)) super().trigger(event, *args, **kwargs) class RecordingMaster(mitmproxy.master.Master): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.addons = TestAddons(self) self.events = [] self.logs = [] def dump_log(self, outf=sys.stdout): for i in self.logs: print("%s: %s" % (i.level, i.msg), file=outf) def has_log(self, txt, level=None): for i in self.logs: if level and i.level != level: continue if txt.lower() in i.msg.lower(): return True return False def has_event(self, name): for i in self.events: if i[0] == name: return True return False def clear(self): self.logs = [] class context: """ A context for testing addons, which sets up the mitmproxy.ctx module so handlers can run as they would within mitmproxy. The context also provides a number of helper methods for common testing scenarios. """ def __init__(self, master=None, options=None): options = options or mitmproxy.options.Options() self.master = master or RecordingMaster( options ) self.options = self.master.options self.wrapped = None def ctx(self): """ Returns a new handler context. """ return self.master.handlecontext() def __enter__(self): self.wrapped = self.ctx() self.wrapped.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): self.wrapped.__exit__(exc_type, exc_value, traceback) self.wrapped = None return False @contextlib.contextmanager def cycle(self, addon, f): """ Cycles the flow through the events for the flow. Stops if a reply is taken (as in flow interception). """ f.reply._state = "start" for evt, arg in eventsequence.iterate(f): self.master.addons.invoke_addon( addon, evt, arg ) if f.reply.state == "taken": return def configure(self, addon, **kwargs): """ A helper for testing configure methods. Modifies the registered Options object with the given keyword arguments, then calls the configure method on the addon with the updated value. """ if addon not in self.master.addons: self.master.addons.register(addon) with self.options.rollback(kwargs.keys(), reraise=True): self.options.update(**kwargs) self.master.addons.invoke_addon( addon, "configure", kwargs.keys() ) def script(self, path): """ Loads a script from path, and returns the enclosed addon. """ sc = script.Script(path) loader = addonmanager.Loader(self.master) self.master.addons.invoke_addon(sc, "load", loader) self.configure(sc) self.master.addons.invoke_addon(sc, "tick") return sc.addons[0] if sc.addons else None def invoke(self, addon, event, *args, **kwargs): """ Recursively invoke an event on an addon and all its children. """ return self.master.addons.invoke_addon(addon, event, *args, **kwargs) def command(self, func, *args): """ Invoke a command function with a list of string arguments within a command context, mimicing the actual command environment. """ cmd = command.Command(self.master.commands, "test.command", func) return cmd.call(args)
{ "content_hash": "18b0d53c37931f427c54bb601e9b7252", "timestamp": "", "source": "github", "line_count": 140, "max_line_length": 136, "avg_line_length": 31.257142857142856, "alnum_prop": 0.5744972577696527, "repo_name": "MatthewShao/mitmproxy", "id": "d966f1d5dd0ef6e1f61bc6ea3d086b87333e42c8", "size": "4376", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "mitmproxy/test/taddons.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "20941" }, { "name": "HTML", "bytes": "14747" }, { "name": "JavaScript", "bytes": "276302" }, { "name": "PowerShell", "bytes": "494" }, { "name": "Python", "bytes": "1726585" }, { "name": "Shell", "bytes": "4644" } ], "symlink_target": "" }
"""Compute Wasserstein distance between two random subsets of CIFAR10. Note: comparing two fixed sets is a sanity check, not the target use case. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import time import tensorflow as tf from dataset import Dataset from wasserstein import Wasserstein tf.flags.DEFINE_string('filepattern', '/tmp/cifar10/cifar_train_class_%d.pic', 'Filepattern from which to read the dataset.') tf.flags.DEFINE_integer('batch_size', 1000, 'Batch size of generator.') tf.flags.DEFINE_integer('loss_steps', 50, 'Number of optimization steps.') FLAGS = tf.flags.FLAGS def main(unused_argv): # tf.logging.set_verbosity(tf.logging.INFO) # load two copies of the dataset print('Loading datasets...') subset1 = Dataset(bs=FLAGS.batch_size, filepattern=FLAGS.filepattern) subset2 = Dataset(bs=FLAGS.batch_size, filepattern=FLAGS.filepattern) print('Computing Wasserstein distance...') with tf.Graph().as_default(): # compute Wasserstein distance between two sets of examples wasserstein = Wasserstein(subset1, subset2) loss = wasserstein.dist(C=.1, nsteps=FLAGS.loss_steps) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) res = sess.run(loss) print('result: %f\n' % res) if __name__ == '__main__': tf.app.run(main)
{ "content_hash": "07352a645bcc6e19a38a312adeec5328", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 78, "avg_line_length": 31.065217391304348, "alnum_prop": 0.7130860741777467, "repo_name": "google/wasserstein-dist", "id": "d964f54a4a42b6fce74c42b243e83877c2fc6bbc", "size": "2005", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "compute_one.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "18961" } ], "symlink_target": "" }
from django.shortcuts import * from contacts.models import * from django.utils.text import slugify def index(request): companies = Company.objects.all() return render(request, 'contacts/index.html', {'companies': companies}) def create_company(request): categories = Category.objects.all() if request.method == 'POST': data = request.POST form = CreateCompany(data) if form.is_valid(): company = form.save(commit=False) # Set slug and category # TODO: Check if slug is not already taken slug = slugify(data['name']) company.slug = slug category = get_category(data) company.category = category company.save() return redirect("index") else: return render(request, 'contacts/createCompany.html', {'form': form, 'categories': categories}) else: return render(request, 'contacts/createCompany.html', {'categories': categories}) def show_company(request, slug): company = Company.objects.get(slug=slug) return render(request, 'contacts/showCompany.html', {'company': company}) def show_companies(request): companies = Company.objects.getCompanies() return render(request, 'contacts/showCompanies.html', {'companies': companies}) def edit_company(request, slug): categories = Category.objects.all() company = get_object_or_404(Company, slug=slug) if request.method == 'POST': data = request.POST form = CreateCompany(data or None, instance=company) if form.is_valid(): # Set slug and category # TODO: Check if slug is not already taken slug = slugify(data['name']) company.slug = slug category = get_category(data) company.category = category company.save() return redirect("index") else: return render(request, 'contacts/createCompany.html', {'form': form, 'categories': categories}) return render(request, 'contacts/editCompany.html', {'company': company, 'categories': categories}) def create_category(request): if request.method == 'POST': data = request.POST form = CreateCategory(data) if form.is_valid(): category = form.save() return redirect("index") else: return render(request, 'contacts/createCategory.html', {'error': 'Could not save category %s' % form}) else: return render(request, 'contacts/createCategory.html') def show_categories(request): categories = Category.objects.getCategories() return render(request, 'contacts/showCategories.html', {'categories': categories}) ## # Helper methods ## def get_category(data): try: category_id = data['category'] if 'category' in data else None category = Category.objects.get(pk=category_id) except Category.DoesNotExist: return None return category
{ "content_hash": "f32623ef77c74665fb2be110dc6a9aed", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 114, "avg_line_length": 32.344086021505376, "alnum_prop": 0.6313164893617021, "repo_name": "kikeh/contacts", "id": "b56ca746678ae52d01184fbbd1284a36c2db7b6d", "size": "3008", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "contacts/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "88353" }, { "name": "HTML", "bytes": "13367" }, { "name": "Python", "bytes": "11976" } ], "symlink_target": "" }
import mock import os import tempfile from cinder import db from cinder import exception from cinder.image import image_utils from cinder.openstack.common import log as logging from cinder.openstack.common import timeutils from cinder import test from cinder.tests.image import fake as fake_image from cinder.tests.test_volume import DriverTestCase from cinder import units from cinder.volume import configuration as conf import cinder.volume.drivers.rbd as driver from cinder.volume.flows.manager import create_volume LOG = logging.getLogger(__name__) # This is used to collect raised exceptions so that tests may check what was # raised. # NOTE: this must be initialised in test setUp(). RAISED_EXCEPTIONS = [] class MockException(Exception): def __init__(self, *args, **kwargs): RAISED_EXCEPTIONS.append(self.__class__) class MockImageNotFoundException(MockException): """Used as mock for rbd.ImageNotFound.""" class MockImageBusyException(MockException): """Used as mock for rbd.ImageBusy.""" def common_mocks(f): """Decorator to set mocks common to all tests. The point of doing these mocks here is so that we don't accidentally set mocks that can't/dont't get unset. """ def _common_inner_inner1(inst, *args, **kwargs): @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') @mock.patch('cinder.volume.drivers.rbd.RADOSClient') @mock.patch('cinder.backup.drivers.ceph.rbd') @mock.patch('cinder.backup.drivers.ceph.rados') def _common_inner_inner2(mock_rados, mock_rbd, mock_client, mock_proxy): inst.mock_rbd = mock_rbd inst.mock_rados = mock_rados inst.mock_client = mock_client inst.mock_proxy = mock_proxy inst.mock_rados.Rados = mock.Mock inst.mock_rados.Rados.ioctx = mock.Mock() inst.mock_rbd.RBD = mock.Mock inst.mock_rbd.Image = mock.Mock inst.mock_rbd.Image.close = mock.Mock() inst.mock_rbd.RBD.Error = Exception inst.mock_rados.Error = Exception inst.mock_rbd.ImageBusy = MockImageBusyException inst.mock_rbd.ImageNotFound = MockImageNotFoundException inst.driver.rbd = inst.mock_rbd inst.driver.rados = inst.mock_rados return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 CEPH_MON_DUMP = """dumped monmap epoch 1 { "epoch": 1, "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", "modified": "2013-05-22 17:44:56.343618", "created": "2013-05-22 17:44:56.343618", "mons": [ { "rank": 0, "name": "a", "addr": "[::1]:6789\/0"}, { "rank": 1, "name": "b", "addr": "[::1]:6790\/0"}, { "rank": 2, "name": "c", "addr": "[::1]:6791\/0"}, { "rank": 3, "name": "d", "addr": "127.0.0.1:6792\/0"}, { "rank": 4, "name": "e", "addr": "example.com:6791\/0"}], "quorum": [ 0, 1, 2]} """ class TestUtil(test.TestCase): def test_ascii_str(self): self.assertIsNone(driver.ascii_str(None)) self.assertEqual('foo', driver.ascii_str('foo')) self.assertEqual('foo', driver.ascii_str(u'foo')) self.assertRaises(UnicodeEncodeError, driver.ascii_str, 'foo' + unichr(300)) class RBDTestCase(test.TestCase): def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(RBDTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_tmp_dir = None self.cfg.rbd_pool = 'rbd' self.cfg.rbd_ceph_conf = None self.cfg.rbd_secret_uuid = None self.cfg.rbd_user = None self.cfg.volume_dd_blocksize = '1M' mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.driver = driver.RBDDriver(execute=mock_exec, configuration=self.cfg) self.driver.set_initialized() self.volume_name = u'volume-00000001' self.snapshot_name = u'snapshot-00000001' self.volume_size = 1 self.volume = 
dict(name=self.volume_name, size=self.volume_size) self.snapshot = dict(volume_name=self.volume_name, name=self.snapshot_name) @common_mocks def test_create_volume(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver, '_supports_layering') as \ mock_supports_layering: mock_supports_layering.return_value = True self.mock_rbd.RBD.create = mock.Mock() self.driver.create_volume(self.volume) args = [client.ioctx, str(self.volume_name), self.volume_size * units.GiB] kwargs = {'old_format': False, 'features': self.mock_rbd.RBD_FEATURE_LAYERING} self.mock_rbd.RBD.create.assert_called_once_with(*args, **kwargs) client.__enter__.assert_called_once() client.__exit__.assert_called_once() mock_supports_layering.assert_called_once() @common_mocks def test_create_volume_no_layering(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver, '_supports_layering') as \ mock_supports_layering: mock_supports_layering.return_value = False self.mock_rbd.RBD.create = mock.Mock() self.driver.create_volume(self.volume) args = [client.ioctx, str(self.volume_name), self.volume_size * units.GiB] kwargs = {'old_format': True, 'features': 0} self.mock_rbd.RBD.create.assert_called_once_with(*args, **kwargs) client.__enter__.assert_called_once() client.__exit__.assert_called_once() mock_supports_layering.assert_called_once() @common_mocks def test_delete_volume(self): client = self.mock_client.return_value self.driver.rbd.Image.list_snaps = mock.Mock() self.driver.rbd.Image.list_snaps.return_value = [] self.driver.rbd.Image.close = mock.Mock() self.driver.rbd.Image.remove = mock.Mock() self.driver.rbd.Image.unprotect_snap = mock.Mock() with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: mock_get_clone_info.return_value = (None, None, None) self.driver.delete_volume(self.volume) mock_get_clone_info.assert_called_once() self.driver.rbd.Image.list_snaps.assert_called_once() client.__enter__.assert_called_once() client.__exit__.assert_called_once() mock_delete_backup_snaps.assert_called_once() self.assertFalse(self.driver.rbd.Image.unprotect_snap.called) self.driver.rbd.RBD.remove.assert_called_once() @common_mocks def delete_volume_not_found(self): self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound self.assertIsNone(self.driver.delete_volume(self.volume)) self.mock_rbd.Image.assert_called_once() # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks def test_delete_busy_volume(self): self.mock_rbd.Image.list_snaps = mock.Mock() self.mock_rbd.Image.list_snaps.return_value = [] self.mock_rbd.Image.unprotect_snap = mock.Mock() self.mock_rbd.RBD.remove = mock.Mock() self.mock_rbd.RBD.remove.side_effect = self.mock_rbd.ImageBusy with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: with mock.patch.object(driver, 'RADOSClient') as \ mock_rados_client: self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, self.volume) mock_get_clone_info.assert_called_once() self.mock_rbd.Image.list_snaps.assert_called_once() mock_rados_client.assert_called_once() mock_delete_backup_snaps.assert_called_once() 
self.assertFalse(self.mock_rbd.Image.unprotect_snap.called) self.mock_rbd.RBD.remove.assert_called_once() # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageBusy]) @common_mocks def test_create_snapshot(self): proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.create_snapshot(self.snapshot) args = [str(self.snapshot_name)] proxy.create_snap.assert_called_with(*args) proxy.protect_snap.assert_called_with(*args) @common_mocks def test_delete_snapshot(self): proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.delete_snapshot(self.snapshot) args = [str(self.snapshot_name)] proxy.remove_snap.assert_called_with(*args) proxy.unprotect_snap.assert_called_with(*args) @common_mocks def test_get_clone_info(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name)) volume.parent_info.return_value = parent_info info = self.driver._get_clone_info(volume, self.volume_name) self.assertEqual(info, parent_info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once() @common_mocks def test_get_clone_info_w_snap(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name)) volume.parent_info.return_value = parent_info snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_name, snap=snapshot) self.assertEqual(info, parent_info) volume.set_snap.assert_called_once() self.assertEqual(volume.set_snap.call_count, 2) volume.parent_info.assert_called_once() @common_mocks def test_get_clone_info_w_exception(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() volume.parent_info.side_effect = self.mock_rbd.ImageNotFound snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_name, snap=snapshot) self.assertEqual(info, (None, None, None)) volume.set_snap.assert_called_once() self.assertEqual(volume.set_snap.call_count, 2) volume.parent_info.assert_called_once() # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks def test_get_clone_info_deleted_volume(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name)) volume.parent_info.return_value = parent_info info = self.driver._get_clone_info(volume, "%s.deleted" % (self.volume_name)) self.assertEqual(info, parent_info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once() @common_mocks def test_create_cloned_volume(self): src_name = u'volume-00000001' dst_name = u'volume-00000002' self.cfg.rbd_max_clone_depth = 2 self.mock_rbd.RBD.clone = mock.Mock() with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.mock_rbd.Image.create_snap = mock.Mock() self.mock_rbd.Image.protect_snap = mock.Mock() self.mock_rbd.Image.close = mock.Mock() self.driver.create_cloned_volume(dict(name=dst_name), dict(name=src_name)) self.mock_rbd.Image.create_snap.assert_called_once() self.mock_rbd.Image.protect_snap.assert_called_once() self.mock_rbd.RBD.clone.assert_called_once() self.mock_rbd.Image.close.assert_called_once() 
self.assertTrue(mock_get_clone_depth.called) @common_mocks def test_create_cloned_volume_w_flatten(self): src_name = u'volume-00000001' dst_name = u'volume-00000002' self.cfg.rbd_max_clone_depth = 1 self.mock_rbd.RBD.clone = mock.Mock() self.mock_rbd.RBD.clone.side_effect = self.mock_rbd.RBD.Error with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.mock_rbd.Image.create_snap = mock.Mock() self.mock_rbd.Image.protect_snap = mock.Mock() self.mock_rbd.Image.unprotect_snap = mock.Mock() self.mock_rbd.Image.remove_snap = mock.Mock() self.mock_rbd.Image.close = mock.Mock() self.assertRaises(self.mock_rbd.RBD.Error, self.driver.create_cloned_volume, dict(name=dst_name), dict(name=src_name)) self.mock_rbd.Image.create_snap.assert_called_once() self.mock_rbd.Image.protect_snap.assert_called_once() self.mock_rbd.RBD.clone.assert_called_once() self.mock_rbd.Image.unprotect_snap.assert_called_once() self.mock_rbd.Image.remove_snap.assert_called_once() self.mock_rbd.Image.close.assert_called_once() self.assertTrue(mock_get_clone_depth.called) @common_mocks def test_create_cloned_volume_w_clone_exception(self): src_name = u'volume-00000001' dst_name = u'volume-00000002' self.cfg.rbd_max_clone_depth = 2 self.mock_rbd.RBD.clone = mock.Mock() self.mock_rbd.RBD.clone.side_effect = self.mock_rbd.RBD.Error with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.mock_rbd.Image.create_snap = mock.Mock() self.mock_rbd.Image.protect_snap = mock.Mock() self.mock_rbd.Image.unprotect_snap = mock.Mock() self.mock_rbd.Image.remove_snap = mock.Mock() self.mock_rbd.Image.close = mock.Mock() self.assertRaises(self.mock_rbd.RBD.Error, self.driver.create_cloned_volume, dict(name=dst_name), dict(name=src_name)) self.mock_rbd.Image.create_snap.assert_called_once() self.mock_rbd.Image.protect_snap.assert_called_once() self.mock_rbd.RBD.clone.assert_called_once() self.mock_rbd.Image.unprotect_snap.assert_called_once() self.mock_rbd.Image.remove_snap.assert_called_once() self.mock_rbd.Image.close.assert_called_once() @common_mocks def test_good_locations(self): locations = ['rbd://fsid/pool/image/snap', 'rbd://%2F/%2F/%2F/%2F', ] map(self.driver._parse_location, locations) @common_mocks def test_bad_locations(self): locations = ['rbd://image', 'http://path/to/somewhere/else', 'rbd://image/extra', 'rbd://image/', 'rbd://fsid/pool/image/', 'rbd://fsid/pool/image/snap/', 'rbd://///', ] for loc in locations: self.assertRaises(exception.ImageUnacceptable, self.driver._parse_location, loc) self.assertFalse( self.driver._is_cloneable(loc, {'disk_format': 'raw'})) @common_mocks def test_cloneable(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' info = {'disk_format': 'raw'} self.assertTrue(self.driver._is_cloneable(location, info)) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_different_fsid(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://def/pool/image/snap' self.assertFalse( self.driver._is_cloneable(location, {'disk_format': 'raw'})) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_unreadable(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' 
location = 'rbd://abc/pool/image/snap' self.mock_proxy.side_effect = self.mock_rbd.Error args = [location, {'disk_format': 'raw'}] self.assertFalse(self.driver._is_cloneable(*args)) self.mock_proxy.assert_called_once() self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_bad_format(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' formats = ['qcow2', 'vmdk', 'vdi'] for f in formats: self.assertFalse( self.driver._is_cloneable(location, {'disk_format': f})) self.assertTrue(mock_get_fsid.called) def _copy_image(self): with mock.patch.object(tempfile, 'NamedTemporaryFile'): with mock.patch.object(os.path, 'exists') as mock_exists: mock_exists.return_value = True with mock.patch.object(image_utils, 'fetch_to_raw'): with mock.patch.object(self.driver, 'delete_volume'): with mock.patch.object(self.driver, '_resize'): mock_image_service = mock.MagicMock() args = [None, {'name': 'test', 'size': 1}, mock_image_service, None] self.driver.copy_image_to_volume(*args) @common_mocks def test_copy_image_no_volume_tmp(self): self.cfg.volume_tmp_dir = None self._copy_image() @common_mocks def test_copy_image_volume_tmp(self): self.cfg.volume_tmp_dir = '/var/run/cinder/tmp' self._copy_image() @common_mocks def test_update_volume_stats(self): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster = mock.Mock() client.cluster.get_cluster_stats = mock.Mock() client.cluster.get_cluster_stats.return_value = {'kb': 1024 ** 3, 'kb_avail': 1024 ** 2} self.driver.configuration.safe_get = mock.Mock() self.driver.configuration.safe_get.return_value = 'RBD' expected = dict( volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb=1024, free_capacity_gb=1, reserved_percentage=0) actual = self.driver.get_volume_stats(True) client.cluster.get_cluster_stats.assert_called_once() self.assertDictMatch(expected, actual) @common_mocks def test_update_volume_stats_error(self): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster = mock.Mock() client.cluster.get_cluster_stats = mock.Mock() client.cluster.get_cluster_stats.side_effect = Exception self.driver.configuration.safe_get = mock.Mock() self.driver.configuration.safe_get.return_value = 'RBD' expected = dict(volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0) actual = self.driver.get_volume_stats(True) client.cluster.get_cluster_stats.assert_called_once() self.assertDictMatch(expected, actual) @common_mocks def test_get_mon_addrs(self): with mock.patch.object(self.driver, '_execute') as mock_execute: mock_execute.return_value = (CEPH_MON_DUMP, '') hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] self.assertEqual((hosts, ports), self.driver._get_mon_addrs()) @common_mocks def test_initialize_connection(self): hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] with mock.patch.object(self.driver, '_get_mon_addrs') as \ mock_get_mon_addrs: mock_get_mon_addrs.return_value = (hosts, ports) expected = { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % (self.cfg.rbd_pool, self.volume_name), 'hosts': hosts, 'ports': ports, 'auth_enabled': False, 
'auth_username': None, 'secret_type': 'ceph', 'secret_uuid': None, } } volume = dict(name=self.volume_name) actual = self.driver.initialize_connection(volume, None) self.assertDictMatch(expected, actual) self.assertTrue(mock_get_mon_addrs.called) @common_mocks def test_clone(self): src_pool = u'images' src_image = u'image-name' src_snap = u'snapshot-name' client_stack = [] def mock__enter__(inst): def _inner(): client_stack.append(inst) return inst return _inner client = self.mock_client.return_value # capture both rados client used to perform the clone client.__enter__.side_effect = mock__enter__(client) self.mock_rbd.RBD.clone = mock.Mock() self.driver._clone(self.volume, src_pool, src_image, src_snap) args = [client_stack[0].ioctx, str(src_image), str(src_snap), client_stack[1].ioctx, str(self.volume_name)] kwargs = {'features': self.mock_rbd.RBD_FEATURE_LAYERING} self.mock_rbd.RBD.clone.assert_called_once_with(*args, **kwargs) self.assertEqual(client.__enter__.call_count, 2) @common_mocks def test_extend_volume(self): fake_size = '20' fake_vol = {'project_id': 'testprjid', 'name': self.volume_name, 'size': fake_size, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} self.mox.StubOutWithMock(self.driver, '_resize') size = int(fake_size) * units.GiB self.driver._resize(fake_vol, size=size) self.mox.ReplayAll() self.driver.extend_volume(fake_vol, fake_size) self.mox.VerifyAll() @common_mocks def test_rbd_volume_proxy_init(self): snap = u'snapshot-name' client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver, '_connect_to_rados') as \ mock_connect_from_rados: with mock.patch.object(self.driver, '_disconnect_from_rados') as \ mock_disconnect_from_rados: mock_connect_from_rados.return_value = (None, None) mock_disconnect_from_rados.return_value = (None, None) with driver.RBDVolumeProxy(self.driver, self.volume_name): mock_connect_from_rados.assert_called_once() self.assertFalse(mock_disconnect_from_rados.called) mock_disconnect_from_rados.assert_called_once() mock_connect_from_rados.reset_mock() mock_disconnect_from_rados.reset_mock() with driver.RBDVolumeProxy(self.driver, self.volume_name, snapshot=snap): mock_connect_from_rados.assert_called_once() self.assertFalse(mock_disconnect_from_rados.called) mock_disconnect_from_rados.assert_called_once() @common_mocks def test_connect_to_rados(self): self.mock_rados.Rados.connect = mock.Mock() self.mock_rados.Rados.shutdown = mock.Mock() self.mock_rados.Rados.open_ioctx = mock.Mock() self.mock_rados.Rados.open_ioctx.return_value = \ self.mock_rados.Rados.ioctx # default configured pool ret = self.driver._connect_to_rados() self.assertTrue(self.mock_rados.Rados.connect.called) self.assertTrue(self.mock_rados.Rados.open_ioctx.called) self.assertIsInstance(ret[0], self.mock_rados.Rados) self.assertEqual(ret[1], self.mock_rados.Rados.ioctx) self.mock_rados.Rados.open_ioctx.assert_called_with(self.cfg.rbd_pool) # different pool ret = self.driver._connect_to_rados('alt_pool') self.assertTrue(self.mock_rados.Rados.connect.called) self.assertTrue(self.mock_rados.Rados.open_ioctx.called) self.assertIsInstance(ret[0], self.mock_rados.Rados) self.assertEqual(ret[1], self.mock_rados.Rados.ioctx) self.mock_rados.Rados.open_ioctx.assert_called_with('alt_pool') # error self.mock_rados.Rados.open_ioctx.reset_mock() self.mock_rados.Rados.shutdown.reset_mock() self.mock_rados.Rados.open_ioctx.side_effect = self.mock_rados.Error self.assertRaises(self.mock_rados.Error, self.driver._connect_to_rados) 
self.mock_rados.Rados.open_ioctx.assert_called_once() self.mock_rados.Rados.shutdown.assert_called_once() class RBDImageIOWrapperTestCase(test.TestCase): def setUp(self): super(RBDImageIOWrapperTestCase, self).setUp() self.meta = mock.Mock() self.meta.user = 'mock_user' self.meta.conf = 'mock_conf' self.meta.pool = 'mock_pool' self.meta.image = mock.Mock() self.meta.image.read = mock.Mock() self.meta.image.write = mock.Mock() self.meta.image.size = mock.Mock() self.mock_rbd_wrapper = driver.RBDImageIOWrapper(self.meta) self.data_length = 1024 self.full_data = 'abcd' * 256 def test_init(self): self.assertEqual(self.mock_rbd_wrapper._rbd_meta, self.meta) self.assertEqual(self.mock_rbd_wrapper._offset, 0) def test_inc_offset(self): self.mock_rbd_wrapper._inc_offset(10) self.mock_rbd_wrapper._inc_offset(10) self.assertEqual(self.mock_rbd_wrapper._offset, 20) def test_rbd_image(self): self.assertEqual(self.mock_rbd_wrapper.rbd_image, self.meta.image) def test_rbd_user(self): self.assertEqual(self.mock_rbd_wrapper.rbd_user, self.meta.user) def test_rbd_pool(self): self.assertEqual(self.mock_rbd_wrapper.rbd_conf, self.meta.conf) def test_rbd_conf(self): self.assertEqual(self.mock_rbd_wrapper.rbd_pool, self.meta.pool) def test_read(self): def mock_read(offset, length): return self.full_data[offset:length] self.meta.image.read.side_effect = mock_read self.meta.image.size.return_value = self.data_length data = self.mock_rbd_wrapper.read() self.assertEqual(data, self.full_data) data = self.mock_rbd_wrapper.read() self.assertEqual(data, '') self.mock_rbd_wrapper.seek(0) data = self.mock_rbd_wrapper.read() self.assertEqual(data, self.full_data) self.mock_rbd_wrapper.seek(0) data = self.mock_rbd_wrapper.read(10) self.assertEqual(data, self.full_data[:10]) def test_write(self): self.mock_rbd_wrapper.write(self.full_data) self.assertEqual(self.mock_rbd_wrapper._offset, 1024) def test_seekable(self): self.assertTrue(self.mock_rbd_wrapper.seekable) def test_seek(self): self.assertEqual(self.mock_rbd_wrapper._offset, 0) self.mock_rbd_wrapper.seek(10) self.assertEqual(self.mock_rbd_wrapper._offset, 10) self.mock_rbd_wrapper.seek(10) self.assertEqual(self.mock_rbd_wrapper._offset, 10) self.mock_rbd_wrapper.seek(10, 1) self.assertEqual(self.mock_rbd_wrapper._offset, 20) self.mock_rbd_wrapper.seek(0) self.mock_rbd_wrapper.write(self.full_data) self.meta.image.size.return_value = self.data_length self.mock_rbd_wrapper.seek(0) self.assertEqual(self.mock_rbd_wrapper._offset, 0) self.mock_rbd_wrapper.seek(10, 2) self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length + 10) self.mock_rbd_wrapper.seek(-10, 2) self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10) # test exceptions. self.assertRaises(IOError, self.mock_rbd_wrapper.seek, 0, 3) self.assertRaises(IOError, self.mock_rbd_wrapper.seek, -1) # offset should not have been changed by any of the previous # operations. self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10) def test_tell(self): self.assertEqual(self.mock_rbd_wrapper.tell(), 0) self.mock_rbd_wrapper._inc_offset(10) self.assertEqual(self.mock_rbd_wrapper.tell(), 10) def test_flush(self): with mock.patch.object(driver, 'LOG') as mock_logger: self.meta.image.flush = mock.Mock() self.mock_rbd_wrapper.flush() self.meta.image.flush.assert_called_once() self.meta.image.flush.reset_mock() # this should be caught and logged silently. 
self.meta.image.flush.side_effect = AttributeError self.mock_rbd_wrapper.flush() self.meta.image.flush.assert_called_once() msg = _("flush() not supported in this version of librbd") mock_logger.warning.assert_called_with(msg) def test_fileno(self): self.assertRaises(IOError, self.mock_rbd_wrapper.fileno) def test_close(self): self.mock_rbd_wrapper.close() class ManagedRBDTestCase(DriverTestCase): driver_name = "cinder.volume.drivers.rbd.RBDDriver" def setUp(self): super(ManagedRBDTestCase, self).setUp() # TODO(dosaboy): need to remove dependency on mox stubs here once # image.fake has been converted to mock. fake_image.stub_out_image_service(self.stubs) self.volume.driver.set_initialized() self.volume.stats = {'allocated_capacity_gb': 0} self.called = [] def _create_volume_from_image(self, expected_status, raw=False, clone_error=False): """Try to clone a volume from an image, and check the status afterwards. NOTE: if clone_error is True we force the image type to raw otherwise clone_image is not called """ volume_id = 1 # See tests.image.fake for image types. if raw: image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6' else: image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' # creating volume testdata db.volume_create(self.context, {'id': volume_id, 'updated_at': timeutils.utcnow(), 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy'}) try: if not clone_error: self.volume.create_volume(self.context, volume_id, image_id=image_id) else: self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume_id, image_id=image_id) volume = db.volume_get(self.context, volume_id) self.assertEqual(volume['status'], expected_status) finally: # cleanup db.volume_destroy(self.context, volume_id) def test_create_vol_from_image_status_available(self): """Clone raw image then verify volume is in available state.""" def _mock_clone_image(volume, image_location, image_id, image_meta): return {'provider_location': None}, True with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=True) self.assertFalse(mock_copy.called) mock_clone_image.assert_called_once() self.assertFalse(mock_create.called) def test_create_vol_from_non_raw_image_status_available(self): """Clone non-raw image then verify volume is in available state.""" def _mock_clone_image(volume, image_location, image_id, image_meta): return {'provider_location': None}, False with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=False) mock_copy.assert_called_once() mock_clone_image.assert_called_once() mock_create.assert_called_once() def test_create_vol_from_image_status_error(self): """Fail to clone raw image then verify volume is in error state.""" with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = exception.CinderException with mock.patch.object(self.volume.driver, 'create_volume'): with 
mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('error', raw=True, clone_error=True) self.assertFalse(mock_copy.called) mock_clone_image.assert_called_once() self.assertFalse(self.volume.driver.create_volume.called) def test_clone_failure(self): driver = self.volume.driver with mock.patch.object(driver, '_is_cloneable', lambda *args: False): image_loc = (mock.Mock(), mock.Mock()) actual = driver.clone_image(mock.Mock(), image_loc, mock.Mock(), {}) self.assertEqual(({}, False), actual) self.assertEqual(({}, False), driver.clone_image(object(), None, None, {})) def test_clone_success(self): expected = ({'provider_location': None}, True) driver = self.volume.driver with mock.patch.object(self.volume.driver, '_is_cloneable') as \ mock_is_cloneable: mock_is_cloneable.return_value = True with mock.patch.object(self.volume.driver, '_clone') as \ mock_clone: with mock.patch.object(self.volume.driver, '_resize') as \ mock_resize: image_loc = ('rbd://fee/fi/fo/fum', None) actual = driver.clone_image({'name': 'vol1'}, image_loc, 'id.foo', {'disk_format': 'raw'}) self.assertEqual(expected, actual) mock_clone.assert_called_once() mock_resize.assert_called_once()
{ "content_hash": "217bdadb2a15c1e1b09ca97c585ed7f8", "timestamp": "", "source": "github", "line_count": 963, "max_line_length": 79, "avg_line_length": 39.6822429906542, "alnum_prop": 0.5800753650494583, "repo_name": "Thingee/cinder", "id": "c2c572c3b5be9346b4d51218afc42b32df31b959", "size": "38875", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cinder/tests/test_rbd.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "6121923" }, { "name": "SQL", "bytes": "9824" }, { "name": "Shell", "bytes": "8998" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # the lib use python logging can get it if the following is set in your # Ansible config. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_log_syslogd_override_filter short_description: Override filters for remote system server in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS by allowing the user to set and modify log_syslogd feature and override_filter category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.2 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate ip address. required: true username: description: - FortiOS or FortiGate username. required: true password: description: - FortiOS or FortiGate password. default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol type: bool default: true log_syslogd_override_filter: description: - Override filters for remote system server. default: null suboptions: anomaly: description: - Enable/disable anomaly logging. choices: - enable - disable dns: description: - Enable/disable detailed DNS event logging. choices: - enable - disable filter: description: - Syslog filter. filter-type: description: - Include/exclude logs that match the filter. choices: - include - exclude forward-traffic: description: - Enable/disable forward traffic logging. choices: - enable - disable gtp: description: - Enable/disable GTP messages logging. choices: - enable - disable local-traffic: description: - Enable/disable local in or out traffic logging. choices: - enable - disable multicast-traffic: description: - Enable/disable multicast traffic logging. choices: - enable - disable netscan-discovery: description: - Enable/disable netscan discovery event logging. netscan-vulnerability: description: - Enable/disable netscan vulnerability event logging. severity: description: - Lowest severity level to log. choices: - emergency - alert - critical - error - warning - notification - information - debug sniffer-traffic: description: - Enable/disable sniffer traffic logging. choices: - enable - disable ssh: description: - Enable/disable SSH logging. choices: - enable - disable voip: description: - Enable/disable VoIP logging. 
choices: - enable - disable ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" tasks: - name: Override filters for remote system server. fortios_log_syslogd_override_filter: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" log_syslogd_override_filter: anomaly: "enable" dns: "enable" filter: "<your_own_value>" filter-type: "include" forward-traffic: "enable" gtp: "enable" local-traffic: "enable" multicast-traffic: "enable" netscan-discovery: "<your_own_value>" netscan-vulnerability: "<your_own_value>" severity: "emergency" sniffer-traffic: "enable" ssh: "enable" voip: "enable" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule fos = None def login(data): host = data['host'] username = data['username'] password = data['password'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password) def filter_log_syslogd_override_filter_data(json): option_list = ['anomaly', 'dns', 'filter', 'filter-type', 'forward-traffic', 'gtp', 'local-traffic', 'multicast-traffic', 'netscan-discovery', 'netscan-vulnerability', 'severity', 'sniffer-traffic', 'ssh', 'voip'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def flatten_multilists_attributes(data): multilist_attrs = [] for attr in multilist_attrs: try: path = "data['" + "']['".join(elem for elem in attr) + "']" current_val = eval(path) flattened_val = ' '.join(elem for elem in current_val) exec(path + '= flattened_val') except BaseException: pass return data def log_syslogd_override_filter(data, fos): vdom = data['vdom'] log_syslogd_override_filter_data = data['log_syslogd_override_filter'] flattened_data = flatten_multilists_attributes(log_syslogd_override_filter_data) filtered_data = filter_log_syslogd_override_filter_data(flattened_data) return fos.set('log.syslogd', 'override-filter', data=filtered_data, vdom=vdom) def fortios_log_syslogd(data, fos): login(data) if data['log_syslogd_override_filter']: resp = log_syslogd_override_filter(data, fos) fos.logout() return not resp['status'] == "success", resp['status'] == "success", resp def main(): fields = { "host": {"required": True, "type": "str"}, 
"username": {"required": True, "type": "str"}, "password": {"required": False, "type": "str", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "log_syslogd_override_filter": { "required": False, "type": "dict", "options": { "anomaly": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "dns": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "filter": {"required": False, "type": "str"}, "filter-type": {"required": False, "type": "str", "choices": ["include", "exclude"]}, "forward-traffic": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "gtp": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "local-traffic": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "multicast-traffic": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "netscan-discovery": {"required": False, "type": "str"}, "netscan-vulnerability": {"required": False, "type": "str"}, "severity": {"required": False, "type": "str", "choices": ["emergency", "alert", "critical", "error", "warning", "notification", "information", "debug"]}, "sniffer-traffic": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "ssh": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "voip": {"required": False, "type": "str", "choices": ["enable", "disable"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") global fos fos = FortiOSAPI() is_error, has_changed, result = fortios_log_syslogd(module.params, fos) if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
{ "content_hash": "a8df14b4fcd2ddee7eb2973571bd892c", "timestamp": "", "source": "github", "line_count": 379, "max_line_length": 97, "avg_line_length": 32.15303430079155, "alnum_prop": 0.5378302970622025, "repo_name": "SergeyCherepanov/ansible", "id": "a0f5d45e871561fca8aa1bfe6ed05eb3f1953538", "size": "12204", "binary": false, "copies": "23", "ref": "refs/heads/master", "path": "ansible/ansible/modules/network/fortios/fortios_log_syslogd_override_filter.py", "mode": "33188", "license": "mit", "language": [ { "name": "Shell", "bytes": "824" } ], "symlink_target": "" }
from __future__ import with_statement, print_function # The Figure object is used to create backend-independent plot representations. from matplotlib.figure import Figure GUI_TOOLKIT = "qt5" from matplotlib.backends.qt_compat import QtCore, QtWidgets class EmbeddedPylab(object): """ Define a 'with' context manager that lets you use pylab commands to plot on an embedded canvas. This is useful for wrapping existing scripts in a GUI, and benefits from being more familiar than the underlying object oriented interface. As a convenience, the pylab module is returned on entry. *Example* The following example shows how to use the WxAgg backend in a wx panel:: from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as Toolbar from matplotlib.figure import Figure class PlotPanel(wx.Panel): def __init__(self, *args, **kw): wx.Panel.__init__(self, *args, **kw) figure = Figure(figsize=(1,1), dpi=72) canvas = FigureCanvas(self, wx.ID_ANY, figure) self.pylab_interface = EmbeddedPylab(canvas) # Instantiate the matplotlib navigation toolbar and explicitly show it. mpl_toolbar = Toolbar(canvas) mpl_toolbar.Realize() # Create a vertical box sizer to manage the widgets in the main panel. sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(canvas, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, border=0) sizer.Add(mpl_toolbar, 0, wx.EXPAND|wx.ALL, border=0) # Associate the sizer with its container. self.SetSizer(sizer) sizer.Fit(self) def plot(self, *args, **kw): with self.pylab_interface as pylab: pylab.clf() pylab.plot(*args, **kw) Similar patterns should work for the other backends. Check the source code in matplotlib.backend_bases.* for examples showing how to use matplotlib with other GUI toolkits. """ def __init__(self, canvas): # delay loading pylab until matplotlib.use() is called from matplotlib.backend_bases import FigureManagerBase self.fm = FigureManagerBase(canvas, -1) def __enter__(self): # delay loading pylab until matplotlib.use() is called import pylab from matplotlib._pylab_helpers import Gcf Gcf.set_active(self.fm) return pylab def __exit__(self, *args, **kw): # delay loading pylab until matplotlib.use() is called from matplotlib._pylab_helpers import Gcf if hasattr(Gcf, '_activeQue'): # CRUFT: MPL < 3.3.1 Gcf._activeQue = [f for f in Gcf._activeQue if f is not self.fm] try: del Gcf.figs[-1] except KeyError: pass else: Gcf.figs.pop(self.fm.num, None) class _PlotViewShared(object): title = 'Plot' default_size = (600, 400) pylab_interface = None # type: EmbeddedPylab plot_state = None model = None _calculating = False _need_plot = False _need_newmodel = False def set_model(self, model): self.model = model if not self._is_shown(): self._need_newmodel = True else: self._redraw(newmodel=True) def update_model(self, model): #print "profile update model" if self.model != model: # ignore updates to different models return if not self._is_shown(): self._need_newmodel = True else: self._redraw(newmodel=True) def update_parameters(self, model): #print "profile update parameters" if self.model != model: return if not self._is_shown(): self._need_plot = True else: self._redraw(newmodel=self._need_newmodel) def _show(self): #print "showing theory" if self._need_newmodel: self._redraw(newmodel=True) elif self._need_plot: self._redraw(newmodel=False) def _redraw(self, newmodel=False): self._need_newmodel = newmodel if self._calculating: # That means that I've entered the thread through a # wx.Yield for the currently executing redraw. 
I need # to cancel the running thread and force it to start # the calculation over. self.cancel_calculation = True #print "canceling calculation" return with self.pylab_interface as pylab: self._calculating = True #print "calling again" while True: #print "restarting" # We are restarting the calculation, so clear the reset flag self.cancel_calculation = False if self._need_newmodel: self.newmodel() if self.cancel_calculation: continue self._need_newmodel = False self.plot() if self.cancel_calculation: continue pylab.draw() break self._need_plot = False self._calculating = False def get_state(self): #print "returning state",self.model,self.plot_state return self.model, self.plot_state def set_state(self, state): self.model, self.plot_state = state #print "setting state",self.model,self.plot_state self.plot() def menu(self): """ Return a model specific menu """ return None def newmodel(self, model=None): """ New or updated model structure. Do any sort or precalculation you need. plot will be called separately when you are done. For long calculations, periodically perform wx.YieldIfNeeded() and then if self.cancel_calculation is True, return from the plot. """ pass def plot(self): """ Plot to the current figure. If model has a plot method, just use that. For long calculations, periodically perform wx.YieldIfNeeded() and then if self.cancel_calculation is True, return from the plot. """ if hasattr(self.model, 'plot'): self.model.plot() else: raise NotImplementedError("PlotPanel needs a plot method") class PlotView(QtWidgets.QWidget, _PlotViewShared): def __init__(self, *args, **kw): QtWidgets.QWidget.__init__(self, *args, **kw) #import matplotlib.backends.backend_qt4agg #matplotlib.backends.backend_qt4agg.DEBUG = True from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as Toolbar #QtWidgets.QWidget.__init__(self, *args, **kw) # Can specify name on if 'title' in kw: self.title = kw['title'] # Instantiate a figure object that will contain our plots. figure = Figure(figsize=(10,10), dpi=72) # Initialize the figure canvas, mapping the figure object to the plot # engine backend. canvas = FigureCanvas(figure) # Wx-Pylab magic ... # Make our canvas an active figure manager for pylab so that when # pylab plotting statements are executed they will operate on our # canvas and not create a new frame and canvas for display purposes. # This technique allows this application to execute code that uses # pylab stataments to generate plots and embed these plots in our # application window(s). Use _activate_figure() to set. self.pylab_interface = EmbeddedPylab(canvas) # Instantiate the matplotlib navigation toolbar and explicitly show it. mpl_toolbar = Toolbar(canvas, self, False) layout = QtWidgets.QVBoxLayout() layout.addWidget(canvas) layout.addWidget(mpl_toolbar) self.setLayout(layout) def _is_shown(self): return IS_MAC or self.IsShown()
{ "content_hash": "fcfc0e7763b6d7da7479e80228b1915b", "timestamp": "", "source": "github", "line_count": 231, "max_line_length": 88, "avg_line_length": 35.913419913419915, "alnum_prop": 0.6030617164898746, "repo_name": "SasView/sasview", "id": "b52cdcf8f710fda7b9c3d242fd536ab3cc3ed0ea", "size": "8296", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "src/sas/qtgui/Utilities/PlotView.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "AGS Script", "bytes": "60240" }, { "name": "Batchfile", "bytes": "1616" }, { "name": "C", "bytes": "11379" }, { "name": "C++", "bytes": "217553" }, { "name": "CSS", "bytes": "340" }, { "name": "Gherkin", "bytes": "565" }, { "name": "HTML", "bytes": "9252" }, { "name": "Inno Setup", "bytes": "6892" }, { "name": "JavaScript", "bytes": "27700" }, { "name": "Jupyter Notebook", "bytes": "28926" }, { "name": "Makefile", "bytes": "28052" }, { "name": "Python", "bytes": "2959880" }, { "name": "Shell", "bytes": "2063" } ], "symlink_target": "" }
__version__ = "1.0.0" """ Python script that converts WEBVTT subtitle files to the SRT format. The script is written in Python 3.5 See: https://github.com/sverrirs/ruvsarpur Author: Sverrir Sigmundarson info@sverrirs.com https://www.sverrirs.com Example WEBVTT file contents: ---------------------------------------- 1-0 00:01:07.000 --> 00:01:12.040 line:10 align:middle Hey buddy, this is the first subtitle entry that will be displayed 2-0 00:01:12.160 --> 00:01:15.360 line:10 align:middle Yeah and this is the second line <i>living the dream!</i> The SRT file that will be generated by running this script: ---------------------------------------- 1 00:01:07,000 --> 00:01:12,040 Hey buddy, this is the first subtitle entry that will be displayed 2 00:01:12,160 --> 00:01:15,360 Yeah and this is the second line <i>living the dream!</i> How to convert webvtt and vtt subtitle files to SRT and merge them with the source video file using the GPAC Mp4Box utility: ============================================================================== 1. First download the subtitles file (usually available in the source of the website that contains the web player. Search for ".webvtt" or ".vtt" to locate) 2. Convert to .srt using this script python webvtttosrt.py -i subtitles.vtt 3. Add the srt file to the mp4 video stream (assuming install location for GPAC) "C:\Program Files\GPAC\mp4box.exe" -add "video.mp4" -add "subtitles.srt":lang=is:name="Icelandic" "merged-video.mp4" if the subtitle font is too small you can make it larger by supplying the ':size=XX' parameter like "C:\Program Files\GPAC\mp4box.exe" -add "video.mp4" -add "subtitles.srt":size=32:lang=is:name="Icelandic" "merged-video.mp4" """ import sys, os.path, re from colorama import init, deinit # For colorized output to console windows (platform and shell independent) from termcolor import colored # For shorthand color printing to the console, https://pypi.python.org/pypi/termcolor from pathlib import Path # to check for file existence in the file system import argparse # Command-line argument parser import ntpath # Used to extract file name from path for all platforms http://stackoverflow.com/a/8384788 # Lambdas as shorthands for printing various types of data # See https://pypi.python.org/pypi/termcolor for more info color_err = lambda x: colored(x, 'red') def parseArguments(): parser = argparse.ArgumentParser() parser.add_argument("-o", "--output", help="The path to the folder where the converted file should be stored", type=str) parser.add_argument("-i", "--input", help="Full path to the input file (WEBVTT format)", type=str) return parser.parse_args() def readInputSubtitlesFile(webvtt_file): try: lines = [] with open(webvtt_file, 'r+', encoding='utf-8') as infile: for line in infile: lines.append(line.rstrip('\n')) return lines except FileNotFoundError: print(color_err("Error: '{0}' not found.".format(webvtt_file))) return None def saveOutputSubtitlesFile(srtdata,out_file_name): # First make sure that the output path and directories exist before writing out_dir_path = os.path.dirname(out_file_name) if( out_dir_path ): os.makedirs(os.path.dirname(out_file_name), exist_ok=True) with open(out_file_name, 'w+', encoding='utf-8') as out_file: for srt in srtdata: for line in srt: out_file.write("%s\n" % line) out_file.write('\n') # Two Empty separator lines ################################################################ # The main entry point for the script ################################################################ def runMain(): try: init() # 
Initialize the colorama library reg_timecode = re.compile(r"([0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{3} \-\-\> [0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{3})", re.DOTALL) # Construct the argument parser for the commandline args = parseArguments() # If no output then create it if( args.output is None ): outfile = os.path.splitext(args.input)[0]+'.srt' else: outfile = args.output vttlines = readInputSubtitlesFile(args.input) if( vttlines is None ): return # Exit immediately if( len(vttlines) <= 0 ): print("No data found in input file. Exiting as there is nothing to do.") return srtlines = [] # Contains all of the srt converted subtitle entries srtline = None # Contains the current srt subitle entry for vtt_line in vttlines: if( vtt_line == "WEBVTT"): continue # If empty input line and we have something in the srtline variable then append it to srt and continue if( vtt_line == ''): if( not srtline is None ): srtlines.append(srtline) srtline = None continue # Skip the line else: if( srtline is None ): srtline = [] srtline.append(len(srtlines)+1) # the number of subtitle entries added, add one as we want to start at 1 instead of 0 elif( len(srtline) == 1 ): # Dealing with timing info (the regex trims anything trailing the timecode) timecode = reg_timecode.match(vtt_line).group(1) srtline.append(timecode.replace('.',',')) # SRT does not handle . as separators for msec but uses , instead elif( len(srtline) > 1 ): # Dealing with the actual subtitle contents, just add all of them unchanged srtline.append(vtt_line) # End for # Remember to put the final srt into the mix if( not srtline is None ): srtlines.append(srtline) # Write the file saveOutputSubtitlesFile(srtlines, outfile) print("Success, file converted to SRT format") finally: deinit() #Deinitialize the colorama library # If the script file is called by itself then execute the main function if __name__ == '__main__': runMain()
{ "content_hash": "1b54645f9c780d8316589a9ebb124595", "timestamp": "", "source": "github", "line_count": 161, "max_line_length": 156, "avg_line_length": 38.93788819875776, "alnum_prop": 0.6103046737916733, "repo_name": "sverrirs/ruvsarpur", "id": "92bb04e88a79c006ffe6f058141b17a921a3b743", "size": "6306", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/webvtttosrt.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "55533" } ], "symlink_target": "" }
import statvfs
import os
import ganglia

descriptors = list()

def Find_Metric (name):
    '''Find the metric definition data given the metric name.
    The metric name should always be unique.'''
    for d in descriptors:
        if d['name'] == name:
            return d
    pass

def Remote_Mount(device, type):
    '''Determine if the device specified is a local or remote device.'''
    return ((device.rfind(':') != -1) or
        ((type == "smbfs") and device.startswith('//')) or
        type.startswith('nfs') or (type == 'autofs') or
        (type == 'gfs') or (type == 'none'))

def DiskTotal_Handler(name):
    '''Calculate the total disk space for the device that is
    associated with the metric name.'''
    d = Find_Metric(name)
    if not d:
        return 0

    st = os.statvfs(d['mount'])
    size = st[statvfs.F_BLOCKS]
    blocksize = st[statvfs.F_BSIZE]
    vv = (size * blocksize) / 1e9
    return vv

def DiskUsed_Handler(name):
    '''Calculate the used disk space for the device that is
    associated with the metric name.'''
    d = Find_Metric(name)
    if not d:
        return float(0)

    st = os.statvfs(d['mount'])
    free = st[statvfs.F_BAVAIL]
    size = st[statvfs.F_BLOCKS]
    if size:
        return ((size - free) / float(size)) * 100
    else:
        return float(0)

def Init_Metric (line, name, tmax, type, units, slope, fmt, desc, handler):
    '''Create a metric definition dictionary object for a device.'''
    metric_name = line[0] + '-' + name
    d = {'name': metric_name.replace('/', '-').lstrip('-'),
        'call_back': handler,
        'time_max': tmax,
        'value_type': type,
        'units': units,
        'slope': slope,
        'format': fmt,
        'description': desc,
        'groups': 'disk',
        'mount': line[1]}
    return d

def metric_init(params):
    '''Discover all of the local disk devices on the system and create
    a metric definition dictionary object for each.'''
    global descriptors

    f = open('/proc/mounts', 'r')
    for l in f:
        line = l.split()
        # Skip read-only mounts, remote mounts and anything not backed by a device node
        if line[3].startswith('ro'): continue
        elif Remote_Mount(line[0], line[2]): continue
        elif (not line[0].startswith('/dev/')) and (not line[0].startswith('/dev2/')): continue
        if ganglia.get_debug_msg_level() > 1:
            print 'Discovered device %s' % line[1]
        descriptors.append(Init_Metric(line, 'disk_total', int(1200), 'double',
            'GB', 'both', '%.3f', 'Available disk space', DiskTotal_Handler))
        descriptors.append(Init_Metric(line, 'disk_used', int(180), 'float',
            '%', 'both', '%.1f', 'Percent used disk space', DiskUsed_Handler))
    f.close()
    return descriptors

def metric_cleanup():
    '''Clean up the metric module.'''
    pass

#This code is for debugging and unit testing
if __name__ == '__main__':
    metric_init(None)
    for d in descriptors:
        v = d['call_back'](d['name'])
        print 'value for %s is %f' % (d['name'], v)
{ "content_hash": "847affc46f33d61d66f94b37e83769e0", "timestamp": "", "source": "github", "line_count": 104, "max_line_length": 96, "avg_line_length": 29.53846153846154, "alnum_prop": 0.5699869791666666, "repo_name": "sdgdsffdsfff/monitor-core", "id": "8dbe92ff0237e145240afb4c023684c3f28f5009", "size": "4856", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "gmond/python_modules/disk/multidisk.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "1121353" }, { "name": "CSS", "bytes": "909" }, { "name": "Groff", "bytes": "11267" }, { "name": "HTML", "bytes": "44529" }, { "name": "JavaScript", "bytes": "389491" }, { "name": "Logos", "bytes": "3485" }, { "name": "PHP", "bytes": "3064" }, { "name": "Perl", "bytes": "78767" }, { "name": "Protocol Buffer", "bytes": "979" }, { "name": "Python", "bytes": "371506" }, { "name": "Shell", "bytes": "40284" }, { "name": "SourcePawn", "bytes": "365" }, { "name": "Visual Basic", "bytes": "279" } ], "symlink_target": "" }
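For reference, the local/remote decision in Remote_Mount above looks only at the device string and the filesystem type. A small sketch, assuming the module above is in scope; the mount entries are made up:

# (device, fstype) pairs as they would appear in /proc/mounts (sample values only)
samples = [
    ('/dev/sda1', 'ext4'),               # local block device, gets metrics
    ('fileserver:/export/home', 'nfs'),  # ':' in device and nfs type, treated as remote
    ('//winbox/share', 'smbfs'),         # smbfs share, treated as remote
]

for device, fstype in samples:
    print '%s -> %s' % (device, 'remote' if Remote_Mount(device, fstype) else 'local')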
import urllib import urllib2 import re import getopt import sys import xml.dom.minidom trips_url = 'http://trips.ihmc.us/parser/cgi/drum' def send_query(text, query_args=None): if query_args is None: qa = {} qa['input'] = text data = urllib.urlencode(qa) req = urllib2.Request(trips_url, data) res = urllib2.urlopen(req, timeout=3600) html = res.read() return html def get_xml(html): ekb = re.findall(r'<ekb.*?>(.*?)</ekb>', html, re.MULTILINE | re.DOTALL) if ekb: events_terms = ekb[0] else: events_terms = '' header = '<?xml version="1.0" encoding="utf-8" standalone="yes"?><extractions>' footer = '</extractions>' return header + events_terms + footer def save_xml(xml_str, file_name): try: fh = open(file_name, 'wt') xmld = xml.dom.minidom.parseString(xml_str) xml_str_pretty = xmld.toprettyxml() except IOError: print 'Could not open %s for writing.' % file_name return fh.write(xml_str_pretty.encode('utf8')) fh.close() if __name__ == '__main__': filemode = False text = 'Active BRAF phosphorylates MEK1 at Ser222.' outfile_name = 'braf_test.xml' opts, extraparams = getopt.getopt(sys.argv[1:], 's:f:o:h', ['string=', 'file=', 'output=', 'help']) for o, p in opts: if o in ['-h', '--help']: print 'String mode: python trips_client.py --string "RAS binds GTP" --output text.xml' print 'File mode: python trips_client.py --file test.txt --output text.xml' sys.exit() elif o in ['-s', '--string']: text = p elif o in ['-f', '--file']: filemode = True infile_name = p elif o in ['-o', '--output']: outfile_name = p if filemode: try: fh = open(infile_name, 'rt') except IOError: print 'Could not open %s.' % infile_name exit() text = fh.read() fh.close() print 'Parsing contents of %s...' % infile_name else: print 'Parsing string: %s' % text html = send_query(text) xml = get_xml(html) save_xml(xml, outfile_name)
{ "content_hash": "6648f21708a71273cbe156f0b5692e02", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 98, "avg_line_length": 28.67948717948718, "alnum_prop": 0.5502905677246313, "repo_name": "decarlin/indra", "id": "768e704e30b137b0523a1bec3724d25a33d6444c", "size": "2237", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "indra/trips/trips_client.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "197428" }, { "name": "Ruby", "bytes": "433" }, { "name": "Shell", "bytes": "430" } ], "symlink_target": "" }
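Since get_xml above only does string extraction, it can be exercised without hitting the TRIPS endpoint. A minimal sketch with a fabricated response body; the module is assumed to be importable and the output file name is arbitrary:

# Fabricated server response wrapping an <ekb> block, only to exercise get_xml/save_xml
fake_html = ('<html><body><ekb id="demo">'
             '<EVENT id="V1"><type>ONT::PHOSPHORYLATION</type></EVENT>'
             '</ekb></body></html>')

wrapped = get_xml(fake_html)
# wrapped is now a standalone XML document:
# <?xml ...?><extractions><EVENT id="V1">...</EVENT></extractions>
save_xml(wrapped, 'demo_extractions.xml')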
from .fight_logic import FightLogic
{ "content_hash": "5eda04c337574d22476acd5d3372c034", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 35, "avg_line_length": 36, "alnum_prop": 0.8333333333333334, "repo_name": "HueyPark/Unreal-Knights", "id": "ddac396322126040bcf873f3430e10fa11af6b4a", "size": "36", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Server/Code/logic/fight/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "147" }, { "name": "C#", "bytes": "1346" }, { "name": "C++", "bytes": "362405" }, { "name": "Python", "bytes": "8218" } ], "symlink_target": "" }
from pony.orm import * from base_entities import db class Teacher(db.User): degree = Required(str) courses = Set("Course") class Student(db.User): group = Required("Group") courses = Set("Course") gpa = Required(float) class Group(db.Entity): number = PrimaryKey(int) students = Set(Student) class Course(db.Entity): name = Required(str) students = Set(Student) teachers = Set(Teacher)
{ "content_hash": "fb83013e989f5db27dccafe15e2856aa", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 29, "avg_line_length": 21.5, "alnum_prop": 0.6627906976744186, "repo_name": "kozlovsky/ponymodules", "id": "f7243b66f9c1cefc6724cfff38cd884d864c2db8", "size": "531", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "university_entities.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "5033" } ], "symlink_target": "" }
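The relationships above read naturally in Pony's query syntax. A hedged sketch of what a query session could look like, assuming base_entities has bound db to a database and generate_mapping() has been called elsewhere:

from pony.orm import db_session, select, avg

with db_session:
    # Average GPA per group number; Pony groups by the non-aggregated column
    for number, mean_gpa in select((s.group.number, avg(s.gpa)) for s in Student):
        print('group %d: %.2f' % (number, mean_gpa))

    # Courses that currently have no students enrolled
    empty = select(c.name for c in Course if not c.students)
    print(empty[:])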
"""Provide analysis of input files by chromosomal regions. Handle splitting and analysis of files from chromosomal subsets separated by no-read regions. """ import collections import os import toolz as tz from bcbio.distributed.split import parallel_split_combine def get_max_counts(samples): """Retrieve number of regions that can be processed in parallel from current samples. """ counts = [] for data in (x[0] for x in samples): count = tz.get_in(["config", "algorithm", "callable_count"], data, 1) vcs = tz.get_in(["config", "algorithm", "variantcaller"], data, []) if isinstance(vcs, basestring): vcs = [vcs] if vcs: count *= len(vcs) counts.append(count) return max(counts) # ## BAM preparation def to_safestr(region): if region[0] in ["nochrom", "noanalysis"]: return region[0] else: return "_".join([str(x) for x in region]) # ## Split and delayed BAM combine def _split_by_regions(dirname, out_ext, in_key): """Split a BAM file data analysis into chromosomal regions. """ def _do_work(data): # XXX Need to move retrieval of regions into preparation to avoid # need for files when running in non-shared filesystems with open(data["config"]["algorithm"]["callable_regions"]) as in_handle: regions = [(xs[0], int(xs[1]), int(xs[2])) for xs in (l.rstrip().split("\t") for l in in_handle) if (len(xs) >= 3 and not xs[0].startswith(("track", "browser",)))] bam_file = data[in_key] if bam_file is None: return None, [] part_info = [] base_out = os.path.splitext(os.path.basename(bam_file))[0] nowork = [["nochrom"], ["noanalysis", data["config"]["algorithm"]["non_callable_regions"]]] for region in regions + nowork: out_dir = os.path.join(data["dirs"]["work"], dirname, data["name"][-1], region[0]) region_outfile = os.path.join(out_dir, "%s-%s%s" % (base_out, to_safestr(region), out_ext)) part_info.append((region, region_outfile)) out_file = os.path.join(data["dirs"]["work"], dirname, data["name"][-1], "%s%s" % (base_out, out_ext)) return out_file, part_info return _do_work def _add_combine_info(output, combine_map, file_key): """Do not actually combine, but add details for later combining work. Each sample will contain information on the out file and additional files to merge, enabling other splits and recombines without losing information. """ files_per_output = collections.defaultdict(list) for part_file, out_file in combine_map.items(): files_per_output[out_file].append(part_file) out_by_file = collections.defaultdict(list) out = [] for data in output: # Do not pass along nochrom, noanalysis regions if data["region"][0] not in ["nochrom", "noanalysis"]: cur_file = data[file_key] # If we didn't process, no need to add combine information if cur_file in combine_map: out_file = combine_map[cur_file] if "combine" not in data: data["combine"] = {} data["combine"][file_key] = {"out": out_file, "extras": files_per_output.get(out_file, [])} out_by_file[out_file].append(data) elif cur_file: out_by_file[cur_file].append(data) else: out.append([data]) for samples in out_by_file.values(): regions = [x["region"] for x in samples] region_bams = [x["work_bam"] for x in samples] assert len(regions) == len(region_bams) if len(set(region_bams)) == 1: region_bams = [region_bams[0]] data = samples[0] data["region_bams"] = region_bams data["region"] = regions out.append([data]) return out def parallel_prep_region(samples, run_parallel): """Perform full pre-variant calling BAM prep work on regions. 
""" file_key = "work_bam" split_fn = _split_by_regions("bamprep", "-prep.bam", file_key) # identify samples that do not need preparation -- no recalibration or realignment extras = [] torun = [] for data in [x[0] for x in samples]: if data.get("work_bam"): data["align_bam"] = data["work_bam"] a = data["config"]["algorithm"] if (not a.get("recalibrate") and not a.get("realign") and not a.get("variantcaller", "gatk")): extras.append([data]) elif not data.get(file_key): extras.append([data]) else: torun.append([data]) return extras + parallel_split_combine(torun, split_fn, run_parallel, "piped_bamprep", _add_combine_info, file_key, ["config"]) def delayed_bamprep_merge(samples, run_parallel): """Perform a delayed merge on regional prepared BAM files. """ needs_merge = False for data in samples: if (data[0]["config"]["algorithm"].get("merge_bamprep", True) and "combine" in data[0]): needs_merge = True break if needs_merge: return run_parallel("delayed_bam_merge", samples) else: return samples # ## Utilities def clean_sample_data(samples): """Clean unnecessary information from sample data, reducing size for message passing. """ out = [] for data in (x[0] for x in samples): if "dirs" in data: data["dirs"] = {"work": data["dirs"]["work"], "galaxy": data["dirs"]["galaxy"], "fastq": data["dirs"].get("fastq")} data["config"] = {"algorithm": data["config"]["algorithm"], "resources": data["config"]["resources"]} for remove_attr in ["config_file", "regions", "algorithm"]: data.pop(remove_attr, None) out.append([data]) return out
{ "content_hash": "cb61c1839562dc317fa6ffb9cbe1f2e3", "timestamp": "", "source": "github", "line_count": 152, "max_line_length": 116, "avg_line_length": 40.53947368421053, "alnum_prop": 0.5676728334956183, "repo_name": "verdurin/bcbio-nextgen", "id": "cd018c7d8eb35a824d0bd14ad2ef99414b36624e", "size": "6162", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "bcbio/pipeline/region.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1417026" }, { "name": "Ruby", "bytes": "624" }, { "name": "Shell", "bytes": "10430" } ], "symlink_target": "" }
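As a concrete illustration of how get_max_counts above combines the callable-region count with the number of variant callers, here is a sketch with made-up sample dictionaries; only the keys the function actually reads are filled in:

# Minimal stand-ins for the pipeline's [[data], [data], ...] sample structure
samples = [
    [{"config": {"algorithm": {"callable_count": 4,
                               "variantcaller": ["gatk", "freebayes"]}}}],
    [{"config": {"algorithm": {"callable_count": 2,
                               "variantcaller": "gatk"}}}],
]

# First sample: 4 regions * 2 callers = 8; second: 2 * 1 = 2; the maximum is 8
assert get_max_counts(samples) == 8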
"""Support for MQTT room presence detection.""" from __future__ import annotations from datetime import timedelta import json import logging import voluptuous as vol from homeassistant.components import mqtt from homeassistant.components.mqtt import CONF_STATE_TOPIC from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity from homeassistant.const import ( ATTR_DEVICE_ID, ATTR_ID, CONF_DEVICE_ID, CONF_NAME, CONF_TIMEOUT, STATE_NOT_HOME, ) from homeassistant.core import HomeAssistant, callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from homeassistant.util import dt, slugify _LOGGER = logging.getLogger(__name__) ATTR_DISTANCE = "distance" ATTR_ROOM = "room" CONF_AWAY_TIMEOUT = "away_timeout" DEFAULT_AWAY_TIMEOUT = 0 DEFAULT_NAME = "Room Sensor" DEFAULT_TIMEOUT = 5 DEFAULT_TOPIC = "room_presence" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_DEVICE_ID): cv.string, vol.Required(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Optional(CONF_AWAY_TIMEOUT, default=DEFAULT_AWAY_TIMEOUT): cv.positive_int, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ).extend(mqtt.config.MQTT_RO_SCHEMA.schema) MQTT_PAYLOAD = vol.Schema( vol.All( json.loads, vol.Schema( { vol.Required(ATTR_ID): cv.string, vol.Required(ATTR_DISTANCE): vol.Coerce(float), }, extra=vol.ALLOW_EXTRA, ), ) ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up MQTT room Sensor.""" async_add_entities( [ MQTTRoomSensor( config.get(CONF_NAME), config.get(CONF_STATE_TOPIC), config.get(CONF_DEVICE_ID), config.get(CONF_TIMEOUT), config.get(CONF_AWAY_TIMEOUT), ) ] ) class MQTTRoomSensor(SensorEntity): """Representation of a room sensor that is updated via MQTT.""" def __init__(self, name, state_topic, device_id, timeout, consider_home): """Initialize the sensor.""" self._state = STATE_NOT_HOME self._name = name self._state_topic = f"{state_topic}/+" self._device_id = slugify(device_id).upper() self._timeout = timeout self._consider_home = ( timedelta(seconds=consider_home) if consider_home else None ) self._distance = None self._updated = None async def async_added_to_hass(self) -> None: """Subscribe to MQTT events.""" @callback def update_state(device_id, room, distance): """Update the sensor state.""" self._state = room self._distance = distance self._updated = dt.utcnow() self.async_write_ha_state() @callback def message_received(msg): """Handle new MQTT messages.""" try: data = MQTT_PAYLOAD(msg.payload) except vol.MultipleInvalid as error: _LOGGER.debug("Skipping update because of malformatted data: %s", error) return device = _parse_update_data(msg.topic, data) if device.get(CONF_DEVICE_ID) == self._device_id: if self._distance is None or self._updated is None: update_state(**device) else: # update if: # device is in the same room OR # device is closer to another room OR # last update from other room was too long ago timediff = dt.utcnow() - self._updated if ( device.get(ATTR_ROOM) == self._state or device.get(ATTR_DISTANCE) < self._distance or timediff.total_seconds() >= self._timeout ): update_state(**device) await mqtt.async_subscribe(self.hass, self._state_topic, message_received, 1) @property def name(self): """Return the name of the sensor.""" return self._name @property def 
extra_state_attributes(self): """Return the state attributes.""" return {ATTR_DISTANCE: self._distance} @property def native_value(self): """Return the current room of the entity.""" return self._state def update(self) -> None: """Update the state for absent devices.""" if ( self._updated and self._consider_home and dt.utcnow() - self._updated > self._consider_home ): self._state = STATE_NOT_HOME def _parse_update_data(topic, data): """Parse the room presence update.""" parts = topic.split("/") room = parts[-1] device_id = slugify(data.get(ATTR_ID)).upper() distance = data.get("distance") parsed_data = {ATTR_DEVICE_ID: device_id, ATTR_ROOM: room, ATTR_DISTANCE: distance} return parsed_data
{ "content_hash": "356901b7c55767fcd14364f3e9ad995d", "timestamp": "", "source": "github", "line_count": 170, "max_line_length": 88, "avg_line_length": 31.48235294117647, "alnum_prop": 0.593796711509716, "repo_name": "w1ll1am23/home-assistant", "id": "a69270480510072d3b04cea1bf05effd97358f82", "size": "5352", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "homeassistant/components/mqtt_room/sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "52277012" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
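To make the topic/payload handling above concrete, this is what _parse_update_data produces for a made-up message on the default topic scheme (room_presence/<room>):

# Hypothetical room presence message: the room comes from the topic,
# the device id and distance come from the JSON payload
topic = "room_presence/living_room"
payload = {"id": "billy", "distance": 1.7}

print(_parse_update_data(topic, payload))
# -> {'device_id': 'BILLY', 'room': 'living_room', 'distance': 1.7}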
from IPython.core.magic import cell_magic, Magics, magics_class from IPython import get_ipython from IPython.display import HTML, display_javascript from .proc import Pybind11Processor @magics_class class Pybind11Magics(Magics): @cell_magic def pybind11(self, line, cell): """IPython magic inside Jupyter environment. For the entire line provided in the magic, the first element will be taken as the module name, while the additional elements will be taken as optional building flags. """ line_list = line.split(';') module_name = line_list[0] cflags = None ldflags = None if len(line_list) > 1: cflags = ' '.join(line_list[1].split()) if len(line_list) > 2: ldflags = ' '.join(line_list[2].split()) flags = {'cflags': cflags, 'ldflags': ldflags} try: _ = Pybind11Processor(module_name, flags, cell) except RuntimeError as r: return HTML("<pre>Compile FAILED\n" + r.args[0] + "</pre>") js = """ try { require(['notebook/js/codecell'], function(codecell) { codecell.CodeCell.options_default.highlight_modes[ 'magic_text/x-csrc'] = {'reg':[/^%%pybind11/]}; Jupyter.notebook.events.one('kernel_ready.Kernel', function(){ Jupyter.notebook.get_cells().map(function(cell){ if (cell.cell_type == 'code'){ cell.auto_highlight(); } }) ; }); }); } catch (e) {}; """ instance = get_ipython() if instance: get_ipython().register_magics(Pybind11Magics) display_javascript(js, raw=True)
{ "content_hash": "9ef2eddc7ea791847d5c009430aa31e9", "timestamp": "", "source": "github", "line_count": 55, "max_line_length": 77, "avg_line_length": 29.2, "alnum_prop": 0.6164383561643836, "repo_name": "Xilinx/PYNQ", "id": "7f02d9ca5ad07c37efe12d0647eff2e19eb6ebda", "size": "1687", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "pynq/lib/pybind11/magic.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "51" }, { "name": "BitBake", "bytes": "1840" }, { "name": "C", "bytes": "1062607" }, { "name": "C++", "bytes": "76769" }, { "name": "CMake", "bytes": "578" }, { "name": "JavaScript", "bytes": "239958" }, { "name": "Jupyter Notebook", "bytes": "17148467" }, { "name": "Makefile", "bytes": "165279" }, { "name": "Python", "bytes": "1388540" }, { "name": "Shell", "bytes": "67192" }, { "name": "SystemVerilog", "bytes": "53374" }, { "name": "Tcl", "bytes": "1383109" }, { "name": "VHDL", "bytes": "738710" }, { "name": "Verilog", "bytes": "284588" } ], "symlink_target": "" }
import random import socket import unittest import os import errno from manticore.core.smtlib import * from manticore.platforms import linux, linux_syscall_stubs from manticore.platforms.platform import SyscallNotImplemented def get_random_filename(): return f"/tmp/mcore_test_{int(random.getrandbits(32))}" class LinuxTest(unittest.TestCase): _multiprocess_can_split_ = True BIN_PATH = os.path.join(os.path.dirname(__file__), "binaries", "basic_linux_amd64") def setUp(self): self.linux = linux.SLinux(self.BIN_PATH) def tearDown(self): for f in self.linux.files: if isinstance(f, linux.File): f.close() def test_time(self): self.linux.current.memory.mmap(0x1000, 0x1000, "rw ") time_0 = self.linux.sys_time(0) self.linux.sys_clock_gettime(1, 0x1100) self.linux.sys_gettimeofday(0x1200, 0) time_2_0 = self.linux.current.read_int(0x1200) time_monotonic_0 = self.linux.current.read_int(0x1100) time.sleep(1.1) time_final = self.linux.sys_time(0) self.linux.sys_clock_gettime(1, 0x1100) self.linux.sys_gettimeofday(0x1200, 0) time_2_final = self.linux.current.read_int(0x1200) time_monotonic_final = self.linux.current.read_int(0x1100) self.assertGreater( time_monotonic_final, time_monotonic_0, "Monotonic clock time did not increase!" ) self.assertGreater(time_final, time_0, "Time did not increase!") self.assertGreater(time_2_final, time_2_0, "Time did not increase!") def test_directories(self): tmpdir = get_random_filename() self.linux.current.memory.mmap(0x1000, 0x1000, "rw ") self.linux.current.write_string(0x1100, tmpdir) self.assertFalse(os.path.exists(tmpdir)) self.linux.sys_mkdir(0x1100, mode=0o777) self.assertTrue(os.path.exists(tmpdir)) self.linux.sys_rmdir(0x1100) self.assertFalse(os.path.exists(tmpdir)) def test_pipe(self): self.linux.current.memory.mmap(0x1000, 0x1000, "rw ") self.linux.sys_pipe(0x1100) fd1 = self.linux.current.read_int(0x1100, 8 * 4) fd2 = self.linux.current.read_int(0x1100 + 4, 8 * 4) buf = b"0123456789ABCDEF" self.linux.current.write_bytes(0x1200, buf) self.linux.sys_write(fd1, 0x1200, len(buf)) self.linux.sys_read(fd2, 0x1300, len(buf)) self.assertEqual( buf, b"".join(self.linux.current.read_bytes(0x1300, len(buf))), "Pipe Read/Write failed" ) def test_ftruncate(self): fname = get_random_filename() self.linux.current.memory.mmap(0x1000, 0x1000, "rw ") self.linux.current.write_string(0x1100, fname) fd = self.linux.sys_open(0x1100, os.O_RDWR, 0o777) buf = b"0123456789ABCDEF" self.linux.current.write_bytes(0x1200, buf) self.linux.sys_write(fd, 0x1200, len(buf)) self.linux.sys_close(fd) fd = self.linux.sys_open(0x1100, os.O_RDWR, 0o777) self.linux.sys_ftruncate(fd, len(buf) // 2) self.linux.sys_read(fd, 0x1300, len(buf)) self.assertEqual( buf[:8] + b"\x00" * 8, b"".join(self.linux.current.read_bytes(0x1300, len(buf))) ) def test_link(self): fname = get_random_filename() newname = get_random_filename() self.linux.current.memory.mmap(0x1000, 0x1000, "rw ") self.linux.current.write_string(0x1100, fname) self.linux.current.write_string(0x1180, newname) fd = self.linux.sys_open(0x1100, os.O_RDWR, 0o777) buf = b"0123456789ABCDEF" self.linux.current.write_bytes(0x1200, buf) self.linux.sys_write(fd, 0x1200, len(buf)) self.linux.sys_close(fd) self.linux.sys_link(0x1100, 0x1180) self.assertTrue(os.path.exists(newname)) fd = self.linux.sys_open(0x1180, os.O_RDWR, 0o777) self.linux.sys_read(fd, 0x1300, len(buf)) self.assertEqual(buf, b"".join(self.linux.current.read_bytes(0x1300, len(buf)))) self.linux.sys_close(fd) self.linux.sys_unlink(0x1180) self.assertFalse(os.path.exists(newname)) def 
test_chmod(self): fname = get_random_filename() self.linux.current.memory.mmap(0x1000, 0x1000, "rw ") self.linux.current.write_string(0x1100, fname) print("Creating", fname) fd = self.linux.sys_open(0x1100, os.O_RDWR, 0o777) buf = b"0123456789ABCDEF" self.linux.current.write_bytes(0x1200, buf) self.linux.sys_close(fd) self.linux.sys_chmod(0x1100, 0o444) self.assertEqual(-errno.EACCES, self.linux.sys_open(0x1100, os.O_WRONLY, 0o777)) self.assertEqual(-errno.EPERM, self.linux.sys_chown(0x1100, 0, 0)) def test_recvfrom(self): self.linux.current.memory.mmap(0x1000, 0x1000, "rw ") sock_fd = self.linux.sys_socket(socket.AF_INET, socket.SOCK_STREAM, 0) self.assertEqual(sock_fd, 3) # Unimplemented # self.linux.current.write_int(0x1000, 1, size=8 * 4) # self.linux.sys_setsockopt(sock_fd, socket.SOL_SOCKET, socket.SO_REUSEPORT, 0x1000, 4) self.linux.sys_bind(sock_fd, None, None) self.linux.sys_listen(sock_fd, None) conn_fd = self.linux.sys_accept(sock_fd, None, 0) self.assertEqual(conn_fd, 4) sock_obj = self.linux.files[conn_fd] init_len = len(sock_obj.buffer) BYTES = 5 wrote = self.linux.sys_recvfrom(conn_fd, 0x1100, BYTES, 0x0, 0x0, 0x0) self.assertEqual(wrote, BYTES) wrote = self.linux.sys_recvfrom(conn_fd, 0x0, 100, 0x0, 0x0, 0x0) self.assertEqual(wrote, -errno.EFAULT) remain_len = init_len - BYTES self.assertEqual(remain_len, len(sock_obj.buffer)) wrote = self.linux.sys_recvfrom(conn_fd, 0x1100, remain_len + 10, 0x0, 0x0, 0x0) self.assertEqual(wrote, remain_len) wrote = self.linux.sys_recvfrom(conn_fd, 0x1100, 10, 0x0, 0x0, 0x0) self.assertEqual(wrote, 0) self.linux.sys_close(conn_fd) wrote = self.linux.sys_recvfrom(conn_fd, 0x1100, 10, 0x0, 0x0, 0x0) self.assertEqual(wrote, -errno.EBADF) def test_multiple_sockets(self): sock_fd = self.linux.sys_socket(socket.AF_INET, socket.SOCK_STREAM, 0) self.assertEqual(sock_fd, 3) self.linux.sys_bind(sock_fd, None, None) self.linux.sys_listen(sock_fd, None) conn_fd = self.linux.sys_accept(sock_fd, None, 0) self.assertEqual(conn_fd, 4) self.linux.sys_close(conn_fd) conn_fd = -1 conn_fd = self.linux.sys_accept(sock_fd, None, 0) self.assertEqual(conn_fd, 4) def test_unimplemented(self): stubs = linux_syscall_stubs.SyscallStubs(default_to_fail=False) if hasattr(stubs, "sys_bpf"): self.assertRaises(SyscallNotImplemented, stubs.sys_bpf, 0, 0, 0) self.linux.stubs.default_to_fail = False self.linux.current.RAX = 321 # SYS_BPF self.assertRaises(SyscallNotImplemented, self.linux.syscall) self.linux.stubs.default_to_fail = True self.linux.current.RAX = 321 self.linux.syscall() self.assertEqual(0xFFFFFFFFFFFFFFFF, self.linux.current.RAX) else: import warnings warnings.warn( "Couldn't find sys_bpf in the stubs file. " + "If you've implemented it, you need to fix test_syscalls:LinuxTest.test_unimplemented" )
{ "content_hash": "81bc78903f9b5dfcdbdcfd4002b9ada4", "timestamp": "", "source": "github", "line_count": 213, "max_line_length": 104, "avg_line_length": 35.708920187793424, "alnum_prop": 0.6273994215093347, "repo_name": "montyly/manticore", "id": "9d3f3e97ba2480398252642cc2613fc6ea387ff2", "size": "7606", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/native/test_syscalls.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "13783178" }, { "name": "Shell", "bytes": "3490" } ], "symlink_target": "" }
students = []


def get_students_titlecase():
    students_titlecase = []
    for student in students:
        # each entry in students is a dict, so title-case its "name" value
        # and append it instead of overwriting the list on every iteration
        students_titlecase.append(student["name"].title())
    return students_titlecase


def print_students_titlecase():
    # students_titlecase = []
    # for student in students:
    #     students_titlecase.append(student["name"].title())

    # Instead of the above repeated function body we use this instead:
    students_titlecase = get_students_titlecase()

    print(students_titlecase)


def add_student(name, student_id=9696):
    student = {"name": name, "student_id": student_id}
    students.append(student)


def var_args(name, *args):   # for keyword arguments use (name, **kwargs)
    print(name)
    print(args)

    # For keyword arguments...
    # print(kwargs["description"], kwargs["feedback"])
{ "content_hash": "eca2f89c66d7defd8d1e37ac02cf7b1f", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 69, "avg_line_length": 27.75862068965517, "alnum_prop": 0.653416149068323, "repo_name": "derrick12/pythonapp", "id": "e5bd61b5b84afc94b92265d14889bc36c5ca65d2", "size": "805", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/functions.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "990" } ], "symlink_target": "" }
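A short illustration of the helpers above in use; the values are arbitrary and the expected output is shown in the comments:

add_student("james", 1001)
add_student("ada")                # student_id falls back to the default 9696
print(get_students_titlecase())   # ['James', 'Ada']

var_args("mark", "loves python", None, "hello world", True)
# mark
# ('loves python', None, 'hello world', True)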
''' Google File Uploader -------------------- This program is open source. Licenced under Apache Licence v2.0: http://www.apache.org/licenses/LICENSE-2.0.txt @author: Juha Kervinen juha@apptomation.com props to: Ryan Tucker, 2009/04/28 for the captcha solution ''' import os.path try: import gdata.docs.client import gdata.docs.data import gdata.data import gdata.client except ImportError: print "You need to install Google Data API client for python. For more instructions, see http://joker.iki.fi/wp/googlefileuploader/" exit(1) try: import argparse except ImportError: print "Python v2.7 or greater required (for further instructions, see http://joker.iki.fi/wp/googlefileuploader/)." exit(1) # Modify the following parameters to suit your system # all of these are overridden from the command line # ---- email_default = 'my.account@gmail.com' password_default = 'my_p4ssw0rd' file_on_disk_default = '/home/myaccount/backup.zip' # ----- source = "GoogleFileUploader v.0.8" parser = argparse.ArgumentParser(description="Google File Uploader") parser.add_argument('-u', help='Username') parser.add_argument('-p', help='Password (remember to escape special characters like !?$" etc.') parser.add_argument('-fl', help='Local file with full path') parser.add_argument('-fg', help='File on Google') args = parser.parse_args(); if args.u is None: email = email_default else: email = args.u if args.p is None: password = password_default else: password = args.p if args.fl is None: file_on_disk = file_on_disk_default else: file_on_disk = args.fl if args.fg is None: file_on_google = os.path.basename(file_on_disk) else: file_on_google = args.fg client = gdata.docs.client.DocsClient(source=source) client.http_client.debug = False client.http_client.ssl = True login = False while login is False: try: login = True client.ClientLogin(email, password, source) except gdata.client.CaptchaChallenge as captcha: captcha_token = captcha.captcha_token captcha_url = captcha.captcha_url print "Need to complete captcha challenge at this URL: "+captcha_url captcha_response = raw_input("Please type the answer: ") try: client.ClientLogin(email,password,source,captcha_token=captcha_token,captcha_response=captcha_response) login = True except gdata.client.CaptchaChallenge: print "Captcha check failed." login = False except gdata.client.BadAuthentication: print "Wrong username or password." exit(1) except gdata.client.BadAuthentication: print "Wrong username or password." exit(1) file_resource = gdata.docs.data.Resource(type = 'document', title=file_on_google) filename = file_on_disk try: open(filename) except IOError: print "Cannot open "+filename+"." exit(1) media = gdata.data.MediaSource() media.set_file_handle(filename, 'application/octet-stream') feedUri = '%s?convert=false' % gdata.docs.client.RESOURCE_UPLOAD_URI file_resource = client.CreateResource(file_resource, create_uri = feedUri, media=media) exit(0)
{ "content_hash": "53d189f0c9949dd6f72e0594855cc5a2", "timestamp": "", "source": "github", "line_count": 95, "max_line_length": 136, "avg_line_length": 33.44210526315789, "alnum_prop": 0.6994019515265975, "repo_name": "jkervine/GoogleFileUploader", "id": "1b3782f1c8c042df8ec8701ca3f3ce4bdfd3025e", "size": "3195", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gfu.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "3195" }, { "name": "Shell", "bytes": "3436" } ], "symlink_target": "" }
from oslo_policy import policy from cinder.policies import base DELETE_POLICY = 'message:delete' GET_POLICY = 'message:get' GET_ALL_POLICY = 'message:get_all' deprecated_get_policy = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_get_all_policy = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_delete_policy = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) messages_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List messages.", operations=[ { 'method': 'GET', 'path': '/messages' } ], deprecated_rule=deprecated_get_all_policy, ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show message.", operations=[ { 'method': 'GET', 'path': '/messages/{message_id}' } ], deprecated_rule=deprecated_get_policy, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete message.", operations=[ { 'method': 'DELETE', 'path': '/messages/{message_id}' } ], deprecated_rule=deprecated_delete_policy, ), ] def list_rules(): return messages_policies
{ "content_hash": "141c97280c4a8257af6d449f19be7617", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 55, "avg_line_length": 24.87878787878788, "alnum_prop": 0.5889159561510353, "repo_name": "openstack/cinder", "id": "25a0277ecba201b350562e125bd4c3b899c6ca64", "size": "2291", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "cinder/policies/messages.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jinja", "bytes": "259" }, { "name": "Mako", "bytes": "976" }, { "name": "Python", "bytes": "25078349" }, { "name": "Shell", "bytes": "6456" }, { "name": "Smarty", "bytes": "67595" } ], "symlink_target": "" }
import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'UserProfile.show_login_help' db.add_column('panda_userprofile', 'show_login_help', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False) def backwards(self, orm): # Deleting field 'UserProfile.show_login_help' db.delete_column('panda_userprofile', 'show_login_help') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'panda.activitylog': { 'Meta': {'unique_together': "(('user', 'when'),)", 'object_name': 'ActivityLog'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_logs'", 'to': "orm['auth.User']"}), 'when': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 
'blank': 'True'}) }, 'panda.category': { 'Meta': {'object_name': 'Category'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}) }, 'panda.dataset': { 'Meta': {'ordering': "['-creation_date']", 'object_name': 'Dataset'}, 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['panda.Category']"}), 'column_schema': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': "orm['auth.User']"}), 'current_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['panda.TaskStatus']", 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initial_upload': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initial_upload_for'", 'null': 'True', 'to': "orm['panda.DataUpload']"}), 'last_modification': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'locked_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'row_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'sample_data': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}) }, 'panda.dataupload': { 'Meta': {'ordering': "['creation_date']", 'object_name': 'DataUpload'}, 'columns': ('panda.fields.JSONField', [], {'null': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'data_type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_uploads'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'dialect': ('panda.fields.JSONField', [], {'null': 'True'}), 'encoding': ('django.db.models.fields.CharField', [], {'default': "'utf-8'", 'max_length': '32'}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'guessed_types': ('panda.fields.JSONField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'imported': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'sample_data': ('panda.fields.JSONField', [], {'null': 'True'}), 'size': ('django.db.models.fields.IntegerField', [], {}) }, 'panda.export': { 'Meta': {'ordering': "['creation_date']", 'object_name': 'Export'}, 'creation_date': 
('django.db.models.fields.DateTimeField', [], {}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exports'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'size': ('django.db.models.fields.IntegerField', [], {}) }, 'panda.notification': { 'Meta': {'ordering': "['-sent_at']", 'object_name': 'Notification'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'read_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications'", 'to': "orm['auth.User']"}), 'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'Info'", 'max_length': '16'}), 'url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'}) }, 'panda.relatedupload': { 'Meta': {'ordering': "['creation_date']", 'object_name': 'RelatedUpload'}, 'creation_date': ('django.db.models.fields.DateTimeField', [], {}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_uploads'", 'to': "orm['panda.Dataset']"}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'size': ('django.db.models.fields.IntegerField', [], {}) }, 'panda.searchlog': { 'Meta': {'object_name': 'SearchLog'}, 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'searches'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'query': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'search_logs'", 'to': "orm['auth.User']"}), 'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'panda.searchsubscription': { 'Meta': {'object_name': 'SearchSubscription'}, 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'subscribed_searches'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_run': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'query': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'query_human': ('django.db.models.fields.TextField', [], {}), 'query_url': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscribed_searches'", 'to': "orm['auth.User']"}) }, 'panda.taskstatus': { 'Meta': {'object_name': 'TaskStatus'}, 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'null': 'True', 
'to': "orm['auth.User']"}), 'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '50'}), 'task_description': ('django.db.models.fields.TextField', [], {}), 'task_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'traceback': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}) }, 'panda.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}), 'activation_key_expiration': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'show_login_help': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) } } complete_apps = ['panda']
{ "content_hash": "7399c2f63ae35e8dbfefd2ad6c0c5598", "timestamp": "", "source": "github", "line_count": 175, "max_line_length": 202, "avg_line_length": 78.45142857142856, "alnum_prop": 0.5445407531502658, "repo_name": "NUKnightLab/panda", "id": "679b604872d70aa5536e985e557254c284ae3882", "size": "13753", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "panda/migrations/0026_auto__add_field_userprofile_show_login_help.py", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "256" }, { "name": "JavaScript", "bytes": "734026" }, { "name": "Python", "bytes": "872031" }, { "name": "Shell", "bytes": "13754" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('inventory', '0001_initial'), ] operations = [ migrations.CreateModel( name='Order', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(max_length=50)), ('last_name', models.CharField(max_length=50)), ('email', models.EmailField(max_length=254)), ('address', models.CharField(max_length=250)), ('postal_code', models.CharField(max_length=20)), ('city', models.CharField(max_length=100)), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('paid', models.BooleanField(default=False)), ], options={ 'ordering': ('-created',), }, ), migrations.CreateModel( name='OrderItem', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('price', models.DecimalField(decimal_places=2, max_digits=10)), ('quantity', models.PositiveIntegerField(default=1)), ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order')), ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='inventory.Product')), ], ), ]
{ "content_hash": "af9a248e52d16b9fdcd383b36235d3dc", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 144, "avg_line_length": 40.88636363636363, "alnum_prop": 0.5630906058921623, "repo_name": "delitamakanda/jobboard", "id": "791bedd5a47a72350c932727b80ff6e1600c4328", "size": "1872", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "orders/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "22577" }, { "name": "JavaScript", "bytes": "1489" }, { "name": "Python", "bytes": "32119" } ], "symlink_target": "" }
from mint.django_rest.test_utils import XMLTestCase, RepeaterMixIn from mint.django_rest.rbuilder.jobs import models from mint.django_rest.rbuilder.jobs import testsxml from xobj import xobj class BaseJobsTest(XMLTestCase): def _mock(self): pass def setUp(self): XMLTestCase.setUp(self) self._mock() eventUuid1 = 'eventuuid001' jobUuid1 = 'rmakeuuid001' eventUuid2 = 'eventuuid002' jobUuid2 = 'rmakeuuid002' eventUuid3 = 'eventuuid003' jobUuid3 = 'rmakeuuid003' system = self._saveSystem() user = self.getUser('testuser') self.job1 = self._newSystemJob(system, eventUuid1, jobUuid1, models.EventType.SYSTEM_REGISTRATION, createdBy=user) self.job2 = self._newSystemJob(system, eventUuid2, jobUuid2, models.EventType.SYSTEM_UPDATE, createdBy=user) self.system = system class CacheTest(XMLTestCase): "Simple test for the caching module" def testInvalidateCache(self): Cache = models.modellib.Cache # Populate the cache jt1 = Cache.get(models.EventType, name=models.EventType.SYSTEM_REGISTRATION) # Add new job type jt2 = models.EventType.objects.create(name='fake', description='fakefake', priority=1) jt3 = Cache.get(models.EventType, name='fake') self.failUnlessEqual(jt2.job_type_id, jt3.job_type_id) class JobsTestCase(BaseJobsTest): def _mock(self): models.Job.getRmakeJob = self.mockGetRmakeJob def mockGetRmakeJob(self): self.mockGetRmakeJob_called = True def testGetJobs(self): response = self._get('jobs/') self.assertEquals(response.status_code, 200) self.assertXMLEquals(response.content, testsxml.jobs_xml) def testGetJobsSortBySyntheticFields(self): response = self._get('jobs/?order_by=job_description') self.assertEquals(response.status_code, 200) self.assertXMLEquals(response.content, testsxml.jobs_xml.replace('order_by=""', 'order_by="job_description"')) def testGetJobStates(self): response = self._get('job_states/') self.assertEquals(response.status_code, 200) self.assertXMLEquals(response.content, testsxml.job_states_xml) def testGetJob(self): response = self._get('jobs/rmakeuuid001/') self.assertEquals(response.status_code, 200) self.assertXMLEquals(response.content, testsxml.job_xml) def testGetJobState(self): response = self._get('job_states/1/') self.assertEquals(response.status_code, 200) self.assertXMLEquals(response.content, testsxml.job_state_xml) def testGetSystemJobs(self): response = self._get('inventory/systems/%s/jobs/' % \ self.system.pk, username="admin", password="password") self.assertEquals(response.status_code, 200) self.assertXMLEquals(response.content, testsxml.systems_jobs_xml) def testUpdateJob(self): jobUuid = 'jobUuid1' jobToken = 'jobToken1' job = self._newJob(jobUuid, jobToken=jobToken, jobType=models.EventType.TARGET_REFRESH_IMAGES) jobXml = """ <job> <job_status>Completed</job_status> <status_code>200</status_code> <status_text>Some status here</status_text> <results encoding="identity"> <images> <image id="id1"> <imageId>id1</imageId> </image> <image id="id2"> <imageId>id2</imageId> </image> </images> </results> </job> """ response = self._put('jobs/%s' % jobUuid, jobXml, jobToken=jobToken) self.assertEquals(response.status_code, 200) obj = xobj.parse(response.content) self.failUnlessEqual(obj.job.id, "http://testserver/api/v1/jobs/%s" % job.job_uuid) class Jobs2TestCase(BaseJobsTest): def _mock(self): class DummyStatus(object): def __init__(slf, **kwargs): slf.__dict__.update(kwargs) class DummyJob(object): def __init__(slf, job_uuid, code, text, detail, final, completed, failed): slf.job_uuid = job_uuid slf.status = DummyStatus(code=code, text=text, detail=detail, 
final=final, completed=completed, failed=failed) class Dummy(object): data = dict( rmakeuuid001 = (101, "text 101", "detail 101", False, False, False), rmakeuuid002 = (202, "text 202", "detail 202", True, True, False), rmakeuuid003 = (404, "text 404", "detail 404", True, False, True), ) @staticmethod def mockGetRmakeJob(slf): jobUuid = slf.job_uuid code, text, detail, final, completed, failed = Dummy.data[jobUuid] j = DummyJob(jobUuid, code, text, detail, final, completed, failed) return j self.mock(models.Job, 'getRmakeJob', Dummy.mockGetRmakeJob) def testGetJobs(self): # Mark job2 as succeeded, to make sure the status doesn't get updated # from the rmake job again (this is a stretch) completedState = models.JobState.objects.get(name=models.JobState.COMPLETED) self.job2.job_state = completedState self.job2.status_code = 299 self.job2.status_text = "text 299" self.job2.status_detail = "no such luck" self.job2.save() response = self._get('jobs/') self.assertEquals(response.status_code, 200) obj = xobj.parse(response.content) jobs = obj.jobs.job self.failUnlessEqual([ str(x.job_state) for x in jobs ], [models.JobState.RUNNING, models.JobState.COMPLETED]) self.failUnlessEqual([ int(x.status_code) for x in jobs ], [100, 299]) self.failUnlessEqual([ x.status_text for x in jobs ], ["Initializing", "text 299"]) class JobCreationTest(BaseJobsTest, RepeaterMixIn): def _mock(self): RepeaterMixIn.setUpRepeaterClient(self) from mint.django_rest.rbuilder.inventory.manager import repeatermgr self.mock(repeatermgr.RepeaterManager, 'repeaterClient', self.mgr.repeaterMgr.repeaterClient) def testCreateJob(self): jobXml = """ <job> <job_type id="http://localhost/api/v1/inventory/event_types/19"/> <descriptor id="http://testserver/api/v1/target_types/6/descriptor_create_target"/> <descriptor_data> <alias>newbie</alias> <description>Brand new cloud</description> <name>newbie.eng.rpath.com</name> <zone>Local rBuilder</zone> </descriptor_data> </job> """ response = self._post('jobs', jobXml, username='testuser', password='password') self.assertEquals(response.status_code, 200) obj = xobj.parse(response.content) job = obj.job self.failUnlessEqual(job.descriptor.id, "http://testserver/api/v1/target_types/6/descriptor_create_target") dbjob = models.Job.objects.get(job_uuid=unicode(job.job_uuid)) # Make sure the job is related to the target type self.failUnlessEqual( [ x.target_type.name for x in dbjob.jobtargettype_set.all() ], [ 'xen-enterprise' ], )
{ "content_hash": "09139ad7c6660bef9e8b4791c6b844a8", "timestamp": "", "source": "github", "line_count": 197, "max_line_length": 115, "avg_line_length": 37.263959390862944, "alnum_prop": 0.6277073968124234, "repo_name": "sassoftware/mint", "id": "2b37b4242c49df619dc20529b946948eb035cb3d", "size": "7946", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mint/django_rest/rbuilder/jobs/tests.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "50165" }, { "name": "Genshi", "bytes": "58741" }, { "name": "HTML", "bytes": "2814" }, { "name": "JavaScript", "bytes": "11470" }, { "name": "Makefile", "bytes": "92418" }, { "name": "NASL", "bytes": "582" }, { "name": "PLpgSQL", "bytes": "5358" }, { "name": "Puppet", "bytes": "17914" }, { "name": "Python", "bytes": "3239135" }, { "name": "Ruby", "bytes": "9268" }, { "name": "Shell", "bytes": "24834" } ], "symlink_target": "" }
import unittest try: import collections.abc as abc except ImportError: import collections as abc from dimod.variables import Variables class TestDuplicates(unittest.TestCase): def test_duplicates(self): # should have no duplicates variables = Variables(['a', 'b', 'c', 'b']) self.assertEqual(list(variables), ['a', 'b', 'c']) def test_count(self): variables = Variables([1, 1, 1, 4, 5]) self.assertEqual(list(variables), [1, 4, 5]) for v in range(10): if v in variables: self.assertEqual(variables.count(v), 1) else: self.assertEqual(variables.count(v), 0) def test_len(self): variables = Variables('aaaaa') self.assertEqual(len(variables), 1) class TestList(unittest.TestCase): iterable = list(range(5)) def test_index_api(self): variables = Variables(self.iterable) self.assertTrue(hasattr(variables, 'index')) self.assertTrue(callable(variables.index)) self.assertTrue(isinstance(variables.index, abc.Mapping)) def test_contains_unhashable(self): variables = Variables(self.iterable) self.assertFalse([] in variables) def test_count_unhashable(self): variables = Variables(self.iterable) self.assertEqual(variables.count([]), 0) def test_index(self): variables = Variables(self.iterable) for idx, v in enumerate(self.iterable): self.assertEqual(variables.index(v), idx) def test_iterable(self): variables = Variables(self.iterable) self.assertEqual(list(variables), list(self.iterable)) def test_equality(self): variables = Variables(self.iterable) self.assertEqual(variables, self.iterable) def test_len(self): variables = Variables(self.iterable) self.assertEqual(len(variables), len(self.iterable)) def test_relabel_conflict(self): variables = Variables(self.iterable) iterable = self.iterable # want a relabelling with identity relabels and that maps to the same # set of labels as the original target = [iterable[-i] for i in range(len(iterable))] mapping = dict(zip(iterable, target)) variables.relabel(mapping) self.assertEqual(variables, target) def test_relabel_not_hashable(self): variables = Variables(self.iterable) mapping = {v: [v] for v in variables} with self.assertRaises(ValueError): variables.relabel(mapping) class TestMixed(TestList): # misc hashable objects iterable = [0, ('b',), 2.1, 'c', frozenset('d')] class TestRange(TestList): iterable = range(5) class TestString(TestList): iterable = 'abcde'
{ "content_hash": "91bed57f87a6d06f84e9db5dcb304e25", "timestamp": "", "source": "github", "line_count": 97, "max_line_length": 77, "avg_line_length": 28.762886597938145, "alnum_prop": 0.6372759856630824, "repo_name": "oneklc/dimod", "id": "532c99ed1a235e3ae0965ec14aabd929afcb7c56", "size": "3482", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_variables.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "3366" }, { "name": "C++", "bytes": "59430" }, { "name": "PowerShell", "bytes": "7195" }, { "name": "Python", "bytes": "676178" } ], "symlink_target": "" }
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import timeutils
from osprofiler import profiler

from senlin.common import consts
from senlin.common import context
from senlin.common import messaging as rpc
from senlin.common import service
from senlin.engine import health_manager

LOG = logging.getLogger(__name__)

CONF = cfg.CONF


@profiler.trace_cls("rpc")
class HealthManagerService(service.Service):

    def __init__(self, host, topic):
        super(HealthManagerService, self).__init__(
            self.service_name,
            host,
            topic,
            threads=CONF.health_manager.threads
        )
        self.version = consts.RPC_API_VERSION

        self.ctx = context.get_admin_context()

        # The following are initialized here and will be assigned in start()
        # which happens after the fork when spawning multiple worker processes
        self.health_registry = None
        self.target = None

    @property
    def service_name(self):
        return 'senlin-health-manager'

    def start(self):
        super(HealthManagerService, self).start()

        self.health_registry = health_manager.RuntimeHealthRegistry(
            ctx=self.ctx,
            engine_id=self.service_id,
            thread_group=self.tg
        )

        self.target = oslo_messaging.Target(server=self.service_id,
                                            topic=self.topic,
                                            version=self.version)
        self.server = rpc.get_rpc_server(self.target, self)
        self.server.start()

        self.tg.add_dynamic_timer(self.task, None, cfg.CONF.periodic_interval)

    def stop(self, graceful=False):
        if self.server:
            self.server.stop()
            self.server.wait()

        super(HealthManagerService, self).stop(graceful)

    def task(self):
        """Task that is queued on the health manager thread group.

        The task is here so that the service always has something to
        wait() on, or else the process will exit.
        """
        start_time = timeutils.utcnow(True)
        try:
            self.health_registry.load_runtime_registry()
        except Exception as ex:
            LOG.error("Failed when loading runtime for health manager: %s", ex)
            return
        health_manager.chase_up(
            start_time, cfg.CONF.periodic_interval, name='Health manager task'
        )

    def listening(self, ctx):
        """Respond to confirm that the rpc service is still alive."""
        return True

    def register_cluster(self, ctx, cluster_id, interval=None,
                         node_update_timeout=None, params=None,
                         enabled=True):
        """Register a cluster for health checking.

        :param ctx: The context of the notify request.
        :param cluster_id: The ID of the cluster to be registered.
        :param interval: Interval of the health check.
        :param node_update_timeout: Time to wait before declaring a node
            unhealthy.
        :param params: Params to be passed to health check.
        :param enabled: Sets whether the health check is enabled or disabled.
        :return: None
        """
        LOG.info("Registering health check for cluster %s.", cluster_id)

        self.health_registry.register_cluster(
            cluster_id=cluster_id,
            interval=interval,
            node_update_timeout=node_update_timeout,
            params=params,
            enabled=enabled)

    def unregister_cluster(self, ctx, cluster_id):
        """Unregister a cluster from health checking.

        :param ctx: The context of the notify request.
        :param cluster_id: The ID of the cluster to be unregistered.
        :return: None
        """
        LOG.info("Unregistering health check for cluster %s.", cluster_id)
        self.health_registry.unregister_cluster(cluster_id)

    def enable_cluster(self, ctx, cluster_id, params=None):
        self.health_registry.enable_cluster(cluster_id)

    def disable_cluster(self, ctx, cluster_id, params=None):
        self.health_registry.disable_cluster(cluster_id)
{ "content_hash": "fa4f12ac6f0f9356f91ff6cfcb0acc57", "timestamp": "", "source": "github", "line_count": 114, "max_line_length": 79, "avg_line_length": 36.06140350877193, "alnum_prop": 0.6348820238384821, "repo_name": "stackforge/senlin", "id": "b2592d93ff1d57082df7c5bef32b8e679d1a6722", "size": "4659", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "senlin/health_manager/service.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2145946" }, { "name": "Shell", "bytes": "18730" } ], "symlink_target": "" }
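The service above is deliberately thin: register_cluster and unregister_cluster only log and delegate to the RuntimeHealthRegistry created in start(). A minimal sketch of that delegation, using unittest.mock and bypassing __init__ so no oslo.config or RPC setup is needed (the context object and cluster id are made-up placeholders, and it is assumed the osprofiler decoration is inactive outside a profiled run):

from unittest import mock

from senlin.health_manager import service as hm_service

# Build the service object without running __init__, so no config/RPC setup
# is required for this illustration.
svc = object.__new__(hm_service.HealthManagerService)
svc.health_registry = mock.Mock()   # normally a RuntimeHealthRegistry from start()

ctx = mock.Mock()                   # placeholder for the notify-request context
svc.register_cluster(ctx, 'fake-cluster-id', interval=60,
                     node_update_timeout=120, enabled=True)

# The arguments are forwarded unchanged to the registry.
svc.health_registry.register_cluster.assert_called_once_with(
    cluster_id='fake-cluster-id', interval=60, node_update_timeout=120,
    params=None, enabled=True)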
import numpy import pandas as pd import numpy as np import datetime import dateutil.parser import hashlib import logging log = logging.getLogger(__name__) __author__ = 'Rafael Santos' __version__ = '0.1' class SlowlyChangingDimension(object): """Class to insert and update rows in a slowly changing dimension. """ def __init__(self, df, source_keys, surrogate_key='SCD id', type1_fields=None, type2_fields='all', valid_from='SCD Valid From', valid_to='SCD Valid To', as_of='now', day_first=False, year_first=True, end_date=2199, inplace=True, hash_name='SCD Hash'): """ Parameters ---------- df Required. pandas.DataFrame with dataset to be usead as dimension. source_keys Required. String or list with column names to be used as key(s) that ensure the unicity of the incoming data. surrogate_key Optional. String with column name where the generated surrogate key will be stored. Default 'SCD ID'. type1_fields Optional. List of column names where no history is kept in the database. New data overwrites old data. Use this type if tracking changes is not necessary. Default None. type2_fields Optional. List of column names where the whole history is stored in the database. This type tracks historical data by inserting a new record in the dimensional table with a separate surrogate key each time a change is made. * 'all': Use all fields not specified as type 1. * list: list of column names. Default 'all'. valid_from Optional. String with the column name to hold the start date of new records. Default 'SCD Valid From' valid_to Optional. String with the column name to hold the end date of records. When the record is currently active, the end date is defined by the end_date parameter. Default 'SCD Valid To' as_of Optional. Start date value of new records. * 'now': Uses current timestamp. * Date String: Fixed date. day_first Optional. Boolean. Whether to interpret the first value in an ambiguous 3-integer date (e.g. 01/05/09) as the day (True) or month (False). If year_first is set to True, this distinguishes between YDM and YMD. Default False. year_first Optional. Boolean. Whether to interpret the first value in an ambiguous 3-integer date (e.g. 01/05/09) as the year. If True, the first number is taken to be the year, otherwise the last number is taken to be the year. Default True. end_date Optional. Integer with year to represent currently active records. This year will be converted to datetime using the last day of the year. Default 2199. inplace Optional. Boolean. If True, do operation inplace. Default True. hash_name Optional. String with the column name to hold the hash code of records. This hash is then used to identify changes in type 2 fields. Default 'scd_hash' """ self.inplace = inplace if self.inplace: self.df = df else: self.df = df.copy() self.source_keys = source_keys self.surrogate_key = surrogate_key self.valid_from = valid_from self.valid_to = valid_to if type1_fields: self.type1_fields = type1_fields else: self.type1_fields = list() if isinstance(type2_fields, list): self.type2_fields = type2_fields elif type2_fields == 'all': ignore_fields = list(self.type1_fields) ignore_fields.extend([self.surrogate_key, self.valid_from, self.valid_to]) if isinstance(self.source_keys, list): for key in self.source_keys: ignore_fields.append(key) else: ignore_fields.append(self.source_keys) self.type2_fields =\ [col for col in self.df.columns if col not in ignore_fields] else: raise ValueError('Parameter {!r} is invalid for type2_fields'. 
format(type2_fields)) self.day_first = day_first self.year_first = year_first self.as_of = as_of self.end_date = end_date self.end_datetime = datetime.datetime(year=self.end_date, month=12, day=31) self.hash_name = hash_name if self.surrogate_key not in self.df.columns: self._create_scd_columns(self.df) @property def as_of(self): return self._as_of @as_of.setter def as_of(self, value): if value == 'now': self._as_of = datetime.datetime.combine( datetime.date.today(), datetime.time.min) else: self._as_of = dateutil.parser.parse( value, dayfirst=self.day_first, yearfirst=self.year_first) @property def settings(self): return {'source_keys': self.source_keys, 'surrogate_key': self.surrogate_key, 'valid_from': self.valid_from, 'valid_to': self.valid_to, 'type1_fields': self.type1_fields, 'type2_fields': self.type2_fields, 'day_first': self.day_first, 'year_first': self.year_first, 'end_date': self.end_date, 'hash_name': self.hash_name} @staticmethod def _hash_row(row, method='md5', encoding='utf-8'): """Compute hash of all fields in a given row. """ columns = sorted(row.index.tolist()) h = hashlib.new(method) for col in columns: value = row[col] if value != 'nan' and not pd.isnull(value): h.update(str(value).encode(encoding)) return h.hexdigest() def _compute_hash(self, df): """Computes hash of the type 2 columns for each row in given Dataframe. """ self._reset_index(df) # Use only source keys and type 2 fields in hash subset = list() if isinstance(self.source_keys, list): for key in self.source_keys: subset.append(key) else: subset.append(self.source_keys) if isinstance(self.type2_fields, list): for field in self.type2_fields: subset.append(field) else: subset.append(self.type2_fields) df[self.hash_name] = df[subset].apply(lambda x: self._hash_row(x), axis=1) def _create_scd_columns(self, df): """Create SCD columns for the first time in the data frame. """ df[self.surrogate_key] = df.index df[self.valid_from] = self.as_of df[self.valid_to] = self.end_datetime @staticmethod def _reset_index(df): """Check if an index exists and call pandas.DataFrame.reset_index method """ if isinstance(df, list): queue = df else: queue = [df] for _df in queue: if isinstance(_df.index, pd.MultiIndex)\ or _df.index.name is not None: _df.reset_index(inplace=True) def _set_index(self, df, keys): """Set the DataFrame index using the keys. Resets existing index before creating the new one. """ if isinstance(df, list): queue = df else: queue = [df] for _df in queue: self._reset_index(_df) _df.set_index(keys, inplace=True) def get_current_version(self): """Returns a pandas.Dataframe() with the currently active records in the dimension. """ return self.df.loc[self.df[self.valid_to] == self.end_datetime].copy() def _insert_first_version_of_new_records(self, new): """Lookup the records in the dimension. If there is no match, add the first version. """ # Get the newest version current = self.get_current_version() # Set source key as index self._set_index([self.df, current, new], self.source_keys) # Find new records new_records = new.loc[~new.index.isin(current.index)].copy() log.info('{0:,d} new dimension record(s) inserted'. 
format(len(new_records))) # Move surrogate key back to column self._reset_index([self.df, current, new_records]) self._create_scd_columns(new_records) self.df = self.df.append(new_records, ignore_index=True) self.df[self.surrogate_key] = self.df.index def _update_type_1_fields(self, new): # Set source key as index self._set_index([self.df, new], self.source_keys) # Updates dimension with matching rows from new data frame self.df.update(new[self.type1_fields], overwrite=True) # Move surrogate key back to column self._reset_index([self.df, new]) def _track_history_type_2_fields(self, new): # Get the newest version of records current = self.get_current_version() # Computes hash self._compute_hash(current) self._compute_hash(new) # Set source key as index self._set_index([current, new], self.source_keys) # Find surrogate key of modified rows modified =\ current.loc[(current.index.isin(new.index)) & (~current[self.hash_name].isin(new[self.hash_name])), self.surrogate_key] log.info('{0:,d} old dimension record(s) modified'. format(len(modified))) # Flag modified type 2 rows as inactive. # This is done by setting the 'valid to' field to a date different from # the default end date. self.df.loc[self.df[self.surrogate_key].isin(modified), self.valid_to] = self.as_of # Find new version of modified records new_versions =\ new.loc[(new.index.isin(current.index)) & (~new[self.hash_name].isin(current[self.hash_name]))].copy() del new_versions[self.hash_name] # Insert new version in dimension self._reset_index(new_versions) self._create_scd_columns(new_versions) self.df = self.df.append(new_versions, ignore_index=True) self.df[self.surrogate_key] = self.df.index def update(self, new): """Addresses Slowly Changing Dimension needs, receiving a source of data and logging the changes into the underlying SCD dimension. """ # Add the first version of new records self._insert_first_version_of_new_records(new) # Track history in type 2 fields if self.type2_fields: self._track_history_type_2_fields(new) # Update type 1 fields if self.type1_fields: self._update_type_1_fields(new)
{ "content_hash": "b818ec79eceed189835986c7f53c5462", "timestamp": "", "source": "github", "line_count": 337, "max_line_length": 80, "avg_line_length": 34.35014836795252, "alnum_prop": 0.5638389771941948, "repo_name": "rtogo/pyscd-pandas", "id": "f80efac5174a7bc2caeb33b84fed8bceabb713eb", "size": "12715", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyscd.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "22814" } ], "symlink_target": "" }
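The module above documents its parameters at length but ships without a usage example. Here is a small, hypothetical sketch of how the class is meant to be driven (column names and dates are invented, and it assumes a pandas version contemporary with this code, i.e. one that still provides DataFrame.append):

import pandas as pd
from pyscd import SlowlyChangingDimension

# Initial load of a customer dimension.
customers = pd.DataFrame({
    'customer_id': [1, 2],
    'name': ['Alice', 'Bob'],      # type 1: silently overwritten on change
    'city': ['Lisbon', 'Porto'],   # type 2: every change creates a new row
})

scd = SlowlyChangingDimension(customers,
                              source_keys='customer_id',
                              type1_fields=['name'],
                              type2_fields=['city'],
                              as_of='2016-01-01')

# A later batch: Alice corrected her name (type 1), Bob moved (type 2),
# and a brand new customer appeared.
incoming = pd.DataFrame({
    'customer_id': [1, 2, 3],
    'name': ['Alice Smith', 'Bob', 'Carol'],
    'city': ['Lisbon', 'Braga', 'Faro'],
})
scd.update(incoming)

# Rows whose 'SCD Valid To' is still the configured end date (2199-12-31)
# are the active versions; Bob should now have one closed row and one open row.
print(scd.get_current_version())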
""" Tests For Scheduler Host Filters. """ import ddt import mock from oslo_serialization import jsonutils from requests import exceptions as request_exceptions from cinder.compute import nova from cinder import context from cinder import db from cinder import exception from cinder.scheduler import filters from cinder.scheduler.filters import extra_specs_ops from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.scheduler import fakes from cinder.tests.unit import utils class HostFiltersTestCase(test.TestCase): """Test case for host filters.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) # This has a side effect of testing 'get_filter_classes' # when specifying a method (in this case, our standard filters) filter_handler = filters.HostFilterHandler('cinder.scheduler.filters') classes = filter_handler.get_all_classes() self.class_map = {} for cls in classes: self.class_map[cls.__name__] = cls class CapacityFilterTestCase(HostFiltersTestCase): def setUp(self): super(CapacityFilterTestCase, self).setUp() self.json_query = jsonutils.dumps( ['and', ['>=', '$free_capacity_gb', 1024], ['>=', '$total_capacity_gb', 10 * 1024]]) @mock.patch('cinder.utils.service_is_up') def test_filter_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_current_host_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'vol_exists_on': 'host1'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 100, 'free_capacity_gb': 10, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 200, 'free_capacity_gb': 120, 'reserved_percentage': 20, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails_free_capacity_None(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': None, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_passes_infinite(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'infinite', 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_passes_unknown(self, 
_mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'unknown', 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_passes_total_infinite(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'infinite', 'total_capacity_gb': 'infinite', 'reserved_percentage': 0, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_passes_total_unknown(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': 'unknown', 'total_capacity_gb': 'unknown', 'reserved_percentage': 0, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails_total_infinite(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 'infinite', 'reserved_percentage': 5, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails_total_unknown(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 'unknown', 'reserved_percentage': 5, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_fails_total_zero(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 0, 'reserved_percentage': 5, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_thin_true_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 500, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_thin_true_passes2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() 
filter_properties = {'size': 3000, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 7000, 'max_over_subscription_ratio': 20, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_thin_false_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> False', 'capabilities:thick_provisioning_support': '<is> True'} service = {'disabled': False} # If "thin_provisioning_support" is False, # "max_over_subscription_ratio" will be ignored. host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 300, 'max_over_subscription_ratio': 1.0, 'reserved_percentage': 5, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_over_subscription_less_than_1(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 200, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 0.8, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_over_subscription_equal_to_1(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 150, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 1.0, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_over_subscription_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 700, 'max_over_subscription_ratio': 1.5, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) 
@mock.patch('cinder.utils.service_is_up') def test_filter_over_subscription_fails2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 2000, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 30, 'provisioned_capacity_gb': 9000, 'max_over_subscription_ratio': 20, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_true_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 1000, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_false_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> False', 'capabilities:thick_provisioning_support': '<is> True'} service = {'disabled': False} # If "thin_provisioning_support" is False, # "max_over_subscription_ratio" will be ignored. 
host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 1.0, 'reserved_percentage': 5, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_thick_true_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> True'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 0, 'provisioned_capacity_gb': 800, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_thick_true_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> True'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 125, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_true_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> False'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 80, 'provisioned_capacity_gb': 600, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_thick_true_fails2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> True', 'capabilities:thick_provisioning_support': '<is> True'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 99, 'provisioned_capacity_gb': 1000, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_filter_reserved_thin_thick_true_passes2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': '<is> 
True', 'capabilities:thick_provisioning_support': '<is> True'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) class AffinityFilterTestCase(HostFiltersTestCase): @mock.patch('cinder.utils.service_is_up') def test_different_filter_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['DifferentBackendFilter']() service = {'disabled': False} host = fakes.FakeHostState('host1:pool0', {'free_capacity_gb': '1000', 'updated_at': None, 'service': service}) volume = utils.create_volume(self.context, host='host1:pool1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.utils.service_is_up') def test_different_filter_legacy_volume_hint_passes( self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['DifferentBackendFilter']() service = {'disabled': False} host = fakes.FakeHostState('host1:pool0', {'free_capacity_gb': '1000', 'updated_at': None, 'service': service}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_different_filter_non_list_fails(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host2', {}) volume = utils.create_volume(self.context, host='host2') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': vol_id}} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_different_filter_fails(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_different_filter_handles_none(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': None} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_different_filter_handles_deleted_instance(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id db.volume_destroy(utils.get_test_admin_context(), vol_id) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_different_filter_fail_nonuuid_hint(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': "NOT-a-valid-UUID", }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def 
test_different_filter_handles_multiple_uuids(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1#pool0', {}) volume1 = utils.create_volume(self.context, host='host1:pool1') vol_id1 = volume1.id volume2 = utils.create_volume(self.context, host='host1:pool3') vol_id2 = volume2.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id1, vol_id2], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_different_filter_handles_invalid_uuids(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host2') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id, "NOT-a-valid-UUID"], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_same_filter_no_list_passes(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': vol_id}} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_same_filter_passes(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1#pool0', {}) volume = utils.create_volume(self.context, host='host1#pool0') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_same_filter_legacy_vol_fails(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1#pool0', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_same_filter_fails(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1#pool0', {}) volume = utils.create_volume(self.context, host='host1#pool1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_same_filter_vol_list_pass(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) volume1 = utils.create_volume(self.context, host='host1') vol_id1 = volume1.id volume2 = utils.create_volume(self.context, host='host2') vol_id2 = volume2.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id1, vol_id2], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_same_filter_handles_none(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': None} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_same_filter_handles_deleted_instance(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) volume = utils.create_volume(self.context, host='host2') vol_id = volume.id db.volume_destroy(utils.get_test_admin_context(), vol_id) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], 
}} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_same_filter_fail_nonuuid_hint(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': "NOT-a-valid-UUID", }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) class DriverFilterTestCase(HostFiltersTestCase): def test_passing_function(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 == 1', } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_failing_function(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 == 2', } }) filter_properties = {'volume_type': {}} self.assertFalse(filt_cls.host_passes(host1, filter_properties)) def test_no_filter_function(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': None, } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_not_implemented(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': {} }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_no_volume_extra_specs(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 == 1', } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_extra_spec_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': 'extra.var == 1', } }) filter_properties = { 'volume_type': { 'extra_specs': { 'var': 1, } } } self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_stats_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'total_capacity_gb': 100, 'capabilities': { 'filter_function': 'stats.total_capacity_gb < 200', } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_volume_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': 'volume.size < 5', } }) filter_properties = { 'request_spec': { 'volume_properties': { 'size': 1 } } } self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_qos_spec_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': 'qos.var == 1', } }) filter_properties = { 'qos_specs': { 'var': 1 } } self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_function_exception_caught(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 / 0 == 0', } }) filter_properties = {} self.assertFalse(filt_cls.host_passes(host1, filter_properties)) def test_function_empty_qos(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': 'qos.maxiops == 1', } }) filter_properties = { 'qos_specs': None } 
self.assertFalse(filt_cls.host_passes(host1, filter_properties)) def test_capabilities(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'foo': 10, 'filter_function': 'capabilities.foo == 10', }, }) filter_properties = {} self.assertTrue(filt_cls.host_passes(host1, filter_properties)) def test_wrong_capabilities(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'bar': 10, 'filter_function': 'capabilities.foo == 10', }, }) filter_properties = {} self.assertFalse(filt_cls.host_passes(host1, filter_properties)) class InstanceLocalityFilterTestCase(HostFiltersTestCase): def setUp(self): super(InstanceLocalityFilterTestCase, self).setUp() self.override_config('nova_endpoint_template', 'http://novahost:8774/v2/%(project_id)s') self.context.service_catalog = \ [{'type': 'compute', 'name': 'nova', 'endpoints': [{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]}, {'type': 'identity', 'name': 'keystone', 'endpoints': [{'publicURL': 'http://keystonehost:5000/v2.0'}]}] @mock.patch('novaclient.client.discover_extensions') @mock.patch('cinder.compute.nova.novaclient') def test_same_host(self, _mock_novaclient, fake_extensions): _mock_novaclient.return_value = fakes.FakeNovaClient() fake_extensions.return_value = ( fakes.FakeNovaClient().list_extensions.show_all()) filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) uuid = nova.novaclient().servers.create('host1') filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': uuid}} self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('novaclient.client.discover_extensions') @mock.patch('cinder.compute.nova.novaclient') def test_different_host(self, _mock_novaclient, fake_extensions): _mock_novaclient.return_value = fakes.FakeNovaClient() fake_extensions.return_value = ( fakes.FakeNovaClient().list_extensions.show_all()) filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) uuid = nova.novaclient().servers.create('host2') filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': uuid}} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_handles_none(self): filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context, 'scheduler_hints': None} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_invalid_uuid(self): filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': 'e29b11d4-not-valid-a716'}} self.assertRaises(exception.InvalidUUID, filt_cls.host_passes, host, filter_properties) @mock.patch('cinder.compute.nova.novaclient') def test_nova_no_extended_server_attributes(self, _mock_novaclient): _mock_novaclient.return_value = fakes.FakeNovaClient( ext_srv_attr=False) filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) uuid = nova.novaclient().servers.create('host1') filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': uuid}} self.assertRaises(exception.CinderException, filt_cls.host_passes, host, filter_properties) @mock.patch('cinder.compute.nova.novaclient') def test_nova_down_does_not_alter_other_filters(self, _mock_novaclient): # Simulate Nova API is not 
available _mock_novaclient.side_effect = Exception filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {'context': self.context, 'size': 100} self.assertTrue(filt_cls.host_passes(host, filter_properties)) @mock.patch('cinder.compute.nova.novaclient') def test_nova_timeout(self, mock_novaclient): # Simulate a HTTP timeout mock_show_all = mock_novaclient.return_value.list_extensions.show_all mock_show_all.side_effect = request_exceptions.Timeout filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = \ {'context': self.context, 'scheduler_hints': {'local_to_instance': 'e29b11d4-15ef-34a9-a716-598a6f0b5467'}} self.assertRaises(exception.APITimeout, filt_cls.host_passes, host, filter_properties) class TestFilter(filters.BaseHostFilter): pass class TestBogusFilter(object): """Class that doesn't inherit from BaseHostFilter.""" pass class ExtraSpecsOpsTestCase(test.TestCase): def _do_extra_specs_ops_test(self, value, req, matches): assertion = self.assertTrue if matches else self.assertFalse assertion(extra_specs_ops.match(value, req)) def test_extra_specs_matches_simple(self): self._do_extra_specs_ops_test( value='1', req='1', matches=True) def test_extra_specs_fails_simple(self): self._do_extra_specs_ops_test( value='', req='1', matches=False) def test_extra_specs_fails_simple2(self): self._do_extra_specs_ops_test( value='3', req='1', matches=False) def test_extra_specs_fails_simple3(self): self._do_extra_specs_ops_test( value='222', req='2', matches=False) def test_extra_specs_fails_with_bogus_ops(self): self._do_extra_specs_ops_test( value='4', req='> 2', matches=False) def test_extra_specs_matches_with_op_eq(self): self._do_extra_specs_ops_test( value='123', req='= 123', matches=True) def test_extra_specs_matches_with_op_eq2(self): self._do_extra_specs_ops_test( value='124', req='= 123', matches=True) def test_extra_specs_fails_with_op_eq(self): self._do_extra_specs_ops_test( value='34', req='= 234', matches=False) def test_extra_specs_fails_with_op_eq3(self): self._do_extra_specs_ops_test( value='34', req='=', matches=False) def test_extra_specs_matches_with_op_seq(self): self._do_extra_specs_ops_test( value='123', req='s== 123', matches=True) def test_extra_specs_fails_with_op_seq(self): self._do_extra_specs_ops_test( value='1234', req='s== 123', matches=False) def test_extra_specs_matches_with_op_sneq(self): self._do_extra_specs_ops_test( value='1234', req='s!= 123', matches=True) def test_extra_specs_fails_with_op_sneq(self): self._do_extra_specs_ops_test( value='123', req='s!= 123', matches=False) def test_extra_specs_fails_with_op_sge(self): self._do_extra_specs_ops_test( value='1000', req='s>= 234', matches=False) def test_extra_specs_fails_with_op_sle(self): self._do_extra_specs_ops_test( value='1234', req='s<= 1000', matches=False) def test_extra_specs_fails_with_op_sl(self): self._do_extra_specs_ops_test( value='2', req='s< 12', matches=False) def test_extra_specs_fails_with_op_sg(self): self._do_extra_specs_ops_test( value='12', req='s> 2', matches=False) def test_extra_specs_matches_with_op_in(self): self._do_extra_specs_ops_test( value='12311321', req='<in> 11', matches=True) def test_extra_specs_matches_with_op_in2(self): self._do_extra_specs_ops_test( value='12311321', req='<in> 12311321', matches=True) def test_extra_specs_matches_with_op_in3(self): self._do_extra_specs_ops_test( value='12311321', req='<in> 12311321 <in>', matches=True) def 
test_extra_specs_fails_with_op_in(self): self._do_extra_specs_ops_test( value='12310321', req='<in> 11', matches=False) def test_extra_specs_fails_with_op_in2(self): self._do_extra_specs_ops_test( value='12310321', req='<in> 11 <in>', matches=False) def test_extra_specs_matches_with_op_is(self): self._do_extra_specs_ops_test( value=True, req='<is> True', matches=True) def test_extra_specs_matches_with_op_is2(self): self._do_extra_specs_ops_test( value=False, req='<is> False', matches=True) def test_extra_specs_matches_with_op_is3(self): self._do_extra_specs_ops_test( value=False, req='<is> Nonsense', matches=True) def test_extra_specs_fails_with_op_is(self): self._do_extra_specs_ops_test( value=True, req='<is> False', matches=False) def test_extra_specs_fails_with_op_is2(self): self._do_extra_specs_ops_test( value=False, req='<is> True', matches=False) def test_extra_specs_matches_with_op_or(self): self._do_extra_specs_ops_test( value='12', req='<or> 11 <or> 12', matches=True) def test_extra_specs_matches_with_op_or2(self): self._do_extra_specs_ops_test( value='12', req='<or> 11 <or> 12 <or>', matches=True) def test_extra_specs_fails_with_op_or(self): self._do_extra_specs_ops_test( value='13', req='<or> 11 <or> 12', matches=False) def test_extra_specs_fails_with_op_or2(self): self._do_extra_specs_ops_test( value='13', req='<or> 11 <or> 12 <or>', matches=False) def test_extra_specs_matches_with_op_le(self): self._do_extra_specs_ops_test( value='2', req='<= 10', matches=True) def test_extra_specs_fails_with_op_le(self): self._do_extra_specs_ops_test( value='3', req='<= 2', matches=False) def test_extra_specs_matches_with_op_ge(self): self._do_extra_specs_ops_test( value='3', req='>= 1', matches=True) def test_extra_specs_fails_with_op_ge(self): self._do_extra_specs_ops_test( value='2', req='>= 3', matches=False) def test_extra_specs_fails_none_req(self): self._do_extra_specs_ops_test( value='foo', req=None, matches=False) def test_extra_specs_matches_none_req(self): self._do_extra_specs_ops_test( value=None, req=None, matches=True) @ddt.ddt class BasicFiltersTestCase(HostFiltersTestCase): """Test case for host filters.""" def setUp(self): super(BasicFiltersTestCase, self).setUp() self.json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024]]) def test_all_filters(self): # Double check at least a couple of known filters exist self.assertIn('JsonFilter', self.class_map) self.assertIn('CapabilitiesFilter', self.class_map) self.assertIn('AvailabilityZoneFilter', self.class_map) self.assertIn('IgnoreAttemptedHostsFilter', self.class_map) def _do_test_type_filter_extra_specs(self, ecaps, especs, passes): filt_cls = self.class_map['CapabilitiesFilter']() capabilities = {'enabled': True} capabilities.update(ecaps) service = {'disabled': False} filter_properties = {'resource_type': {'name': 'fake_type', 'extra_specs': especs}} host = fakes.FakeHostState('host1', {'free_capacity_gb': 1024, 'capabilities': capabilities, 'service': service}) assertion = self.assertTrue if passes else self.assertFalse assertion(filt_cls.host_passes(host, filter_properties)) def test_capability_filter_passes_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2'}, passes=True) def test_capability_filter_fails_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '222'}, passes=False) def 
test_capability_filter_passes_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '<= 8'}, passes=True) def test_capability_filter_fails_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '>= 8'}, passes=False) def test_capability_filter_passes_extra_specs_list_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': ['1', '2'], 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2'}, passes=True) @ddt.data('<is> True', '<is> False') def test_capability_filter_passes_extra_specs_list_complex(self, opt1): self._do_test_type_filter_extra_specs( ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, especs={'opt1': opt1, 'opt2': '<= 8'}, passes=True) def test_capability_filter_fails_extra_specs_list_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': ['1', '2'], 'opt2': ['2']}, especs={'opt1': '3', 'opt2': '2'}, passes=False) def test_capability_filter_fails_extra_specs_list_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, especs={'opt1': 'fake', 'opt2': '<= 8'}, passes=False) def test_capability_filter_passes_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=True) def test_capability_filter_passes_fakescope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}, 'opt2': 5}, especs={'scope_lv1:opt1': '= 2', 'opt2': '>= 3'}, passes=True) def test_capability_filter_fails_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '<= 2'}, passes=False) def test_capability_filter_passes_multi_level_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'scope_lv1': {'scope_lv2': {'opt1': 10}}}}, especs={'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '>= 2'}, passes=True) def test_capability_filter_fails_unenough_level_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'scope_lv1': None}}, especs={'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '>= 2'}, passes=False) def test_capability_filter_fails_wrong_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=False) def test_capability_filter_passes_none_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': None}}, especs={'capabilities:scope_lv0:opt1': None}, passes=True) def test_capability_filter_fails_none_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': 10}}, especs={'capabilities:scope_lv0:opt1': None}, passes=False) def test_capability_filter_fails_none_caps(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': None}}, especs={'capabilities:scope_lv0:opt1': 'foo'}, passes=False) def test_capability_filter_passes_multi_level_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={ 'scope_lv0': { 'scope_lv1': { 'scope_lv2': { 'opt1': [True, False], }, }, }, }, especs={ 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '<is> True', }, passes=True) def test_capability_filter_fails_multi_level_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={ 'scope_lv0': { 'scope_lv1': { 'scope_lv2': { 'opt1': [True, 
False], 'opt2': ['1', '2'], }, }, }, }, especs={ 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '<is> True', 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt2': '3', }, passes=False) def test_capability_filter_fails_wrong_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': [True, False]}}, especs={'capabilities:scope_lv1:opt1': '<is> True'}, passes=False) def test_json_filter_passes(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_json_filter_passes_with_no_query(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 0, 'free_disk_mb': 0, 'capabilities': capabilities}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_json_filter_fails_on_memory(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1023, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_fails_on_disk(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': (200 * 1024) - 1, 'capabilities': capabilities}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_fails_on_caps_disabled(self): filt_cls = self.class_map['JsonFilter']() json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], '$capabilities.enabled']) filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': json_query}} capabilities = {'enabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_fails_on_service_disabled(self): filt_cls = self.class_map['JsonFilter']() json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], ['not', '$service.disabled']]) filter_properties = {'resource_type': {'memory_mb': 1024, 'local_gb': 200}, 'scheduler_hints': {'query': json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_happy_day(self): """Test json filter more thoroughly.""" filt_cls = self.class_map['JsonFilter']() raw = ['and', '$capabilities.enabled', ['=', '$capabilities.opt1', 'match'], ['or', ['and', ['<', '$free_ram_mb', 30], ['<', '$free_disk_mb', 300]], ['and', ['>', '$free_ram_mb', 30], 
['>', '$free_disk_mb', 300]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } # Passes capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 10, 'free_disk_mb': 200, 'capabilities': capabilities, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) # Passes capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) # Fails due to capabilities being disabled capabilities = {'enabled': False, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) # Fails due to being exact memory/disk we don't want capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 30, 'free_disk_mb': 300, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) # Fails due to memory lower but disk higher capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) # Fails due to capabilities 'opt1' not equal capabilities = {'enabled': True, 'opt1': 'no-match'} service = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_basic_operators(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) # (operator, arguments, expected_result) ops_to_test = [ ['=', [1, 1], True], ['=', [1, 2], False], ['<', [1, 2], True], ['<', [1, 1], False], ['<', [2, 1], False], ['>', [2, 1], True], ['>', [2, 2], False], ['>', [2, 3], False], ['<=', [1, 2], True], ['<=', [1, 1], True], ['<=', [2, 1], False], ['>=', [2, 1], True], ['>=', [2, 2], True], ['>=', [2, 3], False], ['in', [1, 1], True], ['in', [1, 1, 2, 3], True], ['in', [4, 1, 2, 3], False], ['not', [True], False], ['not', [False], True], ['or', [True, False], True], ['or', [False, False], False], ['and', [True, True], True], ['and', [False, False], False], ['and', [True, False], False], # Nested ((True or False) and (2 > 1)) == Passes ['and', [['or', True, False], ['>', 2, 1]], True]] for (op, args, expected) in ops_to_test: raw = [op] + args filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertEqual(expected, filt_cls.host_passes(host, filter_properties)) # This results in [False, True, False, True] and if any are True # then it passes... 
raw = ['not', True, False, True, False] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) # This results in [False, False, False] and if any are True # then it passes...which this doesn't raw = ['not', True, True, True] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_unknown_operator_raises(self): filt_cls = self.class_map['JsonFilter']() raw = ['!=', 1, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) self.assertRaises(KeyError, filt_cls.host_passes, host, filter_properties) def test_json_filter_empty_filters_pass(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = [] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) raw = {} filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_json_filter_invalid_num_arguments_fails(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(filt_cls.host_passes(host, filter_properties)) raw = ['>', 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_json_filter_unknown_variable_ignored(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = ['=', '$........', 1, 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) raw = ['=', '$foo', 2, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.host_passes(host, filter_properties)) @staticmethod def _make_zone_request(zone, is_admin=False): ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin) return { 'context': ctxt, 'request_spec': { 'resource_properties': { 'availability_zone': zone } } } def test_availability_zone_filter_same(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = self._make_zone_request('nova') host = fakes.FakeHostState('host1', {'service': service}) self.assertTrue(filt_cls.host_passes(host, request)) def test_availability_zone_filter_different(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = self._make_zone_request('bad') host = fakes.FakeHostState('host1', {'service': service}) self.assertFalse(filt_cls.host_passes(host, request)) def test_availability_zone_filter_empty(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = {} host = fakes.FakeHostState('host1', {'service': service}) self.assertTrue(filt_cls.host_passes(host, request)) def test_ignore_attempted_hosts_filter_disabled(self): # Test case where re-scheduling is disabled. 
filt_cls = self.class_map['IgnoreAttemptedHostsFilter']() host = fakes.FakeHostState('host1', {}) filter_properties = {} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_ignore_attempted_hosts_filter_pass(self): # Node not previously tried. filt_cls = self.class_map['IgnoreAttemptedHostsFilter']() host = fakes.FakeHostState('host1', {}) attempted = dict(num_attempts=2, hosts=['host2']) filter_properties = dict(retry=attempted) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_ignore_attempted_hosts_filter_fail(self): # Node was already tried. filt_cls = self.class_map['IgnoreAttemptedHostsFilter']() host = fakes.FakeHostState('host1', {}) attempted = dict(num_attempts=2, hosts=['host1']) filter_properties = dict(retry=attempted) self.assertFalse(filt_cls.host_passes(host, filter_properties))
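# Illustrative sketch, not part of the original test suite: how a caller
# might build the prefix-notation query that the JsonFilter tests above
# exercise.  The operator grammar ('and', 'or', 'not', '=', '<', '>',
# '<=', '>=', 'in') and the '$'-prefixed host variables are taken from
# the tests; the helper name and threshold defaults are hypothetical.
# 'jsonutils' is the serializer already imported and used by this module.
def _example_json_filter_hints(min_ram_mb=1024, min_disk_mb=200 * 1024):
    # Require an enabled host with at least the given free RAM and disk.
    query = ['and',
             '$capabilities.enabled',
             ['>=', '$free_ram_mb', min_ram_mb],
             ['>=', '$free_disk_mb', min_disk_mb]]
    # The query is serialized and passed through scheduler hints, mirroring
    # the filter_properties dictionaries constructed throughout the tests.
    return {'scheduler_hints': {'query': jsonutils.dumps(query)}}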
{ "content_hash": "19f25d5dfd1dec3da3d016834a4d3d1c", "timestamp": "", "source": "github", "line_count": 1773, "max_line_length": 79, "avg_line_length": 41.94641849971799, "alnum_prop": 0.5018757311317583, "repo_name": "bswartz/cinder", "id": "996737275990bc21083e4ab6de10caed525c0704", "size": "75007", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cinder/tests/unit/scheduler/test_host_filters.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "16345375" }, { "name": "Shell", "bytes": "8187" } ], "symlink_target": "" }
from setuptools import setup, find_packages VERSION='0.4' setup( name='namesync', version=VERSION, packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), description='Sync DNS records stored in a flat file format to your DNS provider.', author='Mark Sandstrom', author_email='mark@deliciouslynerdy.com', url='https://github.com/dnerdy/namesync', download_url='https://github.com/dnerdy/namesync/archive/v{}.tar.gz'.format(VERSION), setup_requires=[ 'setuptools>=0.8', ], install_requires=[ ], entry_points={ 'console_scripts': [ 'namesync = namesync.main:main', ], }, test_suite='nose.collector', test_loader='unittest:TestLoader', keywords=['dns', 'sync', 'syncing', 'cloudflare'], classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.4", "Topic :: Internet :: Name Service (DNS)", ], )
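# Illustrative note, not part of the original setup script: the
# console_scripts entry point above means that installing this package
# (for example with "pip install .") generates a "namesync" executable
# whose invocation simply calls namesync.main:main().  A rough
# equivalent, useful when experimenting without installing, is assumed
# to be:
#
#     python -c "from namesync.main import main; main()"
#
# The command-line arguments accepted by main() are defined in
# namesync/main.py and are not shown here.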
{ "content_hash": "9dd047fd454aa209645a457642474709", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 89, "avg_line_length": 33.054054054054056, "alnum_prop": 0.6058871627146362, "repo_name": "dnerdy/namesync", "id": "a3ca51d6b4de4b537d277cb51ebbee9fe3253d3a", "size": "1270", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "896733" }, { "name": "Shell", "bytes": "294" } ], "symlink_target": "" }
"""Heat API Server. An OpenStack ReST API to Heat. """ import eventlet eventlet.monkey_patch(os=False) import sys from oslo_config import cfg import oslo_i18n as i18n from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_service import systemd import six from heat.common import config from heat.common import messaging from heat.common import profiler from heat.common import wsgi from heat import version i18n.enable_lazy() LOG = logging.getLogger('heat.api') def launch_api(setup_logging=True): if setup_logging: logging.register_options(cfg.CONF) cfg.CONF(project='heat', prog='heat-api', version=version.version_info.version_string()) if setup_logging: logging.setup(cfg.CONF, 'heat-api') config.set_config_defaults() messaging.setup() app = config.load_paste_app() port = cfg.CONF.heat_api.bind_port host = cfg.CONF.heat_api.bind_host LOG.info('Starting Heat REST API on %(host)s:%(port)s', {'host': host, 'port': port}) profiler.setup('heat-api', host) gmr.TextGuruMeditation.setup_autorun(version) server = wsgi.Server('heat-api', cfg.CONF.heat_api) server.start(app, default_port=port) return server def main(): try: server = launch_api() systemd.notify_once() server.wait() except RuntimeError as e: msg = six.text_type(e) sys.exit("ERROR: %s" % msg)
{ "content_hash": "0dff522700cfd28198c2f2b1a05cfb06", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 59, "avg_line_length": 24.864406779661017, "alnum_prop": 0.6796182685753238, "repo_name": "noironetworks/heat", "id": "5a03682fb20fe6acc53ed958b95d77e0806bc4cc", "size": "2042", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "heat/cmd/api.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "8804896" }, { "name": "Shell", "bytes": "64533" } ], "symlink_target": "" }
import unittest from test import support from test import test_urllib import os import io import socket import array import sys import urllib.request # The proxy bypass method imported below has logic specific to the OSX # proxy config data structure but is testable on all platforms. from urllib.request import Request, OpenerDirector, _parse_proxy, _proxy_bypass_macosx_sysconf from urllib.parse import urlparse import urllib.error import http.client # XXX # Request # CacheFTPHandler (hard to write) # parse_keqv_list, parse_http_list, HTTPDigestAuthHandler class TrivialTests(unittest.TestCase): def test___all__(self): # Verify which names are exposed for module in 'request', 'response', 'parse', 'error', 'robotparser': context = {} exec('from urllib.%s import *' % module, context) del context['__builtins__'] if module == 'request' and os.name == 'nt': u, p = context.pop('url2pathname'), context.pop('pathname2url') self.assertEqual(u.__module__, 'nturl2path') self.assertEqual(p.__module__, 'nturl2path') for k, v in context.items(): self.assertEqual(v.__module__, 'urllib.%s' % module, "%r is exposed in 'urllib.%s' but defined in %r" % (k, module, v.__module__)) def test_trivial(self): # A couple trivial tests self.assertRaises(ValueError, urllib.request.urlopen, 'bogus url') # XXX Name hacking to get this to work on Windows. fname = os.path.abspath(urllib.request.__file__).replace(os.sep, '/') if os.name == 'nt': file_url = "file:///%s" % fname else: file_url = "file://%s" % fname f = urllib.request.urlopen(file_url) f.read() f.close() def test_parse_http_list(self): tests = [ ('a,b,c', ['a', 'b', 'c']), ('path"o,l"og"i"cal, example', ['path"o,l"og"i"cal', 'example']), ('a, b, "c", "d", "e,f", g, h', ['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']), ('a="b\\"c", d="e\\,f", g="h\\\\i"', ['a="b"c"', 'd="e,f"', 'g="h\\i"'])] for string, list in tests: self.assertEqual(urllib.request.parse_http_list(string), list) def test_URLError_reasonstr(self): err = urllib.error.URLError('reason') self.assertIn(err.reason, str(err)) class RequestHdrsTests(unittest.TestCase): def test_request_headers_dict(self): """ The Request.headers dictionary is not a documented interface. It should stay that way, because the complete set of headers are only accessible through the .get_header(), .has_header(), .header_items() interface. However, .headers pre-dates those methods, and so real code will be using the dictionary. The introduction in 2.4 of those methods was a mistake for the same reason: code that previously saw all (urllib2 user)-provided headers in .headers now sees only a subset. """ url = "http://example.com" self.assertEqual(Request(url, headers={"Spam-eggs": "blah"} ).headers["Spam-eggs"], "blah") self.assertEqual(Request(url, headers={"spam-EggS": "blah"} ).headers["Spam-eggs"], "blah") def test_request_headers_methods(self): """ Note the case normalization of header names here, to .capitalize()-case. This should be preserved for backwards-compatibility. (In the HTTP case, normalization to .title()-case is done by urllib2 before sending headers to http.client). Note that e.g. r.has_header("spam-EggS") is currently False, and r.get_header("spam-EggS") returns None, but that could be changed in future. 
Method r.remove_header should remove items both from r.headers and r.unredirected_hdrs dictionaries """ url = "http://example.com" req = Request(url, headers={"Spam-eggs": "blah"}) self.assertTrue(req.has_header("Spam-eggs")) self.assertEqual(req.header_items(), [('Spam-eggs', 'blah')]) req.add_header("Foo-Bar", "baz") self.assertEqual(sorted(req.header_items()), [('Foo-bar', 'baz'), ('Spam-eggs', 'blah')]) self.assertFalse(req.has_header("Not-there")) self.assertIsNone(req.get_header("Not-there")) self.assertEqual(req.get_header("Not-there", "default"), "default") req.remove_header("Spam-eggs") self.assertFalse(req.has_header("Spam-eggs")) req.add_unredirected_header("Unredirected-spam", "Eggs") self.assertTrue(req.has_header("Unredirected-spam")) req.remove_header("Unredirected-spam") self.assertFalse(req.has_header("Unredirected-spam")) def test_password_manager(self): mgr = urllib.request.HTTPPasswordMgr() add = mgr.add_password find_user_pass = mgr.find_user_password add("Some Realm", "http://example.com/", "joe", "password") add("Some Realm", "http://example.com/ni", "ni", "ni") add("c", "http://example.com/foo", "foo", "ni") add("c", "http://example.com/bar", "bar", "nini") add("b", "http://example.com/", "first", "blah") add("b", "http://example.com/", "second", "spam") add("a", "http://example.com", "1", "a") add("Some Realm", "http://c.example.com:3128", "3", "c") add("Some Realm", "d.example.com", "4", "d") add("Some Realm", "e.example.com:3128", "5", "e") self.assertEqual(find_user_pass("Some Realm", "example.com"), ('joe', 'password')) #self.assertEqual(find_user_pass("Some Realm", "http://example.com/ni"), # ('ni', 'ni')) self.assertEqual(find_user_pass("Some Realm", "http://example.com"), ('joe', 'password')) self.assertEqual(find_user_pass("Some Realm", "http://example.com/"), ('joe', 'password')) self.assertEqual( find_user_pass("Some Realm", "http://example.com/spam"), ('joe', 'password')) self.assertEqual( find_user_pass("Some Realm", "http://example.com/spam/spam"), ('joe', 'password')) self.assertEqual(find_user_pass("c", "http://example.com/foo"), ('foo', 'ni')) self.assertEqual(find_user_pass("c", "http://example.com/bar"), ('bar', 'nini')) self.assertEqual(find_user_pass("b", "http://example.com/"), ('second', 'spam')) # No special relationship between a.example.com and example.com: self.assertEqual(find_user_pass("a", "http://example.com/"), ('1', 'a')) self.assertEqual(find_user_pass("a", "http://a.example.com/"), (None, None)) # Ports: self.assertEqual(find_user_pass("Some Realm", "c.example.com"), (None, None)) self.assertEqual(find_user_pass("Some Realm", "c.example.com:3128"), ('3', 'c')) self.assertEqual( find_user_pass("Some Realm", "http://c.example.com:3128"), ('3', 'c')) self.assertEqual(find_user_pass("Some Realm", "d.example.com"), ('4', 'd')) self.assertEqual(find_user_pass("Some Realm", "e.example.com:3128"), ('5', 'e')) def test_password_manager_default_port(self): """ The point to note here is that we can't guess the default port if there's no scheme. This applies to both add_password and find_user_password. 
""" mgr = urllib.request.HTTPPasswordMgr() add = mgr.add_password find_user_pass = mgr.find_user_password add("f", "http://g.example.com:80", "10", "j") add("g", "http://h.example.com", "11", "k") add("h", "i.example.com:80", "12", "l") add("i", "j.example.com", "13", "m") self.assertEqual(find_user_pass("f", "g.example.com:100"), (None, None)) self.assertEqual(find_user_pass("f", "g.example.com:80"), ('10', 'j')) self.assertEqual(find_user_pass("f", "g.example.com"), (None, None)) self.assertEqual(find_user_pass("f", "http://g.example.com:100"), (None, None)) self.assertEqual(find_user_pass("f", "http://g.example.com:80"), ('10', 'j')) self.assertEqual(find_user_pass("f", "http://g.example.com"), ('10', 'j')) self.assertEqual(find_user_pass("g", "h.example.com"), ('11', 'k')) self.assertEqual(find_user_pass("g", "h.example.com:80"), ('11', 'k')) self.assertEqual(find_user_pass("g", "http://h.example.com:80"), ('11', 'k')) self.assertEqual(find_user_pass("h", "i.example.com"), (None, None)) self.assertEqual(find_user_pass("h", "i.example.com:80"), ('12', 'l')) self.assertEqual(find_user_pass("h", "http://i.example.com:80"), ('12', 'l')) self.assertEqual(find_user_pass("i", "j.example.com"), ('13', 'm')) self.assertEqual(find_user_pass("i", "j.example.com:80"), (None, None)) self.assertEqual(find_user_pass("i", "http://j.example.com"), ('13', 'm')) self.assertEqual(find_user_pass("i", "http://j.example.com:80"), (None, None)) class MockOpener: addheaders = [] def open(self, req, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): self.req, self.data, self.timeout = req, data, timeout def error(self, proto, *args): self.proto, self.args = proto, args class MockFile: def read(self, count=None): pass def readline(self, count=None): pass def close(self): pass class MockHeaders(dict): def getheaders(self, name): return list(self.values()) class MockResponse(io.StringIO): def __init__(self, code, msg, headers, data, url=None): io.StringIO.__init__(self, data) self.code, self.msg, self.headers, self.url = code, msg, headers, url def info(self): return self.headers def geturl(self): return self.url class MockCookieJar: def add_cookie_header(self, request): self.ach_req = request def extract_cookies(self, response, request): self.ec_req, self.ec_r = request, response class FakeMethod: def __init__(self, meth_name, action, handle): self.meth_name = meth_name self.handle = handle self.action = action def __call__(self, *args): return self.handle(self.meth_name, self.action, *args) class MockHTTPResponse(io.IOBase): def __init__(self, fp, msg, status, reason): self.fp = fp self.msg = msg self.status = status self.reason = reason self.code = 200 def read(self): return '' def info(self): return {} def geturl(self): return self.url class MockHTTPClass: def __init__(self): self.level = 0 self.req_headers = [] self.data = None self.raise_on_endheaders = False self.sock = None self._tunnel_headers = {} def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): self.host = host self.timeout = timeout return self def set_debuglevel(self, level): self.level = level def set_tunnel(self, host, port=None, headers=None): self._tunnel_host = host self._tunnel_port = port if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear() def request(self, method, url, body=None, headers=None): self.method = method self.selector = url if headers is not None: self.req_headers += headers.items() self.req_headers.sort() if body: self.data = body if self.raise_on_endheaders: raise OSError() def 
getresponse(self): return MockHTTPResponse(MockFile(), {}, 200, "OK") def close(self): pass class MockHandler: # useful for testing handler machinery # see add_ordered_mock_handlers() docstring handler_order = 500 def __init__(self, methods): self._define_methods(methods) def _define_methods(self, methods): for spec in methods: if len(spec) == 2: name, action = spec else: name, action = spec, None meth = FakeMethod(name, action, self.handle) setattr(self.__class__, name, meth) def handle(self, fn_name, action, *args, **kwds): self.parent.calls.append((self, fn_name, args, kwds)) if action is None: return None elif action == "return self": return self elif action == "return response": res = MockResponse(200, "OK", {}, "") return res elif action == "return request": return Request("http://blah/") elif action.startswith("error"): code = action[action.rfind(" ")+1:] try: code = int(code) except ValueError: pass res = MockResponse(200, "OK", {}, "") return self.parent.error("http", args[0], res, code, "", {}) elif action == "raise": raise urllib.error.URLError("blah") assert False def close(self): pass def add_parent(self, parent): self.parent = parent self.parent.calls = [] def __lt__(self, other): if not hasattr(other, "handler_order"): # No handler_order, leave in original order. Yuck. return True return self.handler_order < other.handler_order def add_ordered_mock_handlers(opener, meth_spec): """Create MockHandlers and add them to an OpenerDirector. meth_spec: list of lists of tuples and strings defining methods to define on handlers. eg: [["http_error", "ftp_open"], ["http_open"]] defines methods .http_error() and .ftp_open() on one handler, and .http_open() on another. These methods just record their arguments and return None. Using a tuple instead of a string causes the method to perform some action (see MockHandler.handle()), eg: [["http_error"], [("http_open", "return request")]] defines .http_error() on one handler (which simply returns None), and .http_open() on another handler, which returns a Request object. 
""" handlers = [] count = 0 for meths in meth_spec: class MockHandlerSubclass(MockHandler): pass h = MockHandlerSubclass(meths) h.handler_order += count h.add_parent(opener) count = count + 1 handlers.append(h) opener.add_handler(h) return handlers def build_test_opener(*handler_instances): opener = OpenerDirector() for h in handler_instances: opener.add_handler(h) return opener class MockHTTPHandler(urllib.request.BaseHandler): # useful for testing redirections and auth # sends supplied headers and code as first response # sends 200 OK as second response def __init__(self, code, headers): self.code = code self.headers = headers self.reset() def reset(self): self._count = 0 self.requests = [] def http_open(self, req): import email, http.client, copy self.requests.append(copy.deepcopy(req)) if self._count == 0: self._count = self._count + 1 name = http.client.responses[self.code] msg = email.message_from_string(self.headers) return self.parent.error( "http", req, MockFile(), self.code, name, msg) else: self.req = req msg = email.message_from_string("\r\n\r\n") return MockResponse(200, "OK", msg, "", req.get_full_url()) class MockHTTPSHandler(urllib.request.AbstractHTTPHandler): # Useful for testing the Proxy-Authorization request by verifying the # properties of httpcon def __init__(self): urllib.request.AbstractHTTPHandler.__init__(self) self.httpconn = MockHTTPClass() def https_open(self, req): return self.do_open(self.httpconn, req) class MockPasswordManager: def add_password(self, realm, uri, user, password): self.realm = realm self.url = uri self.user = user self.password = password def find_user_password(self, realm, authuri): self.target_realm = realm self.target_url = authuri return self.user, self.password class OpenerDirectorTests(unittest.TestCase): def test_add_non_handler(self): class NonHandler(object): pass self.assertRaises(TypeError, OpenerDirector().add_handler, NonHandler()) def test_badly_named_methods(self): # test work-around for three methods that accidentally follow the # naming conventions for handler methods # (*_open() / *_request() / *_response()) # These used to call the accidentally-named methods, causing a # TypeError in real code; here, returning self from these mock # methods would either cause no exception, or AttributeError. from urllib.error import URLError o = OpenerDirector() meth_spec = [ [("do_open", "return self"), ("proxy_open", "return self")], [("redirect_request", "return self")], ] add_ordered_mock_handlers(o, meth_spec) o.add_handler(urllib.request.UnknownHandler()) for scheme in "do", "proxy", "redirect": self.assertRaises(URLError, o.open, scheme+"://example.com/") def test_handled(self): # handler returning non-None means no more handlers will be called o = OpenerDirector() meth_spec = [ ["http_open", "ftp_open", "http_error_302"], ["ftp_open"], [("http_open", "return self")], [("http_open", "return self")], ] handlers = add_ordered_mock_handlers(o, meth_spec) req = Request("http://example.com/") r = o.open(req) # Second .http_open() gets called, third doesn't, since second returned # non-None. Handlers without .http_open() never get any methods called # on them. # In fact, second mock handler defining .http_open() returns self # (instead of response), which becomes the OpenerDirector's return # value. 
self.assertEqual(r, handlers[2]) calls = [(handlers[0], "http_open"), (handlers[2], "http_open")] for expected, got in zip(calls, o.calls): handler, name, args, kwds = got self.assertEqual((handler, name), expected) self.assertEqual(args, (req,)) def test_handler_order(self): o = OpenerDirector() handlers = [] for meths, handler_order in [ ([("http_open", "return self")], 500), (["http_open"], 0), ]: class MockHandlerSubclass(MockHandler): pass h = MockHandlerSubclass(meths) h.handler_order = handler_order handlers.append(h) o.add_handler(h) o.open("http://example.com/") # handlers called in reverse order, thanks to their sort order self.assertEqual(o.calls[0][0], handlers[1]) self.assertEqual(o.calls[1][0], handlers[0]) def test_raise(self): # raising URLError stops processing of request o = OpenerDirector() meth_spec = [ [("http_open", "raise")], [("http_open", "return self")], ] handlers = add_ordered_mock_handlers(o, meth_spec) req = Request("http://example.com/") self.assertRaises(urllib.error.URLError, o.open, req) self.assertEqual(o.calls, [(handlers[0], "http_open", (req,), {})]) def test_http_error(self): # XXX http_error_default # http errors are a special case o = OpenerDirector() meth_spec = [ [("http_open", "error 302")], [("http_error_400", "raise"), "http_open"], [("http_error_302", "return response"), "http_error_303", "http_error"], [("http_error_302")], ] handlers = add_ordered_mock_handlers(o, meth_spec) class Unknown: def __eq__(self, other): return True req = Request("http://example.com/") o.open(req) assert len(o.calls) == 2 calls = [(handlers[0], "http_open", (req,)), (handlers[2], "http_error_302", (req, Unknown(), 302, "", {}))] for expected, got in zip(calls, o.calls): handler, method_name, args = expected self.assertEqual((handler, method_name), got[:2]) self.assertEqual(args, got[2]) def test_processors(self): # *_request / *_response methods get called appropriately o = OpenerDirector() meth_spec = [ [("http_request", "return request"), ("http_response", "return response")], [("http_request", "return request"), ("http_response", "return response")], ] handlers = add_ordered_mock_handlers(o, meth_spec) req = Request("http://example.com/") o.open(req) # processor methods are called on *all* handlers that define them, # not just the first handler that handles the request calls = [ (handlers[0], "http_request"), (handlers[1], "http_request"), (handlers[0], "http_response"), (handlers[1], "http_response")] for i, (handler, name, args, kwds) in enumerate(o.calls): if i < 2: # *_request self.assertEqual((handler, name), calls[i]) self.assertEqual(len(args), 1) self.assertIsInstance(args[0], Request) else: # *_response self.assertEqual((handler, name), calls[i]) self.assertEqual(len(args), 2) self.assertIsInstance(args[0], Request) # response from opener.open is None, because there's no # handler that defines http_open to handle it if args[1] is not None: self.assertIsInstance(args[1], MockResponse) def sanepathname2url(path): try: path.encode("utf-8") except UnicodeEncodeError: raise unittest.SkipTest("path is not encodable to utf8") urlpath = urllib.request.pathname2url(path) if os.name == "nt" and urlpath.startswith("///"): urlpath = urlpath[2:] # XXX don't ask me about the mac... 
return urlpath class HandlerTests(unittest.TestCase): def test_ftp(self): class MockFTPWrapper: def __init__(self, data): self.data = data def retrfile(self, filename, filetype): self.filename, self.filetype = filename, filetype return io.StringIO(self.data), len(self.data) def close(self): pass class NullFTPHandler(urllib.request.FTPHandler): def __init__(self, data): self.data = data def connect_ftp(self, user, passwd, host, port, dirs, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): self.user, self.passwd = user, passwd self.host, self.port = host, port self.dirs = dirs self.ftpwrapper = MockFTPWrapper(self.data) return self.ftpwrapper import ftplib data = "rheum rhaponicum" h = NullFTPHandler(data) h.parent = MockOpener() for url, host, port, user, passwd, type_, dirs, filename, mimetype in [ ("ftp://localhost/foo/bar/baz.html", "localhost", ftplib.FTP_PORT, "", "", "I", ["foo", "bar"], "baz.html", "text/html"), ("ftp://parrot@localhost/foo/bar/baz.html", "localhost", ftplib.FTP_PORT, "parrot", "", "I", ["foo", "bar"], "baz.html", "text/html"), ("ftp://%25parrot@localhost/foo/bar/baz.html", "localhost", ftplib.FTP_PORT, "%parrot", "", "I", ["foo", "bar"], "baz.html", "text/html"), ("ftp://%2542parrot@localhost/foo/bar/baz.html", "localhost", ftplib.FTP_PORT, "%42parrot", "", "I", ["foo", "bar"], "baz.html", "text/html"), ("ftp://localhost:80/foo/bar/", "localhost", 80, "", "", "D", ["foo", "bar"], "", None), ("ftp://localhost/baz.gif;type=a", "localhost", ftplib.FTP_PORT, "", "", "A", [], "baz.gif", None), # XXX really this should guess image/gif ]: req = Request(url) req.timeout = None r = h.ftp_open(req) # ftp authentication not yet implemented by FTPHandler self.assertEqual(h.user, user) self.assertEqual(h.passwd, passwd) self.assertEqual(h.host, socket.gethostbyname(host)) self.assertEqual(h.port, port) self.assertEqual(h.dirs, dirs) self.assertEqual(h.ftpwrapper.filename, filename) self.assertEqual(h.ftpwrapper.filetype, type_) headers = r.info() self.assertEqual(headers.get("Content-type"), mimetype) self.assertEqual(int(headers["Content-length"]), len(data)) def test_file(self): import email.utils h = urllib.request.FileHandler() o = h.parent = MockOpener() TESTFN = support.TESTFN urlpath = sanepathname2url(os.path.abspath(TESTFN)) towrite = b"hello, world\n" urls = [ "file://localhost%s" % urlpath, "file://%s" % urlpath, "file://%s%s" % (socket.gethostbyname('localhost'), urlpath), ] try: localaddr = socket.gethostbyname(socket.gethostname()) except socket.gaierror: localaddr = '' if localaddr: urls.append("file://%s%s" % (localaddr, urlpath)) for url in urls: f = open(TESTFN, "wb") try: try: f.write(towrite) finally: f.close() r = h.file_open(Request(url)) try: data = r.read() headers = r.info() respurl = r.geturl() finally: r.close() stats = os.stat(TESTFN) modified = email.utils.formatdate(stats.st_mtime, usegmt=True) finally: os.remove(TESTFN) self.assertEqual(data, towrite) self.assertEqual(headers["Content-type"], "text/plain") self.assertEqual(headers["Content-length"], "13") self.assertEqual(headers["Last-modified"], modified) self.assertEqual(respurl, url) for url in [ "file://localhost:80%s" % urlpath, "file:///file_does_not_exist.txt", "file://not-a-local-host.com//dir/file.txt", "file://%s:80%s/%s" % (socket.gethostbyname('localhost'), os.getcwd(), TESTFN), "file://somerandomhost.ontheinternet.com%s/%s" % (os.getcwd(), TESTFN), ]: try: f = open(TESTFN, "wb") try: f.write(towrite) finally: f.close() self.assertRaises(urllib.error.URLError, h.file_open, Request(url)) 
finally: os.remove(TESTFN) h = urllib.request.FileHandler() o = h.parent = MockOpener() # XXXX why does // mean ftp (and /// mean not ftp!), and where # is file: scheme specified? I think this is really a bug, and # what was intended was to distinguish between URLs like: # file:/blah.txt (a file) # file://localhost/blah.txt (a file) # file:///blah.txt (a file) # file://ftp.example.com/blah.txt (an ftp URL) for url, ftp in [ ("file://ftp.example.com//foo.txt", False), ("file://ftp.example.com///foo.txt", False), # XXXX bug: fails with OSError, should be URLError ("file://ftp.example.com/foo.txt", False), ("file://somehost//foo/something.txt", False), ("file://localhost//foo/something.txt", False), ]: req = Request(url) try: h.file_open(req) # XXXX remove OSError when bug fixed except (urllib.error.URLError, OSError): self.assertFalse(ftp) else: self.assertIs(o.req, req) self.assertEqual(req.type, "ftp") self.assertEqual(req.type == "ftp", ftp) def test_http(self): h = urllib.request.AbstractHTTPHandler() o = h.parent = MockOpener() url = "http://example.com/" for method, data in [("GET", None), ("POST", b"blah")]: req = Request(url, data, {"Foo": "bar"}) req.timeout = None req.add_unredirected_header("Spam", "eggs") http = MockHTTPClass() r = h.do_open(http, req) # result attributes r.read; r.readline # wrapped MockFile methods r.info; r.geturl # addinfourl methods r.code, r.msg == 200, "OK" # added from MockHTTPClass.getreply() hdrs = r.info() hdrs.get; hdrs.__contains__ # r.info() gives dict from .getreply() self.assertEqual(r.geturl(), url) self.assertEqual(http.host, "example.com") self.assertEqual(http.level, 0) self.assertEqual(http.method, method) self.assertEqual(http.selector, "/") self.assertEqual(http.req_headers, [("Connection", "close"), ("Foo", "bar"), ("Spam", "eggs")]) self.assertEqual(http.data, data) # check OSError converted to URLError http.raise_on_endheaders = True self.assertRaises(urllib.error.URLError, h.do_open, http, req) # Check for TypeError on POST data which is str. 
req = Request("http://example.com/","badpost") self.assertRaises(TypeError, h.do_request_, req) # check adding of standard headers o.addheaders = [("Spam", "eggs")] for data in b"", None: # POST, GET req = Request("http://example.com/", data) r = MockResponse(200, "OK", {}, "") newreq = h.do_request_(req) if data is None: # GET self.assertNotIn("Content-length", req.unredirected_hdrs) self.assertNotIn("Content-type", req.unredirected_hdrs) else: # POST self.assertEqual(req.unredirected_hdrs["Content-length"], "0") self.assertEqual(req.unredirected_hdrs["Content-type"], "application/x-www-form-urlencoded") # XXX the details of Host could be better tested self.assertEqual(req.unredirected_hdrs["Host"], "example.com") self.assertEqual(req.unredirected_hdrs["Spam"], "eggs") # don't clobber existing headers req.add_unredirected_header("Content-length", "foo") req.add_unredirected_header("Content-type", "bar") req.add_unredirected_header("Host", "baz") req.add_unredirected_header("Spam", "foo") newreq = h.do_request_(req) self.assertEqual(req.unredirected_hdrs["Content-length"], "foo") self.assertEqual(req.unredirected_hdrs["Content-type"], "bar") self.assertEqual(req.unredirected_hdrs["Host"], "baz") self.assertEqual(req.unredirected_hdrs["Spam"], "foo") # Check iterable body support def iterable_body(): yield b"one" yield b"two" yield b"three" for headers in {}, {"Content-Length": 11}: req = Request("http://example.com/", iterable_body(), headers) if not headers: # Having an iterable body without a Content-Length should # raise an exception self.assertRaises(ValueError, h.do_request_, req) else: newreq = h.do_request_(req) # A file object. # Test only Content-Length attribute of request. file_obj = io.BytesIO() file_obj.write(b"Something\nSomething\nSomething\n") for headers in {}, {"Content-Length": 30}: req = Request("http://example.com/", file_obj, headers) if not headers: # Having an iterable body without a Content-Length should # raise an exception self.assertRaises(ValueError, h.do_request_, req) else: newreq = h.do_request_(req) self.assertEqual(int(newreq.get_header('Content-length')),30) file_obj.close() # array.array Iterable - Content Length is calculated iterable_array = array.array("I",[1,2,3,4]) for headers in {}, {"Content-Length": 16}: req = Request("http://example.com/", iterable_array, headers) newreq = h.do_request_(req) self.assertEqual(int(newreq.get_header('Content-length')),16) def test_http_doubleslash(self): # Checks the presence of any unnecessary double slash in url does not # break anything. Previously, a double slash directly after the host # could cause incorrect parsing. 
h = urllib.request.AbstractHTTPHandler() h.parent = MockOpener() data = b"" ds_urls = [ "http://example.com/foo/bar/baz.html", "http://example.com//foo/bar/baz.html", "http://example.com/foo//bar/baz.html", "http://example.com/foo/bar//baz.html" ] for ds_url in ds_urls: ds_req = Request(ds_url, data) # Check whether host is determined correctly if there is no proxy np_ds_req = h.do_request_(ds_req) self.assertEqual(np_ds_req.unredirected_hdrs["Host"],"example.com") # Check whether host is determined correctly if there is a proxy ds_req.set_proxy("someproxy:3128",None) p_ds_req = h.do_request_(ds_req) self.assertEqual(p_ds_req.unredirected_hdrs["Host"],"example.com") def test_full_url_setter(self): # Checks to ensure that components are set correctly after setting the # full_url of a Request object urls = [ 'http://example.com?foo=bar#baz', 'http://example.com?foo=bar&spam=eggs#bash', 'http://example.com', ] # testing a reusable request instance, but the url parameter is # required, so just use a dummy one to instantiate r = Request('http://example.com') for url in urls: r.full_url = url parsed = urlparse(url) self.assertEqual(r.get_full_url(), url) # full_url setter uses splittag to split into components. # splittag sets the fragment as None while urlparse sets it to '' self.assertEqual(r.fragment or '', parsed.fragment) self.assertEqual(urlparse(r.get_full_url()).query, parsed.query) def test_full_url_deleter(self): r = Request('http://www.example.com') del r.full_url self.assertIsNone(r.full_url) self.assertIsNone(r.fragment) self.assertEqual(r.selector, '') def test_fixpath_in_weirdurls(self): # Issue4493: urllib2 to supply '/' when to urls where path does not # start with'/' h = urllib.request.AbstractHTTPHandler() h.parent = MockOpener() weird_url = 'http://www.python.org?getspam' req = Request(weird_url) newreq = h.do_request_(req) self.assertEqual(newreq.host,'www.python.org') self.assertEqual(newreq.selector,'/?getspam') url_without_path = 'http://www.python.org' req = Request(url_without_path) newreq = h.do_request_(req) self.assertEqual(newreq.host,'www.python.org') self.assertEqual(newreq.selector,'') def test_errors(self): h = urllib.request.HTTPErrorProcessor() o = h.parent = MockOpener() url = "http://example.com/" req = Request(url) # all 2xx are passed through r = MockResponse(200, "OK", {}, "", url) newr = h.http_response(req, r) self.assertIs(r, newr) self.assertFalse(hasattr(o, "proto")) # o.error not called r = MockResponse(202, "Accepted", {}, "", url) newr = h.http_response(req, r) self.assertIs(r, newr) self.assertFalse(hasattr(o, "proto")) # o.error not called r = MockResponse(206, "Partial content", {}, "", url) newr = h.http_response(req, r) self.assertIs(r, newr) self.assertFalse(hasattr(o, "proto")) # o.error not called # anything else calls o.error (and MockOpener returns None, here) r = MockResponse(502, "Bad gateway", {}, "", url) self.assertIsNone(h.http_response(req, r)) self.assertEqual(o.proto, "http") # o.error called self.assertEqual(o.args, (req, r, 502, "Bad gateway", {})) def test_cookies(self): cj = MockCookieJar() h = urllib.request.HTTPCookieProcessor(cj) h.parent = MockOpener() req = Request("http://example.com/") r = MockResponse(200, "OK", {}, "") newreq = h.http_request(req) self.assertIs(cj.ach_req, req) self.assertIs(cj.ach_req, newreq) self.assertEqual(req.origin_req_host, "example.com") self.assertFalse(req.unverifiable) newr = h.http_response(req, r) self.assertIs(cj.ec_req, req) self.assertIs(cj.ec_r, r) self.assertIs(r, newr) def 
test_redirect(self): from_url = "http://example.com/a.html" to_url = "http://example.com/b.html" h = urllib.request.HTTPRedirectHandler() o = h.parent = MockOpener() # ordinary redirect behaviour for code in 301, 302, 303, 307: for data in None, "blah\nblah\n": method = getattr(h, "http_error_%s" % code) req = Request(from_url, data) req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT req.add_header("Nonsense", "viking=withhold") if data is not None: req.add_header("Content-Length", str(len(data))) req.add_unredirected_header("Spam", "spam") try: method(req, MockFile(), code, "Blah", MockHeaders({"location": to_url})) except urllib.error.HTTPError: # 307 in response to POST requires user OK self.assertEqual(code, 307) self.assertIsNotNone(data) self.assertEqual(o.req.get_full_url(), to_url) try: self.assertEqual(o.req.get_method(), "GET") except AttributeError: self.assertFalse(o.req.data) # now it's a GET, there should not be headers regarding content # (possibly dragged from before being a POST) headers = [x.lower() for x in o.req.headers] self.assertNotIn("content-length", headers) self.assertNotIn("content-type", headers) self.assertEqual(o.req.headers["Nonsense"], "viking=withhold") self.assertNotIn("Spam", o.req.headers) self.assertNotIn("Spam", o.req.unredirected_hdrs) # loop detection req = Request(from_url) req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT def redirect(h, req, url=to_url): h.http_error_302(req, MockFile(), 302, "Blah", MockHeaders({"location": url})) # Note that the *original* request shares the same record of # redirections with the sub-requests caused by the redirections. # detect infinite loop redirect of a URL to itself req = Request(from_url, origin_req_host="example.com") count = 0 req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT try: while 1: redirect(h, req, "http://example.com/") count = count + 1 except urllib.error.HTTPError: # don't stop until max_repeats, because cookies may introduce state self.assertEqual(count, urllib.request.HTTPRedirectHandler.max_repeats) # detect endless non-repeating chain of redirects req = Request(from_url, origin_req_host="example.com") count = 0 req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT try: while 1: redirect(h, req, "http://example.com/%d" % count) count = count + 1 except urllib.error.HTTPError: self.assertEqual(count, urllib.request.HTTPRedirectHandler.max_redirections) def test_invalid_redirect(self): from_url = "http://example.com/a.html" valid_schemes = ['http','https','ftp'] invalid_schemes = ['file','imap','ldap'] schemeless_url = "example.com/b.html" h = urllib.request.HTTPRedirectHandler() o = h.parent = MockOpener() req = Request(from_url) req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT for scheme in invalid_schemes: invalid_url = scheme + '://' + schemeless_url self.assertRaises(urllib.error.HTTPError, h.http_error_302, req, MockFile(), 302, "Security Loophole", MockHeaders({"location": invalid_url})) for scheme in valid_schemes: valid_url = scheme + '://' + schemeless_url h.http_error_302(req, MockFile(), 302, "That's fine", MockHeaders({"location": valid_url})) self.assertEqual(o.req.get_full_url(), valid_url) def test_relative_redirect(self): from_url = "http://example.com/a.html" relative_url = "/b.html" h = urllib.request.HTTPRedirectHandler() o = h.parent = MockOpener() req = Request(from_url) req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT valid_url = urllib.parse.urljoin(from_url,relative_url) h.http_error_302(req, MockFile(), 302, "That's fine", MockHeaders({"location": valid_url})) self.assertEqual(o.req.get_full_url(), 
valid_url) def test_cookie_redirect(self): # cookies shouldn't leak into redirected requests from http.cookiejar import CookieJar from test.test_http_cookiejar import interact_netscape cj = CookieJar() interact_netscape(cj, "http://www.example.com/", "spam=eggs") hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n") hdeh = urllib.request.HTTPDefaultErrorHandler() hrh = urllib.request.HTTPRedirectHandler() cp = urllib.request.HTTPCookieProcessor(cj) o = build_test_opener(hh, hdeh, hrh, cp) o.open("http://www.example.com/") self.assertFalse(hh.req.has_header("Cookie")) def test_redirect_fragment(self): redirected_url = 'http://www.example.com/index.html#OK\r\n\r\n' hh = MockHTTPHandler(302, 'Location: ' + redirected_url) hdeh = urllib.request.HTTPDefaultErrorHandler() hrh = urllib.request.HTTPRedirectHandler() o = build_test_opener(hh, hdeh, hrh) fp = o.open('http://www.example.com') self.assertEqual(fp.geturl(), redirected_url.strip()) def test_proxy(self): o = OpenerDirector() ph = urllib.request.ProxyHandler(dict(http="proxy.example.com:3128")) o.add_handler(ph) meth_spec = [ [("http_open", "return response")] ] handlers = add_ordered_mock_handlers(o, meth_spec) req = Request("http://acme.example.com/") self.assertEqual(req.host, "acme.example.com") o.open(req) self.assertEqual(req.host, "proxy.example.com:3128") self.assertEqual([(handlers[0], "http_open")], [tup[0:2] for tup in o.calls]) def test_proxy_no_proxy(self): os.environ['no_proxy'] = 'python.org' o = OpenerDirector() ph = urllib.request.ProxyHandler(dict(http="proxy.example.com")) o.add_handler(ph) req = Request("http://www.perl.org/") self.assertEqual(req.host, "www.perl.org") o.open(req) self.assertEqual(req.host, "proxy.example.com") req = Request("http://www.python.org") self.assertEqual(req.host, "www.python.org") o.open(req) self.assertEqual(req.host, "www.python.org") del os.environ['no_proxy'] def test_proxy_no_proxy_all(self): os.environ['no_proxy'] = '*' o = OpenerDirector() ph = urllib.request.ProxyHandler(dict(http="proxy.example.com")) o.add_handler(ph) req = Request("http://www.python.org") self.assertEqual(req.host, "www.python.org") o.open(req) self.assertEqual(req.host, "www.python.org") del os.environ['no_proxy'] def test_proxy_https(self): o = OpenerDirector() ph = urllib.request.ProxyHandler(dict(https="proxy.example.com:3128")) o.add_handler(ph) meth_spec = [ [("https_open", "return response")] ] handlers = add_ordered_mock_handlers(o, meth_spec) req = Request("https://www.example.com/") self.assertEqual(req.host, "www.example.com") o.open(req) self.assertEqual(req.host, "proxy.example.com:3128") self.assertEqual([(handlers[0], "https_open")], [tup[0:2] for tup in o.calls]) def test_proxy_https_proxy_authorization(self): o = OpenerDirector() ph = urllib.request.ProxyHandler(dict(https='proxy.example.com:3128')) o.add_handler(ph) https_handler = MockHTTPSHandler() o.add_handler(https_handler) req = Request("https://www.example.com/") req.add_header("Proxy-Authorization","FooBar") req.add_header("User-Agent","Grail") self.assertEqual(req.host, "www.example.com") self.assertIsNone(req._tunnel_host) o.open(req) # Verify Proxy-Authorization gets tunneled to request. # httpsconn req_headers do not have the Proxy-Authorization header but # the req will have. 
self.assertNotIn(("Proxy-Authorization","FooBar"), https_handler.httpconn.req_headers) self.assertIn(("User-Agent","Grail"), https_handler.httpconn.req_headers) self.assertIsNotNone(req._tunnel_host) self.assertEqual(req.host, "proxy.example.com:3128") self.assertEqual(req.get_header("Proxy-authorization"),"FooBar") # TODO: This should be only for OSX @unittest.skipUnless(sys.platform == 'darwin', "only relevant for OSX") def test_osx_proxy_bypass(self): bypass = { 'exclude_simple': False, 'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.10', '10.0/16'] } # Check hosts that should trigger the proxy bypass for host in ('foo.bar', 'www.bar.com', '127.0.0.1', '10.10.0.1', '10.0.0.1'): self.assertTrue(_proxy_bypass_macosx_sysconf(host, bypass), 'expected bypass of %s to be True' % host) # Check hosts that should not trigger the proxy bypass for host in ('abc.foo.bar', 'bar.com', '127.0.0.2', '10.11.0.1', 'notinbypass'): self.assertFalse(_proxy_bypass_macosx_sysconf(host, bypass), 'expected bypass of %s to be False' % host) # Check the exclude_simple flag bypass = {'exclude_simple': True, 'exceptions': []} self.assertTrue(_proxy_bypass_macosx_sysconf('test', bypass)) def test_basic_auth(self, quote_char='"'): opener = OpenerDirector() password_manager = MockPasswordManager() auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager) realm = "ACME Widget Store" http_handler = MockHTTPHandler( 401, 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' % (quote_char, realm, quote_char) ) opener.add_handler(auth_handler) opener.add_handler(http_handler) self._test_basic_auth(opener, auth_handler, "Authorization", realm, http_handler, password_manager, "http://acme.example.com/protected", "http://acme.example.com/protected", ) def test_basic_auth_with_single_quoted_realm(self): self.test_basic_auth(quote_char="'") def test_basic_auth_with_unquoted_realm(self): opener = OpenerDirector() password_manager = MockPasswordManager() auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager) realm = "ACME Widget Store" http_handler = MockHTTPHandler( 401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm) opener.add_handler(auth_handler) opener.add_handler(http_handler) with self.assertWarns(UserWarning): self._test_basic_auth(opener, auth_handler, "Authorization", realm, http_handler, password_manager, "http://acme.example.com/protected", "http://acme.example.com/protected", ) def test_proxy_basic_auth(self): opener = OpenerDirector() ph = urllib.request.ProxyHandler(dict(http="proxy.example.com:3128")) opener.add_handler(ph) password_manager = MockPasswordManager() auth_handler = urllib.request.ProxyBasicAuthHandler(password_manager) realm = "ACME Networks" http_handler = MockHTTPHandler( 407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm) opener.add_handler(auth_handler) opener.add_handler(http_handler) self._test_basic_auth(opener, auth_handler, "Proxy-authorization", realm, http_handler, password_manager, "http://acme.example.com:3128/protected", "proxy.example.com:3128", ) def test_basic_and_digest_auth_handlers(self): # HTTPDigestAuthHandler raised an exception if it couldn't handle a 40* # response (http://python.org/sf/1479302), where it should instead # return None to allow another handler (especially # HTTPBasicAuthHandler) to handle the response. 
# Also (http://python.org/sf/14797027, RFC 2617 section 1.2), we must # try digest first (since it's the strongest auth scheme), so we record # order of calls here to check digest comes first: class RecordingOpenerDirector(OpenerDirector): def __init__(self): OpenerDirector.__init__(self) self.recorded = [] def record(self, info): self.recorded.append(info) class TestDigestAuthHandler(urllib.request.HTTPDigestAuthHandler): def http_error_401(self, *args, **kwds): self.parent.record("digest") urllib.request.HTTPDigestAuthHandler.http_error_401(self, *args, **kwds) class TestBasicAuthHandler(urllib.request.HTTPBasicAuthHandler): def http_error_401(self, *args, **kwds): self.parent.record("basic") urllib.request.HTTPBasicAuthHandler.http_error_401(self, *args, **kwds) opener = RecordingOpenerDirector() password_manager = MockPasswordManager() digest_handler = TestDigestAuthHandler(password_manager) basic_handler = TestBasicAuthHandler(password_manager) realm = "ACME Networks" http_handler = MockHTTPHandler( 401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm) opener.add_handler(basic_handler) opener.add_handler(digest_handler) opener.add_handler(http_handler) # check basic auth isn't blocked by digest handler failing self._test_basic_auth(opener, basic_handler, "Authorization", realm, http_handler, password_manager, "http://acme.example.com/protected", "http://acme.example.com/protected", ) # check digest was tried before basic (twice, because # _test_basic_auth called .open() twice) self.assertEqual(opener.recorded, ["digest", "basic"]*2) def test_unsupported_auth_digest_handler(self): opener = OpenerDirector() # While using DigestAuthHandler digest_auth_handler = urllib.request.HTTPDigestAuthHandler(None) http_handler = MockHTTPHandler( 401, 'WWW-Authenticate: Kerberos\r\n\r\n') opener.add_handler(digest_auth_handler) opener.add_handler(http_handler) self.assertRaises(ValueError,opener.open,"http://www.example.com") def test_unsupported_auth_basic_handler(self): # While using BasicAuthHandler opener = OpenerDirector() basic_auth_handler = urllib.request.HTTPBasicAuthHandler(None) http_handler = MockHTTPHandler( 401, 'WWW-Authenticate: NTLM\r\n\r\n') opener.add_handler(basic_auth_handler) opener.add_handler(http_handler) self.assertRaises(ValueError,opener.open,"http://www.example.com") def _test_basic_auth(self, opener, auth_handler, auth_header, realm, http_handler, password_manager, request_url, protected_url): import base64 user, password = "wile", "coyote" # .add_password() fed through to password manager auth_handler.add_password(realm, request_url, user, password) self.assertEqual(realm, password_manager.realm) self.assertEqual(request_url, password_manager.url) self.assertEqual(user, password_manager.user) self.assertEqual(password, password_manager.password) opener.open(request_url) # should have asked the password manager for the username/password self.assertEqual(password_manager.target_realm, realm) self.assertEqual(password_manager.target_url, protected_url) # expect one request without authorization, then one with self.assertEqual(len(http_handler.requests), 2) self.assertFalse(http_handler.requests[0].has_header(auth_header)) userpass = bytes('%s:%s' % (user, password), "ascii") auth_hdr_value = ('Basic ' + base64.encodebytes(userpass).strip().decode()) self.assertEqual(http_handler.requests[1].get_header(auth_header), auth_hdr_value) self.assertEqual(http_handler.requests[1].unredirected_hdrs[auth_header], auth_hdr_value) # if the password manager can't find a password, 
the handler won't # handle the HTTP auth error password_manager.user = password_manager.password = None http_handler.reset() opener.open(request_url) self.assertEqual(len(http_handler.requests), 1) self.assertFalse(http_handler.requests[0].has_header(auth_header)) def test_http_closed(self): """Test the connection is cleaned up when the response is closed""" for (transfer, data) in ( ("Connection: close", b"data"), ("Transfer-Encoding: chunked", b"4\r\ndata\r\n0\r\n\r\n"), ("Content-Length: 4", b"data"), ): header = "HTTP/1.1 200 OK\r\n{}\r\n\r\n".format(transfer) conn = test_urllib.fakehttp(header.encode() + data) handler = urllib.request.AbstractHTTPHandler() req = Request("http://dummy/") req.timeout = None with handler.do_open(conn, req) as resp: resp.read() self.assertTrue(conn.fakesock.closed, "Connection not closed with {!r}".format(transfer)) def test_invalid_closed(self): """Test the connection is cleaned up after an invalid response""" conn = test_urllib.fakehttp(b"") handler = urllib.request.AbstractHTTPHandler() req = Request("http://dummy/") req.timeout = None with self.assertRaises(http.client.BadStatusLine): handler.do_open(conn, req) self.assertTrue(conn.fakesock.closed, "Connection not closed") class MiscTests(unittest.TestCase): def opener_has_handler(self, opener, handler_class): self.assertTrue(any(h.__class__ == handler_class for h in opener.handlers)) def test_build_opener(self): class MyHTTPHandler(urllib.request.HTTPHandler): pass class FooHandler(urllib.request.BaseHandler): def foo_open(self): pass class BarHandler(urllib.request.BaseHandler): def bar_open(self): pass build_opener = urllib.request.build_opener o = build_opener(FooHandler, BarHandler) self.opener_has_handler(o, FooHandler) self.opener_has_handler(o, BarHandler) # can take a mix of classes and instances o = build_opener(FooHandler, BarHandler()) self.opener_has_handler(o, FooHandler) self.opener_has_handler(o, BarHandler) # subclasses of default handlers override default handlers o = build_opener(MyHTTPHandler) self.opener_has_handler(o, MyHTTPHandler) # a particular case of overriding: default handlers can be passed # in explicitly o = build_opener() self.opener_has_handler(o, urllib.request.HTTPHandler) o = build_opener(urllib.request.HTTPHandler) self.opener_has_handler(o, urllib.request.HTTPHandler) o = build_opener(urllib.request.HTTPHandler()) self.opener_has_handler(o, urllib.request.HTTPHandler) # Issue2670: multiple handlers sharing the same base class class MyOtherHTTPHandler(urllib.request.HTTPHandler): pass o = build_opener(MyHTTPHandler, MyOtherHTTPHandler) self.opener_has_handler(o, MyHTTPHandler) self.opener_has_handler(o, MyOtherHTTPHandler) @unittest.skipUnless(support.is_resource_enabled('network'), 'test requires network access') def test_issue16464(self): opener = urllib.request.build_opener() request = urllib.request.Request("http://www.example.com/") self.assertEqual(None, request.data) opener.open(request, "1".encode("us-ascii")) self.assertEqual(b"1", request.data) self.assertEqual("1", request.get_header("Content-length")) opener.open(request, "1234567890".encode("us-ascii")) self.assertEqual(b"1234567890", request.data) self.assertEqual("10", request.get_header("Content-length")) def test_HTTPError_interface(self): """ Issue 13211 reveals that HTTPError didn't implement the URLError interface even though HTTPError is a subclass of URLError. 
""" msg = 'something bad happened' url = code = fp = None hdrs = 'Content-Length: 42' err = urllib.error.HTTPError(url, code, msg, hdrs, fp) self.assertTrue(hasattr(err, 'reason')) self.assertEqual(err.reason, 'something bad happened') self.assertTrue(hasattr(err, 'headers')) self.assertEqual(err.headers, 'Content-Length: 42') expected_errmsg = 'HTTP Error %s: %s' % (err.code, err.msg) self.assertEqual(str(err), expected_errmsg) def test_parse_proxy(self): parse_proxy_test_cases = [ ('proxy.example.com', (None, None, None, 'proxy.example.com')), ('proxy.example.com:3128', (None, None, None, 'proxy.example.com:3128')), ('proxy.example.com', (None, None, None, 'proxy.example.com')), ('proxy.example.com:3128', (None, None, None, 'proxy.example.com:3128')), # The authority component may optionally include userinfo # (assumed to be # username:password): ('joe:password@proxy.example.com', (None, 'joe', 'password', 'proxy.example.com')), ('joe:password@proxy.example.com:3128', (None, 'joe', 'password', 'proxy.example.com:3128')), #Examples with URLS ('http://proxy.example.com/', ('http', None, None, 'proxy.example.com')), ('http://proxy.example.com:3128/', ('http', None, None, 'proxy.example.com:3128')), ('http://joe:password@proxy.example.com/', ('http', 'joe', 'password', 'proxy.example.com')), ('http://joe:password@proxy.example.com:3128', ('http', 'joe', 'password', 'proxy.example.com:3128')), # Everything after the authority is ignored ('ftp://joe:password@proxy.example.com/rubbish:3128', ('ftp', 'joe', 'password', 'proxy.example.com')), # Test for no trailing '/' case ('http://joe:password@proxy.example.com', ('http', 'joe', 'password', 'proxy.example.com')) ] for tc, expected in parse_proxy_test_cases: self.assertEqual(_parse_proxy(tc), expected) self.assertRaises(ValueError, _parse_proxy, 'file:/ftp.example.com'), class RequestTests(unittest.TestCase): class PutRequest(Request): method='PUT' def setUp(self): self.get = Request("http://www.python.org/~jeremy/") self.post = Request("http://www.python.org/~jeremy/", "data", headers={"X-Test": "test"}) self.head = Request("http://www.python.org/~jeremy/", method='HEAD') self.put = self.PutRequest("http://www.python.org/~jeremy/") self.force_post = self.PutRequest("http://www.python.org/~jeremy/", method="POST") def test_method(self): self.assertEqual("POST", self.post.get_method()) self.assertEqual("GET", self.get.get_method()) self.assertEqual("HEAD", self.head.get_method()) self.assertEqual("PUT", self.put.get_method()) self.assertEqual("POST", self.force_post.get_method()) def test_data(self): self.assertFalse(self.get.data) self.assertEqual("GET", self.get.get_method()) self.get.data = "spam" self.assertTrue(self.get.data) self.assertEqual("POST", self.get.get_method()) # issue 16464 # if we change data we need to remove content-length header # (cause it's most probably calculated for previous value) def test_setting_data_should_remove_content_length(self): self.assertNotIn("Content-length", self.get.unredirected_hdrs) self.get.add_unredirected_header("Content-length", 42) self.assertEqual(42, self.get.unredirected_hdrs["Content-length"]) self.get.data = "spam" self.assertNotIn("Content-length", self.get.unredirected_hdrs) # issue 17485 same for deleting data. 
def test_deleting_data_should_remove_content_length(self): self.assertNotIn("Content-length", self.get.unredirected_hdrs) self.get.data = 'foo' self.get.add_unredirected_header("Content-length", 3) self.assertEqual(3, self.get.unredirected_hdrs["Content-length"]) del self.get.data self.assertNotIn("Content-length", self.get.unredirected_hdrs) def test_get_full_url(self): self.assertEqual("http://www.python.org/~jeremy/", self.get.get_full_url()) def test_selector(self): self.assertEqual("/~jeremy/", self.get.selector) req = Request("http://www.python.org/") self.assertEqual("/", req.selector) def test_get_type(self): self.assertEqual("http", self.get.type) def test_get_host(self): self.assertEqual("www.python.org", self.get.host) def test_get_host_unquote(self): req = Request("http://www.%70ython.org/") self.assertEqual("www.python.org", req.host) def test_proxy(self): self.assertFalse(self.get.has_proxy()) self.get.set_proxy("www.perl.org", "http") self.assertTrue(self.get.has_proxy()) self.assertEqual("www.python.org", self.get.origin_req_host) self.assertEqual("www.perl.org", self.get.host) def test_wrapped_url(self): req = Request("<URL:http://www.python.org>") self.assertEqual("www.python.org", req.host) def test_url_fragment(self): req = Request("http://www.python.org/?qs=query#fragment=true") self.assertEqual("/?qs=query", req.selector) req = Request("http://www.python.org/#fun=true") self.assertEqual("/", req.selector) # Issue 11703: geturl() omits fragment in the original URL. url = 'http://docs.python.org/library/urllib2.html#OK' req = Request(url) self.assertEqual(req.get_full_url(), url) def test_url_fullurl_get_full_url(self): urls = ['http://docs.python.org', 'http://docs.python.org/library/urllib2.html#OK', 'http://www.python.org/?qs=query#fragment=true' ] for url in urls: req = Request(url) self.assertEqual(req.get_full_url(), req.full_url) def test_main(verbose=None): from test import test_urllib2 support.run_doctest(test_urllib2, verbose) support.run_doctest(urllib.request, verbose) tests = (TrivialTests, OpenerDirectorTests, HandlerTests, MiscTests, RequestTests, RequestHdrsTests) support.run_unittest(*tests) if __name__ == "__main__": test_main(verbose=True)
{ "content_hash": "662f41d86b35a60a64064e2c8d89ca19", "timestamp": "", "source": "github", "line_count": 1646, "max_line_length": 94, "avg_line_length": 41.023693803159176, "alnum_prop": 0.5700703443169196, "repo_name": "robobrobro/ballin-octo-shame", "id": "c746b9a6bb2168622e292400fcc3012e6b572c44", "size": "67525", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "lib/Python-3.4.3/Lib/test/test_urllib2.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "594205" }, { "name": "Batchfile", "bytes": "18943" }, { "name": "C", "bytes": "15291707" }, { "name": "C++", "bytes": "210217" }, { "name": "CSS", "bytes": "2839" }, { "name": "Common Lisp", "bytes": "24481" }, { "name": "DIGITAL Command Language", "bytes": "26402" }, { "name": "Groff", "bytes": "255056" }, { "name": "HTML", "bytes": "130785" }, { "name": "JavaScript", "bytes": "10598" }, { "name": "Makefile", "bytes": "39757" }, { "name": "Objective-C", "bytes": "1390141" }, { "name": "PostScript", "bytes": "13803" }, { "name": "PowerShell", "bytes": "1420" }, { "name": "Prolog", "bytes": "557" }, { "name": "Python", "bytes": "24215570" }, { "name": "R", "bytes": "5378" }, { "name": "Shell", "bytes": "443621" }, { "name": "TeX", "bytes": "323102" }, { "name": "Visual Basic", "bytes": "481" } ], "symlink_target": "" }
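A short interactive sketch of the Request behaviour the tests above exercise; it uses only the public urllib.request API from Python 3, and the URLs are arbitrary examples.

from urllib.request import Request

# A Request with no body defaults to GET; assigning data switches it to POST.
req = Request("http://www.example.com/~user/")
print(req.get_method())                            # GET
req.data = b"spam"
print(req.get_method())                            # POST

# Mirroring test_setting_data_should_remove_content_length: a previously set
# Content-length header is dropped again whenever the body changes.
req.add_unredirected_header("Content-length", "4")
req.data = b"eggs"
print("Content-length" in req.unredirected_hdrs)   # False

# Fragments stay in get_full_url() but are stripped from the selector.
req2 = Request("http://docs.python.org/library/urllib2.html#OK")
print(req2.get_full_url())                         # keeps the #OK fragment
print(req2.selector)                               # /library/urllib2.html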
''' - login and get token - process 2FA if 2FA is setup for this account - Gets data needed to plot a chart for a ticker (these are Canopy tickers e.g. IBM_US or HPQ_US) ''' import requests import json get_token_url = "https://api.canopy.cloud:443/api/v1/sessions/" validate_otp_url = "https://api.canopy.cloud:443/api/v1/sessions/otp/validate.json" #calling the production server for OTP authentication get_partner_users_url = "https://api.canopy.cloud:443/api/v1/admin/users.json" get_analytics_chart_data_url = "https://api.canopy.cloud:443/api/v1/charts/analytics.json" #please replace below with your username and password over here username = 'userxxx' password = 'passxxx' #please enter the OTP token in case it is enabled otp_code = '123456' #first call for a fresh token payload = "user%5Busername%5D=" + username + "&user%5Bpassword%5D=" + password headers = { 'accept': "application/json", 'content-type':"application/x-www-form-urlencoded" } response = requests.request("POST", get_token_url, data=payload, headers=headers) print json.dumps(response.json(), indent=4, sort_keys = True) token = response.json()['token'] login_flow = response.json()['login_flow'] #in case 2FA is enabled use the OTP code to get the second level of authentication if login_flow == '2fa_verification': headers['Authorization'] = token payload = 'otp_code=' + otp_code response = requests.request("POST", validate_otp_url, data=payload, headers=headers) print json.dumps(response.json(), indent=4, sort_keys = True) #print response.text token = response.json()['token'] #Include user ids here to filter results user_ids = "" querystring = {"asset_class":"Equity","field":"market_cap","user_ids":user_ids} headers = { 'authorization': token, 'content-type': "application/x-www-form-urlencoded; charset=UTF-8" } response = requests.request("GET", get_analytics_chart_data_url, headers=headers, params=querystring) print json.dumps(response.json(), indent=4, sort_keys = True)
{ "content_hash": "312d65f7010e78ac9711b5893753a8ca", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 137, "avg_line_length": 35.12280701754386, "alnum_prop": 0.7282717282717283, "repo_name": "Mesitis/community", "id": "e6379a894066178e57e6307c9d12646f69a7d864", "size": "2002", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sample-code/Python/04 Charts/get_analytics_chart_data.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "197979" } ], "symlink_target": "" }
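The same login flow, folded into one reusable helper for readers who want to call it from other scripts; the endpoints, payload encoding and response fields are taken verbatim from the script above, while the function name and its arguments are only illustrative.

import requests

def canopy_login(username, password, otp_code=None):
    """Return an auth token, doing the OTP step only when the account has 2FA enabled."""
    headers = {"accept": "application/json",
               "content-type": "application/x-www-form-urlencoded"}
    payload = "user%5Busername%5D=" + username + "&user%5Bpassword%5D=" + password
    response = requests.post("https://api.canopy.cloud:443/api/v1/sessions/",
                             data=payload, headers=headers)
    body = response.json()
    token = body["token"]
    if body.get("login_flow") == "2fa_verification":
        # Second factor: present the provisional token together with the OTP code.
        headers["Authorization"] = token
        response = requests.post("https://api.canopy.cloud:443/api/v1/sessions/otp/validate.json",
                                 data="otp_code=" + (otp_code or ""), headers=headers)
        token = response.json()["token"]
    return token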
"""Test Alexa config.""" import contextlib from unittest.mock import patch from homeassistant.components.cloud import ALEXA_SCHEMA, alexa_config from homeassistant.util.dt import utcnow from homeassistant.helpers.entity_registry import EVENT_ENTITY_REGISTRY_UPDATED from tests.common import mock_coro, async_fire_time_changed async def test_alexa_config_expose_entity_prefs(hass, cloud_prefs): """Test Alexa config should expose using prefs.""" entity_conf = {"should_expose": False} await cloud_prefs.async_update(alexa_entity_configs={"light.kitchen": entity_conf}) conf = alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, None) assert not conf.should_expose("light.kitchen") entity_conf["should_expose"] = True assert conf.should_expose("light.kitchen") async def test_alexa_config_report_state(hass, cloud_prefs): """Test Alexa config should expose using prefs.""" conf = alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, None) assert cloud_prefs.alexa_report_state is False assert conf.should_report_state is False assert conf.is_reporting_states is False with patch.object(conf, "async_get_access_token", return_value=mock_coro("hello")): await cloud_prefs.async_update(alexa_report_state=True) await hass.async_block_till_done() assert cloud_prefs.alexa_report_state is True assert conf.should_report_state is True assert conf.is_reporting_states is True await cloud_prefs.async_update(alexa_report_state=False) await hass.async_block_till_done() assert cloud_prefs.alexa_report_state is False assert conf.should_report_state is False assert conf.is_reporting_states is False @contextlib.contextmanager def patch_sync_helper(): """Patch sync helper. In Py3.7 this would have been an async context manager. """ to_update = [] to_remove = [] with patch("homeassistant.components.cloud.alexa_config.SYNC_DELAY", 0), patch( "homeassistant.components.cloud.alexa_config.AlexaConfig._sync_helper", side_effect=mock_coro, ) as mock_helper: yield to_update, to_remove actual_to_update, actual_to_remove = mock_helper.mock_calls[0][1] to_update.extend(actual_to_update) to_remove.extend(actual_to_remove) async def test_alexa_update_expose_trigger_sync(hass, cloud_prefs): """Test Alexa config responds to updating exposed entities.""" alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, None) with patch_sync_helper() as (to_update, to_remove): await cloud_prefs.async_update_alexa_entity_config( entity_id="light.kitchen", should_expose=True ) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) await hass.async_block_till_done() assert to_update == ["light.kitchen"] assert to_remove == [] with patch_sync_helper() as (to_update, to_remove): await cloud_prefs.async_update_alexa_entity_config( entity_id="light.kitchen", should_expose=False ) await cloud_prefs.async_update_alexa_entity_config( entity_id="binary_sensor.door", should_expose=True ) await cloud_prefs.async_update_alexa_entity_config( entity_id="sensor.temp", should_expose=True ) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) await hass.async_block_till_done() assert sorted(to_update) == ["binary_sensor.door", "sensor.temp"] assert to_remove == ["light.kitchen"] async def test_alexa_entity_registry_sync(hass, mock_cloud_login, cloud_prefs): """Test Alexa config responds to entity registry.""" alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, hass.data["cloud"]) with patch_sync_helper() as (to_update, to_remove): hass.bus.async_fire( EVENT_ENTITY_REGISTRY_UPDATED, {"action": 
"create", "entity_id": "light.kitchen"}, ) await hass.async_block_till_done() assert to_update == ["light.kitchen"] assert to_remove == [] with patch_sync_helper() as (to_update, to_remove): hass.bus.async_fire( EVENT_ENTITY_REGISTRY_UPDATED, {"action": "remove", "entity_id": "light.kitchen"}, ) await hass.async_block_till_done() assert to_update == [] assert to_remove == ["light.kitchen"] with patch_sync_helper() as (to_update, to_remove): hass.bus.async_fire( EVENT_ENTITY_REGISTRY_UPDATED, {"action": "update", "entity_id": "light.kitchen"}, ) await hass.async_block_till_done() assert to_update == [] assert to_remove == [] async def test_alexa_update_report_state(hass, cloud_prefs): """Test Alexa config responds to reporting state.""" alexa_config.AlexaConfig(hass, ALEXA_SCHEMA({}), cloud_prefs, None) with patch( "homeassistant.components.cloud.alexa_config.AlexaConfig." "async_sync_entities", side_effect=mock_coro, ) as mock_sync, patch( "homeassistant.components.cloud.alexa_config." "AlexaConfig.async_enable_proactive_mode", side_effect=mock_coro, ): await cloud_prefs.async_update(alexa_report_state=True) await hass.async_block_till_done() assert len(mock_sync.mock_calls) == 1
{ "content_hash": "807c01acd7729649905636cf7526bd15", "timestamp": "", "source": "github", "line_count": 150, "max_line_length": 87, "avg_line_length": 36.093333333333334, "alnum_prop": 0.6686368673808645, "repo_name": "fbradyirl/home-assistant", "id": "688d69c16f1987d45d4897c3046118747c7f8c35", "size": "5414", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "tests/components/cloud/test_alexa_config.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1175" }, { "name": "Dockerfile", "bytes": "1829" }, { "name": "Python", "bytes": "16494727" }, { "name": "Ruby", "bytes": "745" }, { "name": "Shell", "bytes": "17784" } ], "symlink_target": "" }
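The patch_sync_helper pattern above (yield mutable lists, then fill them from the mock's recorded call once the block exits) is useful outside Home Assistant as well; below is a stripped-down, framework-free version in which the function being patched is a made-up stand-in.

import contextlib
from unittest import mock

def sync_entities(to_update, to_remove):
    """Stand-in for the coroutine that the real tests patch."""

@contextlib.contextmanager
def capture_sync_args():
    captured_update, captured_remove = [], []
    with mock.patch(f"{__name__}.sync_entities") as mocked:
        yield captured_update, captured_remove
    # Once the caller's block has finished, copy out the recorded call arguments.
    if mocked.call_args is not None:
        update_arg, remove_arg = mocked.call_args[0]
        captured_update.extend(update_arg)
        captured_remove.extend(remove_arg)

with capture_sync_args() as (to_update, to_remove):
    sync_entities(["light.kitchen"], ["sensor.old"])

assert to_update == ["light.kitchen"]
assert to_remove == ["sensor.old"]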
from flask import Flask, render_template from api import api import scraper_control import json import os app = Flask(__name__) app.register_blueprint(api, url_prefix='/api') @app.route('/') def index(): print os.listdir('templates') return render_template('index.html') @app.route('/home') def home(): print os.listdir('templates') return render_template('index.html') @app.route('/sandbox') def sandbox_route(): print os.listdir('templates') return render_template('sandbox.html') @app.route('/documentation') def documentation(): print os.listdir('templates') return render_template('documentation.html') @app.route('/about') def about(): print os.listdir('templates') return render_template('about.html') # @app.route('/update') # def update(): # return json.dumps({"Success?": scraper_control.scrape_all_sources()}) # @app.route('/clearAll') # def clearAll(): # return json.dumps({"Cleared?": scraper_control.clear_all_sources()}) @app.after_request def add_header(response): response.cache_control.max_age = 300 return response if __name__ == '__main__': app.run(debug=True)
{ "content_hash": "515f27947388f743288f6c2ffc7b1f31", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 75, "avg_line_length": 20.333333333333332, "alnum_prop": 0.6764452113891286, "repo_name": "WesApps/wes_api", "id": "7e374dddd571bce71b525499098c0e6f0eeb6b08", "size": "1159", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "runserver.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "131274" }, { "name": "HTML", "bytes": "47338" }, { "name": "JavaScript", "bytes": "88724" }, { "name": "Python", "bytes": "61662" }, { "name": "Shell", "bytes": "189" } ], "symlink_target": "" }
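A small sketch of how the Cache-Control header added in add_header above can be verified without starting a server, using Flask's built-in test client; the blueprint and route below are stand-ins, since the real api module is not shown here.

from flask import Flask, Blueprint

api = Blueprint("api", __name__)

@api.route("/ping")
def ping():
    return "pong"

app = Flask(__name__)
app.register_blueprint(api, url_prefix="/api")

@app.after_request
def add_header(response):
    # Same idea as runserver.py: allow clients to cache responses for five minutes.
    response.cache_control.max_age = 300
    return response

with app.test_client() as client:
    response = client.get("/api/ping")
    assert response.status_code == 200
    assert "max-age=300" in response.headers["Cache-Control"]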
import sys class MultiStack: def __init__(self, stacksize): self.numstacks = 3 self.array = [0] * (stacksize * self.numstacks) self.sizes = [0] * self.numstacks self.stacksize = stacksize self.minvals = [sys.maxint] * (stacksize * self.numstacks) def Push(self, item, stacknum): if self.IsFull(stacknum): raise Exception('Stack is full') self.sizes[stacknum] += 1 if self.IsEmpty(stacknum): self.minvals[self.IndexOfTop(stacknum)] = item else: self.minvals[self.IndexOfTop(stacknum)] = min( item, self.minvals[self.IndexOfTop(stacknum) - 1]) self.array[self.IndexOfTop(stacknum)] = item def Pop(self, stacknum): if self.IsEmpty(stacknum): raise Exception('Stack is empty') value = self.array[self.IndexOfTop(stacknum)] self.array[self.IndexOfTop(stacknum)] = 0 self.sizes[stacknum] -= 1 return value def Peek(self, stacknum): if self.IsEmpty(stacknum): raise Exception('Stack is empty') return self.array[self.IndexOfTop(stacknum)] def Min(self, stacknum): return self.minvals[self.IndexOfTop(stacknum)] def IsEmpty(self, stacknum): return self.sizes[stacknum] == 0 def IsFull(self, stacknum): return self.sizes[stacknum] == self.stacksize def IndexOfTop(self, stacknum): offset = stacknum * self.stacksize return offset + self.sizes[stacknum] - 1 def Size(self, stacknum): return self.sizes[stacknum] def f(N, start, end, buff, stack): if N == 1: stack.Push(stack.Pop(start), end) else: f(N - 1, start, buff, end, stack) f(1, start, end, buff, stack) f(N - 1, buff, end, start, stack) def printTower(newstack): # while not newstack.IsEmpty(0): # print newstack.Pop(0) # print "".join("-" for i in range(newstack.Pop(0))) # while not newstack.IsEmpty(1): # print newstack.Pop(1) # print "".join("-" for i in range(newstack.Pop(1))) while not newstack.IsEmpty(2): # print newstack.Pop(2) print "".join("-" for i in range(newstack.Pop(2))) def FillTower(N): newstack = MultiStack(N * 3) for i in range(N, 0, -1): newstack.Push(i, 0) return newstack N = 3 newstack = FillTower(N) f(N, 0, 2, 1, newstack) printTower(newstack)
{ "content_hash": "34a4e1d348ead2c2224d86a2908e748a", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 66, "avg_line_length": 29.238095238095237, "alnum_prop": 0.5956840390879479, "repo_name": "Kiandr/CrackingCodingInterview", "id": "a884a0447d88db6750c4c9eca224e395e193de16", "size": "2456", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Python/Chapter8/86TowersOfHanoi.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
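As an independent sanity check on the recursion in f() above, the same move order can be replayed on plain Python lists: for N disks it must take 2**N - 1 moves and leave the destination peg holding all disks in order.

def hanoi(n, start, end, buff, pegs, moves):
    # Same move order as f() above, but the pegs are ordinary lists.
    if n == 1:
        pegs[end].append(pegs[start].pop())
        moves.append((start, end))
    else:
        hanoi(n - 1, start, buff, end, pegs, moves)
        hanoi(1, start, end, buff, pegs, moves)
        hanoi(n - 1, buff, end, start, pegs, moves)

for n in range(1, 8):
    pegs = [list(range(n, 0, -1)), [], []]
    moves = []
    hanoi(n, 0, 2, 1, pegs, moves)
    assert len(moves) == 2 ** n - 1
    assert pegs[2] == list(range(n, 0, -1)) and not pegs[0] and not pegs[1]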
from .test_mocks import * from .cpython.testmock import * from .cpython.testwith import *
{ "content_hash": "fa9d75d39f6e394f3fc13ec1996027bb", "timestamp": "", "source": "github", "line_count": 3, "max_line_length": 31, "avg_line_length": 30, "alnum_prop": 0.7666666666666667, "repo_name": "nivbend/mock-open", "id": "1b37b50d2da07a4da9022d21aad1edad50a4c5b9", "size": "161", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/mock_open/test/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "30621" } ], "symlink_target": "" }
import time import uuid import fixtures from lxml import etree import six from nova.compute import arch from nova.virt.libvirt import config as vconfig # Allow passing None to the various connect methods # (i.e. allow the client to rely on default URLs) allow_default_uri_connection = True # Has libvirt connection been used at least once connection_used = False def _reset(): global allow_default_uri_connection allow_default_uri_connection = True # virDomainState VIR_DOMAIN_NOSTATE = 0 VIR_DOMAIN_RUNNING = 1 VIR_DOMAIN_BLOCKED = 2 VIR_DOMAIN_PAUSED = 3 VIR_DOMAIN_SHUTDOWN = 4 VIR_DOMAIN_SHUTOFF = 5 VIR_DOMAIN_CRASHED = 6 # NOTE(mriedem): These values come from include/libvirt/libvirt-domain.h VIR_DOMAIN_XML_SECURE = 1 VIR_DOMAIN_XML_INACTIVE = 2 VIR_DOMAIN_XML_UPDATE_CPU = 4 VIR_DOMAIN_XML_MIGRATABLE = 8 VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1 VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2 VIR_DOMAIN_BLOCK_REBASE_COPY = 8 VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2 VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0 VIR_DOMAIN_EVENT_DEFINED = 0 VIR_DOMAIN_EVENT_UNDEFINED = 1 VIR_DOMAIN_EVENT_STARTED = 2 VIR_DOMAIN_EVENT_SUSPENDED = 3 VIR_DOMAIN_EVENT_RESUMED = 4 VIR_DOMAIN_EVENT_STOPPED = 5 VIR_DOMAIN_EVENT_SHUTDOWN = 6 VIR_DOMAIN_EVENT_PMSUSPENDED = 7 VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1 VIR_DOMAIN_AFFECT_CURRENT = 0 VIR_DOMAIN_AFFECT_LIVE = 1 VIR_DOMAIN_AFFECT_CONFIG = 2 VIR_CPU_COMPARE_ERROR = -1 VIR_CPU_COMPARE_INCOMPATIBLE = 0 VIR_CPU_COMPARE_IDENTICAL = 1 VIR_CPU_COMPARE_SUPERSET = 2 VIR_CRED_USERNAME = 1 VIR_CRED_AUTHNAME = 2 VIR_CRED_LANGUAGE = 3 VIR_CRED_CNONCE = 4 VIR_CRED_PASSPHRASE = 5 VIR_CRED_ECHOPROMPT = 6 VIR_CRED_NOECHOPROMPT = 7 VIR_CRED_REALM = 8 VIR_CRED_EXTERNAL = 9 VIR_MIGRATE_LIVE = 1 VIR_MIGRATE_PEER2PEER = 2 VIR_MIGRATE_TUNNELLED = 4 VIR_MIGRATE_UNDEFINE_SOURCE = 16 VIR_MIGRATE_NON_SHARED_INC = 128 VIR_NODE_CPU_STATS_ALL_CPUS = -1 VIR_DOMAIN_START_PAUSED = 1 # libvirtError enums # (Intentionally different from what's in libvirt. 
We do this to check, # that consumers of the library are using the symbolic names rather than # hardcoding the numerical values) VIR_FROM_QEMU = 100 VIR_FROM_DOMAIN = 200 VIR_FROM_NWFILTER = 330 VIR_FROM_REMOTE = 340 VIR_FROM_RPC = 345 VIR_FROM_NODEDEV = 666 VIR_ERR_NO_SUPPORT = 3 VIR_ERR_XML_DETAIL = 350 VIR_ERR_NO_DOMAIN = 420 VIR_ERR_OPERATION_FAILED = 510 VIR_ERR_OPERATION_INVALID = 55 VIR_ERR_OPERATION_TIMEOUT = 68 VIR_ERR_NO_NWFILTER = 620 VIR_ERR_SYSTEM_ERROR = 900 VIR_ERR_INTERNAL_ERROR = 950 VIR_ERR_CONFIG_UNSUPPORTED = 951 VIR_ERR_NO_NODE_DEVICE = 667 VIR_ERR_NO_SECRET = 66 # Readonly VIR_CONNECT_RO = 1 # virConnectBaselineCPU flags VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1 # snapshotCreateXML flags VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4 VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16 VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 # blockCommit flags VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4 # blockRebase flags VIR_DOMAIN_BLOCK_REBASE_RELATIVE = 8 VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1 VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2 # secret type VIR_SECRET_USAGE_TYPE_NONE = 0 VIR_SECRET_USAGE_TYPE_VOLUME = 1 VIR_SECRET_USAGE_TYPE_CEPH = 2 VIR_SECRET_USAGE_TYPE_ISCSI = 3 # Libvirt version FAKE_LIBVIRT_VERSION = 9011 class HostInfo(object): def __init__(self, arch=arch.X86_64, kB_mem=4096, cpus=2, cpu_mhz=800, cpu_nodes=1, cpu_sockets=1, cpu_cores=2, cpu_threads=1, cpu_model="Penryn", cpu_vendor="Intel", numa_topology='', cpu_disabled=None): """Create a new Host Info object :param arch: (string) indicating the CPU arch (eg 'i686' or whatever else uname -m might return) :param kB_mem: (int) memory size in KBytes :param cpus: (int) the number of active CPUs :param cpu_mhz: (int) expected CPU frequency :param cpu_nodes: (int) the number of NUMA cell, 1 for unusual NUMA topologies or uniform :param cpu_sockets: (int) number of CPU sockets per node if nodes > 1, total number of CPU sockets otherwise :param cpu_cores: (int) number of cores per socket :param cpu_threads: (int) number of threads per core :param cpu_model: CPU model :param cpu_vendor: CPU vendor :param numa_topology: Numa topology :param cpu_disabled: List of disabled cpus """ self.arch = arch self.kB_mem = kB_mem self.cpus = cpus self.cpu_mhz = cpu_mhz self.cpu_nodes = cpu_nodes self.cpu_cores = cpu_cores self.cpu_threads = cpu_threads self.cpu_sockets = cpu_sockets self.cpu_model = cpu_model self.cpu_vendor = cpu_vendor self.numa_topology = numa_topology self.disabled_cpus_list = cpu_disabled or [] @classmethod def _gen_numa_topology(self, cpu_nodes, cpu_sockets, cpu_cores, cpu_threads, kb_mem, numa_mempages_list=None): topology = vconfig.LibvirtConfigCapsNUMATopology() cpu_count = 0 for cell_count in range(cpu_nodes): cell = vconfig.LibvirtConfigCapsNUMACell() cell.id = cell_count cell.memory = kb_mem / cpu_nodes for socket_count in range(cpu_sockets): for cpu_num in range(cpu_cores * cpu_threads): cpu = vconfig.LibvirtConfigCapsNUMACPU() cpu.id = cpu_count cpu.socket_id = cell_count cpu.core_id = cpu_num // cpu_threads cpu.siblings = set([cpu_threads * (cpu_count // cpu_threads) + thread for thread in range(cpu_threads)]) cell.cpus.append(cpu) cpu_count += 1 # Set mempages per numa cell. if numa_mempages_list is empty # we will set only the default 4K pages. 
if numa_mempages_list: mempages = numa_mempages_list[cell_count] else: mempages = vconfig.LibvirtConfigCapsNUMAPages() mempages.size = 4 mempages.total = cell.memory / mempages.size mempages = [mempages] cell.mempages = mempages topology.cells.append(cell) return topology def get_numa_topology(self): return self.numa_topology VIR_DOMAIN_JOB_NONE = 0 VIR_DOMAIN_JOB_BOUNDED = 1 VIR_DOMAIN_JOB_UNBOUNDED = 2 VIR_DOMAIN_JOB_COMPLETED = 3 VIR_DOMAIN_JOB_FAILED = 4 VIR_DOMAIN_JOB_CANCELLED = 5 def _parse_disk_info(element): disk_info = {} disk_info['type'] = element.get('type', 'file') disk_info['device'] = element.get('device', 'disk') driver = element.find('./driver') if driver is not None: disk_info['driver_name'] = driver.get('name') disk_info['driver_type'] = driver.get('type') source = element.find('./source') if source is not None: disk_info['source'] = source.get('file') if not disk_info['source']: disk_info['source'] = source.get('dev') if not disk_info['source']: disk_info['source'] = source.get('path') target = element.find('./target') if target is not None: disk_info['target_dev'] = target.get('dev') disk_info['target_bus'] = target.get('bus') return disk_info def disable_event_thread(self): """Disable nova libvirt driver event thread. The Nova libvirt driver includes a native thread which monitors the libvirt event channel. In a testing environment this becomes problematic because it means we've got a floating thread calling sleep(1) over the life of the unit test. Seems harmless? It's not, because we sometimes want to test things like retry loops that should have specific sleep paterns. An unlucky firing of the libvirt thread will cause a test failure. """ # because we are patching a method in a class MonkeyPatch doesn't # auto import correctly. Import explicitly otherwise the patching # may silently fail. import nova.virt.libvirt.host # noqa def evloop(*args, **kwargs): pass self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.host.Host._init_events', evloop)) class libvirtError(Exception): """This class was copied and slightly modified from `libvirt-python:libvirt-override.py`. Since a test environment will use the real `libvirt-python` version of `libvirtError` if it's installed and not this fake, we need to maintain strict compatibility with the original class, including `__init__` args and instance-attributes. To create a libvirtError instance you should: # Create an unsupported error exception exc = libvirtError('my message') exc.err = (libvirt.VIR_ERR_NO_SUPPORT,) self.err is a tuple of form: (error_code, error_domain, error_message, error_level, str1, str2, str3, int1, int2) Alternatively, you can use the `make_libvirtError` convenience function to allow you to specify these attributes in one shot. 
""" def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None, vol=None): Exception.__init__(self, defmsg) self.err = None def get_error_code(self): if self.err is None: return None return self.err[0] def get_error_domain(self): if self.err is None: return None return self.err[1] def get_error_message(self): if self.err is None: return None return self.err[2] def get_error_level(self): if self.err is None: return None return self.err[3] def get_str1(self): if self.err is None: return None return self.err[4] def get_str2(self): if self.err is None: return None return self.err[5] def get_str3(self): if self.err is None: return None return self.err[6] def get_int1(self): if self.err is None: return None return self.err[7] def get_int2(self): if self.err is None: return None return self.err[8] class NWFilter(object): def __init__(self, connection, xml): self._connection = connection self._xml = xml self._parse_xml(xml) def _parse_xml(self, xml): tree = etree.fromstring(xml) root = tree.find('.') self._name = root.get('name') def undefine(self): self._connection._remove_filter(self) class NodeDevice(object): def __init__(self, connection, xml=None): self._connection = connection self._xml = xml if xml is not None: self._parse_xml(xml) def _parse_xml(self, xml): tree = etree.fromstring(xml) root = tree.find('.') self._name = root.get('name') def attach(self): pass def dettach(self): pass def reset(self): pass class Domain(object): def __init__(self, connection, xml, running=False, transient=False): self._connection = connection if running: connection._mark_running(self) self._state = running and VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF self._transient = transient self._def = self._parse_definition(xml) self._has_saved_state = False self._snapshots = {} self._id = self._connection._id_counter def _parse_definition(self, xml): try: tree = etree.fromstring(xml) except etree.ParseError: raise make_libvirtError( libvirtError, "Invalid XML.", error_code=VIR_ERR_XML_DETAIL, error_domain=VIR_FROM_DOMAIN) definition = {} name = tree.find('./name') if name is not None: definition['name'] = name.text uuid_elem = tree.find('./uuid') if uuid_elem is not None: definition['uuid'] = uuid_elem.text else: definition['uuid'] = str(uuid.uuid4()) vcpu = tree.find('./vcpu') if vcpu is not None: definition['vcpu'] = int(vcpu.text) memory = tree.find('./memory') if memory is not None: definition['memory'] = int(memory.text) os = {} os_type = tree.find('./os/type') if os_type is not None: os['type'] = os_type.text os['arch'] = os_type.get('arch', self._connection.host_info.arch) os_kernel = tree.find('./os/kernel') if os_kernel is not None: os['kernel'] = os_kernel.text os_initrd = tree.find('./os/initrd') if os_initrd is not None: os['initrd'] = os_initrd.text os_cmdline = tree.find('./os/cmdline') if os_cmdline is not None: os['cmdline'] = os_cmdline.text os_boot = tree.find('./os/boot') if os_boot is not None: os['boot_dev'] = os_boot.get('dev') definition['os'] = os features = {} acpi = tree.find('./features/acpi') if acpi is not None: features['acpi'] = True definition['features'] = features devices = {} device_nodes = tree.find('./devices') if device_nodes is not None: disks_info = [] disks = device_nodes.findall('./disk') for disk in disks: disks_info += [_parse_disk_info(disk)] devices['disks'] = disks_info nics_info = [] nics = device_nodes.findall('./interface') for nic in nics: nic_info = {} nic_info['type'] = nic.get('type') mac = nic.find('./mac') if mac is not None: nic_info['mac'] = 
mac.get('address') source = nic.find('./source') if source is not None: if nic_info['type'] == 'network': nic_info['source'] = source.get('network') elif nic_info['type'] == 'bridge': nic_info['source'] = source.get('bridge') nics_info += [nic_info] devices['nics'] = nics_info definition['devices'] = devices return definition def create(self): self.createWithFlags(0) def createWithFlags(self, flags): # FIXME: Not handling flags at the moment self._state = VIR_DOMAIN_RUNNING self._connection._mark_running(self) self._has_saved_state = False def isActive(self): return int(self._state == VIR_DOMAIN_RUNNING) def undefine(self): self._connection._undefine(self) def isPersistent(self): return True def undefineFlags(self, flags): self.undefine() if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE: if self.hasManagedSaveImage(0): self.managedSaveRemove() def destroy(self): self._state = VIR_DOMAIN_SHUTOFF self._connection._mark_not_running(self) def ID(self): return self._id def name(self): return self._def['name'] def UUIDString(self): return self._def['uuid'] def interfaceStats(self, device): return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3] def blockStats(self, device): return [2, 10000242400, 234, 2343424234, 34] def suspend(self): self._state = VIR_DOMAIN_PAUSED def shutdown(self): self._state = VIR_DOMAIN_SHUTDOWN self._connection._mark_not_running(self) def reset(self, flags): # FIXME: Not handling flags at the moment self._state = VIR_DOMAIN_RUNNING self._connection._mark_running(self) def info(self): return [self._state, long(self._def['memory']), long(self._def['memory']), self._def['vcpu'], 123456789] def migrateToURI(self, desturi, flags, dname, bandwidth): raise make_libvirtError( libvirtError, "Migration always fails for fake libvirt!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth): raise make_libvirtError( libvirtError, "Migration always fails for fake libvirt!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def attachDevice(self, xml): disk_info = _parse_disk_info(etree.fromstring(xml)) disk_info['_attached'] = True self._def['devices']['disks'] += [disk_info] return True def attachDeviceFlags(self, xml, flags): if (flags & VIR_DOMAIN_AFFECT_LIVE and self._state != VIR_DOMAIN_RUNNING): raise make_libvirtError( libvirtError, "AFFECT_LIVE only allowed for running domains!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) self.attachDevice(xml) def detachDevice(self, xml): disk_info = _parse_disk_info(etree.fromstring(xml)) disk_info['_attached'] = True return disk_info in self._def['devices']['disks'] def detachDeviceFlags(self, xml, flags): self.detachDevice(xml) def XMLDesc(self, flags): disks = '' for disk in self._def['devices']['disks']: disks += '''<disk type='%(type)s' device='%(device)s'> <driver name='%(driver_name)s' type='%(driver_type)s'/> <source file='%(source)s'/> <target dev='%(target_dev)s' bus='%(target_bus)s'/> <address type='drive' controller='0' bus='0' unit='0'/> </disk>''' % disk nics = '' for nic in self._def['devices']['nics']: nics += '''<interface type='%(type)s'> <mac address='%(mac)s'/> <source %(type)s='%(source)s'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/> </interface>''' % nic return '''<domain type='kvm'> <name>%(name)s</name> <uuid>%(uuid)s</uuid> <memory>%(memory)s</memory> <currentMemory>%(memory)s</currentMemory> <vcpu>%(vcpu)s</vcpu> <os> <type arch='%(arch)s' 
machine='pc-0.12'>hvm</type> <boot dev='hd'/> </os> <features> <acpi/> <apic/> <pae/> </features> <clock offset='localtime'/> <on_poweroff>destroy</on_poweroff> <on_reboot>restart</on_reboot> <on_crash>restart</on_crash> <devices> <emulator>/usr/bin/kvm</emulator> %(disks)s <controller type='ide' index='0'> <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/> </controller> %(nics)s <serial type='file'> <source path='dummy.log'/> <target port='0'/> </serial> <serial type='pty'> <source pty='/dev/pts/27'/> <target port='1'/> </serial> <serial type='tcp'> <source host="-1" service="-1" mode="bind"/> </serial> <console type='file'> <source path='dummy.log'/> <target port='0'/> </console> <input type='tablet' bus='usb'/> <input type='mouse' bus='ps2'/> <graphics type='vnc' port='-1' autoport='yes'/> <graphics type='spice' port='-1' autoport='yes'/> <video> <model type='cirrus' vram='9216' heads='1'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/> </video> <memballoon model='virtio'> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </memballoon> </devices> </domain>''' % {'name': self._def['name'], 'uuid': self._def['uuid'], 'memory': self._def['memory'], 'vcpu': self._def['vcpu'], 'arch': self._def['os']['arch'], 'disks': disks, 'nics': nics} def managedSave(self, flags): self._connection._mark_not_running(self) self._has_saved_state = True def managedSaveRemove(self, flags): self._has_saved_state = False def hasManagedSaveImage(self, flags): return int(self._has_saved_state) def resume(self): self._state = VIR_DOMAIN_RUNNING def snapshotCreateXML(self, xml, flags): tree = etree.fromstring(xml) name = tree.find('./name').text snapshot = DomainSnapshot(name, self) self._snapshots[name] = snapshot return snapshot def vcpus(self): vcpus = ([], []) for i in range(0, self._def['vcpu']): vcpus[0].append((i, 1, 120405, i)) vcpus[1].append((True, True, True, True)) return vcpus def memoryStats(self): return {} def maxMemory(self): return self._def['memory'] def blockJobInfo(self, disk, flags): return {} def jobInfo(self): return [] def jobStats(self, flags=0): return {} class DomainSnapshot(object): def __init__(self, name, domain): self._name = name self._domain = domain def delete(self, flags): del self._domain._snapshots[self._name] class Connection(object): def __init__(self, uri=None, readonly=False, version=9011, hv_version=1001000, host_info=None): if not uri or uri == '': if allow_default_uri_connection: uri = 'qemu:///session' else: raise ValueError("URI was None, but fake libvirt is " "configured to not accept this.") uri_whitelist = ['qemu:///system', 'qemu:///session', 'lxc:///', # from LibvirtDriver._uri() 'xen:///', # from LibvirtDriver._uri() 'uml:///system', 'test:///default', 'parallels:///system'] if uri not in uri_whitelist: raise make_libvirtError( libvirtError, "libvirt error: no connection driver " "available for No connection for URI %s" % uri, error_code=5, error_domain=0) self.readonly = readonly self._uri = uri self._vms = {} self._running_vms = {} self._id_counter = 1 # libvirt reserves 0 for the hypervisor. 
self._nwfilters = {} self._nodedevs = {} self._event_callbacks = {} self.fakeLibVersion = version self.fakeVersion = hv_version self.host_info = host_info or HostInfo() def _add_filter(self, nwfilter): self._nwfilters[nwfilter._name] = nwfilter def _remove_filter(self, nwfilter): del self._nwfilters[nwfilter._name] def _add_nodedev(self, nodedev): self._nodedevs[nodedev._name] = nodedev def _remove_nodedev(self, nodedev): del self._nodedevs[nodedev._name] def _mark_running(self, dom): self._running_vms[self._id_counter] = dom self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0) self._id_counter += 1 def _mark_not_running(self, dom): if dom._transient: self._undefine(dom) dom._id = -1 for (k, v) in six.iteritems(self._running_vms): if v == dom: del self._running_vms[k] self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0) return def _undefine(self, dom): del self._vms[dom.name()] if not dom._transient: self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0) def getInfo(self): return [self.host_info.arch, self.host_info.kB_mem, self.host_info.cpus, self.host_info.cpu_mhz, self.host_info.cpu_nodes, self.host_info.cpu_sockets, self.host_info.cpu_cores, self.host_info.cpu_threads] def numOfDomains(self): return len(self._running_vms) def listDomainsID(self): return self._running_vms.keys() def lookupByID(self, id): if id in self._running_vms: return self._running_vms[id] raise make_libvirtError( libvirtError, 'Domain not found: no domain with matching id %d' % id, error_code=VIR_ERR_NO_DOMAIN, error_domain=VIR_FROM_QEMU) def lookupByName(self, name): if name in self._vms: return self._vms[name] raise make_libvirtError( libvirtError, 'Domain not found: no domain with matching name "%s"' % name, error_code=VIR_ERR_NO_DOMAIN, error_domain=VIR_FROM_QEMU) def listAllDomains(self, flags): vms = [] for vm in self._vms: if flags & VIR_CONNECT_LIST_DOMAINS_ACTIVE: if vm.state != VIR_DOMAIN_SHUTOFF: vms.append(vm) if flags & VIR_CONNECT_LIST_DOMAINS_INACTIVE: if vm.state == VIR_DOMAIN_SHUTOFF: vms.append(vm) return vms def _emit_lifecycle(self, dom, event, detail): if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks: return cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE] callback = cbinfo[0] opaque = cbinfo[1] callback(self, dom, event, detail, opaque) def defineXML(self, xml): dom = Domain(connection=self, running=False, transient=False, xml=xml) self._vms[dom.name()] = dom self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0) return dom def createXML(self, xml, flags): dom = Domain(connection=self, running=True, transient=True, xml=xml) self._vms[dom.name()] = dom self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0) return dom def getType(self): if self._uri == 'qemu:///system': return 'QEMU' def getLibVersion(self): return self.fakeLibVersion def getVersion(self): return self.fakeVersion def getHostname(self): return 'compute1' def domainEventRegisterAny(self, dom, eventid, callback, opaque): self._event_callbacks[eventid] = [callback, opaque] def registerCloseCallback(self, cb, opaque): pass def getCPUMap(self): """Return calculated CPU map from HostInfo, by default showing 2 online CPUs. 
""" active_cpus = self.host_info.cpus total_cpus = active_cpus + len(self.host_info.disabled_cpus_list) cpu_map = [True if cpu_num not in self.host_info.disabled_cpus_list else False for cpu_num in range(total_cpus)] return (total_cpus, cpu_map, active_cpus) def getCapabilities(self): """Return spoofed capabilities.""" numa_topology = self.host_info.get_numa_topology() if isinstance(numa_topology, vconfig.LibvirtConfigCapsNUMATopology): numa_topology = numa_topology.to_xml() return '''<capabilities> <host> <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid> <cpu> <arch>x86_64</arch> <model>Penryn</model> <vendor>Intel</vendor> <topology sockets='%(sockets)s' cores='%(cores)s' threads='%(threads)s'/> <feature name='xtpr'/> <feature name='tm2'/> <feature name='est'/> <feature name='vmx'/> <feature name='ds_cpl'/> <feature name='monitor'/> <feature name='pbe'/> <feature name='tm'/> <feature name='ht'/> <feature name='ss'/> <feature name='acpi'/> <feature name='ds'/> <feature name='vme'/> </cpu> <migration_features> <live/> <uri_transports> <uri_transport>tcp</uri_transport> </uri_transports> </migration_features> %(topology)s <secmodel> <model>apparmor</model> <doi>0</doi> </secmodel> </host> <guest> <os_type>hvm</os_type> <arch name='i686'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu</emulator> <machine>pc-0.14</machine> <machine canonical='pc-0.14'>pc</machine> <machine>pc-0.13</machine> <machine>pc-0.12</machine> <machine>pc-0.11</machine> <machine>pc-0.10</machine> <machine>isapc</machine> <domain type='qemu'> </domain> <domain type='kvm'> <emulator>/usr/bin/kvm</emulator> <machine>pc-0.14</machine> <machine canonical='pc-0.14'>pc</machine> <machine>pc-0.13</machine> <machine>pc-0.12</machine> <machine>pc-0.11</machine> <machine>pc-0.10</machine> <machine>isapc</machine> </domain> </arch> <features> <cpuselection/> <deviceboot/> <pae/> <nonpae/> <acpi default='on' toggle='yes'/> <apic default='on' toggle='no'/> </features> </guest> <guest> <os_type>hvm</os_type> <arch name='x86_64'> <wordsize>64</wordsize> <emulator>/usr/bin/qemu-system-x86_64</emulator> <machine>pc-0.14</machine> <machine canonical='pc-0.14'>pc</machine> <machine>pc-0.13</machine> <machine>pc-0.12</machine> <machine>pc-0.11</machine> <machine>pc-0.10</machine> <machine>isapc</machine> <domain type='qemu'> </domain> <domain type='kvm'> <emulator>/usr/bin/kvm</emulator> <machine>pc-0.14</machine> <machine canonical='pc-0.14'>pc</machine> <machine>pc-0.13</machine> <machine>pc-0.12</machine> <machine>pc-0.11</machine> <machine>pc-0.10</machine> <machine>isapc</machine> </domain> </arch> <features> <cpuselection/> <deviceboot/> <acpi default='on' toggle='yes'/> <apic default='on' toggle='no'/> </features> </guest> <guest> <os_type>hvm</os_type> <arch name='armv7l'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-arm</emulator> <machine>integratorcp</machine> <machine>vexpress-a9</machine> <machine>syborg</machine> <machine>musicpal</machine> <machine>mainstone</machine> <machine>n800</machine> <machine>n810</machine> <machine>n900</machine> <machine>cheetah</machine> <machine>sx1</machine> <machine>sx1-v1</machine> <machine>beagle</machine> <machine>beaglexm</machine> <machine>tosa</machine> <machine>akita</machine> <machine>spitz</machine> <machine>borzoi</machine> <machine>terrier</machine> <machine>connex</machine> <machine>verdex</machine> <machine>lm3s811evb</machine> <machine>lm3s6965evb</machine> <machine>realview-eb</machine> <machine>realview-eb-mpcore</machine> <machine>realview-pb-a8</machine> 
<machine>realview-pbx-a9</machine> <machine>versatilepb</machine> <machine>versatileab</machine> <domain type='qemu'> </domain> </arch> <features> <deviceboot/> </features> </guest> <guest> <os_type>hvm</os_type> <arch name='mips'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-mips</emulator> <machine>malta</machine> <machine>mipssim</machine> <machine>magnum</machine> <machine>pica61</machine> <machine>mips</machine> <domain type='qemu'> </domain> </arch> <features> <deviceboot/> </features> </guest> <guest> <os_type>hvm</os_type> <arch name='mipsel'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-mipsel</emulator> <machine>malta</machine> <machine>mipssim</machine> <machine>magnum</machine> <machine>pica61</machine> <machine>mips</machine> <domain type='qemu'> </domain> </arch> <features> <deviceboot/> </features> </guest> <guest> <os_type>hvm</os_type> <arch name='sparc'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-sparc</emulator> <machine>SS-5</machine> <machine>leon3_generic</machine> <machine>SS-10</machine> <machine>SS-600MP</machine> <machine>SS-20</machine> <machine>Voyager</machine> <machine>LX</machine> <machine>SS-4</machine> <machine>SPARCClassic</machine> <machine>SPARCbook</machine> <machine>SS-1000</machine> <machine>SS-2000</machine> <machine>SS-2</machine> <domain type='qemu'> </domain> </arch> </guest> <guest> <os_type>hvm</os_type> <arch name='ppc'> <wordsize>32</wordsize> <emulator>/usr/bin/qemu-system-ppc</emulator> <machine>g3beige</machine> <machine>virtex-ml507</machine> <machine>mpc8544ds</machine> <machine canonical='bamboo-0.13'>bamboo</machine> <machine>bamboo-0.13</machine> <machine>bamboo-0.12</machine> <machine>ref405ep</machine> <machine>taihu</machine> <machine>mac99</machine> <machine>prep</machine> <domain type='qemu'> </domain> </arch> <features> <deviceboot/> </features> </guest> </capabilities>''' % {'sockets': self.host_info.cpu_sockets, 'cores': self.host_info.cpu_cores, 'threads': self.host_info.cpu_threads, 'topology': numa_topology} def compareCPU(self, xml, flags): tree = etree.fromstring(xml) arch_node = tree.find('./arch') if arch_node is not None: if arch_node.text not in [arch.X86_64, arch.I686]: return VIR_CPU_COMPARE_INCOMPATIBLE model_node = tree.find('./model') if model_node is not None: if model_node.text != self.host_info.cpu_model: return VIR_CPU_COMPARE_INCOMPATIBLE vendor_node = tree.find('./vendor') if vendor_node is not None: if vendor_node.text != self.host_info.cpu_vendor: return VIR_CPU_COMPARE_INCOMPATIBLE # The rest of the stuff libvirt implements is rather complicated # and I don't think it adds much value to replicate it here. 
return VIR_CPU_COMPARE_IDENTICAL def getCPUStats(self, cpuNum, flag): if cpuNum < 2: return {'kernel': 5664160000000, 'idle': 1592705190000000, 'user': 26728850000000, 'iowait': 6121490000000} else: raise make_libvirtError( libvirtError, "invalid argument: Invalid cpu number", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def nwfilterLookupByName(self, name): try: return self._nwfilters[name] except KeyError: raise make_libvirtError( libvirtError, "no nwfilter with matching name %s" % name, error_code=VIR_ERR_NO_NWFILTER, error_domain=VIR_FROM_NWFILTER) def nwfilterDefineXML(self, xml): nwfilter = NWFilter(self, xml) self._add_filter(nwfilter) def nodeDeviceLookupByName(self, name): try: return self._nodedevs[name] except KeyError: raise make_libvirtError( libvirtError, "no nodedev with matching name %s" % name, error_code=VIR_ERR_NO_NODE_DEVICE, error_domain=VIR_FROM_NODEDEV) def listDefinedDomains(self): return [] def listDevices(self, cap, flags): return [] def baselineCPU(self, cpu, flag): """Add new libvirt API.""" return """<cpu mode='custom' match='exact'> <model>Penryn</model> <vendor>Intel</vendor> <feature name='xtpr'/> <feature name='tm2'/> <feature name='est'/> <feature name='vmx'/> <feature name='ds_cpl'/> <feature name='monitor'/> <feature name='pbe'/> <feature name='tm'/> <feature name='ht'/> <feature name='ss'/> <feature name='acpi'/> <feature name='ds'/> <feature name='vme'/> <feature policy='require' name='aes'/> </cpu>""" def secretLookupByUsage(self, usage_type_obj, usage_id): pass def secretDefineXML(self, xml): pass def openAuth(uri, auth, flags=0): if type(auth) != list: raise Exception("Expected a list for 'auth' parameter") if type(auth[0]) != list: raise Exception("Expected a function in 'auth[0]' parameter") if not callable(auth[1]): raise Exception("Expected a function in 'auth[1]' parameter") return Connection(uri, (flags == VIR_CONNECT_RO)) def virEventRunDefaultImpl(): time.sleep(1) def virEventRegisterDefaultImpl(): if connection_used: raise Exception("virEventRegisterDefaultImpl() must be " "called before connection is used.") def registerErrorHandler(handler, ctxt): pass def make_libvirtError(error_class, msg, error_code=None, error_domain=None, error_message=None, error_level=None, str1=None, str2=None, str3=None, int1=None, int2=None): """Convenience function for creating `libvirtError` exceptions which allow you to specify arguments in constructor without having to manipulate the `err` tuple directly. We need to pass in `error_class` to this function because it may be `libvirt.libvirtError` or `fakelibvirt.libvirtError` depending on whether `libvirt-python` is installed. """ exc = error_class(msg) exc.err = (error_code, error_domain, error_message, error_level, str1, str2, str3, int1, int2) return exc virDomain = Domain virNodeDevice = NodeDevice virConnect = Connection class FakeLibvirtFixture(fixtures.Fixture): """Performs global setup/stubbing for all libvirt tests. """ def setUp(self): super(FakeLibvirtFixture, self).setUp() disable_event_thread(self)
{ "content_hash": "8b43bbb0cb8db533a6bfd1b77177d3f2", "timestamp": "", "source": "github", "line_count": 1294, "max_line_length": 79, "avg_line_length": 30.10355486862442, "alnum_prop": 0.5757816912255481, "repo_name": "CloudServer/nova", "id": "13665b9dd3816234e6bf44de7dc2cfd490165904", "size": "39570", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "nova/tests/unit/virt/libvirt/fakelibvirt.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "16292014" }, { "name": "Shell", "bytes": "20716" }, { "name": "Smarty", "bytes": "282020" } ], "symlink_target": "" }
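A minimal illustration of driving the fake connection above from a test: define a domain from a bare XML snippet, start it, and watch the bookkeeping. The domain name and XML are arbitrary, only methods implemented by this fake are called, and the import path simply follows this module's location in the nova tree.

from nova.tests.unit.virt.libvirt import fakelibvirt

conn = fakelibvirt.Connection("qemu:///system")

xml = """<domain type='kvm'>
  <name>demo-instance</name>
  <memory>2097152</memory>
  <vcpu>2</vcpu>
  <os><type arch='x86_64'>hvm</type></os>
  <devices/>
</domain>"""

dom = conn.defineXML(xml)
assert conn.lookupByName("demo-instance") is dom
assert dom.isActive() == 0          # defined, but not yet running

dom.create()
assert dom.isActive() == 1
assert dom.ID() in conn.listDomainsID()

dom.destroy()
assert dom.isActive() == 0
assert dom.ID() not in conn.listDomainsID()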
from django.core.mail.backends.smtp import EmailBackend from django.core.mail import get_connection from django.conf import settings class JumoEmailBackend(EmailBackend): def send_messages(self, email_messages): for msg in email_messages: super(JumoEmailBackend, self).send_messages([msg]) def send(self, msg): super(JumoEmailBackend, self).send_messages([msg])
{ "content_hash": "c5e7ea0b89dac9c22fe6889fa6c00797", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 62, "avg_line_length": 33.416666666666664, "alnum_prop": 0.7306733167082294, "repo_name": "jumoconnect/openjumo", "id": "8743dd277f53b17f3f4baeee3ebcd308574d03a4", "size": "401", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jumodjango/mailer/backend.py", "mode": "33188", "license": "mit", "language": [ { "name": "ActionScript", "bytes": "5874" }, { "name": "JavaScript", "bytes": "341559" }, { "name": "Python", "bytes": "928137" }, { "name": "Shell", "bytes": "871" } ], "symlink_target": "" }
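To route mail through the backend above, a Django project hands its dotted path to get_connection (or sets EMAIL_BACKEND); the path below follows this repository's layout (jumodjango/mailer/backend.py) and the addresses are placeholders. Each message then goes out through its own call to the stock SMTP backend rather than as one batch.

from django.core.mail import EmailMessage, get_connection

connection = get_connection("mailer.backend.JumoEmailBackend")
messages = [
    EmailMessage("Welcome", "Thanks for joining.", "noreply@example.org", [address])
    for address in ("a@example.org", "b@example.org")
]
# JumoEmailBackend.send_messages() loops and forwards one message per call to
# the parent SMTP backend, so each message gets its own SMTP conversation
# unless a connection is already held open.
connection.send_messages(messages)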
"""This example updates the CPC bid and status for a given ad group. To get ad groups, run get_ad_groups.py. The LoadFromStorage method is pulling credentials and properties from a "googleads.yaml" file. By default, it looks for this file in your home directory. For more information, see the "Caching authentication information" section of our README. """ from googleads import adwords AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE' CPC_BID_MICRO_AMOUNT = 'INSERT_CPC_BID_MICRO_AMOUNT_HERE' def main(client, ad_group_id, bid_micro_amount=None): # Initialize appropriate service. ad_group_service = client.GetService('AdGroupService', version='v201809') # Construct operations and update an ad group. operations = [{ 'operator': 'SET', 'operand': { 'id': ad_group_id, 'status': 'PAUSED' } }] if bid_micro_amount: operations[0]['operand']['biddingStrategyConfiguration'] = { 'bids': [{ 'xsi_type': 'CpcBid', 'bid': { 'microAmount': bid_micro_amount, } }] } ad_groups = ad_group_service.mutate(operations) # Display results. for ad_group in ad_groups['value']: bidding_strategy_configuration = ad_group['biddingStrategyConfiguration'] # Find the CpcBid in the bidding strategy configuration's bids collection. cpc_bid_micros = None if bidding_strategy_configuration: bids = bidding_strategy_configuration['bids'] if bids: for bid in bids: if bid['Bids.Type'] == 'CpcBid': cpc_bid_micros = bid['bid']['microAmount'] break print ('Ad group with name "%s", and id "%s" was updated to have status ' '"%s" and CPC bid %d.' % (ad_group['name'], ad_group['id'], ad_group['status'], cpc_bid_micros)) if __name__ == '__main__': # Initialize client object. adwords_client = adwords.AdWordsClient.LoadFromStorage() main(adwords_client, AD_GROUP_ID, CPC_BID_MICRO_AMOUNT)
{ "content_hash": "23da9eb8a3f76476e40b8412f5797f40", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 78, "avg_line_length": 29.231884057971016, "alnum_prop": 0.6351016360932077, "repo_name": "Aloomaio/googleads-python-lib", "id": "1385903f1e1f3b7d4650a656fe018215b5ffcf9b", "size": "2639", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/adwords/v201809/basic_operations/update_ad_group.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "491015" } ], "symlink_target": "" }
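The script above wants the bid expressed in micros (millionths of the account currency); a small conversion helper makes that less error-prone. The helper name is ours; only the 10**6 scale comes from the AdWords convention.

def to_micros(amount):
    """Convert a currency amount such as 1.50 into the integer micro units AdWords expects."""
    return int(round(amount * 1000000))

assert to_micros(1.50) == 1500000
assert to_micros(0.25) == 250000

# Usage with the sample above, e.g.:
#   main(adwords_client, AD_GROUP_ID, to_micros(1.50))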
import string import os def PathCorrection(inData): outData=inData for i in string.ascii_lowercase: #Replace drive letters with /mnt/ outData=outData.replace(i+':','/mnt/'+i) #if drive letter is supplied in lowercase outData=outData.replace(i.upper()+':','/mnt/'+i) #if drive letter is supplied as uppercase outData=outData.replace(os.path.sep, '/') #Change windows filesep to linux filesep return outData
{ "content_hash": "30c3ff955643224e681b6f89d1b2de4c", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 92, "avg_line_length": 37.81818181818182, "alnum_prop": 0.7451923076923077, "repo_name": "mrirecon/bart", "id": "e447651ba06d02f9f85c1f646dc257f12b21df21", "size": "416", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "python/wslsupport.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "3791759" }, { "name": "C++", "bytes": "1077625" }, { "name": "CMake", "bytes": "1515" }, { "name": "Cuda", "bytes": "157841" }, { "name": "MATLAB", "bytes": "13176" }, { "name": "Makefile", "bytes": "245632" }, { "name": "Python", "bytes": "51671" }, { "name": "Shell", "bytes": "20150" } ], "symlink_target": "" }
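A few concrete calls make the behaviour of PathCorrection above easier to see. Note that the os.path.sep substitution only turns backslashes into forward slashes when the function runs on Windows, which is the intended setting for this WSL helper; on Linux the separator is already '/'.

from wslsupport import PathCorrection

# On Windows (os.path.sep == '\\') the conversion looks like this:
PathCorrection('C:\\Users\\me\\scan.hdr')   # -> '/mnt/c/Users/me/scan.hdr'
PathCorrection('d:\\data\\out')             # -> '/mnt/d/data/out'

# Paths that are already POSIX-style pass through unchanged:
PathCorrection('/tmp/out')                  # -> '/tmp/out'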
""" FILE: sample_recognize_custom_forms_async.py DESCRIPTION: This sample demonstrates how to analyze a form from a document with a custom trained model. The form must be of the same type as the forms the custom model was trained on. To learn how to train your own models, look at sample_train_model_without_labels_async.py and sample_train_model_with_labels_async.py The model can be trained using the training files found here: https://aka.ms/azsdk/formrecognizer/sampletrainingfiles-v3.1 USAGE: python sample_recognize_custom_forms_async.py Set the environment variables with your own values before running the sample: 1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Form Recognizer resource. 2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key 3) CUSTOM_TRAINED_MODEL_ID - the ID of your custom trained model -OR- CONTAINER_SAS_URL_V2 - The shared access signature (SAS) Url of your Azure Blob Storage container with your forms. A model will be trained and used to run the sample. """ import os import asyncio class RecognizeCustomFormsSampleAsync(object): async def recognize_custom_forms(self, custom_model_id): path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", "..", "./sample_forms/forms/Form_1.jpg")) # [START recognize_custom_forms_async] from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer.aio import FormRecognizerClient endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"] key = os.environ["AZURE_FORM_RECOGNIZER_KEY"] model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id) async with FormRecognizerClient( endpoint=endpoint, credential=AzureKeyCredential(key) ) as form_recognizer_client: # Make sure your form's type is included in the list of form types the custom model can recognize with open(path_to_sample_forms, "rb") as f: poller = await form_recognizer_client.begin_recognize_custom_forms( model_id=model_id, form=f, include_field_elements=True ) forms = await poller.result() for idx, form in enumerate(forms): print("--------Recognizing Form #{}--------".format(idx+1)) print("Form has type {}".format(form.form_type)) print("Form has form type confidence {}".format(form.form_type_confidence)) print("Form was analyzed with model with ID {}".format(form.model_id)) for name, field in form.fields.items(): # each field is of type FormField # label_data is populated if you are using a model trained without labels, # since the service needs to make predictions for labels if not explicitly given to it. 
if field.label_data: print("...Field '{}' has label '{}' with a confidence score of {}".format( name, field.label_data.text, field.confidence )) print("...Label '{}' has value '{}' with a confidence score of {}".format( field.label_data.text if field.label_data else name, field.value, field.confidence )) # iterate over tables, lines, and selection marks on each page for page in form.pages: for i, table in enumerate(page.tables): print("\nTable {} on page {}".format(i + 1, table.page_number)) for cell in table.cells: print("...Cell[{}][{}] has text '{}' with confidence {}".format( cell.row_index, cell.column_index, cell.text, cell.confidence )) print("\nLines found on page {}".format(page.page_number)) for line in page.lines: print("...Line '{}' is made up of the following words: ".format(line.text)) for word in line.words: print("......Word '{}' has a confidence of {}".format( word.text, word.confidence )) if page.selection_marks: print("\nSelection marks found on page {}".format(page.page_number)) for selection_mark in page.selection_marks: print("......Selection mark is '{}' and has a confidence of {}".format( selection_mark.state, selection_mark.confidence )) print("-----------------------------------") # [END recognize_custom_forms_async] async def main(): sample = RecognizeCustomFormsSampleAsync() model_id = None if os.getenv("CONTAINER_SAS_URL_V2"): from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer.aio import FormTrainingClient endpoint = os.getenv("AZURE_FORM_RECOGNIZER_ENDPOINT") key = os.getenv("AZURE_FORM_RECOGNIZER_KEY") if not endpoint or not key: raise ValueError("Please provide endpoint and API key to run the samples.") form_training_client = FormTrainingClient( endpoint=endpoint, credential=AzureKeyCredential(key) ) async with form_training_client: model = await (await form_training_client.begin_training( os.getenv("CONTAINER_SAS_URL_V2"), use_training_labels=True)).result() model_id = model.model_id await sample.recognize_custom_forms(model_id) if __name__ == '__main__': asyncio.run(main())
{ "content_hash": "559850bc3bde8a3947b01ad15c91d77c", "timestamp": "", "source": "github", "line_count": 127, "max_line_length": 121, "avg_line_length": 47.68503937007874, "alnum_prop": 0.5696829590488771, "repo_name": "Azure/azure-sdk-for-python", "id": "ee67d6b7e06f318d1f04d376693edfe66be3b0e8", "size": "6384", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_custom_forms_async.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
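Once the poller result is back, callers often only need the field values; a small helper like the one below (our own naming, not part of the azure-ai-formrecognizer SDK) reduces each recognized form from the sample above to a plain dictionary.

def fields_to_dict(recognized_form, minimum_confidence=0.0):
    """Map field name -> field value for every field at or above the given confidence."""
    return {
        name: field.value
        for name, field in recognized_form.fields.items()
        if field.confidence is None or field.confidence >= minimum_confidence
    }

# For example, inside recognize_custom_forms() after `forms = await poller.result()`:
#     for form in forms:
#         print(fields_to_dict(form, minimum_confidence=0.5))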
import pika
import time

server = "hostname"
port = 5672
vhost = "yourvhost"
username = "username"
password = "password"
exchangeName = "testEx"
requestQueue = "requestQ"
requestKey = "request"

# callback function on receiving request messages; reply on the routing key given in the reply_to header
def onMessage(channel, method, properties, body):
    print body
    try:
        replyProp = pika.BasicProperties(content_type = "text/plain", delivery_mode = 1)
        channel.basic_publish(exchange = exchangeName, routing_key = properties.reply_to, properties = replyProp, body = "Reply to %s" % (body))
        channel.basic_ack(delivery_tag = method.delivery_tag)
    except:
        channel.basic_nack(delivery_tag = method.delivery_tag)

while True:
    try:
        # connect
        credentials = pika.PlainCredentials(username, password)
        connection = pika.BlockingConnection(pika.ConnectionParameters(host = server, port = port, virtual_host = vhost, credentials = credentials, heartbeat_interval = 60))
        channel = connection.channel()

        # declare exchange and queue, bind them and consume messages
        channel.exchange_declare(exchange = exchangeName, exchange_type = "direct", auto_delete = True)
        channel.queue_declare(queue = requestQueue, exclusive = True, auto_delete = True)
        channel.queue_bind(exchange = exchangeName, queue = requestQueue, routing_key = requestKey)
        channel.basic_consume(consumer_callback = onMessage, queue = requestQueue, no_ack = False)
        channel.start_consuming()
    except Exception, e:
        # reconnect on exception
        print "Exception handled, reconnecting...\nDetail:\n%s" % e
        try:
            connection.close()
        except:
            pass
        time.sleep(5)
{ "content_hash": "21555d90c6a32f4e3d9809df5cbab066", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 167, "avg_line_length": 37.395348837209305, "alnum_prop": 0.7475124378109452, "repo_name": "robomq/robomq.io", "id": "721700bf2a1ee7042856d208a56d5db3ecb9e2a5", "size": "1967", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sdk/AMQP/Python/request-reply/consumer.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "44996" }, { "name": "C++", "bytes": "4526" }, { "name": "Go", "bytes": "28122" }, { "name": "HTML", "bytes": "5634" }, { "name": "Java", "bytes": "29375" }, { "name": "JavaScript", "bytes": "22393" }, { "name": "Makefile", "bytes": "2031" }, { "name": "PHP", "bytes": "19879" }, { "name": "Python", "bytes": "24957" }, { "name": "Ruby", "bytes": "20964" } ], "symlink_target": "" }
from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/encoded_disk/shared_message_fragment_base.iff" result.attribute_template_id = -1 result.stfName("item_n","message_fragment_base") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
{ "content_hash": "6cc79fe0df197f4a5149b1d727c7c516", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 82, "avg_line_length": 24.692307692307693, "alnum_prop": 0.7040498442367601, "repo_name": "anhstudios/swganh", "id": "0a06032ad904d18c95fad5c4faf4ca8e8ffbb647", "size": "466", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "data/scripts/templates/object/tangible/encoded_disk/shared_message_fragment_base.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11887" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2357839" }, { "name": "CMake", "bytes": "41264" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7503510" }, { "name": "SQLPL", "bytes": "42770" } ], "symlink_target": "" }
import urllib2
import re
import itertools
import datetime
import gzip
import urllib
import os, os.path
import StringIO
import string
import shutil

UNIPROT_BASEURL="ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/proteomes/"
BACKGROUND_BUILD_DIR = "bg_freqs_build"
BACKGROUND_WEB_DIR = "../web/bg_freqs"
METADATA_FILENAME = "background_metadata.txt"

msg = 'Copy final background frequency files and metadata to ' + BACKGROUND_WEB_DIR + '?'
copy_files_flag = True if raw_input("%s (y/N) " % msg).lower() == 'y' else False

# create output directory if it doesn't already exist
if not os.path.isdir(BACKGROUND_BUILD_DIR):
    os.mkdir(BACKGROUND_BUILD_DIR)

# get current date
download_date = str(datetime.date.today())

# parse the release notes
response = urllib2.urlopen(UNIPROT_BASEURL + "relnotes.txt")
release = re.compile("Release ([0-9]+_[0-9]+)")
species = re.compile("([a-zA-Z]+ [a-zA-Z]+) \(([A-Z]+)\)")

# build list of proteomes to download
proteomes = {}
for line in response.readlines():
    rel = re.match(release, line)
    spec = re.match(species, line)
    if rel:
        uniprot_version = rel.group(1)
    if spec:
        species_name, id_name = spec.group(1, 2)
        proteomes[id_name] = species_name

# write metadata header
metadata_path = os.path.join(BACKGROUND_BUILD_DIR, METADATA_FILENAME)
metadata = open(metadata_path, 'w')
header = string.join(["uniprot_version", "id_name", "species_name", "fasta_url", "download_date", "aa_freq_filename", "\n"], "\t")
metadata.write(header)

for id_name, species_name in proteomes.iteritems():
    # generate filenames
    fasta_filename = id_name + '.fasta'
    fasta_filename_gzip = fasta_filename + '.gz'
    fasta_url = UNIPROT_BASEURL + fasta_filename_gzip
    aa_freq_filename = "bg_freqs_" + id_name + '.txt'

    # write metadata line
    output_species = string.join([uniprot_version, id_name, species_name, fasta_url, download_date, aa_freq_filename, "\n"], "\t")
    metadata.write(output_species)

    # download and uncompress the FASTA file if it is not already present
    fasta_outpath = os.path.join(BACKGROUND_BUILD_DIR, fasta_filename)
    aa_freq_outpath = os.path.join(BACKGROUND_BUILD_DIR, aa_freq_filename)
    if not os.path.isfile(fasta_outpath):
        response = urllib2.urlopen(fasta_url)
        compressedFile = StringIO.StringIO(response.read())
        decompressedFile = gzip.GzipFile(fileobj=compressedFile)
        with open(fasta_outpath, 'w') as fasta_out:
            fasta_out.write(decompressedFile.read())

    cmd_line = "java -jar target/plaac.jar -b " + fasta_outpath + " > " + aa_freq_outpath
    print cmd_line
    os.system(cmd_line)

    # copy to web area
    if copy_files_flag:
        shutil.copyfile(aa_freq_outpath, os.path.join(BACKGROUND_WEB_DIR, aa_freq_filename))

# close metadata file
metadata.close()

if copy_files_flag:
    shutil.copyfile(metadata_path, os.path.join(BACKGROUND_WEB_DIR, METADATA_FILENAME))
{ "content_hash": "8503e67c2a1daa0984d8dae67eb0bea5", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 130, "avg_line_length": 33.24444444444445, "alnum_prop": 0.6811497326203209, "repo_name": "whitehead/plaac", "id": "d3e3f011e7f33bcba1088e77a1b79b4ef2599040", "size": "3302", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cli/build_background_files.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "3228" }, { "name": "CoffeeScript", "bytes": "6873" }, { "name": "HTML", "bytes": "36494" }, { "name": "Java", "bytes": "146584" }, { "name": "JavaScript", "bytes": "1782" }, { "name": "Python", "bytes": "15075" }, { "name": "R", "bytes": "46130" }, { "name": "Ruby", "bytes": "23934" }, { "name": "Shell", "bytes": "1880" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('feer', '0012_auto_20160206_1329'), ] operations = [ migrations.AlterField( model_name='rating', name='index', field=models.IntegerField(), ), ]
{ "content_hash": "828cf5d6740f529b1d858adc603e4d32", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 44, "avg_line_length": 20.22222222222222, "alnum_prop": 0.5796703296703297, "repo_name": "mKaloer/Feer-Club", "id": "32dd6a944cae3fcf26bd6b188dc93fc77c86389c", "size": "436", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "feer_club/feer/migrations/0013_auto_20160206_1855.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "852" }, { "name": "HTML", "bytes": "21997" }, { "name": "Python", "bytes": "39444" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals from django.db import migrations def create_homepage(apps, schema_editor): # Get models ContentType = apps.get_model('contenttypes.ContentType') Page = apps.get_model('wagtailcore.Page') Site = apps.get_model('wagtailcore.Site') HomePage = apps.get_model('home.HomePage') # Delete the default homepage # If migration is run multiple times, it may have already been deleted Page.objects.filter(id=2).delete() # Create content type for homepage model homepage_content_type, __ = ContentType.objects.get_or_create( model='homepage', app_label='home') # Create a new homepage homepage = HomePage.objects.create( title="Homepage", slug='home', content_type=homepage_content_type, path='00010001', depth=2, numchild=0, url_path='/home/', ) # Create a site with the new homepage set as the root Site.objects.create( hostname='localhost', root_page=homepage, is_default_site=True) def remove_homepage(apps, schema_editor): # Get models ContentType = apps.get_model('contenttypes.ContentType') HomePage = apps.get_model('home.HomePage') # Delete the default homepage # Page and Site objects CASCADE HomePage.objects.filter(slug='home', depth=2).delete() # Delete content type for homepage model ContentType.objects.filter(model='homepage', app_label='home').delete() class Migration(migrations.Migration): dependencies = [ ('home', '0001_initial'), ] operations = [ migrations.RunPython(create_homepage, remove_homepage), ]
{ "content_hash": "76e05fd67c24c80f19f5dd8bb9cdc872", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 75, "avg_line_length": 29.017241379310345, "alnum_prop": 0.6690433749257279, "repo_name": "gasman/wagtaildraftail", "id": "f0de656729e1a5c685e3fe3c995bc1629cc9cb73", "size": "1707", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/testapp/home/migrations/0002_create_homepage.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "974" }, { "name": "HTML", "bytes": "2548" }, { "name": "JavaScript", "bytes": "21637" }, { "name": "Makefile", "bytes": "1543" }, { "name": "Python", "bytes": "48232" }, { "name": "Shell", "bytes": "1585" } ], "symlink_target": "" }
import datetime import itertools import os import random import sys import unittest from collections import OrderedDict from unittest import mock import pandas as pd from hmsclient import HMSClient from airflow import DAG, configuration from airflow.exceptions import AirflowException from airflow.hooks.hive_hooks import HiveCliHook, HiveMetastoreHook, HiveServer2Hook from airflow.models.connection import Connection from airflow.operators.hive_operator import HiveOperator from airflow.utils import timezone from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING from airflow.utils.tests import assertEqualIgnoreMultipleSpaces configuration.load_test_config() DEFAULT_DATE = timezone.datetime(2015, 1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] NOT_ASSERTLOGS_VERSION = sys.version_info.major + sys.version_info.minor / 10 class HiveEnvironmentTest(unittest.TestCase): def setUp(self): configuration.load_test_config() args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} self.dag = DAG('test_dag_id', default_args=args) self.next_day = (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()[:10] self.database = 'airflow' self.partition_by = 'ds' self.table = 'static_babynames_partitioned' self.hql = """ CREATE DATABASE IF NOT EXISTS {{ params.database }}; USE {{ params.database }}; DROP TABLE IF EXISTS {{ params.table }}; CREATE TABLE IF NOT EXISTS {{ params.table }} ( state string, year string, name string, gender string, num int) PARTITIONED BY ({{ params.partition_by }} string); ALTER TABLE {{ params.table }} ADD PARTITION({{ params.partition_by }}='{{ ds }}'); """ self.hook = HiveMetastoreHook() t = HiveOperator( task_id='HiveHook_' + str(random.randint(1, 10000)), params={ 'database': self.database, 'table': self.table, 'partition_by': self.partition_by }, hive_cli_conn_id='hive_cli_default', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def tearDown(self): hook = HiveMetastoreHook() with hook.get_conn() as metastore: metastore.drop_table(self.database, self.table, deleteData=True) class TestHiveCliHook(unittest.TestCase): def test_run_cli(self): hook = HiveCliHook() hook.run_cli("SHOW DATABASES") def test_run_cli_with_hive_conf(self): hql = "set key;\n" \ "set airflow.ctx.dag_id;\nset airflow.ctx.dag_run_id;\n" \ "set airflow.ctx.task_id;\nset airflow.ctx.execution_date;\n" dag_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format'] task_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format'] execution_date_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][ 'env_var_format'] dag_run_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][ 'env_var_format'] os.environ[dag_id_ctx_var_name] = 'test_dag_id' os.environ[task_id_ctx_var_name] = 'test_task_id' os.environ[execution_date_ctx_var_name] = 'test_execution_date' os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id' hook = HiveCliHook() output = hook.run_cli(hql=hql, hive_conf={'key': 'value'}) self.assertIn('value', output) self.assertIn('test_dag_id', output) self.assertIn('test_task_id', output) self.assertIn('test_execution_date', output) self.assertIn('test_dag_run_id', output) del os.environ[dag_id_ctx_var_name] del os.environ[task_id_ctx_var_name] del os.environ[execution_date_ctx_var_name] del os.environ[dag_run_id_ctx_var_name] 
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli') def test_load_file(self, mock_run_cli): filepath = "/path/to/input/file" table = "output_table" hook = HiveCliHook() hook.load_file(filepath=filepath, table=table, create=False) query = ( "LOAD DATA LOCAL INPATH '{filepath}' " "OVERWRITE INTO TABLE {table} ;\n" .format(filepath=filepath, table=table) ) mock_run_cli.assert_called_with(query) @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file') @mock.patch('pandas.DataFrame.to_csv') def test_load_df(self, mock_to_csv, mock_load_file): df = pd.DataFrame({"c": ["foo", "bar", "baz"]}) table = "t" delimiter = "," encoding = "utf-8" hook = HiveCliHook() hook.load_df(df=df, table=table, delimiter=delimiter, encoding=encoding) assert mock_to_csv.call_count == 1 kwargs = mock_to_csv.call_args[1] self.assertEqual(kwargs["header"], False) self.assertEqual(kwargs["index"], False) self.assertEqual(kwargs["sep"], delimiter) assert mock_load_file.call_count == 1 kwargs = mock_load_file.call_args[1] self.assertEqual(kwargs["delimiter"], delimiter) self.assertEqual(kwargs["field_dict"], {"c": "STRING"}) self.assertTrue(isinstance(kwargs["field_dict"], OrderedDict)) self.assertEqual(kwargs["table"], table) @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file') @mock.patch('pandas.DataFrame.to_csv') def test_load_df_with_optional_parameters(self, mock_to_csv, mock_load_file): hook = HiveCliHook() b = (True, False) for create, recreate in itertools.product(b, b): mock_load_file.reset_mock() hook.load_df(df=pd.DataFrame({"c": range(0, 10)}), table="t", create=create, recreate=recreate) assert mock_load_file.call_count == 1 kwargs = mock_load_file.call_args[1] self.assertEqual(kwargs["create"], create) self.assertEqual(kwargs["recreate"], recreate) @mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli') def test_load_df_with_data_types(self, mock_run_cli): d = OrderedDict() d['b'] = [True] d['i'] = [-1] d['t'] = [1] d['f'] = [0.0] d['c'] = ['c'] d['M'] = [datetime.datetime(2018, 1, 1)] d['O'] = [object()] d['S'] = ['STRING'.encode('utf-8')] d['U'] = ['STRING'] d['V'] = [None] df = pd.DataFrame(d) hook = HiveCliHook() hook.load_df(df, 't') query = """ CREATE TABLE IF NOT EXISTS t ( b BOOLEAN, i BIGINT, t BIGINT, f DOUBLE, c STRING, M TIMESTAMP, O STRING, S STRING, U STRING, V STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS textfile ; """ assertEqualIgnoreMultipleSpaces(self, mock_run_cli.call_args_list[0][0][0], query) class TestHiveMetastoreHook(HiveEnvironmentTest): VALID_FILTER_MAP = {'key2': 'value2'} def test_get_max_partition_from_empty_part_specs(self): max_partition = \ HiveMetastoreHook._get_max_partition_from_part_specs([], 'key1', self.VALID_FILTER_MAP) self.assertIsNone(max_partition) def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self): with self.assertRaises(AirflowException): HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key1', {'key3': 'value5'}) def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self): with self.assertRaises(AirflowException): HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key3', self.VALID_FILTER_MAP) def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self): with self.assertRaises(AirflowException): HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, 
{'key1': 'value3', 'key2': 'value4'}], None, self.VALID_FILTER_MAP) def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self): max_partition = \ HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key1', None) # No partition will be filtered out. self.assertEqual(max_partition, b'value3') def test_get_max_partition_from_valid_part_specs(self): max_partition = \ HiveMetastoreHook._get_max_partition_from_part_specs( [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}], 'key1', self.VALID_FILTER_MAP) self.assertEqual(max_partition, b'value1') def test_get_metastore_client(self): self.assertIsInstance(self.hook.get_metastore_client(), HMSClient) @mock.patch("airflow.hooks.hive_hooks.HiveMetastoreHook.get_connection", return_value=[Connection(host="localhost", port="9802")]) @mock.patch("airflow.hooks.hive_hooks.socket") def test_error_metastore_client(self, socket_mock, _find_vaild_server_mock): socket_mock.socket.return_value.connect_ex.return_value = 0 self.hook.get_metastore_client() def test_get_conn(self): self.assertIsInstance(self.hook.get_conn(), HMSClient) def test_check_for_partition(self): partition = "{p_by}='{date}'".format(date=DEFAULT_DATE_DS, p_by=self.partition_by) missing_partition = "{p_by}='{date}'".format(date=self.next_day, p_by=self.partition_by) self.assertTrue( self.hook.check_for_partition(self.database, self.table, partition) ) self.assertFalse( self.hook.check_for_partition(self.database, self.table, missing_partition) ) def test_check_for_named_partition(self): partition = "{p_by}={date}".format(date=DEFAULT_DATE_DS, p_by=self.partition_by) missing_partition = "{p_by}={date}".format(date=self.next_day, p_by=self.partition_by) self.assertTrue( self.hook.check_for_named_partition(self.database, self.table, partition) ) self.assertFalse( self.hook.check_for_named_partition(self.database, self.table, missing_partition) ) def test_get_table(self): table_info = self.hook.get_table(db=self.database, table_name=self.table) self.assertEqual(table_info.tableName, self.table) columns = ['state', 'year', 'name', 'gender', 'num'] self.assertEqual([col.name for col in table_info.sd.cols], columns) def test_get_tables(self): tables = self.hook.get_tables(db=self.database, pattern=self.table + "*") self.assertIn(self.table, {table.tableName for table in tables}) def test_get_databases(self): databases = self.hook.get_databases(pattern='*') self.assertIn(self.database, databases) def test_get_partitions(self): partitions = self.hook.get_partitions(schema=self.database, table_name=self.table) self.assertEqual(len(partitions), 1) self.assertEqual(partitions, [{self.partition_by: DEFAULT_DATE_DS}]) def test_max_partition(self): filter_map = {self.partition_by: DEFAULT_DATE_DS} partition = self.hook.max_partition(schema=self.database, table_name=self.table, field=self.partition_by, filter_map=filter_map) self.assertEqual(partition, DEFAULT_DATE_DS.encode('utf-8')) def test_table_exists(self): self.assertTrue(self.hook.table_exists(self.table, db=self.database)) self.assertFalse( self.hook.table_exists(str(random.randint(1, 10000))) ) class TestHiveServer2Hook(unittest.TestCase): def _upload_dataframe(self): df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]}) self.local_path = '/tmp/TestHiveServer2Hook.csv' df.to_csv(self.local_path, header=False, index=False) def setUp(self): configuration.load_test_config() self._upload_dataframe() args = {'owner': 'airflow', 
'start_date': DEFAULT_DATE} self.dag = DAG('test_dag_id', default_args=args) self.database = 'airflow' self.table = 'hive_server_hook' self.hql = """ CREATE DATABASE IF NOT EXISTS {{ params.database }}; USE {{ params.database }}; DROP TABLE IF EXISTS {{ params.table }}; CREATE TABLE IF NOT EXISTS {{ params.table }} ( a int, b int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','; LOAD DATA LOCAL INPATH '{{ params.csv_path }}' OVERWRITE INTO TABLE {{ params.table }}; """ self.columns = ['{}.a'.format(self.table), '{}.b'.format(self.table)] self.hook = HiveMetastoreHook() t = HiveOperator( task_id='HiveHook_' + str(random.randint(1, 10000)), params={ 'database': self.database, 'table': self.table, 'csv_path': self.local_path }, hive_cli_conn_id='hive_cli_default', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def tearDown(self): hook = HiveMetastoreHook() with hook.get_conn() as metastore: metastore.drop_table(self.database, self.table, deleteData=True) os.remove(self.local_path) def test_get_conn(self): hook = HiveServer2Hook() hook.get_conn() @mock.patch('pyhive.hive.connect') def test_get_conn_with_password(self, mock_connect): from airflow.hooks.base_hook import CONN_ENV_PREFIX conn_id = "conn_with_password" conn_env = CONN_ENV_PREFIX + conn_id.upper() conn_value = os.environ.get(conn_env) os.environ[conn_env] = "jdbc+hive2://conn_id:conn_pass@localhost:10000/default?authMechanism=LDAP" HiveServer2Hook(hiveserver2_conn_id=conn_id).get_conn() mock_connect.assert_called_with( host='localhost', port=10000, auth='LDAP', kerberos_service_name=None, username='conn_id', password='conn_pass', database='default') if conn_value: os.environ[conn_env] = conn_value def test_get_records(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) results = hook.get_records(query, schema=self.database) self.assertListEqual(results, [(1, 1), (2, 2)]) def test_get_pandas_df(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) df = hook.get_pandas_df(query, schema=self.database) self.assertEqual(len(df), 2) self.assertListEqual(df.columns.tolist(), self.columns) self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2]) def test_get_results_header(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) results = hook.get_results(query, schema=self.database) self.assertListEqual([col[0] for col in results['header']], self.columns) def test_get_results_data(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) results = hook.get_results(query, schema=self.database) self.assertListEqual(results['data'], [(1, 1), (2, 2)]) @unittest.skipIf(NOT_ASSERTLOGS_VERSION < 3.4, 'assertLogs not support before python 3.4') def test_to_csv_assertlogs(self): hook = HiveServer2Hook() query = "SELECT * FROM {}".format(self.table) csv_filepath = 'query_results.csv' with self.assertLogs() as cm: hook.to_csv(query, csv_filepath, schema=self.database, delimiter=',', lineterminator='\n', output_header=True, fetch_size=2) df = pd.read_csv(csv_filepath, sep=',') self.assertListEqual(df.columns.tolist(), self.columns) self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2]) self.assertEqual(len(df), 2) self.assertIn('INFO:airflow.hooks.hive_hooks.HiveServer2Hook:' 'Written 2 rows so far.', cm.output) @unittest.skipIf(NOT_ASSERTLOGS_VERSION >= 3.4, 'test could cover by test_to_csv_assertLogs') def test_to_csv_without_assertlogs(self): hook = HiveServer2Hook() query = "SELECT * 
FROM {}".format(self.table) csv_filepath = 'query_results.csv' hook.to_csv(query, csv_filepath, schema=self.database, delimiter=',', lineterminator='\n', output_header=True) df = pd.read_csv(csv_filepath, sep=',') self.assertListEqual(df.columns.tolist(), self.columns) self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2]) self.assertEqual(len(df), 2) def test_multi_statements(self): sqls = [ "CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)", "SELECT * FROM {}".format(self.table), "DROP TABLE test_multi_statements", ] hook = HiveServer2Hook() results = hook.get_records(sqls, schema=self.database) self.assertListEqual(results, [(1, 1), (2, 2)]) def test_get_results_with_hive_conf(self): hql = ["set key", "set airflow.ctx.dag_id", "set airflow.ctx.dag_run_id", "set airflow.ctx.task_id", "set airflow.ctx.execution_date"] dag_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_ID']['env_var_format'] task_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_TASK_ID']['env_var_format'] execution_date_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_EXECUTION_DATE'][ 'env_var_format'] dag_run_id_ctx_var_name = \ AIRFLOW_VAR_NAME_FORMAT_MAPPING['AIRFLOW_CONTEXT_DAG_RUN_ID'][ 'env_var_format'] os.environ[dag_id_ctx_var_name] = 'test_dag_id' os.environ[task_id_ctx_var_name] = 'test_task_id' os.environ[execution_date_ctx_var_name] = 'test_execution_date' os.environ[dag_run_id_ctx_var_name] = 'test_dag_run_id' hook = HiveServer2Hook() output = '\n'.join(res_tuple[0] for res_tuple in hook.get_results(hql=hql, hive_conf={'key': 'value'})['data']) self.assertIn('value', output) self.assertIn('test_dag_id', output) self.assertIn('test_task_id', output) self.assertIn('test_execution_date', output) self.assertIn('test_dag_run_id', output) del os.environ[dag_id_ctx_var_name] del os.environ[task_id_ctx_var_name] del os.environ[execution_date_ctx_var_name] del os.environ[dag_run_id_ctx_var_name]
{ "content_hash": "f5d50293154511770d849cce31d9d21d", "timestamp": "", "source": "github", "line_count": 516, "max_line_length": 106, "avg_line_length": 40.73449612403101, "alnum_prop": 0.5650601836433703, "repo_name": "r39132/airflow", "id": "59c39d3f97f3f7c28ee76e30ffeaf1467cd3376d", "size": "21833", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/hooks/test_hive_hook.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "12126" }, { "name": "Dockerfile", "bytes": "4111" }, { "name": "HTML", "bytes": "128531" }, { "name": "JavaScript", "bytes": "22118" }, { "name": "Mako", "bytes": "1284" }, { "name": "Python", "bytes": "5928206" }, { "name": "Shell", "bytes": "41869" } ], "symlink_target": "" }
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.resource import Resource
import time


class ClockPage(Resource):
    # leaf resource: handle the request here instead of traversing children
    isLeaf = True

    def render_GET(self, request):
        # body of the dynamically generated page
        return "The local time is %s" % (time.ctime(),)


resource = ClockPage()
factory = Site(resource)
reactor.listenTCP(8000, factory)
reactor.run()
{ "content_hash": "9bc6fad967892d7e7198eadb8090f59e", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 53, "avg_line_length": 22.5, "alnum_prop": 0.7388888888888889, "repo_name": "yangdw/repo.python", "id": "7350c13a1f7498e8401935a2f9d7e44d829812bf", "size": "375", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/repo/extension-lib/twisted/dynamic_content.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "2746" }, { "name": "Protocol Buffer", "bytes": "11054" }, { "name": "Python", "bytes": "1549455" }, { "name": "SQLPL", "bytes": "3364" } ], "symlink_target": "" }