Dataset schema: repo_name (string, 5-100 chars), path (string, 4-231 chars), language (string, 1 class), license (string, 15 classes), size (int64, 6-947k), score (float64, 0-0.34), prefix (string, 0-8.16k chars), middle (string, 3-512 chars), suffix (string, 0-8.17k chars)
bruckhaus/challenges
python_challenges/project_euler/p007_ten_thousand_first_prime.py
Python
mit
873
0.001145
__author__ = 'tilmannbruckhaus'

from collections import defaultdict


class TenThousandFirstPrime:
    # 10001st prime
    # Problem 7
    # By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13,
    # we can see that the 6th prime is 13.
    # What is the 10 001st prime number?

    LIMIT = 1000000
    TARGET = 10001
    prime_dict = defaultdict(lambda: True)

    def __init__(self):
        pass

    @staticmethod
    def find():
        prime_count = 0
        t = TenThousandFirstPrime
        for i in range(2, t.LIMIT):
            if not t.prime_dict[i]:
                continue
            prime_count += 1
            if prime_count == t.TARGET:
                return i
            for j in range(i + i, t.LIMIT, i):
                t.prime_dict[j] = False


if __name__ == '__main__':
    print "The 10,001st prime is", TenThousandFirstPrime.find()
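A quick independent check of the sieve's output, using trial division (a minimal sketch, not part of the original file; 104743 is the accepted answer to Project Euler problem 7):

def is_prime(n):
    # Trial division up to sqrt(n) is plenty for a one-off verification.
    if n < 2:
        return False
    for d in range(2, int(n ** 0.5) + 1):
        if n % d == 0:
            return False
    return True

assert is_prime(104743)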
xjlin0/cs246
w2015/hw2/q4_clustering_kMeans_spark.py
Python
mit
2,787
0.011123
# Under the PySpark shell, type:
#   execfile('q4_clustering_kMeans_spark.py')

# from pyspark import SparkContext, SparkConf
# conf = SparkConf()
# conf.setMaster("local")
# conf.setAppName("Recommendation System")
# conf.set("spark.executor.memory", "16g")
# sc = SparkContext(conf=conf)

###############################################################################
# NOT MY CODE, modified from Apache Spark Python example of MLlib - Clustering
# http://spark.apache.org/docs/latest/mllib-clustering.html
###############################################################################
from pyspark.mllib.clustering import KMeans, KMeansModel
from numpy import array
# from math import sqrt

# Load and parse the data
# fileName = "data/mllib/kmeans_data.txt"
fileName = "data.txt"
data = sc.textFile(fileName, 8)  # partition goes here
parsedData = data.map(lambda line: array([float(x) for x in line.split(' ')])).cache()

# Build the models with different seeders: random or farthest spots
c1_clusters = KMeans.train(parsedData, 10, maxIterations=20, runs=1,
                           initializationMode="random")
c2_clusters = KMeans.train(parsedData, 10, maxIterations=20, runs=1,
                           initializationMode='k-means||')
# c1_initials = sc.textFile('c1.txt').map(lambda line: array([float(x) for x in line.split(' ')]))
# c1_preset_clusters = KMeans.train(parsedData, 10, maxIterations=20, initialModel=c1_initials)  # new parameter in Spark v1.5.0


# Evaluate clustering by computing Within Set Sum of Squared Errors
def error(point, model):
    center = model.centers[model.predict(point)]
    return sum([x ** 2 for x in (point - center)]) ** 0.5


def wssse(dataRDD, model):
    return dataRDD.map(lambda point: error(point, model)).reduce(lambda x, y: x + y)


c1_WSSSE = wssse(parsedData, c1_clusters)
c2_WSSSE = wssse(parsedData, c2_clusters)
# c1_cost = c1_clusters.computeCost(parsedData)  # evaluating costs by KMeans.computeCost(RDD) in Spark v1.5.0

print("\n(c1 random) Within Set Sum of Squared Error = " + str(c1_WSSSE))

## No control of specific seeder or iterations whatsoever.....
second = [float(i) for i in '0.21 0.28 0.5 0 0.14 0.28 0.21 0.07 0 0.94 0.21 0.79 0.65 0.21 0.14 0.14 0.07 0.28 3.47 0 1.59 0 0.43 0.43 0 0 0 0 0 0 0 0 0 0 0 0 0.07 0 0 0 0 0 0 0 0 0 0 0 0 0.132 0 0.372 0.18 0.048 5.114 101 1028 1'.split(' ')]  # this is copied from the second line of data.txt
ans1 = c1_clusters.clusterCenters
print [c1_clusters.predict(ans1[i]) == c1_clusters.predict(second) for i in range(10)]
# => [False, False, False, False, False, False, False, False, True, False]
# second document matched to 9th tag!!

print("\n(c2 farthest spots) Within Set Sum of Squared Error = " + str(c2_WSSSE))
ans2 = c2_clusters.clusterCenters
print [c2_clusters.predict(ans2[i]) == c2_clusters.predict(second) for i in range(10)]
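Since WSSSE is the evaluation yardstick here, a natural extension is sweeping k and comparing costs to pick an elbow; a sketch reusing the wssse helper and the parsedData RDD defined above:

for k in (2, 5, 10, 20):
    model = KMeans.train(parsedData, k, maxIterations=20, runs=1,
                         initializationMode='k-means||')
    print("k = %d, WSSSE = %f" % (k, wssse(parsedData, model)))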
knipknap/exscript
Exscript/util/url.py
Python
mit
7,356
0.001767
#
# Copyright (C) 2010-2017 Samuel Abels
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Working with URLs (as used in URL formatted hostnames).
"""
from __future__ import unicode_literals, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import chr
from builtins import range
from builtins import object
import re
from urllib.parse import urlencode, quote
from urllib.parse import urlparse, urlsplit
from .collections import OrderedDefaultDict


def _make_hexmap():
    hexmap = dict()
    for i in range(256):
        hexmap['%02x' % i] = chr(i)
        hexmap['%02X' % i] = chr(i)
    return hexmap

_HEXTOCHR = _make_hexmap()

_WELL_KNOWN_PORTS = {
    'ftp': 21,
    'ssh': 22,
    'ssh1': 22,
    'ssh2': 22,
    'telnet': 23,
    'smtp': 25,
    'http': 80,
    'pop3': 110,
    'imap': 143
}


def _unquote(string):
    """_unquote('abc%20def') -> 'abc def'."""
    result = string.split('%')
    for i, item in enumerate(result[1:]):
        i += 1
        try:
            result[i] = _HEXTOCHR[item[:2]] + item[2:]
        except KeyError:
            result[i] = '%' + item
        except UnicodeDecodeError:
            result[i] = chr(int(item[:2], 16)) + item[2:]
    return ''.join(result)


def _urlparse_qs(url):
    """
    Parse a URL query string and return the components as a dictionary.
    Based on the cgi.parse_qs method. This is a utility function provided
    with urlparse so that users need not use the cgi module for parsing
    the URL query string.

    :type  url: str
    :param url: URL with query string to be parsed
    """
    # Extract the query part from the URL.
    querystring = urlparse(url)[4]

    # Split the query into name/value pairs.
    pairs = [s2 for s1 in querystring.split('&') for s2 in s1.split(';')]

    # Split the name/value pairs.
    result = OrderedDefaultDict(list)
    for name_value in pairs:
        pair = name_value.split('=', 1)
        if len(pair) != 2:
            continue
        if len(pair[1]) > 0:
            name = _unquote(pair[0].replace('+', ' '))
            value = _unquote(pair[1].replace('+', ' '))
            result[name].append(value)
    return result


class Url(object):
    """
    Represents a URL.
    """

    def __init__(self):
        self.protocol = None
        self.username = None
        self.password1 = None
        self.password2 = None
        self.hostname = None
        self.port = None
        self.path = None
        self.vars = None

    def __str__(self):
        """
        Like :class:`to_string()`.

        :rtype:  str
        :return: A URL.
        """
        url = ''
        if self.protocol is not None:
            url += self.protocol + '://'
        if self.username is not None or \
           self.password1 is not None or \
           self.password2 is not None:
            if self.username is not None:
                url += quote(self.username, '')
            if self.password1 is not None or self.password2 is not None:
                url += ':'
            if self.password1 is not None:
                url += quote(self.password1, '')
            if self.password2 is not None:
                url += ':' + quote(self.password2, '')
            url += '@'
        url += self.hostname
        if self.port:
            url += ':' + str(self.port)
        if self.path:
            url += '/' + self.path

        if self.vars:
            pairs = []
            for key, values in self.vars.items():
                for value in values:
                    pairs.append((key, value))
            url += '?' + urlencode(pairs)
        return url

    def to_string(self):
        """
        Returns the URL, including all attributes, as a string.

        :rtype:  str
        :return: A URL.
        """
        return str(self)

    @staticmethod
    def from_string(url, default_protocol='telnet'):
        """
        Parses the given URL and returns an URL object. There are some
        differences to Python's built-in URL parser:

        - It is less strict, many more inputs are accepted. This is
          necessary to allow for passing a simple hostname as a URL.
        - You may specify a default protocol that is used when the http://
          portion is missing.
        - The port number defaults to the well-known port of the given
          protocol.
        - The query variables are parsed into a dictionary (Url.vars).

        :type  url: str
        :param url: A URL.
        :type  default_protocol: string
        :param default_protocol: A protocol name.
        :rtype:  Url
        :return: The Url object constructed from the given URL.
        """
        if url is None:
            raise TypeError('Expected string but got ' + str(type(url)))

        # Extract the protocol name from the URL.
        result = Url()
        match = re.match(r'(\w+)://', url)
        if match:
            result.protocol = match.group(1)
        else:
            result.protocol = default_protocol

        # Now remove the query from the url.
        query = ''
        if '?' in url:
            url, query = url.split('?', 1)
        result.vars = _urlparse_qs('http://dummy/?' + query)

        # Substitute the protocol name by 'http', because Python's urlsplit
        # fails on our protocol names otherwise.
        prefix = result.protocol + '://'
        if url.startswith(prefix):
            url = url[len(prefix):]
        url = 'http://' + url

        # Parse the remaining url.
        parsed = urlsplit(url, 'http', False)
        netloc = parsed[1]

        # Parse username and password.
        auth = ''
        if '@' in netloc:
            auth, netloc = netloc.split('@')
            auth = auth.split(':')
            try:
                result.username = _unquote(auth[0])
                result.password1 = _unquote(auth[1])
                result.password2 = _unquote(auth[2])
            except IndexError:
                pass

        # Parse hostname and port number.
        result.hostname = netloc + parsed.path
        result.port = _WELL_KNOWN_PORTS.get(result.protocol)
        if ':' in netloc:
            result.hostname, port = netloc.split(':')
            result.port = int(port)

        return result
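A brief usage sketch (not from the original module) showing the defaulting rules that from_string documents above:

u = Url.from_string('myhost')
print(u.protocol, u.hostname, u.port)  # 'telnet', 'myhost', 23: default protocol, well-known port
print(Url.from_string('ssh://admin@myhost').username)       # 'admin'; port defaults to 22
print(Url.from_string('http://myhost/?a=1&a=2').vars['a'])  # ['1', '2']: repeated vars accumulate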
tjcsl/director
web3/apps/sites/migrations/0017_auto_20170707_1608.py
Python
mit
572
0.001748
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-07 16:08
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sites', '0016_remove_site_domain_old'),
    ]

    operations = [
        migrations.AlterField(
            model_name='site',
            name='purpose',
            field=models.CharField(choices=[('legacy', 'Legacy'), ('user', 'User'), ('project', 'Project'), ('activity', 'Activity'), ('other', 'Other')], max_length=16),
        ),
    ]
dendory/scripts
aws.py
Python
mit
22,042
0.032937
#!/usr/bin/python3
#
# Simple AWS automation script - Patrick Lambert - http://dendory.net
# Prerequisites: `pip3 install boto3` and `aws configure`
#
import os
import sys
import json
import time
import boto3  # required by every AWS operation below

cfgfile = os.path.join(os.path.expanduser("~"), ".aws.templates")

#
# Internal functions
#
def fail(msg):
    if os.name != "nt":
        print("\033[91m* " + msg + "\033[0m")
    else:
        print("[ERROR] " + msg)

def success(msg):
    if os.name != "nt":
        print("\033[92m* " + msg + "\033[0m")
    else:
        print("[SUCCESS] " + msg)

def info(msg):
    if os.name != "nt":
        print("\033[94m* " + msg + "\033[0m")
    else:
        print("[INFO] " + msg)

def ask(msg, default):
    tmp = input(msg + " [" + str(default) + "]: ")
    if tmp == "":
        return default
    if type(True) == type(default) and str(tmp).lower() == "true":
        return True
    elif type(False) == type(default) and str(tmp).lower() == "false":
        return False
    elif type(int(2)) == type(default):
        return int(tmp)
    else:
        return tmp

def load_templates():
    templates = []
    try:
        f = open(cfgfile, 'r')
        rows = json.loads(f.read())
        f.close()
        for row in rows:
            if "name" in row.keys():
                templates.append(row)
    except:
        pass
    return templates

def save_templates(templates):
    try:
        f = open(cfgfile, 'w')
        f.write(json.dumps(templates))
        f.close()
        return True
    except:
        a, b, c = sys.exc_info()
        fail("Could not save templates file: " + str(b))
        return False

def usage():
    info("Usage: aws.py <command> [options]")
    print()
    info("Available commands:")
    print("list-vms [-v|instance id|name]                          : List all VMs, or details on one VM")
    print("start-vm <instance id|name>                             : Start a VM")
    print("stop-vm <instance id|name>                              : Stop a VM")
    print("restart-vm <instance id|name>                           : Reboot a VM")
    print("delete-vm <instance id>                                 : Terminate a VM")
    print("set-tag <instance id|name> <tag> <value>                : Change the tag of a VM")
    print("list-templates [-v]                                     : List existing templates")
    print("create-template                                         : Create a new VM template")
    print("create-vm <vm name|%> <template to use> [-w]            : Create a new VM based on a template")
    print("dump-inventory <file> [filter]                          : Put all internal IPs in a text file")
    print("get-password <instance id|name> [key file]              : Retrieve the password of a Windows VM")
    print("create-volume <instance id> <device name> <size in GB>  : Create a new volume and attach to a VM")
    print("attach-volume <instance id> <device name> <volume id>   : Attach an existing volume to a VM")
    print("detach-volume <volume id>                               : Detach a volume from a VM")
    print("delete-volume <volume id>                               : Delete a detached volume")
    print("create-snapshot <instance id> <description>             : Create a snapshot of all attached volumes")
    print("delete-snapshot <snapshot id>                           : Delete an existing snapshot")
    print("list-dns-zones [-v]                                     : List hosted zones in Route53")
    print("list-dns-records <zone id>                              : List record sets in a zone")
    print("create-dns-record <zone id> <name> <type> <value>       : Create a record set")
    print("delete-dns-record <zone id> <name> <type> <value>       : Delete a record set")
    print("list-public-ips                                         : List current elastic IPs")
    print("create-public-ip                                        : Allocate a new elastic IP")
    print("delete-public-ip <address id>                           : Revoke an elastic IP")
    print("attach-public-ip <address id> <instance id|name>        : Attach an IP to a VM")
    print("detach-public-ip <address id>                           : Detach an IP from a VM")
    print("list-load-balancers                                     : List all load balancers")
    print("attach-balanced-vm <balancer id> <instance id|name>     : Attach a VM to a load balancer")
    print("detach-balanced-vm <balancer id> <instance id|name>     : Detach a VM from a load balancer")

#
# AWS operations
#
def create_snapshot(volid, desc):
    try:
        ec2 = boto3.resource("ec2")
        snapshot = ec2.create_snapshot(VolumeId=volid, Description=desc)
        return snapshot.id
    except:
        a, b, c = sys.exc_info()
        fail("Could not create snapshot: " + str(b))
        return None

def delete_snapshot(snapid):
    try:
        ec2 = boto3.resource("ec2")
        snapshot = ec2.Snapshot(snapid)
        snapshot.delete()
        return True
    except:
        a, b, c = sys.exc_info()
        fail("Could not delete snapshot: " + str(b))
        return False

def start_vm(instid):
    try:
        if instid[0:2] != "i-":  # not an instance id; resolve it as a VM name
            for ins in list_vms():
                if ins['name'] == instid:
                    instid = ins['id']
        ec2 = boto3.resource("ec2")
        instance = ec2.Instance(instid)
        instance.start()
        return True
    except:
        a, b, c = sys.exc_info()
        fail("Could not start VM: " + str(b))
        return False

def list_hosted_zones():
    try:
        r53 = boto3.client("route53")
        zones = r53.list_hosted_zones()
        return zones['HostedZones']
    except:
        a, b, c = sys.exc_info()
        fail("Could not list hosted zone: " + str(b))
        return []

def list_record_sets(zoneid):
    try:
        r53 = boto3.client("route53")
        sets = r53.list_resource_record_sets(HostedZoneId=zoneid)
        return sets['ResourceRecordSets']
    except:
        a, b, c = sys.exc_info()
        fail("Could not list record sets: " + str(b))
        return []

def stop_vm(instid):
    try:
        if instid[0:2] != "i-":
            for ins in list_vms():
                if ins['name'] == instid:
                    instid = ins['id']
        ec2 = boto3.resource("ec2")
        instance = ec2.Instance(instid)
        instance.stop()
        return True
    except:
        a, b, c = sys.exc_info()
        fail("Could not stop VM: " + str(b))
        return False

def attach_ip(ipid, instid):
    try:
        if instid[0:2] != "i-":
            for ins in list_vms():
                if ins['name'] == instid:
                    instid = ins['id']
        ec2 = boto3.client("ec2")
        resp = ec2.associate_address(InstanceId=instid, AllocationId=ipid)
        return resp['AssociationId']
    except:
        a, b, c = sys.exc_info()
        fail("Could not associate IP: " + str(b))
        return None

def detach_ip(ipid):
    try:
        ec2 = boto3.client("ec2")
        ips = list_ips()
        for ip in ips:
            if ip['AllocationId'] == ipid and "AssociationId" in ip:
                resp = ec2.disassociate_address(AssociationId=ip['AssociationId'])
                return True
        print("No association found.")
        return False
    except:
        a, b, c = sys.exc_info()
        fail("Could not detach IP: " + str(b))
        return False

def restart_vm(instid):
    try:
        if instid[0:2] != "i-":
            for ins in list_vms():
                if ins['name'] == instid:
                    instid = ins['id']
        ec2 = boto3.resource("ec2")
        instance = ec2.Instance(instid)
        instance.reboot()
        return True
    except:
        a, b, c = sys.exc_info()
        fail("Could not restart VM: " + str(b))
        return False

def get_password(instid, keyfile):
    try:
        if instid[0:2] != "i-":
            for ins in list_vms():
                if ins['name'] == instid:
                    instid = ins['id']
        ec2 = boto3.client("ec2")
        data = ec2.get_password_data(InstanceId=instid)
        if data['PasswordData'] == "":
            return ""
        if keyfile:
            cmd = "echo \"" + "".join(data['PasswordData'].split()) + "\" |base64 -d |openssl rsautl -decrypt -inkey \"" + keyfile + "\""
            return os.popen(cmd).read()
        else:
            return data['PasswordData']
    except:
        a, b, c = sys.exc_info()
        fail("Could not fetch password: " + str(b))
        return None

def delete_vm(instid):
    try:
        ec2 = boto3.resource("ec2")
        instance = ec2.Instance(instid)
        instance.terminate()
        return True
    except:
        a, b, c = sys.exc_info()
        fail("Could not terminate VM: " + str(b))
        return False

def create_vm(name, template, dowait):
    try:
        ec2 = boto3.resource("ec2")
        info("Creating instance...")
        devmap = [{"DeviceName": template['volume name'], "Ebs": {"VolumeSize": int(template['volume size']), "VolumeType": "gp2"}}]
        content = ""
        if template['script'] != "":
            f = open(template['script'], 'r')
            content = f.read()
            f.close()
        instance = ec2.create_instances(ImageId = template['ami'], MinCount = 1, MaxCount = 1, KeyName = template['key'], SecurityGroupIds = [templ
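The template helpers near the top of the file are self-contained; a minimal sketch (not in the original) of how they round-trip through the JSON config file, using the template fields that create_vm reads (the values here are made up):

templates = load_templates()
templates.append({"name": "web", "ami": "ami-12345678", "key": "mykey",
                  "volume name": "/dev/sda1", "volume size": "20", "script": ""})
if save_templates(templates):
    success("Saved " + str(len(templates)) + " templates to " + cfgfile)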
tigerlinux/tigerlinux-extra-recipes
recipes/misc/python-learning/CORE/0002-Print-and-Import-and-some-operations/print-and-import.py
Python
gpl-3.0
1,618
0.038937
#!/usr/bin/python3
#
# By Reynaldo R. Martinez P.
# Sept 09 2016
# TigerLinux AT Gmail DOT Com
# A sample with some operations, including the use of
# a library import and some basic operations
#

# Here, we proceed to import the "SYS" library
import sys

# Next, we print the "sys.platform" information. This comes from the "sys" library
# previously imported
print("We are running in:")
print(sys.platform)
print("")

# Now, some basic operations:

# A sum:
print("This is a SUM: 5 + 200")
print(5 + 200)
print("")

# A subtraction:
print("This is a SUBTRACTION: 205 - 5")
print(205 - 5)
print("")

# A multiplication:
print("This is a multiplication: 34 x 6")
print(34 * 6)
print("")

# A division:
print("This is a Division: 342 / 20")
print(342 / 20)
print("")

# This is a modulo:
print("This is the MODULO from the last division: 342 mod 20")
print(342 % 20)
print("")

# This is an exponential:
print("This is an exponential: 2 ^ 100")
print(2 ** 100)
print("")

# Define two string variables and concatenate them in the print statement
var1 = "The Life is "
var2 = "Strange....."
print("We define two strings, and concatenate them in a single print:")
print(var1 + var2)
print("")

# Define 3 strings and print them in the same line:
var3 = "Kiki"
var4 = "Rayita"
var5 = "Negrito"
print("My 3 cats are named:")
print(var3, var4, var5)
print("")

# Next, we define a string variable, and print it ten times:
mystring = " !String! "
print("Let's print a string 10 times... Just because we can...jejeje:")
print(mystring * 10)
print("")
print("")

# END
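One detail worth adding to the division and modulo examples above: Python 3's / always returns a float, while // and divmod expose the integer quotient and remainder (a small supplement, not in the original file):

print(342 // 20)        # 17 (floor division)
print(divmod(342, 20))  # (17, 2): quotient and remainder in one call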
isudox/leetcode-solution
python-algorithm/leetcode/problem_94.py
Python
mit
1,229
0
"""94. Binary Tree Inorder Traversal https://leetcode.com/problems/binary-tree-inorder-traversal/ Given a binary tree, return the in-order traversal of its nodes' values. Example: Input: [1,null,2,3] 1 \ 2 / 3 Output: [1,3,2] Follow up: Recursive solution is trivial, could you do it iteratively? """ from typing import List from common.tree_node import TreeNode class Solution: def iterative_inorder_traversal(self, root: TreeNode) -> List[int]: """ iterative traversal """ ans = [] stack = [] while root or stack: if root: stack.append(root) root = root.left else: root = stack.pop()
ans.append(root.val) root = root.right return ans def recursive_inorder_traversal(self, root: TreeNode) -> List[int]: """ recursive traversal, process left if needed, then val, at last right """ if not root: return [] ans = [] ans += self.recursive_inorder_traversal(root.left) ans.append(root.
val) ans += self.recursive_inorder_traversal(root.right) return ans
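A quick check of both traversals on the tree from the docstring (a sketch only; it assumes common.tree_node.TreeNode can be built from a single value and exposes left/right attributes):

root = TreeNode(1)             # 1
root.right = TreeNode(2)       #  \
root.right.left = TreeNode(3)  #   2
                               #  /
                               # 3
s = Solution()
assert s.iterative_inorder_traversal(root) == [1, 3, 2]
assert s.recursive_inorder_traversal(root) == [1, 3, 2]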
joshmoore/openmicroscopy
components/tools/OmeroWeb/omeroweb/webmobile/views.py
Python
gpl-2.0
20,846
0.012952
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response

from omeroweb.webgateway.views import getBlitzConnection, _session_logout
from omeroweb.webgateway import views as webgateway_views
import settings
import logging
import traceback
import omero

# use the webclient's gateway connection wrapper
from webclient.webclient_gateway import OmeroWebGateway

import webmobile_util

logger = logging.getLogger('webmobile')


def isUserConnected(f):
    """
    connection decorator (wraps methods that require connection) - adapted from webclient.views
    retrieves connection and passes it to the wrapped method in kwargs
    TODO: would be nice to refactor isUserConnected from webclient to be usable from here.
    """
    def wrapped(request, *args, **kwargs):
        # this checks the connection exists; if not, it will redirect to the login page
        url = request.REQUEST.get('url')
        if url is None or len(url) == 0:
            if request.META.get('QUERY_STRING'):
                url = '%s?%s' % (request.META.get('PATH_INFO'), request.META.get('QUERY_STRING'))
            else:
                url = '%s' % (request.META.get('PATH_INFO'))

        conn = None
        loginUrl = reverse("webmobile_login")
        try:
            conn = getBlitzConnection(request, useragent="OMERO.webmobile")
        except Exception, x:
            logger.error(traceback.format_exc())
            return HttpResponseRedirect("%s?error=%s&url=%s" % (loginUrl, str(x), url))
        # if we failed to connect - redirect to login page, passing the destination url
        if conn is None:
            return HttpResponseRedirect("%s?url=%s" % (loginUrl, url))
        # if we got a connection, pass it to the wrapped method in kwargs
        kwargs["error"] = request.REQUEST.get('error')
        kwargs["conn"] = conn
        kwargs["url"] = url
        return f(request, *args, **kwargs)
    return wrapped


def groups_members(request):
    """ List the users of the current group - if permitted """
    conn = getBlitzConnection(request, useragent="OMERO.webmobile")
    if conn is None or not conn.isConnected():
        return HttpResponseRedirect(reverse('webmobile_login'))

    groupId = conn.getEventContext().groupId
    showMembers = True
    if str(conn.getEventContext().groupPermissions) == "rw----":
        showMembers = False
    members = conn.containedExperimenters(groupId)

    groups = []
    perms = {"rw----": 'private', "rwr---": 'read-only', "rwrw--": 'collaborative'}
    for g in conn.getGroupsMemberOf():
        try:
            p = perms[str(g.getDetails().permissions)]
        except KeyError:
            p = ""
        groups.append({"id": g.id, "name": g.getName(), "permissions": p})

    return render_to_response('webmobile/groups_members.html',
                              {'client': conn, 'showMembers': showMembers,
                               'members': members, 'groups': groups})


def switch_group(request, groupId):
    """ Switch to the specified group, then redirect to index. """
    conn = getBlitzConnection(request, useragent="OMERO.webmobile")
    if conn is None or not conn.isConnected():
        return HttpResponseRedirect(reverse('webmobile_login'))

    from webclient.views import change_active_group
    try:
        # change_active_group(request, kwargs={'conn': conn})
        conn.changeActiveGroup(long(groupId))   # works except after viewing thumbnails in private group!
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    return HttpResponseRedirect(reverse('webmobile_index'))


@isUserConnected
def change_active_group(request, groupId, **kwargs):
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return handlerInternalError("Connection is not available. Please contact your administrator.")
    url = reverse('webmobile_index')

    server = request.session.get('server')
    username = request.session.get('username')
    password = request.session.get('password')
    ssl = request.session.get('ssl')
    version = request.session.get('version')

    webgateway_views._session_logout(request, request.session.get('server'))

    blitz = settings.SERVER_LIST.get(pk=server)
    request.session['server'] = blitz.id
    request.session['host'] = blitz.host
    request.session['port'] = blitz.port
    request.session['username'] = username
    request.session['password'] = password
    request.session['ssl'] = (True, False)[request.REQUEST.get('ssl') is None]
    request.session['clipboard'] = {'images': None, 'datasets': None, 'plates': None}
    request.session['shares'] = dict()
    request.session['imageInBasket'] = set()
    blitz_host = "%s:%s" % (blitz.host, blitz.port)
    request.session['nav'] = {"error": None, "blitz": blitz_host, "menu": "start", "view": "icon",
                              "basket": 0, "experimenter": None, 'callback': dict()}

    # conn = getBlitzConnection(request, useragent="OMERO.webmobile")
    if conn.changeActiveGroup(groupId):
        request.session.modified = True
    else:
        error = 'You cannot change your group because the data is currently processing. You can force it by logging out and logging in again.'
        url = reverse("webindex") + ("?error=%s" % error)
        if request.session.get('nav')['experimenter'] is not None:
            url += "&experimenter=%s" % request.session.get('nav')['experimenter']

    request.session['version'] = conn.getServerVersion()
    return HttpResponseRedirect(url)


def viewer(request, imageId):
    conn = getBlitzConnection(request, useragent="OMERO.webmobile")
    if conn is None or not conn.isConnected():
        return HttpResponseRedirect(reverse('webmobile_login'))

    image = conn.getObject("Image", imageId)
    w = image.getSizeX()
    h = image.getSizeY()
    return render_to_response('webmobile/viewers/viewer_iphone.html', {'image': image})


@isUserConnected
def viewer_big(request, imageId, **kwargs):
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    image = conn.getImage(imageId)
    w = image.getWidth()
    h = image.getHeight()
    z = image.z_count() / 2
    return render_to_response('webmobile/viewers/big_iphone.html',
                              {'image': image, 'w': w, 'h': h, 'z': z})


@isUserConnected
def projects(request, eid=None, **kwargs):
    """ List the projects owned by the current user, or another user specified by eId """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())

    # projects = filter(lambda x: x.isOwned(), conn.listProjects())
    # eId = request.REQUEST.get('experimenter', None)
    experimenter = None
    if eid is not None:
        experimenter = conn.getObject("Experimenter", eid)
    else:
        # show current user's projects by default
        eid = conn.getEventContext().userId

    projs = conn.listProjects(eid=eid)
    projs = list(projs)
    if request.REQUEST.get('sort', None) == 'recent':
        projs.sort(key=lambda x: x.creationEventDate())
        projs.reverse()
    else:
        projs.sort(key=lambda x: x.getName().lower())

    ods = conn.listOrphans("Dataset", eid=eid)
    orphanedDatasets = list(ods)

    return render_to_response('webmobile/browse/projects.html',
                              {'client': conn, 'projects': projs,
                               'datasets': orphanedDatasets, 'experimenter': experimenter})


@isUserConnected
def project(request, id, **kwargs):
    """ Show datasets belonging to the specified project """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())

    prj = conn.getObject("Proj
Pseudonick47/sherlock
backend/api/data.py
Python
gpl-3.0
37,646
0.003294
# -*- coding=utf-8 -*-
"""
All entry points for API are defined here.

Attributes:
    mod (Blueprint): Flask Blueprint object used to separate api from website code.
"""

from flask import Blueprint, request, send_from_directory
from flask_restful import Api, Resource

import os
from datetime import datetime
from PIL import Image as PILImage

from app import db
from models.data import City, Country, Location, Price, Tour, Image, Comment, Rating, SpecificTour
from models.users import User

mod = Blueprint('api/data', __name__)
api = Api(mod)


class TourAPI(Resource):
    """Services that allow user to get, update or delete tour identified
    with the given key.
    """

    def put(self, oid):
        """Update already existing tour.

        Request should be formatted as JSON file. For example:
            {
                "name": "",
                "description": "",
                "guide_fee": 10.25,
                "thumbnail_id": 2,
                "locations": [2, 51, 16, 43]
            }

        Available fields are:
            name (str), description (str), guide_fee (float),
            thumbnail_id (int), locations (list of integers)

        Fields can be omitted.

        Returns:
            JSON file. For example:

            Update succeeded:
                {
                    "success": true
                }

            Update failed:
                {
                    "success": false,
                    "message": "Field name incorrect"
                }
        """
        req = request.get_json(force=True, silent=True)
        if req:
            tour = db.session.query(Tour).filter_by(oid=oid,).one_or_none()
            if not tour:
                return ({'success': False, 'message': 'Specified tour not found.'}, 404)

            for key in req.keys():
                if key == 'locations':
                    for location in req['locations']:
                        loc = db.session.query(Location).filter_by(oid=location,).one()
                        loc.tours.append(tour)
                elif hasattr(tour, key):
                    setattr(tour, key, req[key])
                else:
                    return ({'success': False, 'message': 'Field name incorrect'}, 400)

            db.session.commit()
            return ({'success': True}, 200)

        return ({'success': False, 'message': 'Not JSON'}, 400)

    def get(self, oid):
        """Fetch tour corresponding to the given identifier.

        Returns:
            JSON file containing id, name, description, guide_fee,
            thumbnail_id and location identifiers of the selected tour.
            For example:
                {
                    "id": 17,
                    "name": "Walk Through History",
                    "description": "Visit some of the finest castles and mansions in all of Europe.",
                    "guide_fee": 10,
                    "thumbnail": 2,
                    "locations": [...]
                }

            If a requested tour is not found, then JSON file has an empty object.
        """
        response = {}
        tour = db.session.query(Tour).filter_by(oid=oid,).one_or_none()
        if tour:
            thumbnail = db.session.query(Image).filter_by(oid=tour.thumbnail_id).one()
            comments = []
            ratings = db.session.query(Rating).filter_by(tour=tour.oid,).all()
            rating = 0
            if not ratings:
                rating = 0
            else:
                for r in ratings:
                    rating = rating + r.rating
                rating = rating / len(ratings)
            for comment in tour.comments:
                comments.append(comment.oid)
            response = {
                'id': tour.oid,
                'name': tour.name,
                'description': tour.description,
                'guide_fee': tour.guide_fee,
                'thumbnail': {
                    'id': thumbnail.oid,
                    'src': 'http://localhost:5000/static/' + thumbnail.file_name,
                    'width': thumbnail.width,
                    'height': thumbnail.height,
                    'alt': 'thumbnail'
                },
                'locations': [
                    {'id': location.oid, 'name': location.name}
                    for location in tour.locations
                ],
                'images': [
                    {
                        'id': image.oid,
                        'src': 'http://localhost:5000/static/' + image.file_name,
                        'width': image.width,
                        'height': image.height,
                        'alt': 'image'
                    }
                    for image in tour.images
                ],
                'rating': rating,
                'commentIds': comments
            }
            return (response, 200)
        return (response, 404)

    def delete(self, oid):
        """Delete tour corresponding to the given identifier.

        Returns:
            JSON file. For example:

            Deletion succeeded:
                {
                    "success": true
                }

            Deletion failed:
                {
                    "success": false,
                    "message": "Specified tour not found"
                }
        """
        num = db.session.query(Tour).filter_by(oid=oid,).delete()
        if num:
            return ({'success': True}, 200)
        return ({'success': False, 'message': 'Specified tour not found'}, 404)


class TourListAPI(Resource):
    """Services that allow user to get all tours or to add new tour."""

    def post(self):
        """Add new tour.

        Request should be formatted as JSON file. For example:
            {
                "name": "",
                "description": "",
                "guide_fee": 10.25,
                "thumbnail": 2,
                "locations": [2, 51, 16, 43],
                "images": [34, 5, 63]
            }

        Returns:
            JSON file. For example:

            Addition succeeded:
                {
                    "success": true
                }

            Addition failed:
                {
                    "success": false,
                    "message": "Not JSON"
                }
        """
        req = request.get_json(force=True, silent=True)
        if req:
            tour = Tour(
                name=req['name'],
                guide_fee=req['guide_fee'],
                description=req['description'],
                thumbnail_id=req['thumbnail']
            )
            for location in req['locations']:
                loc = db.session.query(Location).filter_by(oid=location,).one()
                loc.tours.append(tour)
            for image_id in req['images']:
                image = db.session.query(Image).filter_by(oid=image_id).one()
                tour.images.append(image)
            db.session.add(tour)
            db.session.commit()
            return ({'success': True, 'id': tour.oid}, 200)
        return ({'success': False, 'message': 'Not JSON'}, 400)

    def get(self):
        """Fetch all tours.

        Returns:
            JSON file containing id, name, description, guide_fee,
            thumbnail_id and location identifiers of all selected tours.
            For example:
                [
                    {
                        "id": 17,
                        "name": "Walk Through History",
                        "description": "Visit some of the finest castles and mansions in all of Europe.",
                        "guide_fee": 10,
                        "thumbnail": 2,
                        "locations": [...]
                    },
                    {
                        "id": 17,
                        "name": "It's Time to Party",
                        "description": "Have a wonderful time with young and wicked people in Sydney's most sp
seleniumbase/SeleniumBase
examples/image_test.py
Python
mit
2,523
0
import os
import pytest
from seleniumbase import BaseCase


class ImageTests(BaseCase):
    @pytest.mark.run(order=1)
    def test_pull_image_from_website(self):
        """ Pull an image from a website and save it as a PNG file. """
        self.open("https://xkcd.com/1117/")
        selector = "#comic"
        file_name = "comic.png"
        folder = "images_exported"
        self.save_element_as_image_file(selector, file_name, folder)
        self.assert_true(os.path.exists("%s/%s" % (folder, file_name)))
        print('\n"%s/%s" was saved!' % (folder, file_name))

    @pytest.mark.run(order=2)
    def test_add_text_overlay_to_image(self):
        """ Add a text overlay to an image. """
        self.open("https://xkcd.com/1117/")
        selector = "#comic"
        file_name = "image_overlay.png"
        folder = "images_exported"
        overlay_text = 'This is an XKCD comic!\nTitle: "My Sky"'
        self.save_element_as_image_file(
            selector, file_name, folder, overlay_text
        )
        self.assert_true(os.path.exists("%s/%s" % (folder, file_name)))
        print('\n"%s/%s" was saved!' % (folder, file_name))

    @pytest.mark.run(order=3)
    def test_add_text_overlay_to_page_section(self):
        """ Add a text overlay to a section of a page. """
        self.open("https://xkcd.com/2200/")
        selector = "#middleContainer"
        file_name = "section_overlay.png"
        folder = "images_exported"
        overlay_text = (
            "Welcome to %s\n"
            "This is a comment added to the image.\n"
            "Unreachable states come from logic errors."
            % self.get_current_url()
        )
        self.save_element_as_image_file(
            selector, file_name, folder, overlay_text
        )
        self.assert_true(os.path.exists("%s/%s" % (folder, file_name)))
        print('\n"%s/%s" was saved!' % (folder, file_name))

    @pytest.mark.run(order=4)
    def test_add_text_overlay_to_full_page(self):
        """ Add a text overlay to a full page. """
        self.open("https://xkcd.com/1922/")
        self.remove_element("#bottom")
        selector = "body"
        file_name = "page_overlay.png"
        folder = "images_exported"
        overlay_text = "A text overlay on %s" % self.get_current_url()
        self.save_element_as_image_file(
            selector, file_name, folder, overlay_text
        )
        self.assert_true(os.path.exists("%s/%s" % (folder, file_name)))
        print('\n"%s/%s" was saved!' % (folder, file_name))
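These tests run under pytest in the usual SeleniumBase way (for example, pytest image_test.py); each one saves its PNG into the images_exported/ folder and then asserts that the file exists there.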
ramalho/eagle-py
tests/entries.py
Python
lgpl-2.1
572
0.001748
#!/usr/bin/env python2

from eagle import *


def changed(app, entry, value):
    print "app %s, entry %s, value %r" % (app.id, entry.id, value)


App(title="Entries Test",
    center=(Entry(id="single"),
            Entry(id="multi", multiline=True),
            Entry(id="non-editable", label="non-editable", value="Value", editable=False),
            Entry(id="non-editable-multi", label="non-editable", value="Value", editable=False, multiline=True),
            ),
    data_changed_callback=changed,
    )

run()
lavagetto/plumber
setup.py
Python
gpl-3.0
589
0.001698
#!/usr/bin/python
from setuptools import setup, find_packages

setup(
    name='plumber',
    version='0.0.1-alpha',
    description='simple, mundane script to build and publish containers to marathon/mesos',
    author='Giuseppe Lavagetto',
    author_email='glavagetto@wikimedia.org',
    url='https://github.com/lavagetto/plumber',
    install_requires=['argparse', 'Flask', 'jinja2'],
    setup_requires=[],
    zip_safe=True,
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'plumber-run = plumber.main:run',
        ],
    },
)
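For reference, the console_scripts entry point above means that installing the package (pip install . from the repository root) also installs a plumber-run command on the PATH that dispatches to plumber.main:run().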
astooke/synkhronos
synkhronos/comm.py
Python
mit
13,018
0.000615
import zmq
import numpy as np
import theano
import theano.gpuarray
try:
    from pygpu import collectives as gpu_coll
except ImportError as exc:
    gpu_coll = None

from .reducers import reducers

sync = None
cpu = None
gpu = None


def connect_as_master(n_parallel, rank, master_rank, use_gpu,
                      min_port=1024, max_port=65535):
    global cpu, gpu
    cpu = CpuCommMaster(n_parallel, master_rank, min_port, max_port)
    if use_gpu:
        if gpu_coll is None:
            print("WARNING: Using GPUs but unable to import GPU "
                  "collectives from pygpu (may need to install NCCL); "
                  "reverting to CPU-based collectives.")
        else:
            gpu = GpuCommMaster(n_parallel, rank, master_rank)


def connect_as_worker(n_parallel, rank, master_rank, use_gpu):
    global cpu, gpu
    cpu = CpuCommWorker(rank)
    if use_gpu and gpu_coll is not None:
        gpu = GpuCommWorker(n_parallel, rank, master_rank)


###############################################################################
#                                                                             #
#                        CPU Comm (using ZeroMQ)                              #
#                                                                             #
###############################################################################


class CpuCommMaster(object):

    def __init__(self, n_parallel, master_rank, min_port=1024, max_port=65535):
        context = zmq.Context()
        rank_pair_sockets = list()
        rank_pair_ports = list()
        for i in range(n_parallel):
            if i == master_rank:
                rank_pair_sockets.append(None)
                rank_pair_ports.append(None)
            else:
                socket = context.socket(zmq.PAIR)
                port = socket.bind_to_random_port(
                    "tcp://*", min_port=min_port, max_port=max_port)
                rank_pair_sockets.append(socket)
                rank_pair_ports.append(port)
        pair_sockets = list(rank_pair_sockets)
        pair_sockets.pop(master_rank)
        pub_socket = context.socket(zmq.PUB)
        pub_port = pub_socket.bind_to_random_port(
            "tcp://*", min_port=min_port, max_port=max_port)
        sync.dict["pair_ports"] = rank_pair_ports
        sync.dict["pub_port"] = pub_port
        for _ in range(n_parallel - 1):
            sync.semaphore.release()  # (let the workers connect)
        self.context = context
        self.rank_pair_sockets = rank_pair_sockets
        self.pair_sockets = pair_sockets
        self.rank_pair_ports = rank_pair_ports
        self.pub_socket = pub_socket
        self.pub_port = pub_port
        self.n = n_parallel
        self.vec_ones = np.ones(self.n)

    ###########################################################################
    #                         Support for Functions                           #

    def collect(self, arr, op):
        if op == "gather":
            return self.gather(arr)
        else:
            return self.reduce(arr, op)

    ###########################################################################
    #                Support for Shared Variable Collectives                  #

    def reduce(self, arr, op, dest=None):
        dtype = arr.dtype
        shape = (1,) if not arr.shape else arr.shape  # (recv scalar as array)
        recv_buf = np.empty((self.n, *shape), dtype=dtype)
        dest = np.empty(shape, dtype=dtype) if dest is None else dest
        assert dest.dtype == dtype
        assert dest.shape == shape  # (can't use with scalar, that's OK)
        recv_buf[-1] = np.asarray(arr)
        for i, socket in enumerate(self.pair_sockets):
            recv_buf[i] = recv_nd_array(socket)
        if op in ["sum", "avg"]:
            dest[:] = self.vec_ones.dot(recv_buf)  # parallel; np.mean is not
            if op == "avg":
                dest *= (1. / self.n)
        elif op == "max":
            dest[:] = recv_buf.max(axis=0)
        elif op == "min":
            dest[:] = recv_buf.min(axis=0)
        elif op == "prod":
            dest[:] = recv_buf.prod(axis=0)
        else:
            raise ValueError("Unrecognized op: {}".format(op))
        if not arr.shape:
            dest = dest.reshape(())
        return dest

    def all_reduce(self, arr, op, dest=None):
        recv_arr = self.reduce(arr, op, dest)
        self.broadcast(recv_arr)
        return recv_arr

    def broadcast(self, arr):
        send_nd_array(self.pub_socket, np.asarray(arr))

    def gather(self, arr, nd_up=0, dest=None):
        if nd_up > 1:
            raise ValueError("Only nd_up 0 or 1 supported.")
        recv_arrs = list()
        for socket in self.rank_pair_sockets:
            if socket is None:
                recv_arrs.append(np.asarray(arr))
            else:
                recv_arrs.append(recv_nd_array(socket))
        return combine_nd_arrays(recv_arrs, nd_up, dest)

    def all_gather(self, arr, nd_up=0, dest=None):
        recv_arr = self.gather(arr, nd_up, dest)
        self.broadcast(recv_arr)

    def scatter(self, arr):
        arr = np.asarray(arr)
        n_data = len(arr)
        n = -(-n_data // self.n)  # (ceiling div)
        current_n = n
        for socket in self.pair_sockets[:-1]:
            send_nd_array(socket, arr[current_n:current_n + n])
            current_n += n
        send_nd_array(self.pair_sockets[-1], arr[current_n:])
        return arr[:n]

    def send(self, rank, arr):
        # NOTE: need an assertion: socket is not None? (if rank == master_rank)
        send_nd_array(self.rank_pair_sockets[rank], np.asarray(arr))

    def recv(self, rank):
        return recv_nd_array(self.rank_pair_sockets[rank])

    def recv_lengths(self, master_len):
        lengths = list()
        for sock in self.rank_pair_sockets:
            if sock is None:
                lengths.append(master_len)
            else:
                lengths.append(int(sock.recv_string()))
        return lengths

    def recv_shapes(self, master_shape):
        shapes = list()
        for sock in self.rank_pair_sockets:
            if sock is None:
                shapes.append(master_shape)
            else:
                shapes.append(str_to_shape(sock.recv_string()))
        return shapes


def combine_nd_arrays(arrays, nd_up, dest=None):
    shapes = [arr.shape for arr in arrays]
    shapes_match = all([shp == shapes[0] for shp in shapes])
    concat_match = all([shp[1:] == shapes[0][1:] for shp in shapes])
    if nd_up == 0 and concat_match:
        if dest is not None:
            dest[:] = np.concatenate(arrays)
        else:
            dest = np.concatenate(arrays)
    elif nd_up == 1 and shapes_match:
        if dest is not None:
            dest[:] = np.concatenate([arr[np.newaxis] for arr in arrays])
        else:
            dest = np.concatenate([arr[np.newaxis] for arr in arrays])
    else:
        dest = arrays
    return dest


class CpuCommWorker(object):

    def __init__(self, rank):
        context = zmq.Context()
        pair_socket = context.socket(zmq.PAIR)
        sync.semaphore.acquire()
        pair_port = sync.dict["pair_ports"][rank]
        pair_socket.connect("tcp://localhost:%s" % pair_port)
        sub_socket = context.socket(zmq.SUB)
        sub_port = sync.dict["pub_port"]
        sub_socket.connect("tcp://localhost:%s" % sub_port)
        self.context = context
        self.pair_socket = pair_socket
        self.pair_port = pair_port
        self.sub_socket = sub_socket
        self.sub_port = sub_port

    def send(self, arr):  # (Functions, reduce, gather)
        send_nd_array(self.pair_socket, np.asarray(arr))

    def recv_pub(self):  # (broadcast)
        return recv_nd_array(self.sub_socket)

    def recv_pair(self):  # (scatter)
        return recv_nd_array(self.pair_socket)

    def send_recv(self, arr):  # (all_gather, all_reduce)
        send_nd_array(self.pair_socket, np.asarray(arr))
        return recv_nd_array(self.sub_socket)

    def send_length(self, arr):
        self.pair_socket.send_string(str(arr.shape[0]))

    def send_shape(self, arr):
        self.pair_socket.
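combine_nd_arrays above is pure numpy and can be sanity-checked without any ZeroMQ plumbing (a small sketch, not part of the original file):

import numpy as np

chunks = [np.arange(3), np.arange(3, 6)]
print(combine_nd_arrays(chunks, nd_up=0))  # [0 1 2 3 4 5]: concatenated along axis 0
print(combine_nd_arrays(chunks, nd_up=1))  # shape (2, 3): one new leading axis per sender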
anyweez/regis
face/offline/ParserTools/ParserTools.py
Python
gpl-2.0
1,892
0.005814
import os


# ParserTools class
class ParserTools(object):
    def __init__(self):
        self.qset = None
        self.template = None

    # Called before running any PT functions in a term definition.
    def term_focus(self, term):
        self.qset = term.qset
        self.template = term.template

    # Called after running all PT functions in a term definition.
    def term_unfocus(self):
        self.qset = None
        self.template = None

    # Load a full datafile.
    def load_datafile(self, filename):
        directory = '../resources/full'
        fp = open('%s/%s' % (directory, filename))
        lines = fp.readlines()
        fp.close()
        return lines

    # Stores a file for the user that's currently being parsed.
    def store_userfile(self, contents):
        directory = '../resources/user'
        fp = open('%s/%d.%d.txt' % (directory, self.qset.id, self.template.id), 'w')
        for content in contents:
            fp.write('%s\n' % content)
        fp.close()
        return 'question/files/%d' % self.template.id

    def prepare_params(self, params):
        finals = []
        for param in params:
            if tuple(param) == param or list(param) == param:
                finals.append(param[0])
            else:
                finals.append(param)
        if len(finals) == 1:
            return finals[0]
        else:
            return finals

    def make_sets(self, lines):
        sets = []
        current = []
        for line in lines:
            if len(line.strip()) == 0:
                sets.append(current)
                current = []
            else:
                current.append(line.strip())
        # Add the last set, which likely won't end with a newline.
        if len(current) > 0:
            sets.append(current)
        return sets
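An illustration of make_sets splitting blank-line-delimited records out of a list of raw lines, as returned by load_datafile (a sketch, not in the original file):

pt = ParserTools()
lines = ['alpha\n', 'beta\n', '\n', 'gamma\n']
print(pt.make_sets(lines))  # [['alpha', 'beta'], ['gamma']]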
padmacho/pythontutorial
objects/id_demo.py
Python
apache-2.0
113
0
x = 10
print("id(x) is ", id(x))

y = 20
print("id(y) is ", id(y))

print("id(x) == id(y): ", id(x) == id(y))
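For contrast, two names bound to the same small integer compare equal by id, because CPython caches small ints (an implementation detail, not a language guarantee; a supplement to the demo above):

a = 10
b = 10
print("id(a) == id(b): ", id(a) == id(b))  # True in CPython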
tseaver/gcloud-python
firestore/tests/unit/gapic/v1beta1/test_firestore_client_v1beta1.py
Python
apache-2.0
20,576
0
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""

import pytest

from google.cloud.firestore_v1beta1.gapic import firestore_client
from google.cloud.firestore_v1beta1.proto import common_pb2
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.proto import firestore_pb2
from google.protobuf import empty_pb2


class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface."""

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        self.channel_stub.requests.append((self.method, request))

        response = None
        if self.channel_stub.responses:
            response = self.channel_stub.responses.pop()

        if isinstance(response, Exception):
            raise response

        if response:
            return response


class ChannelStub(object):
    """Stub for the grpc.Channel interface."""

    def __init__(self, responses=[]):
        self.responses = responses
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        return MultiCallableStub(method, self)

    def unary_stream(self, method, request_serializer=None, response_deserializer=None):
        return MultiCallableStub(method, self)

    def stream_stream(self, method, request_serializer=None, response_deserializer=None):
        return MultiCallableStub(method, self)


class CustomException(Exception):
    pass


class TestFirestoreClient(object):
    def test_get_document(self):
        # Setup Expected Response
        name_2 = 'name2-1052831874'
        expected_response = {'name': name_2}
        expected_response = document_pb2.Document(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = firestore_client.FirestoreClient(channel=channel)

        # Setup Request
        name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')

        response = client.get_document(name)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = firestore_pb2.GetDocumentRequest(name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_get_document_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = firestore_client.FirestoreClient(channel=channel)

        # Setup request
        name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')

        with pytest.raises(CustomException):
            client.get_document(name)

    def test_list_documents(self):
        # Setup Expected Response
        next_page_token = ''
        documents_element = {}
        documents = [documents_element]
        expected_response = {
            'next_page_token': next_page_token,
            'documents': documents
        }
        expected_response = firestore_pb2.ListDocumentsResponse(
            **expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = firestore_client.FirestoreClient(channel=channel)

        # Setup Request
        parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
        collection_id = 'collectionId-821242276'

        paged_list_response = client.list_documents(parent, collection_id)
        resources = list(paged_list_response)
        assert len(resources) == 1
        assert expected_response.documents[0] == resources[0]

        assert len(channel.requests) == 1
        expected_request = firestore_pb2.ListDocumentsRequest(
            parent=parent, collection_id=collection_id)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_list_documents_exception(self):
        channel = ChannelStub(responses=[CustomException()])
        client = firestore_client.FirestoreClient(channel=channel)

        # Setup request
        parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
        collection_id = 'collectionId-821242276'

        paged_list_response = client.list_documents(parent, collection_id)
        with pytest.raises(CustomException):
            list(paged_list_response)

    def test_create_document(self):
        # Setup Expected Response
        name = 'name3373707'
        expected_response = {'name': name}
        expected_response = document_pb2.Document(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = firestore_client.FirestoreClient(channel=channel)

        # Setup Request
        parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
        collection_id = 'collectionId-821242276'
        document_id = 'documentId506676927'
        document = {}

        response = client.create_document(parent, collection_id, document_id, document)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = firestore_pb2.CreateDocumentRequest(
            parent=parent,
            collection_id=collection_id,
            document_id=document_id,
            document=document)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_create_document_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = firestore_client.FirestoreClient(channel=channel)

        # Setup request
        parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
        collection_id = 'collectionId-821242276'
        document_id = 'documentId506676927'
        document = {}

        with pytest.raises(CustomException):
            client.create_document(parent, collection_id, document_id, document)

    def test_update_document(self):
        # Setup Expected Response
        name = 'name3373707'
        expected_response = {'name': name}
        expected_response = document_pb2.Document(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = firestore_client.FirestoreClient(channel=channel)

        # Setup Request
        document = {}
        update_mask = {}

        response = client.update_document(document, update_mask)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = firestore_pb2.UpdateDocumentRequest(
            document=document, update_mask=update_mask)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_update_document_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
lancekrogers/music-network
cleff/custom_wrappers.py
Python
apache-2.0
1,287
0.006216
from django.contrib.auth.decorators import user_passes_test, login_required
from profiles.models import Musician, NonMusician
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import redirect

# these functions are made to be passed into user_passes_test
musician_wrapper_func = lambda x: musician_check(x)
non_musician_wrapper_func = lambda x: non_musician_check(x)


def musician_check(x):
    try:
        # filter() returns a queryset (empty, and therefore falsy, when there is
        # no match); it never raises ObjectDoesNotExist, so in practice only the
        # AttributeError branch fires (e.g. an AnonymousUser without a pk).
        return Musician.objects.filter(pk=x.pk)
    except ObjectDoesNotExist:
        return redirect('profiles:register_musician')
    except AttributeError:
        return redirect('profiles:register_musician')


def non_musician_check(x):
    try:
        return NonMusician.objects.filter(pk=x.pk)
    except ObjectDoesNotExist:
        return redirect('main:denied')
    except AttributeError:
        return redirect('main:denied')


# def profile_auth(x):
'''
from django.contrib.auth.decorators import login_required

@method_decorator(login_required(redirect_field_name='restaurant_app:login'))
def dispatch(self, *args, **kwargs):
    return super().dispatch(*args, **kwargs)
'''
'''
The above method_decorator setup can be copied and used to require login in
class based views.
'''
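A sketch of wiring one of these checks into a function-based view via user_passes_test, as the comment at the top intends (the view itself is hypothetical):

from django.contrib.auth.decorators import user_passes_test

@user_passes_test(musician_wrapper_func, login_url='profiles:register_musician')
def musician_dashboard(request):
    ...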
esurharun/aircrack-ng-cell
scripts/airgraph-ng/airgraph-ng.py
Python
gpl-2.0
16,007
0.03892
#!/usr/bin/python __author__ = 'Ben "TheX1le" Smith' __email__ = 'thex1le@gmail.com' __website__= 'http://trac.aircrack-ng.org/browser/trunk/scripts/airgraph-ng/' __date__ = '03/02/09' __version__ = '' __file__ = 'airgraph-ng' __data
__ = 'This is the main airgraph-ng file' """ Welcome to airgraph written by TheX1le Special Thanks to Rel1k and Zero_Chaos two people whom with out i would not be who I am! More Thanks to Brandon x0ne Dixon who really cleaned up the code forced it into pydoc format and cleaned up the logic a bit Thanks Man! I would also like to thank muts an
d Remote Exploit Community for all their help and support! ######################################## # # Airgraph-ng.py --- Generate Graphs from airodump CSV Files # # Copyright (C) 2008 Ben Smith <thex1le@gmail.com> # # This program and its support programs are free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation; version 2. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # ######################################### """ """ Airgraph-ng """ import getopt, subprocess, sys, pdb, optparse def importPsyco(): try: # Import Psyco if available to speed up execution import psyco psyco.full() except ImportError: print "Psyco optimizer not installed, You may want to download and install it!" try: sys.path.append("./lib/") # The previous line works fine and find the lib if psyco isn't installed # When psyco is installed, it does not work anymore and a full path has to be used sys.path.append("/usr/local/bin/lib/") import lib_Airgraphviz dot_libs = lib_Airgraphviz #i dont think i need this but ill look at it later except ImportError: print "Support libary import error. Does lib_Airgraphviz exist?" sys.exit(1) def airgraphMaltego(inFile,graphType="CAPR"): """ Enables airgraph-ng to have support with Maltego TODO: Comment out code and show what is going on """ returned_var = airDumpOpen(inFile) returned_var = airDumpParse(returned_var) #returns the info dictionary list with the client and ap dictionarys info_lst = returned_var returned_var = dotCreate(returned_var,graphType,"true") maltegoRTN = [info_lst,returned_var[2],returned_var[3],returned_var[4]] return maltegoRTN def airDumpOpen(file): """ Takes one argument (the input file) and opens it for reading Returns a list full of data """ openedFile = open(file, "r") data = openedFile.readlines() cleanedData = [] for line in data: cleanedData.append(line.rstrip()) openedFile.close() return cleanedData def airDumpParse(cleanedDump): """ Function takes parsed dump file list and does some more cleaning. 
Returns a list of 2 dictionaries (Clients and APs) """ try: #some very basic error handling to make sure they are loading up the correct file try: apStart = cleanedDump.index('BSSID, First time seen, Last time seen, Channel, Speed, Privacy, Power, # beacons, # data, LAN IP, ESSID') except Exception: apStart = cleanedDump.index('BSSID, First time seen, Last time seen, channel, Speed, Privacy, Cipher, Authentication, Power, # beacons, # IV, LAN IP, ID-length, ESSID, Key') del cleanedDump[apStart] #remove the first line of text with the headings try: stationStart = cleanedDump.index('Station MAC, First time seen, Last time seen, Power, # packets, BSSID, Probed ESSIDs') except Exception: stationStart = cleanedDump.index('Station MAC, First time seen, Last time seen, Power, # packets, BSSID, ESSID') except Exception: print "You seem to have provided an improper input file; please make sure you are loading an airodump txt file and not a pcap" sys.exit(1) #pdb.set_trace() del cleanedDump[stationStart] #Remove the heading line clientList = cleanedDump[stationStart:] #Splits all client data into its own list del cleanedDump[stationStart:] #The remaining list is all of the AP information #apDict = dictCreate(cleanedDump) #Create a dictionary from the list #clientDict = dictCreate(clientList) #Create a dictionary from the list apDict = apTag(cleanedDump) clientDict = clientTag(clientList) resultDicts = [clientDict,apDict] #Put both dictionaries into a list return resultDicts def apTag(devices): """ Create an AP dictionary with tags of the data type on an incoming list """ dict = {} for entry in devices: ap = {} string_list = entry.split(',') #entry = entry.replace(' ','') #sorry for the clusterfuck but i swear it all makes sense if len(string_list) == 15: ap = {"bssid":string_list[0].replace(' ',''),"fts":string_list[1],"lts":string_list[2],"channel":string_list[3].replace(' ',''),"speed":string_list[4],"privacy":string_list[5].replace(' ',''),"cipher":string_list[6],"auth":string_list[7],"power":string_list[8],"beacons":string_list[9],"iv":string_list[10],"ip":string_list[11],"id":string_list[12],"essid":string_list[13][1:],"key":string_list[14]} elif len(string_list) == 11: ap = {"bssid":string_list[0].replace(' ',''),"fts":string_list[1],"lts":string_list[2],"channel":string_list[3].replace(' ',''),"speed":string_list[4],"privacy":string_list[5].replace(' ',''),"power":string_list[6],"beacons":string_list[7],"data":string_list[8],"ip":string_list[9],"essid":string_list[10][1:]} if len(ap) != 0: dict[string_list[0]] = ap return dict def clientTag(devices): """ Create a client dictionary with tags of the data type on an incoming list """ dict = {} for entry in devices: client = {} string_list = entry.split(',') if len(string_list) >= 7: client = {"station":string_list[0].replace(' ',''),"fts":string_list[1],"lts":string_list[2],"power":string_list[3],"packets":string_list[4],"bssid":string_list[5].replace(' ',''),"probe":string_list[6:][1:]} if len(client) != 0: dict[string_list[0]] = client return dict def dictCreate(device): #deprecated """ Create a dictionary using an incoming list """ dict = {} for entry in device: #the following loop through the clients list creates a nested list of each client in its own list grouped by a parent list of client info entry = entry.replace(' ','') string_list = entry.split(',') if string_list[0] != '': dict[string_list[0]] = string_list[:] #if the line isn't a blank line then it is stored in the dictionary with the MAC/BSSID as the key return dict
def usage(): """ Prints the usage for airgraph-ng """ print "############################################","\n# Welcome to Airgraph-ng #","\n############################################\n" print "Usage: python airgraph-ng -i [airodumpfile.txt] -o [outputfile.png] -g [CAPR OR CPG]" print "\n-i\tInput File\n-o\tOutput File\n-g\tGraph Type [CAPR (Client to AP Relationship) OR CPG (Common probe graph)]\n-p\tDisable Psyco JIT compiler\n-h\tPrint this help" def dotCreate(info,graphType,maltego="false"): """ Graphviz function to support the graph types TODO: Possibly move this to the library? """ #please don't try to use this feature yet; it's not finished and will error def ZKS_main(info): # Zero_Chaos Kitchen Sink Mode..... Every Thing but the Kitchen Sink! #info comes in as a list: Clients Dictionary at position 0 and AP Dictionary at position 1 print "Feature is not ready yet" sys.exit(1) #pdb.set_trace() #debug point return_var = CAPR_main(info) #dot_file = return_var[0] APNC = return_var[2] CNAP = return_var[3] CAPR = return_var[0] del CAPR[:1] #remove the graphviz heading... dot_file = ['digraph G {\n\tsize ="96,96";\n\toverlap=scale;\n'] #start the graphviz config file dot_file.extend(dot_libs.subGraph(CAPR,'Clients to AP Relationships','CAPR',return_var[4],'n')) if len(APNC) != 0: # there should be a better way to check for null lists dot_file.extend(dot_libs.subGraph(APNC,'Access Points with no Clients','AP',return_var[4]
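As a quick sanity check for the tag helpers above, here is a fabricated sample (the row is made up, not from a real capture, and uses the 11-field airodump-ng layout that apTag handles):

row = ("00:11:22:33:44:55, 2009-01-01 00:00:00, 2009-01-01 00:10:00,"
       " 6, 54, WPA2, -40, 100, 50, 0.0.0.0, TestNet")
aps = apTag([row])
print aps["00:11:22:33:44:55"]["essid"]    # -> "TestNet" (leading space stripped by [1:])
print aps["00:11:22:33:44:55"]["channel"]  # -> "6"

After split(',') the row has exactly 11 fields, so the elif branch fires and the AP is keyed by its BSSID.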
TheCherry/ark-server-manager
src/config.py
Python
gpl-2.0
22
0
global mods
mods = []
CCI-MOC/GUI-Backend
api/v2/serializers/details/image.py
Python
apache-2.0
1,518
0
from core.models import Application as Image, BootScript from rest_framework import serializers from api.v2.serializers.summaries import UserSummarySerializer from api.v2.serializers.fields import ( ImageVersionRelatedField, TagRelatedField) from api.v2.serializers.fields.base import UUIDHyperlinkedIdentityField class SwapBooleanField(serializers.BooleanField): def to_internal_value(self, data): truth_value = super(SwapBooleanField, self).to_internal_value(data) swap_value = not truth_value return swap_value def to_representation(self, value):
truth_value = super(SwapBooleanField, self).to_representation(value) swap_value = not truth_value return swap_value class ImageSerializer(serializers.HyperlinkedModelSerializer): created_by = UserSummarySerializer(read_only=True) tags = TagRelatedField(many=True) versions = ImageVersionRelatedField(many=True) icon = serializers.CharField(source="get_icon_url", read_only=True)
is_public = SwapBooleanField(source='private') url = UUIDHyperlinkedIdentityField( view_name='api:v2:application-detail', ) class Meta: model = Image fields = ( 'id', 'url', 'uuid', 'name', # Adtl. Fields 'created_by', 'description', 'end_date', 'is_public', 'icon', 'start_date', 'tags', 'versions' )
graebnerc/Beyond_Equilibrium
tsm-src.py
Python
mit
83,029
0.005095
from __future__ import print_function import numpy import random import scipy.stats """ This is the source code for the paper entitled "Beyond Equilibrium: Revisiting Two-Sided Markets from an Agent-Based Modeling Perspective" published in the International Journal of Computational Economics and Econometrics. Authors: Torsten Heinrich and Claudius Graebner Emails: torsten.heinrich@maths.ox.ac.uk and claudius@claudius-graebner.com We suggest calling the file using the associated callscript via a terminal using the following command (requires a shellscript environment such as bash): ./callscript_all.sh If you wish to start a single run, the script should be used in the following way: 1. call "python src-tsm.py [filename=<output file name>] [strategy=<RIL/RILS/RO>] [providernum=<number of providers>] \ [fixedentryfee=<entryfee>] [pcc=<per customer fixed costs>] [runid=<run id>]" The model defines 3 types of agents: 1. sellers 2. buyers (sellers and buyers, also collectively called customers, represent the two sides of the 'market') 3. providers - those who control the customers' access to the tsm ... the real actors in this model This model consists of: 1. simple decision mechanism for customers (the only real agency is to subscribe to and unsubscribe from providers) 2. strategic decision making for providers, driven by reinforcement learning 3. simple exchange mechanism It is recommended to run the model with the associated bash script. Otherwise, the files can be called directly. It requires a directory "data" to store the results. Then you may call the file figures.py that generates the figures of the paper in a directory figures/. This script is organized as follows: 1. Definition of the control variables. 2. Definition of the recording variables that are used to store the results of the simulation. 3. The actual ABM. The different parts are preceded by a heading in a block comment.
""" """ Control variables """ output_filename = 'results' # Default name for time series and figures provider_strategy = 'RO' # Default strategy (may be 'RO', 'RIL', or 'RILS') graphical_output = False # Do not create figures by default t_max = 500 # number of iterations no_providers = 1 # number of access providers to the tsm service no_sellers = 2000 # number of the first tsm side ('sellers') no_buyers = 10000 # number of the second tsm side ('buyers') no_transactions_per_iteration = 30000 # maximum number of transactions per iteration operatingcost = 10000 # cost of tsm service for provider per period provider_fixed_cost_ps = 25 # cost occurring to the provider per seller provider_fixed_cost_pb = 25 # cost occurring to the provider per buyer provider_transaction_cost_b = 50 # cost occurring to the provider per transaction through the buyer provider_transaction_cost_s = 50 # cost occurring to the provider per transaction through the seller max_providers = 5 # maximum number of providers a customer may have subscribed to at any given time threshold_level = 400 # monetary threshold of customer revenue below which she will not try to subscribe to networks of further providers (given she already has one) price_average = 1000 # average price for transactions between buyers (who have a uniformly distributed reservation price above) and sellers (who have a uniformly distributed reservation price below) init_buyer_subscription_fee = 0 # initial subscription fee for buyers to providers init_trans_cost_b = 0.0 # initial per transaction cost for the buyers init_seller_subscription_fee = 0 # initial subscription fee for sellers to providers init_trans_cost_s = 0.0 # initial per transaction cost for the sellers init_roaming_cost = 100 # initial 'roaming' access cost for transactions with customers of other providers # boundary variables min_cost = 100 # minimum boundary 'roaming' access cost max_cost = 100 # maximum boundary 'roaming' access cost min_entryfee_s = -3000 # minimum boundary seller entrance fee max_entryfee_s = 5000 # maximum boundary seller entrance fee min_entryfee_b = -3000 # minimum boundary buyer entrance fee max_entryfee_b = 5000 # maximum boundary buyer entrance fee
min_trans_cost_s = -1000 # min transaction cost for seller max_trans_cost_s = 1010 # max transaction cost for seller min_trans_cost_b = -1000 # min transaction cost for buyer max_trans_cost_b = 1010 # max transaction cost for buyer ema_factor = 0.01 # exponential moving average factor past_discounting_root_expon = 0.99 # exponent for root function for discounting old reinforcement learning imbalances
# auxiliary global variables provider_id = 0 # provider id counter seller_id = 0 # 'seller' id counter buyer_id = 0 # 'buyer' id counter transaction_counter = 0 # counter variable for transactions in period t = -1 # time figure = -1 # figure objects # object list variables providerlist = [] # global list of provider objects customerlist = [] # global list of customer objects sellerlist = [] # global list of seller objects buyerlist = [] # global list of buyer objects # global network externality provider choice functions s_providerweights = [] # global seller network externality function (weights for providers according to sellers' preferences, used in sellers' provider choices), has weight entries for all providers, thus the same length as providerlist b_providerweights = [] # global buyer network externality function (weights for providers according to buyers' preferences, used in buyers' provider choices), has weight entries for all providers, thus the same length as providerlist """ Recording variables. They are used to store the results of the simulations. """ rec_t = [] # time from 0 through t_max-1 (required for drawing) rec_transactions = [] # number of transactions per period rec_prov_min = [] # providers' minimum period revenue rec_prov_max = [] # providers' maximum period revenue rec_prov_av = [] # providers' average period revenue rec_cn_max = [] # providers' maximum number of customers (by period) rec_cn_min = [] # providers' minimum number of customers (by period) rec_cn_av = [] # providers' average number of customers (by period) rec_efb_min = [] # minimum buyer subscription fee rec_efb_max = [] # maximum buyer subscription fee rec_efb_av = [] # average buyer subscription fee (average weighted by number of customers) rec_efs_min = [] # minimum seller subscription fee rec_efs_max = [] # maximum seller subscription fee rec_efs_av = [] # average seller subscription fee (average weighted by number of customers) rec_tfb_min = [] # minimum transaction fee charged from the buyers rec_tfb_max = [] # maximum transaction fee charged from the buyers rec_tfb_av = [] # average transaction fee charged from the buyers rec_tfs_min = [] # minimum transaction fee charged from the sellers rec_tfs_max = [] # maximum transaction fee charged from the sellers rec_tfs_av = [] # average transaction fee charged from the sellers rec_cost_min = [] # minimum 'roaming' access cost for customers of other providers rec_cost_max = [] # maximum 'roaming' access cost for customers of other providers rec_cost_av = [] # average 'roaming' access cost for customers of other providers rec_customer_min = [] # customers' minimum period 'revenue' rec_customer_max = [] # customers' maximum period 'revenue' rec_customer_av = [] # customers' average period 'revenue' rec_seller_min = [] # sellers' minimum period 'revenue' rec_seller_max = [] # sellers' maximum period 'revenue' rec_seller_av = [] # sellers' average period 'revenue' rec_buyer_min = [] # buyers' minimum period 'revenue' rec_buyer_max = [] # buyers' maximum period 'revenue' rec_buyer_av = [] # buyers' average period 'revenue' """ The agent based model 1st part (lines 149 - 916): Class definitions. 2nd part (lines 922 - 1486): Definition of auxiliary functions. 3rd part (lines 1493 - 1645): Definition of the main function,
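The control block above fixes ema_factor = 0.01 for exponential moving averages. As a hedged sketch (the main loop that applies it lies outside this excerpt, so the exact call site is an assumption), the standard update it parameterizes is:

def ema_update(old_average, observation, factor=0.01):
    # Standard exponential moving average: weight the new observation by
    # `factor` and the running average by (1 - factor); `factor` plays the
    # role of ema_factor above.
    return factor * observation + (1.0 - factor) * old_average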
epronk/pyfit2
engines.py
Python
gpl-2.0
3,171
0.005361
import traceback from util import * import importlib class DefaultLoader(object): def __init__(self): self.fixtures = {}
def load(self, name): org_name = name fixture = self.fixtures.get(name) if fixture: print 'already exists, return' return fixture print 'DefaultLoader.load' try: module = self.do_load(name) except ImportError, inst: a = traceback.format_exc() print '{\n%s}' % inst.value names = name.split('.')[1:] for name in names: module = getattr(module, name)
fixture = getattr(module, name)() self.fixtures[org_name] = fixture return fixture def do_load(self, name): return __import__(name) class StringLoader(DefaultLoader): def __init__(self, script): self.script = script def load(self, name): x = compile(self.script, 'not_a_file.py', 'exec') return eval(x) class Summary(object): def __init__(self): self.reset() def reset(self): self.right = 0 self.wrong = 0 self.ignored = 0 self.exceptions = 0 class Engine(object): # return the next object in the flow or None. # check if fixture has attribute with name of next table. # if not create an instance with that name def __init__(self): self.loader = DefaultLoader() self.fixture = None self.print_traceback = False self.adapters = DefaultAdapters() self.summary = Summary() def do_process(self, table): name = table.name() try: return_table = getattr(self.fixture, name) self.fixture = return_table() self.fixture.process(table) return except AttributeError: pass self.fixture = self.loader.load(name) self.fixture.engine = self #hack self.fixture.process(table) def process(self, table, throw = True): name = table.name() if throw == True: self.do_process(table) else: try: self.do_process(table) except Exception, inst: '''Fixme: Should the rest of the table become grey?''' table.cell(0,0).error(inst) if self.print_traceback: print 'Processing table `%s` failed' % table.name() print '=====' print traceback.format_exc() print '=====' return self.fixture def compare(self, cell, actual_value): expected_value = str(cell) target_type = type(actual_value) if self.adapters.has_key(target_type): adapter = self.adapters[target_type] expected = adapter.convert(expected_value) else: expected = type(actual_value)(expected_value) if expected == actual_value: cell.passed() self.summary.right += 1 else: cell.failed(actual_value) self.summary.wrong += 1
tensorflow/ngraph-bridge
tools/log_parser.py
Python
apache-2.0
6,884
0.002905
#============================================================================== # Copyright 2019-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import re def parse_logs(log_lines, verbose=False): """ Returns ngraph metrics parsed out of the specified log output. Regular log parsing will return: - Number of nodes in the graph - Number of nodes marked for clustering - Number of ngraph clusters Verbose log parsing will return all of the above, in addition to: - Percentage of nodes clustered - Has deadness issues - Has static input issues - Reasons why edge connected clusters did not merge - Reasons why edge connected encapsulates did not merge - Nodes per cluster - Types of edges - Op not supported - Op failed type constraint """ if type(log_lines) == type(''): log_lines = log_lines.split('\n') else: assert type(log_lines) == type( [] ), "If log_lines is not a string, it should have been a list, but instead it is a " + str(type(log_lines)) assert all([ type(i) == type('') and '\n' not in i for i in log_lines ]), 'Each element of the list should be a string and not contain new lines' all_results = {} curr_result = {} ctr = 0 prev_line = "" for line in log_lines: start_of_subgraph = "NGTF_SUMMARY: Op_not_supported:" in line # If logs of a new sub-graph are starting, save the old one if start_of_subgraph: if len(curr_result) > 0: all_results[str(ctr)] = curr_result curr_result = {} ctr += 1 # keep collecting information in curr_result if line.startswith('NGTF_SUMMARY'): if 'Number of nodes in the graph' in line: curr_result['num_nodes_in_graph'] = int( line.split(':')[-1].strip()) elif 'Number of nodes marked for clustering' in line: curr_result['num_nodes_marked_for_clustering'] = int( line.split(':')[-1].strip().split(' ')[0].strip()) if verbose: # get percentage of total nodes match = re.search("(\d+(\.\d+)?%)", line) nodes_clustered = "" if match: nodes_clustered = match.group(0) curr_result["percentage_nodes_clustered"] = nodes_clustered elif 'Number of ngraph clusters' in line: curr_result['num_ng_clusters'] = int( line.split(':')[-1].strip()) if verbose and ('DEADNESS' in line and 'STATICINPUT' in line): line = line[len("NGTF_SUMMARY:"):] reasons = dict([i.strip() for i in item.split(":")] for item in line.split(",")) if "reasons why a pair of edge connected encapsulates did not merge" in prev_line: curr_result[ 'why_edge_connected_encapsulates_did_not_merge'] = reasons elif "reasons why a pair of edge connected clusters did not merge" in prev_line: curr_result[ 'why_edge_connected_clusters_did_not_merge'] = reasons # default has_deadness_issues and has_static_input_issues to 'No' if 'has_deadness_issues' not in curr_result.keys(): curr_result['has_deadness_issues'] = "No" if 'has_static_input_issues' not in curr_result.keys(): curr_result['has_static_input_issues'] = "No" # set has deadness/static input issues to 'Yes' if the value is > 0 if int(reasons['DEADNESS']) > 0: curr_result['has_deadness_issues'] = "Yes"
if int(reasons['STATICINPUT']) > 0: curr_result['has_static_input_issues'] = "Yes" elif verbose and 'Nodes per cluster' in line: curr_result['nodes_per_cluster'] = float( line.split(':')[-1].strip()) elif verbose and 'Types of edges::' in line: line = line[len("NGTF_SUMMARY: Types of edges:: "):] edge_types = dict([i.strip() for i in item.split(":")] for item in line.split(",")) curr_result["types_of_edges"] = edge_types elif verbose and 'Op_not_supported' in line: curr_result["op_not_supported"] = \ [i.strip() for i in line[len("NGTF_SUMMARY: Op_not_supported: "):].split(",")] elif verbose and 'Op_failed_type_constraint' in line: curr_result["op_failed_type_constraint"] = \ [i.strip() for i in line[len( "NGTF_SUMMARY: Op_failed_type_constraint: "):].split(",")] prev_line = line # add the last section to the results all_results[str(ctr)] = curr_result return all_results def compare_parsed_values(parsed_vals, expected_vals): # Both inputs are expected to be 2 dictionaries (representing jsons)
# The constraints in expected_vals are <= parsed_vals. Parsed_vals should have all possible values that the parser can spit out. However expected_vals can be relaxed (even empty) and choose to only verify/match certain fields match = lambda current, expected: all( [expected[k] == current[k] for k in expected])
for graph_id_1 in expected_vals: # The ordering is not important and could be different, hence search through all elements of parsed_vals matching_id = None for graph_id_2 in parsed_vals: if match(expected_vals[graph_id_1], parsed_vals[graph_id_2]): matching_id = graph_id_2 break if matching_id is None: return False, 'Failed to match expected graph info ' + graph_id_1 + " which was: " + str( expected_vals[graph_id_1] ) + "\n. Got the following parsed results: " + str(parsed_vals) else: parsed_vals.pop(matching_id) return True, ''
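A minimal end-to-end check of the two helpers above, assuming parse_logs and compare_parsed_values are importable from this module; the NGTF_SUMMARY lines are fabricated for illustration:

sample = [
    "NGTF_SUMMARY: Number of nodes in the graph: 42",
    "NGTF_SUMMARY: Number of nodes marked for clustering: 30 (71.4% of total nodes)",
    "NGTF_SUMMARY: Number of ngraph clusters: 3",
]
parsed = parse_logs(sample)  # -> {'0': {'num_nodes_in_graph': 42, ...}}
# expected_vals may constrain only the fields we care about:
ok, msg = compare_parsed_values(parsed, {"0": {"num_nodes_in_graph": 42}})
assert ok, msg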
easytaxibr/airflow
tests/core.py
Python
apache-2.0
93,982
0.001734
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import doctest import json import os import re import unittest import multiprocessing import mock from numpy.testing import assert_array_almost_equal import tempfile from datetime import datetime, time, timedelta from email.mime.multipart import MIMEMultipart from email.mime.application import MIMEApplication import signal from time import time as timetime from time import sleep import warnings from dateutil.relativedelta import relativedelta import sqlalchemy from airflow import configuration from airflow.executors import SequentialExecutor, LocalExecutor from airflow.models import Variable from tests.test_utils.fake_datetime import FakeDatetime configuration.load_test_config() from airflow import jobs, models, DAG, utils, macros, settings, exceptions from airflow.models import BaseOperator from airflow.operators.bash_operator import BashOperator from airflow.operators.check_operator import CheckOperator, ValueCheckOperator from airflow.operators.dagrun_operator import TriggerDagRunOperator from airflow.operators.python_operator import PythonOperator from airflow.operators.dummy_operator import DummyOperator from airflow.operators.http_operator import SimpleHttpOperator from airflow.operators import sensors from airflow.hooks.base_hook import BaseHook from airflow.hooks.sqlite_hook import SqliteHook from airflow.hooks.postgres_hook import PostgresHook from airflow.bin import cli from airflow.www import app as application from airflow.settings import Session from airflow.utils.state import State from airflow.utils.dates import infer_time_unit, round_time, scale_time_units from airflow.utils.logging import LoggingMixin from lxml import html from airflow.exceptions import AirflowException from airflow.configuration import AirflowConfigException import six NUM_EXAMPLE_DAGS = 18 DEV_NULL = '/dev/null' TEST_DAG_FOLDER = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'dags') DEFAULT_DATE = datetime(2015, 1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] TEST_DAG_ID = 'unit_tests' try: import cPickle as pickle except ImportError: # Python 3 import pickle def reset(dag_id=TEST_DAG_ID): session = Session() tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id) tis.delete() session.commit() session.close()
reset() class OperatorSubclass(BaseOperator): """ An operator to test template substitution """ template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs): super(OperatorSubclass, self).__init__(*args, **kwargs) self.some_templated_field = some_templated_field def execute(*args, **kwargs): pass class CoreTest(unittest.TestCase): # These defaults make the test faster to run default_scheduler_args = {"file_process_interval": 0, "processor_poll_interval": 0.5, "num_runs": 1} def setUp(self): configuration.load_test_config() self.dagbag = models.DagBag( dag_folder=DEV_NULL, include_examples=True) self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} dag = DAG(TEST_DAG_ID, default_args=self.args) self.dag = dag self.dag_bash = self.dagbag.dags['example_bash_operator'] self.runme_0 = self.dag_bash.get_task('runme_0') self.run_after_loop = self.dag_bash.get_task('run_after_loop') self.run_this_last = self.dag_bash.get_task('run_this_last') def test_schedule_dag_no_previous_runs(self): """ Tests scheduling a dag with no previous runs """ dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs') dag.add_task(models.BaseOperator( task_id="faketastic", owner='Also fake', start_date=datetime(2015, 1, 2, 0, 0))) dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag) self.assertIsNotNone(dag_run) self.assertEqual(dag.dag_id, dag_run.dag_id) self.assertIsNotNone(dag_run.run_id) self.assertNotEqual('', dag_run.run_id) self.assertEqual(datetime(2015, 1, 2, 0, 0), dag_run.execution_date, msg= 'dag_run.execution_date did not match expectation: {0}' .format(dag_run.execution_date)) self.assertEqual(State.RUNNING, dag_run.state) self.assertFalse(dag_run.external_trigger) dag.clear() def test_schedule_dag_fake_scheduled_previous(self): """ Test scheduling a dag where there is a prior DagRun which has the same run_id as the next run should have """ delta = timedelta(hours=1) dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous', schedule_interval=delta, start_date=DEFAULT_DATE) dag.add_task(models.BaseOperator( task_id="faketastic", owner='Also fake', start_date=DEFAULT_DATE)) scheduler = jobs.SchedulerJob(**self.default_scheduler_args) dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE), execution_date=DEFAULT_DATE, state=State.SUCCESS, external_trigger=True) dag_run = scheduler.create_dag_run(dag) self.assertIsNotNone(dag_run) self.assertEqual(dag.dag_id, dag_run.dag_id) self.assertIsNotNone(dag_run.run_id) self.assertNotEqual('', dag_run.run_id) self.assertEqual(DEFAULT_DATE + delta, dag_run.execution_date, msg= 'dag_run.execution_date did not match expectation: {0}' .format(dag_run.execution_date)) self.assertEqual(State.RUNNING, dag_run.state) self.assertFalse(dag_run.external_trigger) def test_schedule_dag_once(self): """ Tests scheduling a dag scheduled for @once - should be scheduled the first time it is called, and not scheduled the second. 
""" dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once') dag.schedule_interval = '@once' dag.add_task(models.BaseOperator( task_id="faketastic", owner='Also fake', start_date=datetime(2015, 1, 2, 0, 0))) dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag) dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag) self.assertIsNotNone(dag_run) self.assertIsNone(dag_run2) dag.clear() def test_fractional_seconds(self): """ Tests if fractional seconds are stored in the database """ dag = DAG(TEST_DAG_ID + 'test_fractional_seconds') dag.schedule_interval = '@once' dag.add_task(models.BaseOperator( task_id="faketastic", owner='Also fake', start_date=datetime(2015, 1, 2, 0, 0))) start_date = datetime.now() run = dag.create_dagrun( run_id='test_' + start_date.isoformat(), execution_date=start_date, start_date=start_date, state=State.RUNNING, external_trigger=False ) run.refresh_from_db() self.assertEqual(start_date, run.execution_date, "dag run execution_date loses precision") self.assertEqual(start_date, run.start_date, "dag run start_date loses precision ") def test_schedule_dag_st
brainwane/zulip
zerver/views/zephyr.py
Python
apache-2.0
2,693
0.001857
import base64 import logging import re import subprocess from typing import Optional import orjson from django.conf import settings from django.http import HttpRequest, HttpResponse from django.utils.translation import ugettext as _ from zerver.decorator import authenticated_json_view from zerver.lib.ccache import make_ccache
from zerver.lib.pysa import mark_sanitized from zerver.lib.request import REQ, has_request_variables from zerver.lib.response import json_error, json_success from zerver.lib.users import get_api_key from zerver.models import UserProfile # Hack for mit.edu users whose Kerberos usernames don't match what they zephyr # as. The key is for Kerberos and the value is for zephyr. kerberos_alter_egos = { 'golem': 'ctl', } @authenticated_json_view @has_request_variables def webathena_kerberos_login(request: HttpRequest, user_profile: UserProfile, cred: Optional[str]=REQ(default=None)) -> HttpResponse: global kerberos_alter_egos if cred is None: return json_error(_("Could not find Kerberos credential")) if not user_profile.realm.webathena_enabled: return json_error(_("Webathena login not enabled")) try: parsed_cred = orjson.loads(cred) user = parsed_cred["cname"]["nameString"][0] if user in kerberos_alter_egos: user = kerberos_alter_egos[user] assert(user == user_profile.email.split("@")[0]) # Limit characters in usernames to valid MIT usernames # This is important for security since DNS is not secure. assert(re.match(r'^[a-z0-9_.-]+$', user) is not None) ccache = make_ccache(parsed_cred) # 'user' has been verified to contain only benign characters that won't # help with shell injection. user = mark_sanitized(user) # 'ccache' is only written to disk by the script and used as a kerberos # credential cache file. ccache = mark_sanitized(ccache) except Exception: return json_error(_("Invalid Kerberos cache")) # TODO: Send these data via (say) rabbitmq try: api_key = get_api_key(user_profile) subprocess.check_call(["ssh", settings.PERSONAL_ZMIRROR_SERVER, "--", "/home/zulip/python-zulip-api/zulip/integrations/zephyr/process_ccache", user, api_key, base64.b64encode(ccache).decode("utf-8")]) except Exception: logging.exception("Error updating the user's ccache", stack_info=True) return json_error(_("We were unable to set up mirroring for you")) return json_success()
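The regex assertion above is the security-critical allowlist; a standalone sketch of the same check (sample usernames invented):

import re
ALLOWED_USERNAME = re.compile(r'^[a-z0-9_.-]+$')
for name in ('ctl', 'alice_01', 'bad;name'):
    # Only lowercase letters, digits, '_', '.' and '-' pass the allowlist,
    # so shell metacharacters like ';' are rejected before any subprocess call.
    print(name, bool(ALLOWED_USERNAME.match(name)))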
t3dev/odoo
addons/stock/wizard/stock_warn_insufficient_qty.py
Python
gpl-3.0
1,401
0.002141
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import api, fields, models from odoo.tools import float_compare class StockWarnInsufficientQty(models.AbstractModel): _name = 'stock.warn.insufficient.qty' _description = 'Warn Insufficient Quantity' product_id = fields.Many2one('product.product', 'Product', required=True) location_id = fields.Many2one( 'stock.location', 'Location', domain="[('usage', '=', 'internal')]", required=True)
quant_ids = fields.Many2many('stock.quant', compute='_compute_quant_ids') @api.one @api.depends('product_id') def _compute_quant_ids(self): self.quant_ids = self.env['stock.quant'].search([ ('product_id', '=', self.product_id.id), ('location_id.usage', '=', 'internal') ]) def action_done(self): raise NotImplementedError() class StockWarnInsufficientQtyScrap(models.TransientModel): _name = 'stock.warn.insufficient.qty.scrap' _inherit = 'stock.warn.insufficient.qty' _description = 'Warn Insufficient Scrap Quantity' scrap_id = fields.Many2one('stock.scrap', 'Scrap') def action_done(self): return self.scrap_id.do_scrap() def action_cancel(self): # FIXME in master: we should not have created the scrap in the first place return self.scrap_id.sudo().unlink()
plotly/plotly.py
packages/python/plotly/plotly/validators/contourcarpet/line/_color.py
Python
mit
411
0.002433
import _plotly_utils.basevalidators class ColorValidator(_plotly_utils.basevalidators.ColorValidator): def __init__(self, plotly_name="color", parent_name="contourcarpet.line", **kwargs): super(ColorValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "style+colorbars"), **kwargs )
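A sketch of how such a generated validator is typically exercised; validate_coerce is assumed here to be inherited from the _plotly_utils base validator class:

v = ColorValidator()
# Coerces/validates a CSS color string; invalid input raises ValueError.
print(v.validate_coerce('rgba(99, 110, 250, 0.5)'))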
recombinators/autoscaling
checker.py
Python
mit
2,410
0.000415
import os from sqs import (make_SQS_connection, get_queue, queue_size, ) from cloudwatch import (make_CW_connection, update_metric, ) from threading import Timer # Define AWS credentials AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] REGION = 'us-west-2' # Set queue name variables FULL_COMPOSITE_QUEUE = 'snapsat_composite_queue' PREVIEW_COMPOSITE_QUEUE = 'snapsat_preview_queue' # Set metric name variables FULL_COMPOSITE_METRIC = 'number_jobs_full_queue' PREVIEW_COMPOSITE_METRIC = 'number_jobs_preview_queue' # Set metric namespace NAMESPACE = 'Snapsat' # Set size check intervals FULL_INTERVAL = 10 PREVIEW_INTERVAL = 10 # Create SQS connection SQSconn = make_SQS_connection(REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) # Create CW connection CWconn = make_CW_connection(REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) # Monitor size of queue def monitor_queue(SQSconn, CWconn, queue_name, metric_name): queue = get_queue(SQSconn, queue_name) size = queue_size(queue) update_metric(CWconn, NAMESPACE, metric_name, size) # Create full queue size check timer function def full_queue_timer(SQSconn, CWconn, queue_name, metric_name, interval): monitor_queue(SQSconn, CWconn, queue_name, metric_name) return Timer(interval, full_queue_timer, args=[SQSconn, CWconn, queue_name, metric_name, interval] ).start()
# Create preview queue size check timer function def preview_queue_timer(SQSconn, CWconn, queue_name, metric_name, interval): monitor_queue(SQSconn, CWconn, queue_name, metric_name) return Timer(interval, preview_queue_timer, args=[SQSconn, CWconn, queue_name, metric_name, interval] ).start() # Check queue sizes at the configured intervals def main(): full_queue_timer(SQSconn, CWconn,
FULL_COMPOSITE_QUEUE, FULL_COMPOSITE_METRIC, FULL_INTERVAL) preview_queue_timer(SQSconn, CWconn, PREVIEW_COMPOSITE_QUEUE, PREVIEW_COMPOSITE_METRIC, PREVIEW_INTERVAL) if __name__ == '__main__': main()
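Both wrappers above implement a self-rescheduling threading.Timer poll loop: each must re-arm itself, since a Timer fires only once. The generic pattern, as a sketch:

from threading import Timer

def poll(interval, action):
    action()
    # Re-arm with the same function so the loop keeps itself alive;
    # every tick schedules exactly one next tick.
    Timer(interval, poll, args=[interval, action]).start()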
scottnm/ProjectEuler
python/Problem1-Multiples_of_3_&_5.py
Python
apache-2.0
82
0.085366
sum=0
for x in range(0,1000):
    if(x%3==0 or x%5==0):
        sum+=x
print(sum)
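The brute-force loop can be cross-checked against the closed form: by inclusion-exclusion the answer is S(3) + S(5) - S(15), where S(k) sums the arithmetic series of positive multiples of k below 1000:

def s(k, n=1000):
    m = (n - 1) // k            # count of positive multiples of k below n
    return k * m * (m + 1) // 2
print(s(3) + s(5) - s(15))      # 233168, matching the loop above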
barmalei/primus
lib/primus/fileartifact.py
Python
lgpl-3.0
6,594
0.014407
# # Copyright 2009 Andrei <vish@gravitysoft.org> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. import os, shutil, tempfile from subprocess import Popen from artifactcore import Artifact, ShellScriptHelper class FileArtifact(Artifact): def __init__ (self, name, dependencies=[]): Artifact.__init__(self, name, dependencies) if os.path.isabs(self.name): raise BaseException("File artifact '%s' cannot have absolute path" % name) def fullpath(self): return os.path.join(Artifact.home_dir(), self.name) class AcquiredFile(FileArtifact): def __init__(self, name, dependencies = []): FileArtifact.__init__(self, name, dependencies) p = self.fullpath() if os.path.isdir(p): raise BaseException("File cannot be directory") if not os.path.isdir(os.path.dirname(p)): raise BaseException("Wrong parent directory for '%s'" % p) def cleanup(self): p = self.fullpath() if os.path.exists(p): if os.path.isfile(p): os.remove(p) else: raise BaseException("File '%s' is directory") def build(self): pass def is_expired(self): return not os.path.exists(self.fullpath()) class PermanentFileArtifact(FileArtifact): def __init__ (self, name, dependencies=[]): FileArtifact.__init__(self, name, dependencies) if not os.path.exists(self.fullpath()): raise IOError("File artifact '%s' does not exist" % name) class CreateSymLink(FileArtifact): def __init__ (self, name, path, dependencies = []): FileArtifact.__init__(self, name, dependencies) self.path = path def build(self): realpath = os.path.join(Artifact.home_dir(), self.path)
if not os.path.exists(realpath): raise IOError("Path '%s' that the link has to refer to does not exist." % self.path) os.chdir(Artifact.home_dir()) if os.path.exists(self.name): os.unlink(self.name) os.symlink(realpath, self.name)
def what_it_does(self): return "Create symlink: %s" % self.name class AcquiredDirectory(FileArtifact): def __init__ (self, name, dependencies=[]): FileArtifact.__init__(self, name, dependencies) path = self.fullpath() if os.path.exists(path) and (not os.path.isdir(path)): raise BaseException("File '%s' exists and the file is not a directory") def cleanup(self): path = self.fullpath() if os.path.exists(path): if os.path.isdir(path) : os.chdir(Artifact.home_dir()) shutil.rmtree(self.name) else: raise IOError() def build(self): path = self.fullpath() if not os.path.exists(path): os.makedirs(path) def is_expired(self): return not os.path.exists(self.fullpath()) def what_it_does(self): return "Create directory '%s'" % self.fullpath() class RunShellScript(PermanentFileArtifact): def __init__(self, name, parameters = '', dependencies = []): PermanentFileArtifact.__init__(self, name, dependencies) self.parameters = parameters def build(self): ShellScriptHelper.run(self.fullpath(), self.parameters, True) def what_it_does(self): return "Run shell script '%s %s'" % (self.name, self.parameters) class RunMakefile(RunShellScript): def __init__ (self, name, parameters = '', dependencies = []): if os.path.basename(name) != 'Makefile': name = os.path.join(name, 'Makefile') RunShellScript.__init__(self, name, parameters, dependencies) def build(self): os.chdir(os.path.dirname(self.fullpath())) ShellScriptHelper.run("make", self.parameters, False) def what_it_does(self): return "Run make file '%s %s'" % (self.name, self.parameters) class UnzipFile(PermanentFileArtifact): def __init__ (self, name, destination = None, dependencies = []): PermanentFileArtifact.__init__(self, name, dependencies) if destination == None: destination = os.path.dirname(name) self.destination = AcquiredDirectory(destination) def build(self): self.destination.build() zippath = self.fullpath() destination = self.destination.fullpath() tmp = tempfile.mkstemp(dir=Artifact.home_dir()) null = open(tmp[1], "w") try: ShellScriptHelper.run('unzip', " %s -d %s " % (zippath, destination), False, null) finally: null.close() os.remove(tmp[1]) def what_it_does(self): return "Unzip '%s' to '%s'" % (self.name, self.destination.name) class CopyFile(AcquiredFile): def __init__ (self, name, source, dependencies = []): assert source self.source = PermanentFileArtifact(source) AcquiredFile.__init__(self, name, dependencies) def is_expired(self): return True def build(self): if os.path.isdir(self.source.fullpath()): raise IOError("Copying directory ('%s') is not supported." % self.source.name) shutil.copyfile(self.source.fullpath(), self.fullpath()) def what_it_does(self): return "Copy '%s' to '%s'" % (self.source.name, self.name) class RmFile(FileArtifact): def __init__ (self, name, dependencies = []): FileArtifact.__init__(self, name, dependencies) def is_expired(self): return True def build(self): path = self.fullpath() if path.os.exists(): if os.path.isdir(path): shutil.rmtree(path) else: os.path.remove(path) def what_it_does(self): return "Remove '%s'" % self.name
classgrade/classgrade
classgrade/gradapp/migrations/0028_auto_20170105_1435.py
Python
mit
486
0
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2017-01-05 14:35 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('gradapp', '0027_auto_20161109_0925'), ] operations = [ migrations.AlterField( model_name='evalquestion', name='comments', field=models.TextField(help_text='Use Markdown', max_length=500), ), ]
ovresko/erpnext
erpnext/setup/doctype/setup_progress/setup_progress.py
Python
gpl-3.0
1,811
0.027057
# -*- coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe, json
from frappe.model.document import Document class SetupProgress(Document): pass def get_setup_progress(): if not getattr(frappe.local, "setup_progress", None): frappe.local.setup_progress = frappe.get_doc("Setup Progress", "Setup Progress")
return frappe.local.setup_progress def get_action_completed_state(action_name): for d in get_setup_progress().actions: if d.action_name == action_name: return d.is_completed def update_action_completed_state(action_name): action_table_doc = [d for d in get_setup_progress().actions if d.action_name == action_name][0] update_action(action_table_doc) def update_action(doc): doctype = doc.action_doctype docname = doc.action_document field = doc.action_field if not doc.is_completed: if doc.min_doc_count: if frappe.db.count(doctype) >= doc.min_doc_count: doc.is_completed = 1 doc.save() if docname and field: d = frappe.get_doc(doctype, docname) if d.get(field): doc.is_completed = 1 doc.save() def update_domain_actions(domain): for d in get_setup_progress().actions: domains = json.loads(d.domains) if domains == [] or domain in domains: update_action(d) def get_domain_actions_state(domain): state = {} for d in get_setup_progress().actions: domains = json.loads(d.domains) if domains == [] or domain in domains: state[d.action_name] = d.is_completed return state @frappe.whitelist() def set_action_completed_state(action_name): action_table_doc = [d for d in get_setup_progress().actions if d.action_name == action_name][0] action_table_doc.is_completed = 1 action_table_doc.save()
decvalts/iris
lib/iris/experimental/concatenate.py
Python
gpl-3.0
1,615
0
# (C) British Crown Copyright 2013 - 2015, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ Automatic concatenation of multiple cubes over one or more existing dimensions. .. warning:: This functionality has now been moved to :meth:`iris.cube.CubeList.concatenate`. """
from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa def concatenate(cubes): """ Concatenate the provided cubes over common existing dimensions. .. warning:: This function is now **disabled**. The functionality has been moved to :meth:`iris.cube.CubeList.concatenate`. """ raise Exception( 'The function "iris.experimental.concatenate.concatenate" has been ' 'moved, and is now a CubeList instance method.' '\nPlease replace ' '"iris.experimental.concatenate.concatenate(<cubes>)" with ' '"iris.cube.CubeList(<cubes>).concatenate()".')
django-bft/django-bft
bft/utils/json_utils.py
Python
gpl-3.0
765
0.035294
from django.conf import settings from django.http import HttpResponseServerError, HttpResponse from django.utils import simplejson class JsonResponse(HttpResponse): def __init__(self, data, status=200): HttpResponse.__init__(self, content=simplejson.dumps(data),
content_type='application/json; charset=UTF-8', status=status ) class AJAXExceptionResponse: def process_exception(self, request, exception): if settings.DEBUG: if request.is_ajax(): import sys, traceback (exc_type, exc_info, tb) = sys.exc_info()
response = "%s\n" % exc_type.__name__ response += "%s\n\n" % exc_info response += "TRACEBACK:\n" for tb in traceback.format_tb(tb): response += "%s\n" % tb return HttpResponseServerError(response)
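A sketch of the helper above in use (the view is hypothetical; note that django.utils.simplejson only exists in old Django versions, so this assumes such an environment):

def status_view(request):
    # Any JSON-serializable payload works; status defaults to 200.
    return JsonResponse({'ok': True, 'detail': 'service up'})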
abrosen/numerical-analysis
numhw3.py
Python
lgpl-3.0
910
0.023077
# Andrew Rosen # performs Romberg Integration # Usage: python numhw3.py from math import * def extrapolate(f,a,b, depth): R = [] for i in range(0, depth): R.append([None]*(depth)) R[0][0] = ((b-a)*.5)*(f(a)+ f(b)) for n in range(1, depth): h_n = (b-a)/(2.0**(n)) points = [0] for k in range(1,int(2**(n-1)) +1): point = f(a + (2*k -1)*h_n) points.append(point) R[n][0] = .5*R[n-1][0]+h_n*sum(points) for m in range(1,depth): for n in range(m,depth): #R[n][m] = (1.0/((4.0**m)-1))*(4**m *R[n][m-1] -R[n-1][m-1]) R[n][m] = R[n][m-1] + (1.0/((4.0**m) - 1.0)) * (R[n][m-1] - R[n-1][m-1]) #for row in R: # print row print R[depth-2][depth-2] #try increasing the last argument to increase accuracy extrapolate(lambda x: (1.0/(sqrt(2.0*pi)))*exp(-0.5*x*x) ,-1.0,1.0,5)
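For reference, the recurrences the code above implements are the standard Romberg scheme (notation R_{n,m} mirrors the R[n][m] array):

R_{0,0} = \frac{b-a}{2}\,\bigl(f(a)+f(b)\bigr)
R_{n,0} = \tfrac{1}{2}\,R_{n-1,0} + h_n \sum_{k=1}^{2^{n-1}} f\bigl(a+(2k-1)h_n\bigr), \qquad h_n = \frac{b-a}{2^n}
R_{n,m} = R_{n,m-1} + \frac{R_{n,m-1}-R_{n-1,m-1}}{4^m-1}

The first line is the two-point trapezoid rule, the second halves the step while reusing earlier evaluations, and the third is Richardson extrapolation on the trapezoid estimates.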
plotly/plotly.py
packages/python/plotly/plotly/validators/densitymapbox/colorbar/_ticktext.py
Python
mit
436
0.002294
import _plotly_utils.basevalidators class TicktextValidator(_plotly_utils.basevalidators.DataArrayValidator): def __init__( self, plotly_name="ticktext", parent_name="densitymapbox.colorbar", **kwargs ): super(TicktextValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "colorbars"), **kwargs )
FenixFeather/ksp-music-hack
music_hack.py
Python
gpl-3.0
12,698
0.003229
#!/usr/bin/env python from __future__ import division import krpc import vlc import time import random import yaml import os import socket import math import logging import sys from collections import deque import argparse class Player(object): def __init__(self, path, preload=True): self.instance = vlc.Instance() self.player = self.instance.media_player_new() self.preload = preload self.config = {} self.tracks = self.parse_tracks(path) self.conn = None self.tracks_played = {scene:0 for scene in self.tracks} self.poll_rate = self.config["poll_rate"] self.current_scene = "SpaceCenter" def can_connect(self): address = (self.config["address"], self.config["rpc_port"]) try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.connect(address) s.shutdown(socket.SHUT_RDWR) s.close() return True except Exception as e: logging.debug(e) return False def wait_for_server(self): gamelog = GameLog(self.config["gamelog"], self.config["poll_rate"]) gamelog.wait_for_game_start(self) while True: if self.can_connect() or gamelog.loaded_save(): self.player.stop() logging.info("Save game loaded.") return if gamelog.loaded(): self.player.stop() self.play_next_track("MainMenu") logging.info("Main Menu reached") logging.debug("Game still loading.") time.sleep(self.poll_rate / 10) def connect(self, name="Music Player"): self.conn = krpc.connect(name=name, address=self.config["address"], rpc_port=self.config["rpc_port"], stream_port=self.config["stream_port"]) def get_current_scene(self): try: self.conn.space_center.active_vessel return "Flight", True except (OSError, socket.error, KeyboardInterrupt): print("Lost connection.") return None, True except krpc.error.RPCError as e: try: scene = str(e)[str(e).index("'") + 1:str(e).rindex("'")] return scene, self.current_scene != scene except: return "SpaceCenter", self.current_scene != scene def play(self): while True: try: self.current_scene, changed = self.get_current_scene() if not self.current_scene: return if self.current_scene == "Flight": self.play_flight_music() else: self.play_scene_music(changed) time.sleep(self.poll_rate) except (OSError, socket.error, KeyboardInterrupt): print("Connection lost.") return def select_track(self, scene): """Handle avoiding repetition of tracks and empty playlists.""" try: total_tracks = len(self.tracks[scene]) except KeyError: return None if not total_tracks: return None if self.tracks_played[scene] == total_tracks: last = self.tracks[scene][-1] self.tracks[scene] = random.sample(self.tracks[scene][:-1], total_tracks - 1) self.tracks[scene].append(last) self.tracks_played[scene] = 0 result = self.tracks[scene][self.tracks_played[scene]] self.tracks_played[scene] += 1 if not self.preload: result = self.load_track(result) return result def play_next_track(self, scene): while True: next_track = self.select_track(scene) if not next_track: return if self.play_track(next_track): return def play_scene_music(self, changed): if changed: self.player.stop()
if not self.player.is_playing(): self.play_next_track(self.current_scene) def play_flight_music(self): self.player.stop() while True: self.current_scene, changed = self.get_current_scene() if self.current_scene != "Flight": return vessel = self.conn.space_center.active_vessel current_body = vessel.orbit.body # We're going to switch away from polling here to # avoid unnecessary requests. We need to keep an eye # out for the transitioning outside of the atmosphere try: with self.conn.stream(getattr, vessel.flight(), "mean_altitude") as altitude: while altitude() < current_body.atmosphere_depth: current_body = vessel.orbit.body if self.player.is_playing(): self.player.stop() while altitude() >= current_body.atmosphere_depth: current_body = vessel.orbit.body if not self.player.is_playing(): self.play_next_track("Space") if vessel.parts.controlling.docking_port and self.tracks["Docking"]: self.player.stop() self.play_next_track("Docking") while vessel.parts.controlling.docking_port: if not self.player.is_playing(): self.play_next_track("Docking") time.sleep(self.poll_rate * 0.25) self.fade_out(1.5) if self.conn.space_center.target_vessel and self.tracks["Rendezvous"]: distance = math.sqrt(sum([i**2 for i in (self.conn.space_center.target_vessel.position(self.conn.space_center.active_vessel.reference_frame))])) rendezvous_distance = self.config["rendezvous_distance"] if distance < rendezvous_distance: self.fade_out(1.5) self.play_next_track("Rendezvous") try: with self.conn.stream(vessel.position, self.conn.space_center.target_vessel.reference_frame) as position: while math.sqrt(sum([i**2 for i in position()])) < rendezvous_distance: if not self.player.is_playing(): self.play_next_track("Rendezvous") if not self.conn.space_center.target_vessel: break except AttributeError: continue finally: self.fade_out(1.5) except krpc.error.RPCError: continue def play_track(self, track): self.player.set_media(track) if self.player.play() == -1: logging.warning("Couldn't play a file. Skipping.") return False logging.info("Playing {}.".format(track.get_mrl())) time.sleep(self.poll_rate) return True def fade_out(self, seconds): starting_volume = self.player.audio_get_volume() sleep_increment = seconds / starting_volume for i in range(starting_volume): self.player.audio_set_volume(max(int(starting_volume - i), 1)) time.sleep(sleep_increment) self.player.pause() self.player.audio_set_volume(int(starting_volume)) self.player.stop() def load_track(self, path): if path[0:4] != "http": return self
DeveloperJose/Vision-Rat-Brain
feature_matching_v1/scripts_batch_processing/batch_warp.py
Python
mit
2,630
0.009132
# -*- coding: utf-8 -*- import numpy as np import csv import feature import config def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'): """ Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int)
prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) length - Optional : character length of bar (Int) fill - Optional : bar fill character (Str) """ percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r') # Print New Line on Complete if iteration == total: print() filename = 'atlas_swanson_regions/Level-34-Region.jpg' output_filename = 'results/Level-34-range2' points = 30 disp_range = range(30, 50) nissl_range = range(1, config.NISSL_COUNT + 1) print("***** Beginning batch processing") im = feature.im_read(filename) csv_filename = output_filename + "-" + str(points) + "pts.csv" with open(csv_filename, 'w') as csvfile: fieldnames = ['warp_points', 'warp_disp', 'plate', 'matches', 'inliers'] writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=',', lineterminator='\n') writer.writeheader() for disp in disp_range: # progress index within the displacement range printProgressBar(disp - disp_range[0] + 1, len(disp_range), prefix='Disp: ') # Warp image im_warp = feature.warp(im, points, None, None, disp, None) print("[-- Warped. Doing Nissl comparisons now]") # Matching best_inliers = -1 best_match = None best_level = -1 for nissl_level in nissl_range: match = feature.match(im_warp, nissl_level) printProgressBar(nissl_level, len(nissl_range), prefix='Nissl Matching: ') if match is None: continue if match.inlier_count > best_inliers: best_inliers = match.inlier_count best_match = match best_level = nissl_level print ("** [Completed] Inliers: ", best_inliers, "\n\n") writer.writerow({'warp_points': points, 'warp_disp': disp, 'plate': best_level, 'matches': len(best_match.matches), 'inliers': best_inliers }) csvfile.flush()
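A minimal driver for the progress bar helper above (purely illustrative values):

import time
for i in range(1, 11):
    # iteration counts up to total; the bar redraws in place via '\r'.
    printProgressBar(i, 10, prefix='Demo:', suffix='done', length=30)
    time.sleep(0.05)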
toumorokoshi/transmute-core
transmute_core/tests/contenttype_serializers/test_default_serializer_set.py
Python
mit
1,393
0.000718
import pytest from transmute_core import NoSerializerFound, SerializationException
def test_default_serializer_json(serializer_set): frm, expected_to = {"foo": "bar"}, b'{"foo": "bar"}' serializer = serializer_set["application/json"] assert serializer.dump(frm) == expected_to assert serializer.load(expected_to) == frm def test_default_serializer_yaml(serializer_set): frm, expected_to = {"foo": "bar"}, b"foo: bar\n" serializer = serializer_set["application/yaml"] assert serializer.dump(frm) == expected_to assert serializer.load(expected_to) == frm def test_default_serializer_prop(serializer_set): assert serializer_set.default.main_type == "application/json" def test_no_serializer_found_raises_exception(serializer_set): with pytest.raises(NoSerializerFound): assert serializer_set["oogabooga"] @pytest.mark.parametrize( "content_type,bad_input", [("application/yaml", b"[a, !eafia']atedntad}"), ("application/json", b'{"ooga')], ) def test_bad_object_raises_serialization_exception( serializer_set, content_type, bad_input ): """a bad object serialization should raise a serialization exception""" with pytest.raises(SerializationException): serializer_set[content_type].load(bad_input) def test_keys(serializer_set): assert serializer_set.keys() == ["application/json", "application/x-yaml"]
brextonpham/python-Ultron
python-vlc-master/generator/__init__.py
Python
mit
20
0
# Generator package
petrhosek/rubber
rubber/converters/latex.py
Python
gpl-2.0
42,452
0.035028
# This file is part of Rubber and thus covered by the GPL # (c) Emmanuel Beffara, 2002--2006 """ LaTeX document building system for Rubber. This module contains all the code in Rubber that actually does the job of building a LaTeX document from start to finish. """ import os, os.path, sys, imp import re import string from rubber import _ from rubber.util import * from rubber.depend import Node from rubber.version import moddir import rubber.latex_modules from rubber.tex import Parser, EOF, OPEN, SPACE, END_LINE #---- Module handler ----{{{1 class Modules: """ This class gathers all operations related to the management of modules. The modules are searched for first in the current directory, then as scripts in the 'modules' directory in the program's data directort, then as a Python module in the package `rubber.latex'. """ def __init__ (self, env): self.env = env self.objects = {} self.commands = {} def __getitem__ (self, name): """ Return the module object of the given name. """ return self.objects[name] def has_key (self, name): """ Check if a given module is loaded. """ return self.objects.has_key(name) def register (self, name, dict={}): """ Attempt to register a module with the specified name. If the module is already loaded, do nothing. If it is found and not yet loaded, then load it, initialise it (using the context passed as optional argument) and run any delayed commands for it. """ if self.has_key(name): msg.debug(_("module %s already registered") % name, pkg='latex') return 2 # First look for a script mod = None for path in "", os.path.join(moddir, "modules"): file = os.path.join(path, name + ".rub") if os.path.exists(file): mod = ScriptModule(self.env, file) msg.log(_("script module %s registered") % name, pkg='latex') break # Then look for a Python module if not mod: try: file, path, descr = imp.find_module(name, rubber.latex_modules.__path__) pymodule = imp.load_module(name, file, path, descr) file.close() mod = PyModule(self.env, pymodule, dict) msg.log(_("built-in module %s registered") % name, pkg='latex') except ImportError: msg.debug(_("no support found for %s") % name, pkg='latex') return 0 # Run any delayed commands. if self.commands.has_key(name): for (cmd, args, vars) in self.commands[name]: msg.push_pos(vars) try: # put the variables as they were when the directive was # found saved_vars = self.env.vars self.env.vars = vars try: # call the command mod.command(cmd, args) finally: # restore the variables to their current state self.env.vars = saved_vars except AttributeError: msg.warn(_("unknown directive '%s.%s'") % (name, cmd)) except TypeError: msg.warn(_("wrong syntax for '%s.%s'") % (name, cmd)) msg.pop_pos() del self.commands[name] self.objects[name] = mod return 1 def command (self, mod, cmd, args): """ Send a command to a particular module. If this module is not loaded, store the command so that it will be sent when the module is register. 
""" if self.objects.has_key(mod): self.objects[mod].command(cmd, args) else: if not self.commands.has_key(mod): self.commands[mod] = [] self.commands[mod].append((cmd, args, self.env.vars)) #---- Log parser ----{{{1 re_loghead = re.compile("This is [0-9a-zA-Z-]*") re_rerun = re.compile("LaTeX Warning:.*Rerun") re_file = re.compile("(\\((?P<file>[^ \n\t(){}]*)|\\))") re_badbox = re.compile(r"(Ov|Und)erfull \\[hv]box ") re_line = re.compile(r"(l\.(?P<line>[0-9]+)( (?P<code>.*))?$|<\*>)") re_cseq = re.compile(r".*(?P<seq>(\\|\.\.\.)[^ ]*) ?$") re_macro = re.compile(r"^(?P<macro>\\.*) ->") re_page = re.compile("\[(?P<num>[0-9]+)\]") re_atline = re.compile( "( detected| in paragraph)? at lines? (?P<line>[0-9]*)(--(?P<last>[0-9]*))?") re_reference = re.compile("LaTeX Warning: Reference `(?P<ref>.*)' \ on page (?P<page>[0-9]*) undefined on input line (?P<line>[0-9]*)\\.$") re_label = re.compile("LaTeX Warning: (?P<text>Label .*)$") re_warning = re.compile( "(LaTeX|Package)( (?P<pkg>.*))? Warning: (?P<text>.*)$") re_online = re.compile("(; reported)? on input line (?P<line>[0-9]*)") re_ignored = re.compile("; all text was ignored after line (?P<line>[0-9]*).$") class LogCheck (object): """ This cl
ass performs all the extraction of information from the log file. For efficiency, the instances contain the whole file as a list of strings so that it can be read several times with no disk access. """ #-- Initialization {{{2 def __init__ (self): self.lines = None def read (self, name): """ Read the specified log file, checking that it was produce
d by the right compiler. Returns true if the log file is invalid or does not exist. """ self.lines = None try: file = open(name) except IOError: return 2 line = file.readline() if not line: file.close() return 1 if not re_loghead.match(line): file.close() return 1 self.lines = file.readlines() file.close() return 0 #-- Process information {{{2 def errors (self): """ Returns true if there was an error during the compilation. """ skipping = 0 for line in self.lines: if line.strip() == "": skipping = 0 continue if skipping: continue m = re_badbox.match(line) if m: skipping = 1 continue if line[0] == "!": # We check for the substring "pdfTeX warning" because pdfTeX # sometimes issues warnings (like undefined references) in the # form of errors... if string.find(line, "pdfTeX warning") == -1: return 1 return 0 def run_needed (self): """ Returns true if LaTeX indicated that another compilation is needed. """ for line in self.lines: if re_rerun.match(line): return 1 return 0 #-- Information extraction {{{2 def continued (self, line): """ Check if a line in the log is continued on the next line. This is needed because TeX breaks messages at 79 characters per line. We make this into a method because the test is slightly different in Metapost. """ return len(line) == 79 def parse (self, errors=0, boxes=0, refs=0, warnings=0): """ Parse the log file for relevant information. The named arguments are booleans that indicate which information should be extracted: - errors: all errors - boxes: bad boxes - refs: warnings about references - warnings: all other warnings The function returns a generator. Each generated item is a dictionary that contains (some of) the following entries: - kind: the kind of information ("error", "box", "ref", "warning") - text: the text of the error or warning - code: the piece of code that caused an error - file, line, last, pkg: as used by Message.format_pos. """ if not self.lines: return last_file = None pos = [last_file] page = 1 parsing = 0 # 1 if we are parsing an error's text skipping = 0 # 1 if we are skipping text until an empty line something = 0 # 1 if some error was found prefix = None # the prefix for warning messages from packages accu = "" # accumulated text from the previous line macro = None # the macro in which the error occurs cseqs = {} # undefined control sequences so far for line in self.lines: line = line[:-1] # remove the line feed # TeX breaks messages at 79 characters, just to make parsing # trickier... if not parsing and self.continued(line): accu += line continue line = accu + line accu = "" # Text that should be skipped (from bad box messages) if prefix is None and line == "": skipping = 0 continue if skipping: continue # Errors (including aborted compilation) if parsing: if error == "Undefined control sequence.": # This is a special case in order to report which control # sequence is undefined. m = re_cseq.match(line) if m: seq = m.group("seq") if cseqs.has_key(seq): error = None else: cseqs[seq] = None error = "Undefined control sequence %s." % m.group("seq") m = re_
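# The Modules class above defers directives aimed at not-yet-loaded modules
# and replays them on registration. A minimal standalone sketch of that
# pattern, with simplified names and none of Rubber's message/variable
# machinery (MiniModules is an invented name for illustration):
class MiniModules:
    def __init__(self):
        self.objects = {}    # name -> module object
        self.commands = {}   # name -> queued (cmd, args) pairs

    def command(self, mod, cmd, args):
        # dispatch immediately if loaded, otherwise queue for later
        if mod in self.objects:
            self.objects[mod].command(cmd, args)
        else:
            self.commands.setdefault(mod, []).append((cmd, args))

    def register(self, name, module):
        if name in self.objects:
            return
        # replay any directives that arrived before the module was loaded
        for cmd, args in self.commands.pop(name, []):
            module.command(cmd, args)
        self.objects[name] = module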
caneruguz/osf.io
scripts/utils.py
Python
apache-2.0
1,318
0.000759
# -*- coding: utf-8 -*- import os import logging import datetime import sys from django.utils import timezone from website import settings def format_now(): return timezone.now().isoformat() def add_file_logger(logger, script_name, suffix=None): _, name = os.path.split(script_name) name = name.rstrip('c') if suffix is not None: name = '{0}-{1}'.format(name, suffix) file_handler = logging.FileHandler( os.path.join( settings.LOG_PATH, '.'.join([name, format_now(), 'log']) ) ) logger.addHandler(file_handler) class Progress(object): def __init__
(self, bar_len=50): self.bar_len = bar_len def start(self, total, prefix): self.total = total self.count = 0 self.prefix = prefix def increment(self, inc=1): self.count += inc filled_len = int(round(self.bar_len * self.count / float(self.total))) percents = round(100.0 * self.count / float(self.total), 1) bar = '=' * filled_len + '-' * (self.bar_len - filled_len
) sys.stdout.flush() sys.stdout.write('{}[{}] {}{} ... {}\r'.format(self.prefix, bar, percents, '%', str(self.total))) def stop(self): # To preserve line, there is probably a better way to do this print('')
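# A hypothetical driver loop for the Progress helper above (the workload
# size and prefix are invented for illustration; assumes this module is
# importable):
progress = Progress(bar_len=40)
progress.start(total=200, prefix='migrate ')
for _ in range(200):
    progress.increment()   # redraws the bar in place via '\r'
progress.stop()            # prints a newline so the bar is preserved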
aisthesis/machinelearning
experiments/neural.py
Python
mit
5,497
0.002001
""" Copyright (c) 2014 Marshall Farrier license http://opensource.org/licenses/MIT @author: Marshall Farrier @contact: marshalldfarrier@gmail.com @since: 2014-11-10 @summary: Neural network Resources: Great performance ideas: http://stackoverflow.com/questions/21106134/numpy-pure-functions-for-performance-caching """ import copy import numpy as np from scipy.special import expit def sigmoid(x): return expit(x) def sigmoid_grad(x): """ Slightly faster than: def sigmoid_grad2(x): ex = np.exp(-x) return ex / (1 + ex)**2 >>> s = ''' ... import numpy as np ... from __main__ import neural ... x = np.random.random((30, 40)) * 10.0 - 5.0''' >>> import timeit >>> timeit.timeit('y = neural.sigmoid_grad(x)', setup=s, number=10000) 0.21567797660827637 >>> timeit.timeit('y = neural.sigmoid_grad2(x)', setup=s, number=10000) 0.23735404014587402 """ sigx = sigmoid(x) return sigx * (1 - sigx) def least_sq_ntwk1(features, labels, initial_wts, **kwargs): """ Return weight matrices for a 3-layer (1 hidden layer) neural network that minimizes the sum of squares of the difference between predictions and labels. For the sake of simplicity, this implementation has only 1 hidden layer and only 1 label for each row, i.e., labels is a n x 1 column vector.. Parameters --- features : ndarray of floats data set including constant in the first column labels : ndarray of floats (1 column) In contrast to traditional neural networks, the labels are not restricted to 2 values for yes and no but can take any real value. initial_wts : ndarray of floats randomly initialized weight matrix which determines the number of hidden units. If `features` is n x m, `initial_wts` must have m rows. The number of columns in `initial_wts` will determine the number of hidden units in the neural network. For example, if `initial_wts` is m x k, then the hidden layer will be of size k + 1 (including its constant feature). maxiter : int, optional Default: 64. Stop after this number of iterations eta : float, optional Default: 0.1. Learning rate. A high value of eta risks overshooting the minimum. A low value will converge very slowly. epsilon : float, optional (not implemented) Default: None. Stop when the improvement in error from one iteration to the next falls below this threshold Return --- A tuple of 2 weight matrices `wts0` and `wts1`. This model will predict in the following way: hidden = np.ones((k + 1, 1)) """ maxiter = kwargs.get('maxiter', 64) eta = kwargs.get('eta', 0.1) wts0 = copy.deepcopy(initial_wts) # get initial hidden layer hidden = np.e
mpty((features.shape[0], wts0.shape[1] + 1)) hidden[:, 0] = 1.0 sigxw = sigmoid(features.dot(wts0)) hidden[:, 1:] = sigxw # get optimal wts1 given wts0 wts1 = np.linalg.pinv(hidden).dot(labels) predicted = hidden.dot(wts1) # backprop """ First version doesn't work. Sample output on
nonlinear function using random features: error after iteration 1: 0.389115314723 error after iteration 2: 0.324511537066 error after iteration 3: 0.728566874908 error after iteration 4: 0.174713101869 error after iteration 5: 0.47208655752 error after iteration 6: 0.779610384956 ... error after iteration 60: 1.26413065775 error after iteration 61: 1.26394322385 error after iteration 62: 1.26373950808 error after iteration 63: 1.26351975815 error after iteration 64: 1.26328420549 New version also doesn't work. Sample output on nonlinear function using random features: error after iteration 1: 2.1928215227 ... error after iteration 16: 2.1928215227 Usually similar, but another case: error after iteration 1: 0.108232998162 error after iteration 2: 1.51657739873 ... error after iteration 16: 1.51657739873 """ scalar_mult = 2.0 * eta / features.shape[0] for i in range(maxiter): # update wt0 wts0 -= scalar_mult * features.transpose().dot(sigxw * \ (1 - sigxw) * (np.outer(sigxw.dot(wts1[1:]) - labels, wts1[1:]))) # update hidden layer sigxw = sigmoid(features.dot(wts0)) hidden[:, 1:] = sigxw # update wts1 wts1 = np.linalg.pinv(hidden).dot(labels) # update predictions predicted = hidden.dot(wts1) print "error after iteration {0}: {1}".format(i + 1, mean_sq_error(predicted, labels)) return wts0, wts1 def least_sq_predict(features, wts0, wts1, **kwargs): """ Parameters --- hidden : ndarray of floats float array of the proper size for the hidden layer. Using this parameter will lead to improved performance because creating the hidden layer won't require reallocation of memory. Note that the values of the matrix passed in for this parameter will be overwritten when this function is called. """ if 'hidden' in kwargs: hidden = kwargs['hidden'] else: hidden = np.ones((features.shape[0], wts0.shape[1] + 1)) hidden[:, 1:] = sigmoid(features.dot(wts0)) return hidden.dot(wts1) def mean_sq_error(predicted, actual): diff = predicted - actual return np.mean(diff * diff, axis=0)
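# The sigmoid_grad implementation above relies on the identity
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)). A quick numerical check
# of that identity against a central finite difference:
import numpy as np
from scipy.special import expit

x = np.linspace(-5.0, 5.0, 11)
h = 1e-6
analytic = expit(x) * (1 - expit(x))
numeric = (expit(x + h) - expit(x - h)) / (2 * h)
assert np.allclose(analytic, numeric, atol=1e-8)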
zhumingliang1209/Ardupilot
ardupilot/modules/waf/waflib/Build.py
Python
gpl-3.0
37,831
0.031852
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2010 (ita) """ Classes related to the build phase (build, clean, install, step, etc) The inheritance tree is the following: """ import os, sys, errno, re, shutil, stat try: import cPickle except ImportError: import pickle as cPickle from waflib import Runner, TaskGen, Utils, ConfigSet, Task, Logs, Options, Context, Errors import waflib.Node CACHE_DIR = 'c4che' """Location of the cache files""" CACHE_SUFFIX = '_cache.
py' """Suffix for the cache files""" INSTALL = 1337 """Positive value '->' install, see :py:attr:`waflib.Build.BuildContext.is_install`""" UNINSTALL = -1337 """Negative value '<-' uninstall, see :py:attr:`waflib.Build.BuildContext.is_install`""" SAVED_ATTRS = 'root node_deps raw_deps task_sigs'.split() """Build class members
to save between the runs (root, node_deps, raw_deps, task_sigs)""" CFG_FILES = 'cfg_files' """Files from the build directory to hash before starting the build (``config.h`` written during the configuration)""" POST_AT_ONCE = 0 """Post mode: all task generators are posted before the build really starts""" POST_LAZY = 1 """Post mode: post the task generators group after group""" POST_BOTH = 2 """Post mode: post the task generators at once, then re-check them for each group""" PROTOCOL = -1 if sys.platform == 'cli': PROTOCOL = 0 class BuildContext(Context.Context): '''executes the build''' cmd = 'build' variant = '' def __init__(self, **kw): super(BuildContext, self).__init__(**kw) self.is_install = 0 """Non-zero value when installing or uninstalling file""" self.top_dir = kw.get('top_dir', Context.top_dir) self.run_dir = kw.get('run_dir', Context.run_dir) self.post_mode = POST_AT_ONCE """post the task generators at once, group-by-group, or both""" # output directory - may be set until the nodes are considered self.out_dir = kw.get('out_dir', Context.out_dir) self.cache_dir = kw.get('cache_dir', None) if not self.cache_dir: self.cache_dir = os.path.join(self.out_dir, CACHE_DIR) # map names to environments, the '' must be defined self.all_envs = {} # ======================================= # # cache variables self.task_sigs = {} """Signatures of the tasks (persists between build executions)""" self.node_deps = {} """Dict of node dependencies found by :py:meth:`waflib.Task.Task.scan` (persists between build executions)""" self.raw_deps = {} """Dict of custom data returned by :py:meth:`waflib.Task.Task.scan` (persists between build executions)""" # list of folders that are already scanned # so that we do not need to stat them one more time self.cache_dir_contents = {} self.task_gen_cache_names = {} self.launch_dir = Context.launch_dir self.jobs = Options.options.jobs self.targets = Options.options.targets self.keep = Options.options.keep self.progress_bar = Options.options.progress_bar ############ stuff below has not been reviewed # Manual dependencies. self.deps_man = Utils.defaultdict(list) """Manual dependencies set by :py:meth:`waflib.Build.BuildContext.add_manual_dependency`""" # just the structure here self.current_group = 0 """ Current build group """ self.groups = [] """ List containing lists of task generators """ self.group_names = {} """ Map group names to the group lists. See :py:meth:`waflib.Build.BuildContext.add_group` """ def get_variant_dir(self): """Getter for the variant_dir attribute""" if not self.variant: return self.out_dir return os.path.join(self.out_dir, self.variant) variant_dir = property(get_variant_dir, None) def __call__(self, *k, **kw): """ Create a task generator and add it to the current build group. The following forms are equivalent:: def build(bld): tg = bld(a=1, b=2) def build(bld): tg = bld() tg.a = 1 tg.b = 2 def build(bld): tg = TaskGen.task_gen(a=1, b=2) bld.add_to_group(tg, None) :param group: group name to add the task generator to :type group: string """ kw['bld'] = self ret = TaskGen.task_gen(*k, **kw) self.task_gen_cache_names = {} # reset the cache, each time self.add_to_group(ret, group=kw.get('group', None)) return ret def rule(self, *k, **kw): """ Wrapper for creating a task generator using the decorator notation. 
The following code:: @bld.rule( target = "foo" ) def _(tsk): print("bar") is equivalent to:: def bar(tsk): print("bar") bld( target = "foo", rule = bar, ) """ def f(rule): ret = self(*k, **kw) ret.rule = rule return ret return f def __copy__(self): """Implemented to prevents copies of build contexts (raises an exception)""" raise Errors.WafError('build contexts are not supposed to be copied') def install_files(self, *k, **kw): """Actual implementation provided by :py:meth:`waflib.Build.InstallContext.install_files`""" pass def install_as(self, *k, **kw): """Actual implementation provided by :py:meth:`waflib.Build.InstallContext.install_as`""" pass def symlink_as(self, *k, **kw): """Actual implementation provided by :py:meth:`waflib.Build.InstallContext.symlink_as`""" pass def load_envs(self): """ The configuration command creates files of the form ``build/c4che/NAMEcache.py``. This method creates a :py:class:`waflib.ConfigSet.ConfigSet` instance for each ``NAME`` by reading those files. The config sets are then stored in the dict :py:attr:`waflib.Build.BuildContext.allenvs`. """ node = self.root.find_node(self.cache_dir) if not node: raise Errors.WafError('The project was not configured: run "waf configure" first!') lst = node.ant_glob('**/*%s' % CACHE_SUFFIX, quiet=True) if not lst: raise Errors.WafError('The cache directory is empty: reconfigure the project') for x in lst: name = x.path_from(node).replace(CACHE_SUFFIX, '').replace('\\', '/') env = ConfigSet.ConfigSet(x.abspath()) self.all_envs[name] = env for f in env[CFG_FILES]: newnode = self.root.find_resource(f) try: h = Utils.h_file(newnode.abspath()) except (IOError, AttributeError): Logs.error('cannot find %r' % f) h = Utils.SIG_NIL newnode.sig = h def init_dirs(self): """ Initialize the project directory and the build directory by creating the nodes :py:attr:`waflib.Build.BuildContext.srcnode` and :py:attr:`waflib.Build.BuildContext.bldnode` corresponding to ``top_dir`` and ``variant_dir`` respectively. The ``bldnode`` directory will be created if it does not exist. """ if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)): raise Errors.WafError('The project was not configured: run "waf configure" first!') self.path = self.srcnode = self.root.find_dir(self.top_dir) self.bldnode = self.root.make_node(self.variant_dir) self.bldnode.mkdir() def execute(self): """ Restore the data from previous builds and call :py:meth:`waflib.Build.BuildContext.execute_build`. 
Overrides from :py:func:`waflib.Context.Context.execute` """ self.restore() if not self.all_envs: self.load_envs() self.execute_build() def execute_build(self): """ Execute the build by: * reading the scripts (see :py:meth:`waflib.Context.Context.recurse`) * calling :py:meth:`waflib.Build.BuildContext.pre_build` to call user build functions * calling :py:meth:`waflib.Build.BuildContext.compile` to process the tasks * calling :py:meth:`waflib.Build.BuildContext.post_build` to call user build functions """ Logs.info("Waf: Entering directory `%s'" % self.variant_dir) self.recurse([self.run_dir]) self.pre_build() # display the time elapsed in the progress bar self.timer = Utils.Timer() try: self.compile() finally: if self.progress_bar == 1 and sys.stderr.isatty(): c = len(self.returned_tasks) or 1 m = self.progress_line(c, c, Logs.colors.BLUE, Logs.colors.NORMAL) Logs.info(m, extra={'stream': sys.stderr, 'c1': Logs.colors.cursor_off, 'c2' : Logs.colors.cursor_on}) Logs.info("Waf: Leaving directory `%s'" % self.variant_dir) self.post_build() def restore(self): """ Load the data from a
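# A minimal wscript sketch exercising the BuildContext API documented above;
# waf itself supplies 'bld', and the rule and file names here are
# placeholders, not part of the original source:
def build(bld):
    # task generator via __call__, as in the docstring's equivalent forms
    bld(rule='cp ${SRC} ${TGT}', source='input.txt', target='output.txt')

    # task generator via the @bld.rule decorator notation shown above
    @bld.rule(target='foo')
    def _(tsk):
        print('bar')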
miroag/mfs
tests/conftest.py
Python
mit
516
0.001938
import os import pytest @pyt
est.fixture(scope='session')
def testdata():
    """
    Simple fixture to return reference data.

    :return: a TestData helper exposing paths and contents of files under
        the tests/data directory
    """
    class TestData():
        def __init__(self):
            self.datadir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')

        def fn(self, fn):
            return os.path.join(self.datadir, fn)

        def textdata(self, fn):
            with open(self.fn(fn), encoding='utf8') as f:
return f.read() return TestData()
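# A hypothetical test consuming the session-scoped 'testdata' fixture above;
# 'expected.txt' is an assumed file under tests/data:
import os

def test_reference_data(testdata):
    path = testdata.fn('expected.txt')
    assert path.endswith(os.path.join('data', 'expected.txt'))
    text = testdata.textdata('expected.txt')  # reads tests/data/expected.txt as UTF-8
    assert isinstance(text, str)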
iychoi/syndicate-core
ms/storage/storagetypes.py
Python
apache-2.0
25,103
0.032785
""" Copyright 2013 The Trustees of Princeton University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import random import types import errno import time import datetime import logging import backends as backend from Crypto.Hash import SHA256 as HashAlg from Crypto.PublicKey import RSA as CryptoKey from Crypto import Random from Crypto.Signature import PKCS1_PSS as CryptoSigner import traceback SHARD_KEY_TEMPLATE = 'shard-{}-{:d}' # aliases for types Model = backend.Model Integer = backend.Integer Float = backend.Float String = backend.String Text = backend.Text Blob = backend.Blob Key = backend.Key Boolean = backend.Boolean Json = backend.Json Blob = backend.Blob Cursor = backend.Cursor # NOTE: need to have a constructor that takes urlsafe= as an argument for deserialization, and needs a urlsafe() method for serialization Computed = backend.Computed Pickled = backend.Pickled # aliases for keys make_key = backend.make_key # aliases for asynchronous operations FutureWrapper = backend.FutureWrapper FutureQueryWrapper = backend.FutureQueryWrapper wait_futures = backend.wait_futures deferred = backend.deferred concurrent = backend.concurrent concurrent_return = backend.concurrent_return get_multi_async = backend.get_multi_async put_multi_async = backend.put_multi_async # synchronous operations get_multi = backend.get_multi put_multi = backend.put_multi delete_multi = backend.delete_multi # aliases for memcache memcache = backend.memcache # aliases for transaction transaction = backend.transaction transaction_async = backend.transaction_async transactional = backend.transactional # alises for query predicates opAND = backend.opAND opOR = backend.opOR # toplevel decorator toplevel = backend.toplevel # aliases for common exceptions RequestDeadlineEx
ceededError = backend.RequestDeadlineExceededError APIRequestDeadlineExceededError = backend.APIRequestDeadlineExceededError URLRequestDeadlineExceededError = backend.URLRequestDeadlineExceededError TransactionFailedError = backend.TransactionFailedError def clock_gettime(): now = time.time() now_sec = int(now) now_nsec = int((now - now_sec) * 1e9) return (now_sec, now_nsec) ''' now = datetime.datetime.utcnow() nowtt
= now.timetuple()
   now_sec = int(time.mktime( nowtt ))
   now_nsec = int(now.microsecond * 1e3)
   return (now_sec, now_nsec)
   '''

def get_time():
    now_sec, now_nsec = clock_gettime()
    return float(now_sec) + float(now_nsec) / 1e9


class Object( Model ):

    # list of names of required attributes
    required_attrs = []

    # list of names of attributes that will be used to generate a primary key
    key_attrs = []

    # list of names of attributes that can be read, period
    read_attrs = []

    # list of names of attributes that can be read, but only with the object's API key
    read_attrs_api_required = []

    # list of names of attributes that can be read, but only by the administrator
    read_attrs_admin_required = []

    # list of names of attributes that can be written, period
    write_attrs = []

    # list of names of attributes that can be written, but only with the object's API key
    write_attrs_api_required = []

    # list of names of attributes that can be written, but only by the administrator
    write_attrs_admin_required = []

    # dict of functions that generate default values
    # attribute name => lambda object_class, attribute_dict => default_value
    default_values = {}

    # dict of functions that validate fields
    # attribute name => lambda object_class, attribute_value => true/false
    validators = {}

    # class of an Object that contains sharded data
    shard_class = None

    # fields in this Object stored on a shard.
    shard_fields = []

    # dict of functions that read sharded fields
    # sharded attribute name => lambda instance, shard_objects => attribute_value
    shard_readers = {}

    # dict of functions that write shard fields
    # sharded attribute name => lambda instance => attribute value
    shard_writers = {}

    # instance of a shard that will be populated and written
    write_shard = None

    # for RPC
    key_type = None

    @classmethod
    def shard_key_name( cls, name, idx ):
        """
        Generate the name for a shard, given its base name and index.
        """
        return SHARD_KEY_TEMPLATE.format( name, idx )

    @classmethod
    def get_shard_key( cls, name, idx ):
        key_str = cls.shard_key_name( name, idx )
        return make_key( cls.shard_class, key_str )

    @classmethod
    def get_shard_keys(cls, num_shards, key_name ):
        """
        Get keys for all shards, given the number of shards
        and the base key name.
        """
        shard_key_strings = [cls.shard_key_name( key_name, index ) for index in range(num_shards)]
        return [make_key(cls.shard_class, shard_key_string) for shard_key_string in shard_key_strings]


    def populate_from_shards(self, shards):
        """
        Populate the base object using a list of shards.
        This will use the methods to fill the fields
        indicated by the base instance's shard_readers dict.

        This method throws an exception when passed a list of Nones.
        """
        if shards == None or len(shards) == 0:
            return

        shards_existing = filter( lambda x: x is not None, shards )
        if len(shards_existing) == 0:
            raise Exception("No valid shards for %s" % self)

        # populate an instance with value from shards
        for (shard_field, shard_reader) in self.shard_readers.items():
            val = shard_reader( self, shards_existing )
            setattr( self, shard_field, val )


    def populate_base(self, **attrs):
        """
        Populate the base instance of an object.
        Specifically, populate fields in the object that are NOT in the shard_fields list.
""" base_attrs = {} for (key, value) in attrs.items(): if key not in self.shard_fields: base_attrs[key] = value super( Object, self ).populate( **base_attrs ) for (key, value) in attrs.items(): if key not in self._properties.keys(): setattr( self, key, value ) @classmethod def get_shard_attrs( cls, inst, **attrs ): """ Generate and return a dict of shard attributes and values, given an **attrs dictionary. The resulting dictionary will contain a key,value pair for each shard field, indicated by the base object instance's shard_fields list. The key,value pairings will be taken first from **attrs. If a key does not have a value, it will be populated from the base object instance's shard_writers dictionary. """ shard_attrs = {} for (shard_field, shard_value) in attrs.items(): if shard_field in cls.shard_fields: shard_attrs[shard_field] = shard_value for (shard_field, shard_writer) in cls.shard_writers.items(): if shard_attrs.get( shard_field, None ) == None: shard_attrs[shard_field] = shard_writer( inst ) return shard_attrs @classmethod def populate_shard_inst(cls, inst, shard_inst, **attrs): """ Populate an instance of a shard, given an instance of the base object and an instance of its associated shard class, with the given set of attributes. Required attributes (from the base object's shard_fields list) that are not present in **attrs will be generated using the indicated method in the base object's shard_writers dictionary. """ shard_attrs = cls.get
unicefuganda/edtrac
edtrac_project/rapidsms_edtrac/education/utils.py
Python
bsd-3-clause
12,792
0.00641
from dateutil.relativedelta import relativedelta from script.models import Script, ScriptProgress from rapidsms.models import Connection import datetime from rapidsms.models import Contact from rapidsms.contrib.locations.models import Location from poll.models impo
rt Poll from script.models im
port ScriptStep
from django.db.models import Count
from django.conf import settings
from education.scheduling import schedule_at, at

def is_holiday(date1, holidays = getattr(settings, 'SCHOOL_HOLIDAYS', [])):
    for date_start, date_end in holidays:
        if isinstance(date_end, str):
            if date1.date() == date_start.date():
                return True
        elif date1.date() >= date_start.date() and date1.date() <= date_end.date():
            return True
    return False

def is_empty(arg):
    """
    Generalizes 'empty' checks on Strings, sequences, and dicts.
    Returns 'True' for None, empty strings, strings with just white-space,
    and sequences with len == 0
    """
    if arg is None:
        return True

    if isinstance(arg, basestring):
        arg = arg.strip()

    try:
        if not len(arg):
            return True
    except TypeError:
        # wasn't a sequence
        pass

    return False


def previous_calendar_week(t=None):
    """
    For education monitoring, a week runs between Thursdays;
    Thursday marks the beginning of a new week of data submission.
    Data for a new week is accepted until Wednesday evening of the following week.
    """
    d = t or datetime.datetime.now()
    if not d.weekday() == 3:
        # last Thursday == next Thursday minus 7 days.
        last_thursday = d + (datetime.timedelta((3 - d.weekday()) % 7) - (datetime.timedelta(days=7)))
    else:
        last_thursday = d
    end_date = last_thursday + datetime.timedelta(days=6)
    return (last_thursday.date(), end_date)


def _this_thursday(sp=None, get_time=datetime.datetime.now, time_set=None, holidays=getattr(settings, 'SCHOOL_HOLIDAYS', [])):
    """
    This Thursday of the week which is not a school holiday.
    """
    schedule = time_set or get_time()
    d = sp.time if sp else schedule
    d = d + datetime.timedelta((3 - d.weekday()) % 7)
    while(is_holiday(d, holidays)):
        d = d + datetime.timedelta(1)  # try next day
    return at(d.date(), 10)

def get_polls(**kwargs):
    script_polls = ScriptStep.objects.values_list('poll', flat=True).exclude(poll=None)
    return Poll.objects.exclude(pk__in=script_polls).annotate(Count('responses'))

def compute_average_percentage(list_of_percentages):
    """
    Average percentage -> this is also a handy tool to compute averages
    generally, while sanitizing non-numeric input.
    """
    sanitize = []
    try:
        for i in list_of_percentages:
            if isinstance(float(i), float):
                sanitize.append(float(i))
            else:
                pass
    except ValueError:
        print "non-numeric characters used"
        pass
    if len(sanitize) <= 0:
        return 0
    return sum(sanitize) / float(len(sanitize))

def list_poll_responses(poll, **kwargs):
    """
    Pass a poll queryset and you get yourself a dict with locations vs
    responses (quite handy for the charts).
    Dependencies: Contact and Location must be in your module; this lists
    all Poll responses by district.
    """
    #forceful import
    from poll.models import Poll
    to_ret = {}

    """
    narrowed down to 3 districts (and up to 14 districts)
    """
    DISTRICT = ['Kaabong', 'Kabarole', 'Kyegegwa', 'Kotido']
    if not kwargs:
        # if no other arguments are provided
        for location in Location.objects.filter(name__in=DISTRICT):
            to_ret[location.__unicode__()] = compute_average_percentage([msg.message.text for msg in poll.responses.filter(contact__in=Contact.objects.filter(reporting_location=location))])
        return to_ret
    else:
        # filter by number of weeks
        #TODO more elegant solution to coincide with actual school term weeks
        date_filter = kwargs['weeks'] #give the date in weeks
        date_now = datetime.datetime.now()
        date_diff = date_now - datetime.timedelta(weeks=date_filter)
        all_emis_reports = EmisReporter.objects.filter(reporting_location__in=[loc for loc in Location.objects.filter(name__in=DISTRICT)])
        for location in Location.objects.filter(name__in=DISTRICT):
to_ret[location.__unicode__()] = compute_average_percentage([msg.message.text for msg in poll.responses.filter(date__gte=date_diff, contact__in=Contact.objects.filter(reporting_location=location))]) return to_ret themes = { 1.1 : "Name and location of our Sub-county/Division", 1.2 : 'Physical features of our Sub-County/Division', 1.3 : 'People in our Sub-county/Division', 2.1 : 'Occupations of people in our Sub-county/Division and their importance', 2.2 : 'Social Services and their importance', 2.3 : 'Challenges in social services and their possible solutions', 3.1 : 'Soil', 3.2 : 'Natural causes of changes in the environment', 3.3 : 'Changes in the environment through human activities', 4.1 : 'Air and the Sun', 4.2 : 'Water', 4.3 : 'Managing Water', 5.1 : 'Living things', 5.2 : 'Birds and Insects', 5.3 : 'Care for insects, birds and animals', 6.1 : 'Plants and their habitat', 6.2 : 'Parts of a flowering plant and their uses', 6.3 : 'Crop-growing practices', 7.1 : 'Saving resources', 7.2 : 'Spending resources', 7.3 : 'Projects', 8.1 : 'Living in peace with others', 8.2 : 'Child rights, needs and their importance', 8.3 : 'Child responsibility', 9.1 : 'Customs in our sub-county/division', 9.2 : 'Gender', 9.3 : 'Ways of promoting and preserving culture', 10.1: 'Disease vectors', 10.2: 'Diseases spread by vectors', 10.3: 'HIV/AIDS', 11.1: 'Concept of technology', 11.2: 'Processing and making things from natural materials', 11.3: 'Making things from artificial materials', 12.1: 'Sources of energy', 12.2: 'Ways of saving energy', 12.3: 'Dangers of energy and ways of avoiding them' } ## {{{ http://code.activestate.com/recipes/409413/ (r2) """ Descriptive statistical analysis tool. """ class StatisticsException(Exception): """Statistics Exception class.""" pass class Statistics(object): """Class for descriptive statistical analysis. Behavior: Computes numerical statistics for a given data set. Available public methods: None Available instance attributes: N: total number of elements in the data set sum: sum of all values (n) in the data set min: smallest value of the data set max: largest value of the data set mode: value(s) that appear(s) most often in the data set mean: arithmetic average of the data set range: difference between the largest and smallest value in the data set median: value which is in the exact middle of the data set variance: measure of the spread of the data set about the mean stddev: standard deviation - measure of the dispersion of the data set based on variance identification: Instance ID Raised Exceptions: StatisticsException Bases Classes: object (builtin) Example Usage: x = [ -1, 0, 1 ] try: stats = Statistics(x) except StatisticsException, mesg: <handle exception> print "N: %s" % stats.N print "SUM: %s" % stats.sum print "MIN: %s" % stats.min print "MAX: %s" % stats.max print "MODE: %s" % stats.mode print "MEAN: %0.2f" % stats.mean print "RANGE: %s" % stats.range print "MEDIAN: %0.2f" % stats.median print "VARIANCE: %0.5f" % stats.variance print "STDDEV: %0.5f" % stats.stddev print "DATA LIST: %s" % stats.sample """ def __init__(self, sample=[], population=False): """Statistics class initializer method.""" # Raise an exception if the data set is empty. if (not sample): raise StatisticsException, "Empty data set!: %s" %
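# A worked example of compute_average_percentage's sanitizing behaviour,
# re-sketched in Python 3 syntax: because the except clause wraps the whole
# loop, values after the first non-numeric entry are dropped.
def average_percentage(values):
    # same control flow as compute_average_percentage above
    sanitized = []
    try:
        for v in values:
            sanitized.append(float(v))
    except ValueError:
        print("non-numeric characters used")
    if not sanitized:
        return 0
    return sum(sanitized) / len(sanitized)

print(average_percentage(['50', '70', 'abc', '90']))  # 60.0 -- '90' is never reached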
zavlab1/foobnix
foobnix/preferences/configs/other_conf.py
Python
gpl-3.0
9,256
0.002161
#-*- coding: utf-8 -*- ''' Created on 23 дек. 2010 @author: ivan ''' import logging from gi.repository import Gtk from foobnix.fc.fc import FC from foobnix.preferences.configs import CONFIG_OTHER from foobnix.util.antiscreensaver import antiscreensaver from foobnix.preferences.config_plugin import ConfigPlugin from foobnix.helpers.dialog_entry import info_dialog_with_link_and_donate from foobnix.helpers.pref_widgets import FrameDecorator class OtherConfig(ConfigPlugin): name = CONFIG_OTHER def __init__(self, controls): self.controls = controls box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0) box.hide() df_vbox = Gtk.Box.new(Gtk.Orientation.VERTICAL, 5) df_vbox.set_border_width(4) download_frame = FrameDecorator(_("File downloads"), df_vbox, 0.5, 0.5) """save to""" hbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5) self.online_dir = Gtk.FileChooserButton.new("Set place", Gtk.FileChooserAction.SELECT_FOLDER) self.online_dir.connect("current-folder-changed", self.on_change_folder) hbox.pack_start(Gtk.Label.new(_("Save online music to folder:")), False, True, 0) hbox.pack_start(self.online_dir, True, True, 0) """automatic save""" self.automatic_save_checkbutton = Gtk.CheckButton.new_with_label(_("Automatic online music save")) self.nosubfolder_checkbutton = Gtk.CheckButton.new_with_label(_("Save to one folder (no subfolders)")) """download threads""" thbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5) tab_label = Gtk.Label.new(_("Download in threads")) adjustment = Gtk.Adjustment(value=1, lower=1, upper=10, step_incr=1, page_incr=1, page_size=0) self.threads_count = Gtk.SpinButton.new(adjustment, 0.0, 0) thbox.pack_start(tab_label, False, False, 0) thbox.pack_start(self.threads_count, False, True, 0) df_vbox.pack_start(hbox, False, False, 2) df_vbox.pack_start(self.automatic_save_checkbutton, False, False, 2) df_vbox.pack_start(self.nosubfolder_checkbutton, False, False, 2) df_vbox.pack_start(thbox, False, False, 2) download_frame.show_all() """disc cover size""" cbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5) cbox.set_border_width(4) dc_frame = FrameDecorator(_("Disc cover settings"), cbox, 0.5, 0.5) tab_label = Gtk.Label.new(_("Disc cover size:")) adjustment = Gtk.Adjustment(value=1, lower=100, upper=350, step_incr=20, page_incr=50, page_size=0) self.image_size_spin = Gtk.SpinButton.new(adjustment, 0.0, 0) cbox.pack_start(tab_label, False, False, 0) cbox.pack_start(self.image_size_spin, False, True, 0) dc_frame.show_all() """notification""" uhbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5) uhbox.set_border_width(4) updates_frame = FrameDecorator(_("Updates"), uhbox, 0.5, 0.5) self.check_new_version = Gtk.CheckButton(label=_("Check for new foobnix release on start"), use_underline=True) demo = Gtk.Button.new_with_label(_("Check for update")) demo.connect("clicked", lambda * a: info_dialog_with_link_and_donate("foobnix [version]")) uhbox.pack_start(self.check_new_version, True, True, 0) uhbox.pack_start(demo, False, False, 0) updates_frame.show_all() """background image""" thvbox = Gtk.Box.new(Gtk.Orientation.VERTICAL, 1) thvbox.set_border_width(4) theme_frame = FrameDecorator(_("Theming"), thvbox, 0.5, 0.5) """menu position""" pbox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5) pbox.show() label = Gtk.Label.new(_("Menu type: ")) self.old_style = Gtk.RadioButton(_("Old St
yle (Menu Bar)")) self.new_style = Gtk.RadioButton.new_with_label_from_widget(self.old_style, _("New Style (
Button)")) pbox.pack_start(label, False, False, 0) pbox.pack_start(self.new_style, False, True, 0) pbox.pack_start(self.old_style, False, False, 0) o_r_box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5) o_r_box.show() o_r_label = Gtk.Label.new(_("Order-Repeat Switcher Style:")) self.buttons = Gtk.RadioButton.new_with_label(None, _("Toggle Buttons")) self.labels = Gtk.RadioButton.new_with_label_from_widget(self.buttons, _("Text Labels")) o_r_box.pack_start(o_r_label, False, False, 0) o_r_box.pack_start(self.buttons, False, True, 0) o_r_box.pack_start(self.labels, False, False, 0) """opacity""" obox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5) obox.show() tab_label = Gtk.Label.new(_("Opacity:")) tab_label.show() adjustment = Gtk.Adjustment(value=1, lower=20, upper=100, step_incr=1, page_incr=1, page_size=0) self.opacity_size = Gtk.SpinButton.new(adjustment, 0.0, 0) self.opacity_size.connect("value-changed", self.on_chage_opacity) self.opacity_size.show() obox.pack_start(tab_label, False, False, 0) obox.pack_start(self.opacity_size, False, True, 0) self.fmgrs_combo = self.fmgr_combobox() hcombobox = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 5) hcombobox.pack_start(Gtk.Label.new(_('Choose your preferred file manager:')), False, False, 0) hcombobox.pack_start(self.fmgrs_combo, False, False, 0) self.disable_screensaver = Gtk.CheckButton(label=_("Disable Xscreensaver"), use_underline=True) thvbox.pack_start(pbox, False, False, 1) thvbox.pack_start(o_r_box, False, False, 1) thvbox.pack_start(obox, False, False, 1) thvbox.pack_start(hcombobox, False, False, 1) thvbox.pack_start(self.disable_screensaver, False, False, 0) theme_frame.show_all() """packaging""" box.pack_start(download_frame, False, True, 2) box.pack_start(dc_frame, False, True, 2) box.pack_start(theme_frame, False, False, 2) box.pack_start(updates_frame, False, True, 2) self.widget = box def on_chage_opacity(self, *a): opacity = self.opacity_size.get_value() / 100 self.controls.main_window.set_opacity(opacity) self.controls.preferences.set_opacity(opacity) def on_change_menu_type(self, *a): if self.old_style.get_active(): FC().menu_style = "old" elif self.new_style.get_active(): FC().menu_style = "new" self.controls.top_panel.update_menu_style() def on_change_folder(self, *a): path = self.online_dir.get_filename() FC().online_save_to_folder = path logging.info("Change music online folder: " + path) def on_load(self): self.online_dir.set_current_folder(FC().online_save_to_folder) self.online_dir.set_sensitive(FC().is_save_online) """disc""" self.image_size_spin.set_value(FC().info_panel_image_size) self.threads_count.set_value(FC().amount_dm_threads) self.opacity_size.set_value(int(FC().window_opacity * 100)) self.check_new_version.set_active(FC().check_new_version) if FC().automatic_online_save: self.automatic_save_checkbutton.set_active(True) if FC().nosubfolder: self.nosubfolder_checkbutton.set_active(True) """menu style""" if FC().menu_style == "new": self.new_style.set_active(True) else: self.old_style.set_active(True) if FC().order_repeat_style == "TextLabels": self.labels.set_active(True) self.fmgrs_combo.set_active(FC().active_manager[0]) if FC().antiscreensaver: self.disable_screensaver.set_active(True) antiscreensaver() def on_save(self): if self.buttons.get_active(): FC().order_repeat_style = "ToggleButtons" else: FC().order_repeat_style = "TextLabels" self.controls.os.on_load() FC().info_panel_image_size = self.image_size_spin.get_value_as_int() FC().amount_dm_threads = self.threads_count.get_va
Azure/azure-sdk-for-python
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_layout.py
Python
mit
7,732
0.004656
# coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # --
---------------------------------- im
port pytest import functools from devtools_testutils import recorded_by_proxy from azure.ai.formrecognizer._generated.v2022_01_30_preview.models import AnalyzeResultOperation from azure.ai.formrecognizer import DocumentAnalysisClient from azure.ai.formrecognizer import AnalyzeResult from preparers import FormRecognizerPreparer from testcase import FormRecognizerTest from preparers import GlobalClientPreparer as _GlobalClientPreparer DocumentAnalysisClientPreparer = functools.partial(_GlobalClientPreparer, DocumentAnalysisClient) class TestDACAnalyzeLayout(FormRecognizerTest): def teardown(self): self.sleep(4) @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy def test_layout_stream_transform_pdf(self, client): with open(self.invoice_pdf, "rb") as fd: document = fd.read() responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeResultOperation, raw_response) extracted_layout = AnalyzeResult._from_generated(analyze_result.analyze_result) responses.append(analyze_result) responses.append(extracted_layout) poller = client.begin_analyze_document("prebuilt-layout", document, cls=callback) result = poller.result() raw_analyze_result = responses[0].analyze_result returned_model = responses[1] # Check AnalyzeResult assert returned_model.model_id == raw_analyze_result.model_id assert returned_model.api_version == raw_analyze_result.api_version assert returned_model.content == raw_analyze_result.content self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages) self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents) self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables) self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs) self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities) self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles) # check page range assert len(raw_analyze_result.pages) == len(returned_model.pages) @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy def test_layout_stream_transform_jpg(self, client): with open(self.form_jpg, "rb") as fd: document = fd.read() responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeResultOperation, raw_response) extracted_layout = AnalyzeResult._from_generated(analyze_result.analyze_result) responses.append(analyze_result) responses.append(extracted_layout) poller = client.begin_analyze_document("prebuilt-layout", document, cls=callback) result = poller.result() raw_analyze_result = responses[0].analyze_result returned_model = responses[1] # Check AnalyzeResult assert returned_model.model_id == raw_analyze_result.model_id assert returned_model.api_version == raw_analyze_result.api_version assert returned_model.content == raw_analyze_result.content self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages) self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents) self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables) self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs) self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities) 
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles) # check page range assert len(raw_analyze_result.pages) == len(returned_model.pages) @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy def test_layout_multipage_transform(self, client): with open(self.multipage_invoice_pdf, "rb") as fd: document = fd.read() responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeResultOperation, raw_response) extracted_layout = AnalyzeResult._from_generated(analyze_result.analyze_result) responses.append(analyze_result) responses.append(extracted_layout) poller = client.begin_analyze_document("prebuilt-layout", document, cls=callback) result = poller.result() raw_analyze_result = responses[0].analyze_result returned_model = responses[1] # Check AnalyzeResult assert returned_model.model_id == raw_analyze_result.model_id assert returned_model.api_version == raw_analyze_result.api_version assert returned_model.content == raw_analyze_result.content self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages) self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents) self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables) self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs) self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities) self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles) # check page range assert len(raw_analyze_result.pages) == len(returned_model.pages) @pytest.mark.live_test_only @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy def test_layout_multipage_table_span_pdf(self, client): with open(self.multipage_table_pdf, "rb") as fd: my_file = fd.read() poller = client.begin_analyze_document("prebuilt-layout", my_file) layout = poller.result() assert len(layout.tables) == 3 assert layout.tables[0].row_count == 30 assert layout.tables[0].column_count == 5 assert layout.tables[1].row_count == 6 assert layout.tables[1].column_count == 5 assert layout.tables[2].row_count == 23 assert layout.tables[2].column_count == 5 @FormRecognizerPreparer() @DocumentAnalysisClientPreparer() @recorded_by_proxy def test_layout_specify_pages(self, client): with open(self.multipage_invoice_pdf, "rb") as fd: document = fd.read() poller = client.begin_analyze_document("prebuilt-layout", document, pages="1") result = poller.result() assert len(result.pages) == 1 poller = client.begin_analyze_document("prebuilt-layout", document, pages="1, 3") result = poller.result() assert len(result.pages) == 2 poller = client.begin_analyze_document("prebuilt-layout", document, pages="1-2") result = poller.result() assert len(result.pages) == 2 poller = client.begin_analyze_document("prebuilt-layout", document, pages="1-2, 3") result = poller.result() assert len(result.pages) == 3
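# A minimal sketch of the client calls these tests exercise; the endpoint,
# key, and file name are placeholders, not real credentials:
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient

client = DocumentAnalysisClient(
    "https://<resource>.cognitiveservices.azure.com/",   # placeholder endpoint
    AzureKeyCredential("<api-key>"),                     # placeholder key
)
with open("invoice.pdf", "rb") as fd:                    # assumed local file
    poller = client.begin_analyze_document("prebuilt-layout", fd.read(), pages="1-2")
result = poller.result()
print(len(result.pages))  # at most 2 pages with pages="1-2"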
koery/win-sublime
Data/Packages/Package Control/package_control/downloaders/caching_downloader.py
Python
mit
5,367
0.001491
import sys import re import json import hashlib from ..console_write import console_write class CachingDownloader(object): """ A base downloader that will use a caching backend to cache HTTP requests and make conditional requests. """ def add_conditional_headers(self, url, headers): """ Add `If-Modified-Since` and `If-None-Match` headers to a request if a cached copy exists :param headers: A dict with the request headers :return: The request headers dict, possibly with new headers added """ if not self.settings.get('cache'): return headers info_key = self.generate_key(url, '.info') info_json = self.settings['cache'].get(info_key) if not info_json: return headers # Make sure we have the cached content to use if we get a 304 key = self.generate_key(url) if not self.settings['cache'].has(key): return headers try: info = json.loads(info_json.decode('utf-8')) except ValueError: return headers etag = info.get('etag') if etag: headers['If-None-Match'] = etag last_modified = info.get('last-modified') if last_modified: headers['If-Modified-Since'] = last_modified return headers def cache_result(self, method, url, status, headers, content): """ Processes a request result, either caching the result, or returning the cached version of the url. :param method: The HTTP method used for the request :param url: The url of the request
:param status: The numeric response status of the request

:param headers: A dict of response headers, with keys being lowercase

:param content: The response content

:return: The response content
"""

debug = self.settings.g
et('debug', False) cache = self.settings.get('cache') if not cache: if debug: console_write(u"Skipping cache since there is no cache object", True) return content if method.lower() != 'get': if debug: console_write(u"Skipping cache since the HTTP method != GET", True) return content status = int(status) # Don't do anything unless it was successful or not modified if status not in [200, 304]: if debug: console_write(u"Skipping cache since the HTTP status code not one of: 200, 304", True) return content key = self.generate_key(url) if status == 304: cached_content = cache.get(key) if cached_content: if debug: console_write(u"Using cached content for %s from %s" % (url, cache.path(key)), True) return cached_content # If we got a 304, but did not have the cached content # stop here so we don't cache an empty response return content # If we got here, the status is 200 # Respect some basic cache control headers cache_control = headers.get('cache-control', '') if cache_control: fields = re.split(',\s*', cache_control) for field in fields: if field == 'no-store': return content # Don't ever cache zip/binary files for the sake of hard drive space if headers.get('content-type') in ['application/zip', 'application/octet-stream']: if debug: console_write(u"Skipping cache since the response is a zip file", True) return content etag = headers.get('etag') last_modified = headers.get('last-modified') if not etag and not last_modified: return content struct = {'etag': etag, 'last-modified': last_modified} struct_json = json.dumps(struct, indent=4) info_key = self.generate_key(url, '.info') if debug: console_write(u"Caching %s in %s" % (url, cache.path(key)), True) cache.set(info_key, struct_json.encode('utf-8')) cache.set(key, content) return content def generate_key(self, url, suffix=''): """ Generates a key to store the cache under :param url: The URL being cached :param suffix: A string to append to the key :return: A string key for the URL """ if sys.version_info >= (3,) or isinstance(url, unicode): url = url.encode('utf-8') key = hashlib.md5(url).hexdigest() return key + suffix def retrieve_cached(self, url): """ Tries to return the cached content for a URL :param url: The URL to get the cached content for :return: The cached content """ key = self.generate_key(url) cache = self.settings['cache'] if not cache.has(key): return False if self.settings.get('debug'): console_write(u"Using cached content for %s from %s" % (url, cache.path(key)), True) return cache.get(key)
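# A standalone sketch of the conditional-request flow implemented above:
# validators from the cache are sent with the request, and a 304 answer
# means the cached body can be reused without re-downloading it.
cache = {}  # url -> (etag, body)

def conditional_headers(url):
    headers = {}
    if url in cache:
        headers['If-None-Match'] = cache[url][0]   # send the stored validator
    return headers

def handle_response(url, status, etag, body):
    if status == 304:
        return cache[url][1]        # server says: cached copy still valid
    if status == 200 and etag:
        cache[url] = (etag, body)   # refresh validator and content
    return body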
ashleywaite/django-more
django_more/hashing.py
Python
bsd-3-clause
3,651
0
from base64 import b64encode, b16encode, b64decod
e, b16decode from math import ceil __all__ = [ "b64max", "b64len", "b64from16", "b64from256", "b16len", "b16max", "b16from64", "b16from256", "HashString", ] # Base 64 helpers for working in strings # b64 encodes 6 bits per character, in 3 byte raw increments, four bytes b64 def b64max(char_length):
""" Maximum number of raw bits that can be stored in a b64 of length x """ # Four byte increments only, discard extra length return (char_length // 4) * (3 * 8) def b64len(bit_length): """ Minimum b64 length required to hold x raw bits """ # Three raw bytes {bl / (8*3)} is four b64 bytes return ceil(int(bit_length) / (8 * 3)) * 4 def b64from16(val): """ ASCII encoded base 64 string from a base 16 digest """ return b64from256(b16decode(val, casefold=True)) def b64from256(val): """ ASCII encoded base 64 string from a raw (base 256) digest """ return str(b64encode(bytes(val)), encoding="ascii") # Base 16 helpers for working in strings # b16 encodes 4 bits per character def b16len(bit_length): """ Minimum b16/hex length required to hold x raw bits """ return ceil(int(bit_length) / 4) def b16max(char_length): """ Maximum number of raw bits that can be stored in a b16 of length x """ return int(char_length) * 4 def b16from64(val): """ ASCII encoded base 16 string from a base 64 digest """ return b16from256(b64decode(val)) def b16from256(val): """ ASCII encoded base 16 string from a raw (base 256) digest """ return str(b16encode(bytes(val)), encoding="ascii") class HashString(str): b64to = { "b16": b16from64, "b64": str.__str__, "b256": b64decode, } b16to = { "b64": b64from16, "b16": str.__str__, "b256": b16decode, } def __new__(cls, value): return super().__new__(cls, value) def __getattr__(self, attr): try: setattr(self, attr, self.b_to[attr](self)) return getattr(self, attr) except KeyError: raise AttributeError @classmethod def from_b64(cls, value): """ Create from a base 64 value """ self = cls(value) self.b_to = self.b64to return self @classmethod def from_b16(cls, value): """ Create from a base 16 value """ self = cls(value) self.b_to = self.b16to return self @classmethod def from_b256(cls, value): """ Create from a raw (base 256) value """ self = cls.from_b64(b64from256(value)) self.b256 = value return self def __eq__(self, value): if isinstance(value, str): if str.__eq__(self, value): return True if str.__eq__(self.b64, value): # Check for encoding sensitive matches return True if str.__eq__(str(self), str.lower(value)): # Check for lower case matches of base 16 return True elif isinstance(value, bytes) and bytes.__eq__(self.b256, value): return True return False def __bytes__(self): # Bytes will give the base256 / raw bytes return self.b256 def __str__(self): # Stringy informal representations of hashes are base16 lowercase return str.lower(self.b16) def __repr__(self): # Formal accurate representations of hases are base64 return self.b64 def __hash__(self): # Hashing always uses base64 for consistency return hash(self.b64)
majorcs/fail2ban
fail2ban/tests/fail2banregextestcase.py
Python
gpl-2.0
6,546
0.019554
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*- # vi: set ft=python sts=4 ts=4 sw=4 noet : # This file is part of Fail2Ban. # # Fail2Ban is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Fail2Ban is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Fail2Ban; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # Fail2Ban developers __author__ = "Serg Brester" __copyright__ = "Copyright (c) 2015 Serg G. Brester (sebres), 2008- Fail2Ban Contributors" __license__ = "GPL" from __builtin__ import open as fopen import unittest import getpass import os import sys import time import tempfile import uuid
try: from systemd import journal except ImportError: journal = None from ..client import fail2banregex from ..client.fail2banregex import Fail2banRegex, get_opt_parser, output from .utils import LogCaptureTestCase, logSys fail2banregex.logSys = logSys def _test_output(*args): logSys.info(args[0]) fail2banregex.output = _test_output CON
F_FILES_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__),"..", "..", "config")) TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "files") def _Fail2banRegex(*args): parser = get_opt_parser() (opts, args) = parser.parse_args(list(args)) return (opts, args, Fail2banRegex(opts)) class Fail2banRegexTest(LogCaptureTestCase): RE_00 = r"(?:(?:Authentication failure|Failed [-/\w+]+) for(?: [iI](?:llegal|nvalid) user)?|[Ii](?:llegal|nvalid) user|ROOT LOGIN REFUSED) .*(?: from|FROM) <HOST>" FILENAME_01 = os.path.join(TEST_FILES_DIR, "testcase01.log") FILENAME_02 = os.path.join(TEST_FILES_DIR, "testcase02.log") FILENAME_WRONGCHAR = os.path.join(TEST_FILES_DIR, "testcase-wrong-char.log") FILTER_SSHD = os.path.join(CONF_FILES_DIR, 'filter.d', 'sshd.conf') def setUp(self): """Call before every test case.""" LogCaptureTestCase.setUp(self) def tearDown(self): """Call after every test case.""" LogCaptureTestCase.tearDown(self) def testWrongRE(self): (opts, args, fail2banRegex) = _Fail2banRegex( "test", r".** from <HOST>$" ) self.assertRaises(Exception, lambda: fail2banRegex.start(opts, args)) self.assertLogged("Unable to compile regular expression") def testWrongIngnoreRE(self): (opts, args, fail2banRegex) = _Fail2banRegex( "test", r".*? from <HOST>$", r".**" ) self.assertRaises(Exception, lambda: fail2banRegex.start(opts, args)) self.assertLogged("Unable to compile regular expression") def testDirectFound(self): (opts, args, fail2banRegex) = _Fail2banRegex( "--print-all-matched", "--print-no-missed", "Dec 31 11:59:59 [sshd] error: PAM: Authentication failure for kevin from 192.0.2.0", r"Authentication failure for .*? from <HOST>$" ) self.assertTrue(fail2banRegex.start(opts, args)) self.assertLogged('Lines: 1 lines, 0 ignored, 1 matched, 0 missed') def testDirectNotFound(self): (opts, args, fail2banRegex) = _Fail2banRegex( "--print-all-missed", "Dec 31 11:59:59 [sshd] error: PAM: Authentication failure for kevin from 192.0.2.0", r"XYZ from <HOST>$" ) self.assertTrue(fail2banRegex.start(opts, args)) self.assertLogged('Lines: 1 lines, 0 ignored, 0 matched, 1 missed') def testDirectIgnored(self): (opts, args, fail2banRegex) = _Fail2banRegex( "--print-all-ignored", "Dec 31 11:59:59 [sshd] error: PAM: Authentication failure for kevin from 192.0.2.0", r"Authentication failure for .*? 
from <HOST>$", r"kevin from 192.0.2.0$" ) self.assertTrue(fail2banRegex.start(opts, args)) self.assertLogged('Lines: 1 lines, 1 ignored, 0 matched, 0 missed') def testDirectRE_1(self): (opts, args, fail2banRegex) = _Fail2banRegex( "--print-all-matched", Fail2banRegexTest.FILENAME_01, Fail2banRegexTest.RE_00 ) self.assertTrue(fail2banRegex.start(opts, args)) self.assertLogged('Lines: 19 lines, 0 ignored, 13 matched, 6 missed') self.assertLogged('Error decoding line'); self.assertLogged('Continuing to process line ignoring invalid characters') self.assertLogged('Dez 31 11:59:59 [sshd] error: PAM: Authentication failure for kevin from 193.168.0.128') self.assertLogged('Dec 31 11:59:59 [sshd] error: PAM: Authentication failure for kevin from 87.142.124.10') def testDirectRE_2(self): (opts, args, fail2banRegex) = _Fail2banRegex( "--print-all-matched", Fail2banRegexTest.FILENAME_02, Fail2banRegexTest.RE_00 ) self.assertTrue(fail2banRegex.start(opts, args)) self.assertLogged('Lines: 13 lines, 0 ignored, 5 matched, 8 missed') def testVerbose(self): (opts, args, fail2banRegex) = _Fail2banRegex( "--verbose", "--print-no-missed", Fail2banRegexTest.FILENAME_02, Fail2banRegexTest.RE_00 ) self.assertTrue(fail2banRegex.start(opts, args)) self.assertLogged('Lines: 13 lines, 0 ignored, 5 matched, 8 missed') self.assertLogged('141.3.81.106 Fri Aug 14 11:53:59 2015') self.assertLogged('141.3.81.106 Fri Aug 14 11:54:59 2015') def testWronChar(self): (opts, args, fail2banRegex) = _Fail2banRegex( Fail2banRegexTest.FILENAME_WRONGCHAR, Fail2banRegexTest.FILTER_SSHD ) self.assertTrue(fail2banRegex.start(opts, args)) self.assertLogged('Lines: 4 lines, 0 ignored, 2 matched, 2 missed') self.assertLogged('Error decoding line'); self.assertLogged('Continuing to process line ignoring invalid characters:', '2015-01-14 20:00:58 user '); self.assertLogged('Continuing to process line ignoring invalid characters:', '2015-01-14 20:00:59 user '); self.assertLogged('Nov 8 00:16:12 main sshd[32548]: input_userauth_request: invalid user llinco') self.assertLogged('Nov 8 00:16:12 main sshd[32547]: pam_succeed_if(sshd:auth): error retrieving information about user llinco') def testWronCharDebuggex(self): (opts, args, fail2banRegex) = _Fail2banRegex( "--debuggex", "--print-all-matched", Fail2banRegexTest.FILENAME_WRONGCHAR, Fail2banRegexTest.FILTER_SSHD ) self.assertTrue(fail2banRegex.start(opts, args)) self.assertLogged('Lines: 4 lines, 0 ignored, 2 matched, 2 missed') self.assertLogged('http://')
CompassionCH/compassion-modules
sponsorship_compassion/models/contracts_report.py
Python
agpl-3.0
10,144
0.00138
############################################################################## # # Copyright (C) 2018 Compassion CH (http://www.compassion.ch) # @author: Stephane Eicher <seicher@compassion.ch> # # The licence is in the file __manifest__.py # ############################################################################## from dateutil.relativedelta import relativedelta from odoo import models, fields, api # For more readability we have split "res.partner" by functionality # pylint: disable=R7980 class PartnerSponsorshipReport(models.Model): _inherit = "res.partner" end_period = fields.Date(compute="_compute_end_period") start_period = fields.Date(compute="_compute_start_period") related_active_sponsorships = fields.One2many( "recurring.contract", compute="_compute_related_active_sponsorship", readonly=False, ) related_sponsorships = fields.One2many( "recurring.contract", compute="_compute_related_sponsorship", readonly=False ) # sr -> Sponsorship Report sr_sponsorship = fields.Integer( "Number of sponsorship", compute="_compute_sr_sponsorship", help="Count only the sponsorships who " "are fu
lly managed or those who are " "paid (not the correspondent).", )
sr_nb_b2s_letter = fields.Integer('Number of letters to sponsor', compute='_compute_b2s_letter') sr_nb_s2b_letter = fields.Integer('Number of letters to beneficiary', compute='_compute_s2b_letter') sr_nb_boy = fields.Integer("Number of boys", compute="_compute_boy") sr_nb_girl = fields.Integer("Number of girls", compute="_compute_girl") sr_time_fcp = fields.Integer( "Total hour spent at the FCP", compute="_compute_time_scp" ) sr_nb_meal = fields.Integer("Number of meals served", compute="_compute_meal") sr_nb_bible = fields.Integer( "Number of bibles distributed", compute="_compute_nb_bible" ) sr_nb_medic_check = fields.Integer( "Number of given medical checks", compute="_compute_medic_check" ) sr_total_donation = fields.Monetary("Invoices", compute="_compute_total_donation") sr_total_gift = fields.Integer("Gift", compute="_compute_total_gift") @api.multi def _compute_related_sponsorship(self): for partner in self: sponsorships = partner.sponsorship_ids sponsorships |= partner.member_ids.mapped("sponsorship_ids") partner.related_sponsorships = sponsorships @api.multi def _compute_s2b_letter(self): def get_nb_letter(_partner): return self.env['correspondence'].search_count( [('partner_id', '=', _partner.id), ('direction', '=', 'Supporter To Beneficiary'), ('scanned_date', '>', _partner.start_period), ('scanned_date', '<=', _partner.end_period)]) for partner in self: nb_letter = get_nb_letter(partner) if partner.is_church: for member in partner.member_ids: nb_letter += get_nb_letter(member) partner.sr_nb_s2b_letter = nb_letter @api.multi def _compute_b2s_letter(self): def get_nb_letter(_partner): return self.env['correspondence'].search_count( [('partner_id', '=', _partner.id), ('direction', '=', 'Beneficiary To Supporter'), ('scanned_date', '>', _partner.start_period), ('scanned_date', '<=', _partner.end_period)]) for partner in self: nb_letter = get_nb_letter(partner) if partner.is_church: for member in partner.member_ids: nb_letter += get_nb_letter(member) partner.sr_nb_b2s_letter = nb_letter @api.multi def _compute_related_active_sponsorship(self): for partner in self: sponsorships = partner.related_sponsorships partner.related_active_sponsorships = sponsorships.filtered("is_active") @api.multi def _compute_start_period(self): for partner in self: end = partner.end_period partner.start_period = fields.Date.to_string(end - relativedelta(months=12)) @api.multi def _compute_end_period(self): today = fields.Date.today() for partner in self: partner.end_period = today @api.multi def _compute_sr_sponsorship(self): for partner in self: partner.sr_sponsorship = len(partner.related_active_sponsorships) @api.multi def _compute_boy(self): for partner in self: partner.sr_nb_boy = len( partner.related_active_sponsorships.mapped("child_id").filtered( lambda r: r.gender == "M" ) ) @api.multi def _compute_girl(self): for partner in self: partner.sr_nb_girl = len( partner.related_active_sponsorships.mapped("child_id").filtered( lambda r: r.gender == "F" ) ) @api.multi def _compute_time_scp(self): def get_time_in_scp(sponsorship): nb_weeks = sponsorship.contract_duration // 7.0 country = sponsorship.child_id.field_office_id return nb_weeks * country.fcp_hours_week for partner in self: total_day = sum(partner.related_sponsorships.mapped(get_time_in_scp)) partner.sr_time_fcp = total_day @api.multi def _compute_meal(self): def get_nb_meal(sponsorship): nb_weeks = sponsorship.contract_duration // 7.0 country = sponsorship.child_id.field_office_id return nb_weeks * country.fcp_meal_week for partner in self: 
total_meal = sum( partner.related_sponsorships.filtered("global_id").mapped(get_nb_meal) ) partner.sr_nb_meal = total_meal @api.multi def _compute_medic_check(self): def get_nb_check(sponsorship): nb_year = sponsorship.contract_duration // 365 country = sponsorship.child_id.field_office_id return nb_year * country.fcp_medical_check for partner in self: total_check = sum( partner.related_sponsorships.filtered("global_id").mapped(get_nb_check) ) partner.sr_nb_medic_check = total_check @api.multi def _compute_nb_bible(self): for partner in self: total_bible = len(partner.related_sponsorships.filtered("global_id")) partner.sr_nb_bible = total_bible @api.multi def _compute_total_donation(self): def get_sum_invoice(_partner): invoices = self.env["account.invoice"].search( [ ("partner_id", "=", _partner.id), ("type", "=", "out_invoice"), ("state", "=", "paid"), ("invoice_category", "in", ["gift", "sponsorship", "fund"]), ("last_payment", "<", _partner.end_period), ("last_payment", ">", _partner.start_period), ] ) return sum(invoices.mapped("amount_total")) for partner in self: sr_total_donation = get_sum_invoice(partner) if partner.is_church: for member in partner.member_ids: sr_total_donation += get_sum_invoice(member) partner.sr_total_donation = sr_total_donation @api.multi def _compute_total_gift(self): def get_nb_gift(_partner): return self.env["account.invoice"].search_count( [ ("partner_id", "=", _partner.id), ("invoice_category", "=", "gift"), ("type", "=", "out_invoice"), ("state", "=", "paid"), ("last_payment", "<", _partner.end_period), ("last_payment", ">=", _partner.start_period),
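Every sr_* statistic above follows the same Odoo computed-field pattern: declare the field with compute=, then loop over the recordset and assign per record. A minimal sketch of that pattern, assuming a running Odoo registry; the field name is illustrative, not part of the module:

from odoo import api, fields, models


class PartnerDemo(models.Model):
    _inherit = 'res.partner'

    # Hypothetical field, declared the same way as the sr_* fields above.
    demo_child_count = fields.Integer(compute='_compute_demo_child_count')

    @api.multi
    def _compute_demo_child_count(self):
        # Recordsets iterate one record at a time; assign per record.
        for partner in self:
            partner.demo_child_count = len(partner.child_ids)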
dekom/threepress-bookworm-read-only
bookworm/django_evolution/__init__.py
Python
bsd-3-clause
1,410
0.001418
# The version of Django Evolution # # This is in the format of: # # (Major, Minor, Micro, alpha/beta/rc/final, Release Number,
Released) # VERSION = (0, 6, 6, 'alpha', 0, False) def get_version_string(): version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2]: version += ".%s" % VERSION[2] if VERSION[3] != 'final': if VERSION[3] == 'rc': version += ' RC%s' % VERSION[4] else: versio
n += ' %s %s' % (VERSION[3], VERSION[4]) if not is_release(): version += " (dev)" return version def get_package_version(): version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2]: version += ".%s" % VERSION[2] if VERSION[3] != 'final': version += '%s%s' % (VERSION[3], VERSION[4]) return version def is_release(): return VERSION[5] __version_info__ = VERSION[:-1] __version__ = get_package_version() class EvolutionException(Exception): def __init__(self,msg): self.msg = msg def __str__(self): return str(self.msg) class CannotSimulate(EvolutionException): pass class SimulationFailure(EvolutionException): pass class EvolutionNotImplementedError(EvolutionException, NotImplementedError): pass try: from django.db import connections __is_multi_db = True except: __is_multi_db = False def is_multi_db(): return __is_multi_db
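The two formatting helpers differ only in how the pre-release part is rendered. A quick worked check against the shipped VERSION tuple, assuming the module above is imported:

# With VERSION = (0, 6, 6, 'alpha', 0, False):
assert get_package_version() == '0.6.6alpha0'
assert get_version_string() == '0.6.6 alpha 0 (dev)'
assert not is_release()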
ColumbiaCMB/kid_readout
kid_readout/analysis/timeseries/periodic.py
Python
bsd-2-clause
1,094
0.001828
""" This module contains functions to analyze periodic data. """ from __future__ import division, print_function import numpy as np def folded_shape(array, period_samples): if period_sam
ples == 0: raise ValueError("Cannot fold unmodulated data or with period=0") shape = list(array.shape) shape[-1] = -1 shape.append(period_samples) return tuple(shape) def fold(array, period_samples, reduce=None): reshaped = array.reshape(folded_shape(array, period_samples)) if reduce is None: return reshaped else: return reduce(reshaped, axis=reshaped.ndim - 2)
def mask_left_right(size, skip): left_mask = (skip * size < np.arange(size)) & (np.arange(size) < size // 2) right_mask = size // 2 + skip * size < np.arange(size) return left_mask, right_mask def peak_to_peak(folded, skip=0.1): left_mask, right_mask = mask_left_right(size=folded.size, skip=skip) left = folded[left_mask] right = folded[right_mask] return np.mean(left) - np.mean(right), np.sqrt(np.var(left) / left.size + np.var(right) / right.size)
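A short usage example of fold(), assuming the functions above are in scope: twelve samples of a period-4 signal reshape into three periods, and passing a reduce function averages them.

import numpy as np

# Three repetitions of a period-4 waveform.
data = np.tile(np.array([0.0, 1.0, 0.0, -1.0]), 3)
assert fold(data, 4).shape == (3, 4)
averaged = fold(data, 4, reduce=np.mean)
assert np.allclose(averaged, [0.0, 1.0, 0.0, -1.0])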
paperreduction/fabric-bolt
fabric_bolt/core/urls.py
Python
mit
1,071
0.001867
from django.conf.urls import include, url from django.contrib import admin from django.conf import settings import socketio.sdjango from fabric_bolt.core import views socketio.sdjango.autodiscover() admin.autodiscover() urlpatterns = [ url(r'^grappelli/', include('grappelli.urls')), url(r'^admin/', include(admin.site.urls)), url(r'^$', views.Dashboard.as_view(), name='index'), url(r'^hosts/', include('fabric_bolt.hosts.urls')), url(r'^web-hooks/', include('fabric_bolt.web_hooks.urls')), url(r'^launch-window/', include('fabric_bolt.launch_window.urls')), url(r'^projects/', include('fabric_bolt.projects.urls')), url(r'^socket\.io
', include(socketio.sdjango.urls)), url(r'^users/', include('fabric_bolt.accounts.urls')), ] # Serve the static files from django if settings.DEBUG: urlpatterns += [ url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'documen
t_root': settings.MEDIA_ROOT, }), url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}), ]
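With this URLconf active, the named patterns resolve as usual. A sketch, assuming configured Django settings; the import path matches the pre-2.0 Django era this project targets and is an assumption:

from django.core.urlresolvers import reverse

# The root pattern above is named 'index'.
assert reverse('index') == '/'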
liqd/a4-meinberlin
meinberlin/apps/newsletters/migrations/0001_initial.py
Python
agpl-3.0
1,979
0.005558
# -*- coding: utf-8 -*- from __future__ import unicode_literals import ckeditor_uploader.fields import django.utils.timezone from django.conf import settings from django.db import migrations from dj
ango.db import models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.A4_ORGANISATIONS_MODEL), ('a4projects', '0008_project_tile_image'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Newsletter', fields=[ ('id', models.AutoField(serialize=False, primary_key=True,
verbose_name='ID', auto_created=True)), ('created', models.DateTimeField(editable=False, default=django.utils.timezone.now)), ('modified', models.DateTimeField(null=True, editable=False, blank=True)), ('sender', models.EmailField(max_length=254, blank=True, verbose_name='Sender')), ('subject', models.CharField(max_length=254, verbose_name='Subject')), ('body', ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='Email body')), ('sent', models.DateTimeField(null=True, blank=True, verbose_name='Sent')), ('receivers', models.PositiveSmallIntegerField(choices=[(0, 'Every user on the platform'), (1, 'Users following the chosen organisation'), (2, 'Users following the chosen project')], verbose_name='Receivers')), ('creator', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)), ('organisation', models.ForeignKey(null=True, to=settings.A4_ORGANISATIONS_MODEL, blank=True, on_delete=models.CASCADE)), ('project', models.ForeignKey(null=True, to='a4projects.Project', blank=True, on_delete=models.CASCADE)), ], options={ 'abstract': False, }, ), ]
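The receivers column stores a small integer whose meaning is fixed by the choices declared in the migration above; a sketch of that mapping as plain data:

# Integer values of the `receivers` field, per the migration above.
RECEIVERS = {
    0: 'Every user on the platform',
    1: 'Users following the chosen organisation',
    2: 'Users following the chosen project',
}
assert RECEIVERS[1] == 'Users following the chosen organisation'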
wecatch/app-turbo
demos/models/blog/base.py
Python
apache-2.0
188
0
# -*- coding:utf-8 -*- from models.base impo
rt SqlBaseModel class Model(SqlBaseModel): def __init__(self): super(Model, self).__init_
_(db_name='blog') Base = Model().Base
baidubce/bce-sdk-python
sample/vcr/vcr_sample_conf.py
Python
apache-2.0
675
0.002963
# !/usr/bin/env python # coding=utf-8 """ Configuration for vcr samples. """ import logging from baidubce.bce_client_configuration import BceClientConfiguration fro
m baidubce.auth.bce_credentials import BceCredentials HOST = 'http://vcr.bj.baidubce.com' AK = 'Fill AK here' SK = 'Fill SK here' logger = logging.getLogger('baidubce.services.vcr.vcrclient') fh = logging.FileHandler('sample.log') fh.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) logger.setLevel(logging.DEBUG) logger.addHandler(fh) config =
BceClientConfiguration(credentials=BceCredentials(AK, SK), endpoint=HOST)
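This configuration object is what a VCR client is constructed from. The import below is inferred from the logger name in this sample and may not match every SDK release, so it is guarded and should be treated as an assumption:

# Hypothetical client construction from the config above.
try:
    from baidubce.services.vcr.vcr_client import VcrClient
    client = VcrClient(config)
except ImportError:
    client = None  # adjust the import to match the installed SDK layout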
asedunov/intellij-community
python/testData/inspections/ChainedComparison5.py
Python
apache-2.0
164
0.060976
mapsize = 35 def test(x,
y): if <weak_warning descr="Simplify chained comparison">
0 <= x < <caret>mapsize and y >= 0 and y < mapsize</weak_warning>: return 1
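The <weak_warning> and <caret> markers delimit the inspection highlight and caret position for the test harness. For reference, the chained form the inspection suggests would read:

mapsize = 35

def test(x, y):
    # Chained comparison equivalent to the highlighted condition above.
    if 0 <= x < mapsize and 0 <= y < mapsize:
        return 1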
broonie89/loadify-1
lib/ohdevtools/commands/update-nuget.py
Python
mit
2,276
0.005712
from xml.etree.cElementTree import parse, Element, ElementTree, dump from os import walk from os.path import join from optparse import OptionParser description = "Update the master package.config from individual project ones." command_group = "Developer tools" # Snippet used from the ElementTree documentation. # Tidy up the indentation of XML elements. def indent(elem, level=0): i = "\n" + level*" " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: indent(elem, level+1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i def parse_args(): usage = ( "\n"+ " %prog\n"+ "\n"+ "Update the master projectdata/packages.config from all the individual project\n"+ "package.config files throught the src/ directory.") parser = OptionParser(usage=usage) return parser.parse_args() def match(value, target_string): for target in target_string.split("|"): if target=="": return True if target==value: return True return False def main(): options, args = parse_args() rootElement = Element('packages') packages = {} print "Searching for packages.config files:" for dirpath, subdirs, filenames in walk('src'): for filename in filenames: if filename == 'packages.config': filepath = join(dirpath, filename) print " " + filepath et = parse(filepath) for packageElement in et.findall('package'): pkgId = packageElement.get('id') pkgVersion = package
Element.get('version') packages[pkgId, pkgVersion] = packageElement print print "Writing projectdata/packages.config:" rootElement.extend([value for (key,value) in sorted(packages.items())]) indent(rootElement) tree = ElementTree(rootElement) dump(tree) tree.write('proj
ectdata/packages.config') if __name__ == "__main__": main()
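A small demonstration of the indent() helper above: it rewrites each element's .text and .tail so nested elements print one per line, two spaces per level. Uses the same Python 2 cElementTree imports as the script:

from xml.etree.cElementTree import Element, ElementTree, dump

root = Element('packages')
root.append(Element('package', id='Foo', version='1.0'))
indent(root)
dump(ElementTree(root))
# Prints:
# <packages>
#   <package id="Foo" version="1.0" />
# </packages>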
chirilo/mozillians
vendor-local/lib/python/tablib/packages/odf3/thumbnail.py
Python
bsd-3-clause
31,736
0.000095
#!/usr/bin/python # -*- coding: utf-8 -*- # This contains a 128x128 px thumbnail in PNG format # Taken from http://www.zwahlendesign.ch/en/node/20 # openoffice_icons/openoffice_icons_linux/openoffice11.png # License: Freeware import base64 iconstr = """\ iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAAG0OVFdAAAABGdBTUEAANbY1E9YMgAAABl0RVh0 U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAFoHSURBVHjaYvz//z8DJQAggFhu3LiBU1JI SOiPmJgYM7IYUD0jMh8ggFhAhKamJuOHDx/+8fPz4zQsMTGRYf78+RjiAAHEBCJOnTr1HZvmN2/e MDAyQiycOXMmw5MnTxhmzZoViqwGIIAYrl+/DqKM/6OBNWvWgOmvX7/+37Rp0/8jR478//fv3/+f P3/+h+phPHHixH+AAIK75D8WMGnSpP8vXrz4//v37/9///6Fi4MMALruf3Bw8H+AAAJp5rQrOoeh edmyZWAbgd77f/bsWTAbBoB6JOpbmkF0OkAAgcLgO8gUYCCCnSIlJQWmw8LCGA4cOAAOAyMjI3hY gMDvP7+f3791+weQuQAggGBi7FPmrvnf3NwMtgnkt/Xr1//fuXMn2EaQ5TB89+nX/wUlJSDbPUFe AQgguKleiY2/QIpBTv727TuKJhB+//nf/xtP/4ANrK6tBRnAATIAIICQEwUjUCHIoyjOBYGbz/8y 8HMwMXCzfmcoLC1kMDH3YNDU1mGQ4PvLCBBALEjq/t958Zfh0dt/DL/+MDD8BdkBNIeXnYFBhIeR 4efffwybNqxgEOEXZLjw25Xh2QMWhmi9BwwAAYRsAMO5268ZZMREGGSEGBmYgcEL1MMAcgwo3D9/ +sIwf84cBhHLGoYAVVYGxi/3wDYABBCKU6dPn37s1vM//3/+/v//20+gn5/9+b/7yq//iw++/6+o qAhy0zUg1gH5HYYBAgg99Srsvvzz//6Tt//beSf+V/doBGkqheaFL0CKF1kzCAMEECOWfAMSY3Yq PvF7X68FKCcCPcLAA8QqQHwB3VaAAGKktDwACCCc5QETE5ODjIzMfi4uLoRtjIwiQBe8RVYHEEDg WODh4dkBTMLuQE1YDdPR0WG4cuUKw6tXr968ffsWxdsAAQTWAbQJq+aenh5wogJpBpUNzMzMGGoA AggckshZFRmA8sXz58/BeQKY2WA5kRmkp7Oz8z8vL+8WgAACG3Lv3j0Mze/fvwcpBuaLb/9//foF FweG2U9dXV2RixcvguTNAAKIAVQWaPt2oGgGlT4gzSBDNm/e/P/jx48o8n/+/PlraWkJil5OgAAC OUDEKvsgWOLdu3f/k5KSwOxPnz79nzt3LrgIQwY/fvz4X1FbDbIgAOQVgAACxcIbFnZesFcEBQXB AbdhwwYGNjY2BmdnZzANSypffvxn4OFgY/j5+TvI9i0gMYAAgkUJI7Dc+/flyxeGly9fMaipqWEE 9m1gTv329RvDjAmVDE52dgx6enpgvQABBIu7//fvPwCmB14Mze+//geXBwKcTAwn9q9kEOIXYNC2 8IfLAwQQcqIIOHPv9/o3X/4z/PkLzABAR7KyQMoCPi5Ghm9fvjJM7i5lUDbwYXjI4sIwK41LHBgG rwACCLk82Pvq038GaQEmBi52iAEwK/4BDbx7cTeDEB8/w42/TgwhRt8ZzNeeeAHyAUAAoSTL15/+ /f/++z+DrBATw/P3/xgeAkunt5//MSzYcpOhJYyNQUNDowGorA9o82eYHoAAQjFgw6kv/yV4/zLc v3WRoaRxBoOEtj/D2cXhPECNAcAExAbUiFE5AgQQenkAis/PrkWH/u/us3MGsvdBxYOAeD3QAIy8 DxBAjNiKJXIAqIZ//PjxYT4+PmtgHmEAJjiGhw8fMhLSBxBALIQUcHBw1AINbAIZCkqUuABywQZM kwzAnMBw//79TcCy2A+f+QABBA4BoOuZHj169FdWVpYs3wPzKoOAgACKI0BsYCnDwMrKyg204xsu vQABxAQtkv6FhISUEmuho6Mjw9OnT+F8UNsIWHQxAMsChtOnT4PaSwzAVglYDBgNX9H129raci8C AhAbIICQkTCoACEWgAoVDw8PcKl17Nix/ydPnvx//vz5/9jMAKqRh9Vi9fX1YLHe3l6QuD1AAMEs ZwUVi6s37CTK8t27d4MtBrW7QPj169f/79y58x+YCDFKP1jJCIruurq6VyC+t4/Pf2DUgAozSYAA Atvu4Wm5D+QA47hVoLIWwwBQsVpaWgq2FIRVVVX/gxp427dv/79kyZL/Fy5cAIcIPrBh/QZwtZOS mvoXmLDngDIOKEQAAgg5CmLsis7+v3XrFlgDyAJIWoIAkM+A8Q5ufYEqidmzZ4Md8PnzZxzVGQSD wN79+8F0ekb6X2C92AyqRmFRAhBA6PnUVtuv99CVjUXwlAysicEKQZUuKJcAm/7AlM0GrmyBwYi9 ogWa+hYY6m+AxeDPt9cY9PV0GSoqKxjef/jGMGvGZGmgec9gSgECCFtBofvu3ftLoJQNjFuwI0RF RRlwNRkQbQ4Ghmfv/jF8BlZaoKDjAzYnb1w4wHDx+lWG98A66s27zwwVZUUM8vJyakAH3IbpAwgg rCXVxo2bnvr5+Ur9+w+pFX78+s/w8w+kvQnyMCsQs7GAeIwM91//A6r5z8DLAQwRFmDVwwnUA1R6 4uhBhl0H9jG8efacgZldgCE4Pp+BiUuc4fTNLwyVwUJMsGIZIIBwFZUam89+u84GrND+QZMeKQ04 acYbDGs3bWR4B/T5kbtcDLouWQycvKLgqp0FGJBGghdu2mgLaoDUAgQQrqL4BjOw/augogGuXNnZ GBn4OUG+Y2RgY4W2l7//Bwb3P2BpB2oGMjKwMDMy3ARW+5nRbgwB7hYMTk5ODIVdWxmiQp0Yvj5b 9qy1uHIn0NyroH4dyHxYDgAIIHyVhdvzd392vvj4nwGYdhi+AKOBGdpY//vvDwPr348MX94+BVed fTPXMry4tm02qMbLzs7eBmynrwOWgsuA/G1Ai77jCy2AAMLnAM75S1a/SIwJ3QTqpoAEzFO3N7Nx CTEwMrMycN8qvLB9y8FAoPADmFna2tp/rl69mglyCKh9QExNCxBAjCTWOxKg+h6Iv2KRAzXDxYD4 ORD/ROoG4wUAAURx/4BSABBAeMcbSAHA4jUF2M2YDWo3sLOzM0ybNi0SmBBXENIHEEAkt4hALR9g FTsX2PJJBFrIwMKCPSMB2xcMwI4BwSgGCCC8LSJgBSMtLi5+AGiRCsgyUPFLTJRt3bqVwdXVFRQS 
oK7MX3xqAQII7gCgTyKBrZplIIuAwUlyFADbAwwWFhZgB3p7e8OEZYD4IT59AAEEGzKyBuVb9CEC YsHy5csZysvLUUIH1Bq6du3aLdBACD69AAEEC4GXwHYAuHYjFqxevZph3bp1DCtWrACH2Pfv38EO AHWQgFU0OLqEhYXZQM00fAAggGBV3DPYeA8hAEq0SkpKDKGhoWCfgywFWQ7shTLcvXuXAdjzBLeI QVEpIiICCl1hdDMWLFiwCtirBdsNEEDwEQdgcBFsih08eBCFD2qOgTqloEYMaIwJmPjATTPkLvG2 bds2IY9sAHt/6rDhNFAAAAQQ3FWtra1biW2Qgjrvly5dAteTwP422HJQo/TBgwcYTTpgg+Y/zHIX FxdWYGj9P3fu3H9g6LwHNYQBAgil8kEel8NneXp6OthyUF8e1H8HNddAoYGtPQlSD+3LM2ZmZoLF Nm7c+B86XMcLEEBgmw10JazMUrYSbFiC23VQy0EhABreACa6/8BCBxz0oEEFbJ4ANmiDgXoEQOyG 1tb/VlZWIDNAvWxGgABiSSqseXiHMUju359fDEADGCQkJHAmwJUrV4LbiKDEBeyxgjodDLdv3wY3 19TV1Rm4ubkZsGXlnJycNdpa2vfAQwXAtAbsP2wEMu+AWkUAAQQSkwU1yUH4ypUrGK4HKQImJHiT HIRBiezy5cvgJjko4b18+fI/vugDhdK/P//+VTfU/09ISACNliaCogWULgACCJQVHp+aYtQEToiz 9qK4fP/+/aBsBC5WQdkNVLiAshtoCBqU3Tg5ORmMjY3BjVZ8hdiZM2eBbQhGxhdPnv4DOrofZDSs oQIQQOC8+OMXQw+IvvaSB16axcTEMJiYmID5oKY3KG/fvHmTAZjwwMUuyCGgQTRcloOMAeFPX34A +4I2DKWVlUA9P38DE+oRoDS8YwkQQLCS8POhPiNfi/Rdm0H9ehUVFXjnE2QRsMvFAExkDF+/fgWX lqAmu4KCArifAIp/XPXTm8//GW5dPs9gbW3JwAxUtGL5ik7ooOVvmBqAAEKuDXfwcLIwvH37Fm45 MHuBfQ2MY3DilJSUZIDUikxgi5EHsVC668DAffcF2Ef4/BVseU5hAYMwjyBo3ABUN7xEVgsQQMi9 jT97JjgZvHkDGc8E9e1BdfqPHz8Z9PUNGLS1QcEtBox3LnDZj2uw4hWwEfvyw1+G38B+BOsviEcE efkYXgNzGLC/0Qn0/R9k9QABhN7duTRn/pyPIF/9/PkLWJ9zAC3WBscz1i4YUsPy0zfIAPuHb//A vSRulh8MZ8+dY4iMjWX49/cfg6OjHYORiYU0ul6AAMKWdAP+/v23HpT4YAmQEHj05h/Dj9//wRYL 8zCBHXTs4DaG81cuM7x98YLh229mhqjEPAZpaRkGNSkWPuRhMoAAwtbhOwmKe2ZmYDwDLf8G7A98 +g7qG/wHxi2w5gPy//6HWPYOmMhuPvsL7raJAC2WFmQGdlCAXTfGbwzPgenm0YMHQHNYGGxsHRg+ M4kz3H71jyGlbGoOsmUAAYStSfbm3M3XDAIiUkAL/zF8+8nI8PM3pMMJshSMQcPGTJA+IiewCcEJ 7Dm9AAYzGzNktuHZrdMMt+7eYeAA9qKffGBmEPinx3DkNNDRTH8Yfoh4tAHzVjvMMoAAwhYCv6/f f/Xv6XtgKgam5j/AugTUMQZZyMSImKwAWfQdmJnefQM1Jv6D50zuAH14/fFnBhU1VYY3r18y8PHx M3zms2F4/EUEaDmk06ogKw4q3OAeBwggrI3SnprEqgnLz3aAesCgXi8fEIPLGuiEDIyJngVBFZ+l jgLDbWCZIcgrwLDj4l8GbSdDBi52JgZ3/f8M74FZ/O2rZ7C2IrhHBRBAWB1w89rlAwrC0PAGdXlY GRmE+BjBQQ0S+v7zP8MvoO+/AtPDDyAN6jPyczEyHLryHjyC9ub1awZhUQkGHVZRBnOJ2wzt5Zbb Jj55AuqYngXlNOSSECCAcBXgou8/fnn16RcneGxAQpAJHBKgIASNmoMGgD8AE+QXYBR9A6aPP7// MGw69prh8e1zDOZCFxiAjRSGkJCQbaD5JKilr9HzPwwABBAuBzBdu3n/LwuvLDCOgTng639wnP+D TFcC8Q+Gv19fMnx5/5yhu386w9kDK0CWzAE269k3bdo0wc7ODlTkggai7mIbH0YGAAGEq2Py7/jl J98klKW5+Dj+MvAxfWJ4+/opw707VxnaJq1g4BRUYOCT1GWQF3z9G2i5JdSXjOvXr/8HtXwZMZaD AEAA4esIRLu7e+bu3Ln9JJB9xSh2+SwOPikG2AQHsPIKh3bDwRULsGiWB9aeB48dOxYH5B4FZRRi un0AAYTPAWxQ+Z9Qvg2w0XIYaDGo6gb58g2aen0gVgXiXaCSmdjuOUAAkdIVAqlVBjWlcMhLgio0 qMP+E+sAgACi2nwBLQGoRw7se7gCO7uJwHZnBLBNyobcpqAEAAQQy0B6DNjkUAR6KAnYvIgFpWFQ EwM0tgEackBu5SH3eUHNlNOnT98GBgpovPMXpW4ACCAWWsQWsPUYB/RIPNBjjjBPgVqShAZ7iQGg 1omysrK8lpaWJpB7kVLzAAKI6CwA9IAlECcBPRMDxBwgj4EwrgEiagDQnHdRURHD4sWLGbq7uxlK Skrgcvfv3weNEaA0rcgBAAEEDwBQzC1cuNDO39//AB8fHwO5QzUUZgmG3t5ehoqKCnCyB3UPQHMT 2ABoQGTt2rU9sbGxZcTUN7gAQACxII26/AcGwndQgIACgB4A5MEHwDbrt2/fGC5cuMCQl5cHbkb8 g89aI8oAkBhoCAuEQWxQdrK1tQUlCVA38xm5bgAIIPRMeX/Xrl0HQQ6iNgD1Ljdu3Ahf2hQVFQVO xvr6+iCPMOTm5oI9eunSJUgHDehR0Fjb8+fPwaMP165dA9MgPkgclFrExMRAXeRjwIhjJdddAAGE UgYADQL1f1yBsbJdTk6OKtkAlH+zs7PBMY0rOYNiFIRBngIFFMiDoNQBKgNAM+CgIRfQcAxIP6hX DCp7YAUqaDjHxsbGAJgdLuIrmC0tLa+tXLlSA2Tew4cP/8bFxXE9efLkH0AAYRSCQMWKBw8ePG9h YcGPb5qeGIBtZRhsNh00/gByfG1tLcPSpUvBMd7f389gaGgIlgOpA2VF0HAAqFMMWo6Eq3967949 UM2AtUD08vLiAeK7QHvEQOtjgCmcAeh50Ey/FjDQHwIEEDbzuCQlJVNB403UBKCRPNDYZEZGxn9g coePc7W0tPwHDc6C1iEBYwS8aAlkN2jgFbT+CNuQIzoAqQOmtG5YioZGKouTk9NP0FgodNnR/zlz 5vzfsWPHf2Dq6QOldCAWAQggbM1NXv9Q/9OggTpcq6tIBaAx1Pz8/P8bNmyAexxkPmjFJmzBJciB oOFR0BQ4aMUWSA/IYyB5YsZtQdPpoKk0qOfZHBwcnoNGob/+/P5/2owZ/1tbW/8fPXoUZn8CA2Rp 
HStAADFCPS0UXTbt3uM/FuDi/8+PTwzavNcYeqqiKa4ROjo6wENtoDF9cHe7p4ehsLAQnMRBox+g /A5aeAIa+wMlfVAyB+VzUHIF2Q0agCSmrQHKVsCa5AGwR6QBbKeI37x585S8vLz49bt3GKrLKxiE geYBszaoIAWtGQCtKboIDKz3AAEEMhlUglrCPA9OOxy8DCfvsYCn7EFTb8QWhiALlixZAsqP4NId BCorK1GW9IAKO1DeB40zg0p0EBvkeJA9oPwuLi4OXoUDaj0SMyaF3EJUVFRUAJZhFgcOHlwtBiw4 rty6yVBXVc1gaW7+e+bMmX/v3r3bC+0qgpZ1fgTpAwggRqT2gI1D0en9/xgglv78/JIhy/kPQ5i/ C96JM1DVBmrmIk2OMVhbWzP4+vqCqylQTIPqeGDeZ5CWlmZ49uwZeGAdFLigwACV7KAaB7QaGDTo CjKLnNoHZA9oDJWNg51BSECQ4cLVqwz1wALWztr61+zZs/8CU0QtdLIe5Pn3oNVKIH0AAcSI1iYw DClZfOLVP22Wf39/Mby7e4hh98xo+FJlGAAtS9q5cydDQkICQ1JSEsPcuXMxqjVQqQ6q0kDJHJS0 QUkd5GlQAIDm0UClOmh0GTTKDKriQDFOnsch9j14cB8YgIJAs4QYTl04z9Bc38BgbWnxa+HCRb9u 3LhRCvU8qCv9GbnlCBBAjFgKQZXo9MwDj7lTpb69vccwr1gNPEkAyoegUAbFKmhcHjR5gJ4HQ
R4F 5WVQsgZNEILYoCYrKOmD5EGBAqveQLEOzKPgFIArqROaFgbJv//yl+E2MKmrK0sByw0BhqOnTjK0 tbQymJub/dm6ecvXUydPlgGVnoZ6/gt6sxkggHAFuZStrfb0f/oz/ER/n2GY1x4PLpSAfQWG+Ph4 lGQHimVQIQZqtIBiGDSHAAKgGAU1YEAxDcpCIE+CYhjUgIHI8eCt23EtDQItGP/4DTRI9h/o+X8M j+9fY7AxVgWaxcmw/8g
Rhq72dgYfbx+GbVu3MWzbtiULmudB81NfsfUZAAIIX5oDNdviDCLm969s tGJQVVVFSaIgj4Nmd0GFGSjGQYEBKshAMcrLCym9YV1gSlqUIK0/gb3+Lz//M4DWp3798R
iamdankaufman/beets
beets/autotag/__init__.py
Python
mit
9,819
0
# This file is part of beets. # Copyright 2013, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """Facilities for automatically determining files' correct metadata. """ import os import logging import re from beets import library, mediafile, config from beets.util import sorted_walk, ancestry, displayable_path # Parts of external interface. from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa from .match import tag_item, tag_album # noqa from .match import Recommendation # noqa # Global logger. log = logging.getLogger('beets') # Constants for directory walker. MULTIDISC_MARKERS = (r'dis[ck]', r'cd') MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d' # Additional utilities for the main interface. def albums_in_dir(path): """Recursively searches the given directory and returns an iterable of (paths, items) where paths is a list of directories and items is a list of Items that is probably an album. Specifically, any folder containing any media files is an album. """ collapse_pat = collapse_paths = collapse_items = None for root, dirs, files in sorted_walk(path, ignore=config['ignore'].as_str_seq(), logger=log): # Get a list of items in the directory. items = [] for filename in files: try: i = library.Item.from_path(os.path.join(root, filename)) except mediafile.FileTypeError: pass except mediafile.UnreadableFileError: log.warn(u'unreadable file: {0}'.format( displayable_path(filename)) ) except library.ReadError as exc: log.error(u'error reading {0}: {1}'.format( displayable_path(filename), exc )) else: items.append(i) # If we're currently collapsing the constituent directories in a # multi-disc album, check whether we should continue collapsing # and add the current directory. If so, just add the directory # and move on to the next directory. If not, stop collapsing. if collapse_paths: if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \ (collapse_pat and collapse_pat.match(os.path.basename(root))): # Still collapsing. collapse_paths.append(root) collapse_items += items continue else: # Collapse finished. Yield the collapsed directory and # proceed to process the current one. if collapse_items: yield collapse_paths, collapse_items collapse_pat = collapse_paths = collapse_items = None # Check whether this directory looks like the *first* directory # in a multi-disc sequence. There are two indicators: the file # is named like part of a multi-disc sequence (e.g., "Title Disc # 1") or it contains no items but only directories that are # named i
n this way. start_collapsing = False for marker in MULTIDISC_MARKERS: marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I) match = marker_pat.match(os.pat
h.basename(root)) # Is this directory the root of a nested multi-disc album? if dirs and not items: # Check whether all subdirectories have the same prefix. start_collapsing = True subdir_pat = None for subdir in dirs: # The first directory dictates the pattern for # the remaining directories. if not subdir_pat: match = marker_pat.match(subdir) if match: subdir_pat = re.compile( r'^%s\d' % re.escape(match.group(1)), re.I ) else: start_collapsing = False break # Subsequent directories must match the pattern. elif not subdir_pat.match(subdir): start_collapsing = False break # If all subdirectories match, don't check other # markers. if start_collapsing: break # Is this directory the first in a flattened multi-disc album? elif match: start_collapsing = True # Set the current pattern to match directories with the same # prefix as this one, followed by a digit. collapse_pat = re.compile( r'^%s\d' % re.escape(match.group(1)), re.I ) break # If either of the above heuristics indicated that this is the # beginning of a multi-disc album, initialize the collapsed # directory and item lists and check the next directory. if start_collapsing: # Start collapsing; continue to the next iteration. collapse_paths = [root] collapse_items = items continue # If it's nonempty, yield it. if items: yield [root], items # Clear out any unfinished collapse. if collapse_paths and collapse_items: yield collapse_paths, collapse_items def apply_item_metadata(item, track_info): """Set an item's metadata from its matched TrackInfo object. """ item.artist = track_info.artist item.artist_sort = track_info.artist_sort item.artist_credit = track_info.artist_credit item.title = track_info.title item.mb_trackid = track_info.track_id if track_info.artist_id: item.mb_artistid = track_info.artist_id # At the moment, the other metadata is left intact (including album # and track number). Perhaps these should be emptied? def apply_metadata(album_info, mapping): """Set the items' metadata to match an AlbumInfo object using a mapping from Items to TrackInfo objects. """ for item, track_info in mapping.iteritems(): # Album, artist, track count. if track_info.artist: item.artist = track_info.artist else: item.artist = album_info.artist item.albumartist = album_info.artist item.album = album_info.album # Artist sort and credit names. item.artist_sort = track_info.artist_sort or album_info.artist_sort item.artist_credit = (track_info.artist_credit or album_info.artist_credit) item.albumartist_sort = album_info.artist_sort item.albumartist_credit = album_info.artist_credit # Release date. for prefix in '', 'original_': if config['original_date'] and not prefix: # Ignore specific release date. continue for suffix in 'year', 'month', 'day': key = prefix + suffix value = getattr(album_info, key) or 0 # If we don't even have a year, apply nothing. if suffix == 'year' and not value: break # Otherwise, set the fetched value (or 0 for the month # and day if not available). item[key] = value # If we're using original release date for both fields,
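albums_in_dir decides whether a directory starts a multi-disc sequence by matching its name against the marker patterns. A quick check of the first marker, assuming the module-level constants above are in scope:

import re

# 'dis[ck]' followed by optional separators and a digit marks a disc dir.
marker_pat = re.compile(MULTIDISC_PAT_FMT % MULTIDISC_MARKERS[0], re.I)
assert marker_pat.match('Album Title Disc 1')
assert marker_pat.match('Album Title') is None  # no trailing disc number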
HybridF5/jacket
jacket/api/compute/openstack/compute/deferred_delete.py
Python
apache-2.0
3,026
0.00033
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the s
pecific language governing permissions and limitations # under the License. """The deferred instance delete extension.""" import webob from jacket.api.compute.openstack import common from jacket.api.compute.openstack import extensions from jacket.api.compute.openstack import wsgi from jacket.compute import cloud from jacket.compute import exception ALIAS = 'os-deferred-delete' authorize = extensions.os_compute_authorizer(ALIAS) class DeferredDeleteController(wsgi.Controller): def __init__(self, *args, **kwargs): super(DeferredDeleteController, self).__init__(*args, **kwargs) self.compute_api = cloud.API(skip_policy_check=True) @wsgi.response(202) @extensions.expected_errors((404, 409, 403)) @wsgi.action('restore') def _restore(self, req, id, body): """Restore a previously deleted instance.""" context = req.environ["compute.context"] authorize(context) instance = common.get_instance(self.compute_api, context, id) try: self.compute_api.restore(context, instance) except exception.InstanceUnknownCell as error: raise webob.exc.HTTPNotFound(explanation=error.format_message()) except exception.QuotaError as error: raise webob.exc.HTTPForbidden(explanation=error.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'restore', id) @wsgi.response(202) @extensions.expected_errors((404, 409)) @wsgi.action('forceDelete') def _force_delete(self, req, id, body): """Force delete of instance before deferred cleanup.""" context = req.environ["compute.context"] authorize(context) instance = common.get_instance(self.compute_api, context, id) try: self.compute_api.force_delete(context, instance) except exception.InstanceIsLocked as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) class DeferredDelete(extensions.V21APIExtensionBase): """Instance deferred delete.""" name = "DeferredDelete" alias = "os-deferred-delete" version = 1 def get_controller_extensions(self): controller = DeferredDeleteController() extension = extensions.ControllerExtension(self, 'servers', controller) return [extension] def get_resources(self): return []
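Because the handlers are registered with wsgi.action, they respond to POST requests on /servers/{id}/action whose body is a single key naming the action; a sketch of those bodies:

# Action bodies the controller above dispatches on (values may be None).
restore_body = {'restore': None}
force_delete_body = {'forceDelete': None}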
Lyleo/OmniMarkupPreviewer
OmniMarkupLib/Renderers/libs/python3/docutils/examples.py
Python
mit
3,913
0.000511
# $Id$ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. """ This module contains practical examples of Docutils client code. Importing this module from client code is not recommended; its contents are subject to change in future Docutils releases. Instead, it is recommended that you copy and paste the parts you need into your own code, modifying as necessary. """ from docutils import core, io def html_parts(input_string, source_path=None, destination_path=None, input_encoding='unicode', doctitle=True, initial_header_level=1): """ Given an input string, returns a dictionary of HTML document parts. Dictionary keys are the names of parts, and values are Unicode strings; encoding is up to the client. Parameters: - `input_string`: A multi-line text string; required. - `source_path`: Path to the source file or object. Optional, but useful for diagnostic output (system messages). - `destination_path`: Path to the file or object which will receive the output; optional. Used for determining relative paths (stylesheets, source links, etc.). - `input_encoding`: The encoding of `input_string`. If it is an encoded 8-bit string, provide the correct encoding. If it is a Unicode string, use "unicode", the default. - `doctitle`: Disable the promotion of a lone top-level section title to document title (and subsequent section title to document subtitle promotion); enabled by default. - `initial_header_level`: The initial level for header elements (e.g. 1 for "<h1>"). """ overrides = {'input_encoding': input_encoding, 'doctitle_xform': doctitle, 'initial_header_level': initial_header_level} parts = core.publish_parts( source=input_string, source_path=source_path, destination_path=destination_path, writer_name='html', settings_overrides=overrides) return parts def html_body(input_string, source_path=None, destination_path=None, input_encoding='unicode', output_encoding='unicode', doctitle=True, initial_header_level=1): """ Given an input string, returns an HTML fragment as a string. The return value is the contents of the <body> element. Parameters (see `html_parts()` for the remainder): - `output_encoding`: The desired encoding of the output. If a Unicode string is desired, use the default value of "unicode" . """ parts = html_parts( input_string=input_string, source_path=source_path, destination_path=destination_path, input_encoding=input_encoding, doctitle=doctitle, initial_header_level=initial_header_level) fragment = parts['html_body'] if output_encoding != 'unicode': fragment = fragment.encode(output_encoding) return fragment def internals(input_string, source_pa
th=None, destination_path=None, input_encoding='unicode', settings_overrides=None): """ Return the document tree and publisher, for exploring Docutils internals. Parameters: see `html_parts()`. """ if settings_overrides: overrides = settings_overrides.copy() else: overrides = {} overrides['input_encoding'] = input_encoding output, pub = core
.publish_programmatically( source_class=io.StringInput, source=input_string, source_path=source_path, destination_class=io.NullOutput, destination=None, destination_path=destination_path, reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', writer=None, writer_name='null', settings=None, settings_spec=None, settings_overrides=overrides, config_section=None, enable_exit_status=None) return pub.writer.document, pub
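Typical use of the helpers above, with docutils installed and any reStructuredText string as input:

# html_body() returns just the <body> contents; internals() exposes the
# parsed document tree and the publisher for inspection.
fragment = html_body(u'Some *emphasized* text.')
document, publisher = internals(u'Some *emphasized* text.')
assert '<em>emphasized</em>' in fragment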
kelsoncm/django_brfied
django_brfied/management/commands/importar_uf_municipio.py
Python
mit
1,647
0.00365
from django.core.management.base import BaseCommand from django_brfied.models import UnidadeFederativa, Municipio from ...migrations import UNIDADE_FEDERATIVA_ROWS, MUNICIPIO_ROWS class Command(BaseCommand): help = "Imports the UFs and Municipalities into the database" # requires_system_checks = False # def __init__(self, *args, **kwargs): # super().__init__(*args, **kwargs) # self.ignore_patterns = [] # # def add_arguments(self, parser): # parser.add_argument( # '--no-default-ignore', action='store_false', dest='use_default_ignore_patterns', # help="Don't ignore the common private glob-style patterns (defaults to 'CVS', '.*' and '*~').", # ) # # def set_options(self, **options): # """ # Set instance variables based on an options dict # """ # if options['use_default_ignore_patterns']: # ignore_patterns += apps.get_app_config('staticfiles').ignore_patterns def handle(self, **options): print('Importing UFs') for uf in UNIDADE_FEDERATIVA_ROWS: UnidadeFederativa.objects.\ update_or_create(sigla=uf[0], defaults={'nome': uf[1], 'codigo': uf[2], 'regiao': uf[3]})
print('UFs imported\n') print('Importing municipalities') i =
1 q = len(MUNICIPIO_ROWS) for m in MUNICIPIO_ROWS: if i%500 == 0: print('\tImported %3.2f%%' % ((i / q) * 100)) Municipio.objects.update_or_create(codigo=m[0], defaults={'nome': m[1], 'uf_id': m[2]}) i += 1 print('Municipalities imported')
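The command is wired to its file name, so it runs through Django's standard management machinery, e.g. (assuming the app is installed and the database migrated):

from django.core.management import call_command

# Equivalent to: python manage.py importar_uf_municipio
call_command('importar_uf_municipio')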
wheeler-microfluidics/zmq-plugin
zmq_plugin/_version.py
Python
lgpl-2.1
18,459
0
# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "$Format:%d$" git_full = "$Format:%H$" git_date = "$Format:%ci$" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "v" cfg.parentdir_prefix = "zmq-plugin-" cfg.versionfile_source = "zmq_plugin/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return
None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.pa
th.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(),
sjsucohort6/openstack
python/venv/lib/python2.7/site-packages/neutronclient/tests/functional/test_clientlib.py
Python
mit
3,224
0
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from keystoneclient.auth.identity import v2 as v2_auth from keystoneclient import discover from keystoneclient import session from tempest_lib import base import testtools from neutronclient.common import exceptions from neutronclient.tests.functional import base as func_base from neutronclient.v2_0 import client as v2_client # This module tests client library functionalities with # Keystone client. Neutron client supports two types of # HTTP clients (HTTPClient and SessionClient), # so it is better to test both clients. class LibraryTestBase(base.BaseTestCase): def setUp(self): super(LibraryTestBase, self).setUp() self.client = self._get_client() class Libv2HTTPClientTestBase(LibraryTestBase): def _get_client(self): creds = func_base.credentials() return v2_client.Client(username=creds['username'], password=creds['password'], tenant_name=creds['tenant_name'], auth_url=creds['auth_url']) class Libv2SessionClientTestBase(LibraryTestBase): def _get_client(self): creds = func_base.credentials() session_params = {} ks_session = session.Session.construct(session_params) ks_discover
= discover.Discover(session=ks_session, auth_url=creds['auth_url']) # At the moment, we use keystone v2 AP
I v2_auth_url = ks_discover.url_for('2.0') ks_session.auth = v2_auth.Password( v2_auth_url, username=creds['username'], password=creds['password'], tenant_name=creds['tenant_name']) return v2_client.Client(session=ks_session) class LibraryTestCase(object): def test_list_network(self): nets = self.client.list_networks() self.assertIsInstance(nets['networks'], list) def test_post_put_delele_network(self): name = str(uuid.uuid4()) net = self.client.create_network({'network': {'name': name}}) net_id = net['network']['id'] self.assertEqual(name, net['network']['name']) name2 = str(uuid.uuid4()) net = self.client.update_network(net_id, {'network': {'name': name2}}) self.assertEqual(name2, net['network']['name']) self.client.delete_network(net_id) with testtools.ExpectedException(exceptions.NetworkNotFoundClient): self.client.show_network(net_id) class LibraryHTTPClientTest(LibraryTestCase, Libv2HTTPClientTestBase): pass class LibrarySessionClientTest(LibraryTestCase, Libv2SessionClientTestBase): pass
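Outside the test harness, the same HTTPClient-style construction looks like this; credentials and endpoint are placeholders:

from neutronclient.v2_0 import client as v2_client

neutron = v2_client.Client(username='demo', password='secret',
                           tenant_name='demo',
                           auth_url='http://keystone.example:5000/v2.0')
networks = neutron.list_networks()['networks']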
PostCenter/botlang
tests/storage/test_storage_extensions.py
Python
mit
3,005
0.000333
import unittest from botlang import BotlangSystem from botlang.extensions.storage import StorageApi class DummyStore(StorageApi): def __init__(self): self.backend = {} def put(self, key, value, expiration=None): self.backend[key] = value def get(self, key): return self.backend.get(key) def remove(self, key): del self.backend[key] class StorageExtensionTestCase(unittest.TestCase): def test_local_storage(self): db = DummyStore() runtime = BotlangSystem.bot_instance().setup_local_storage(db) results = runtime.eval(""" (localdb-put "test1" 444) (localdb-put "test2" "miau") [define got1 (localdb-get-or-else "test3" (function () ":3"))] [define got2 (localdb-get-or-else "test3" (function () "3:"))] (localdb-remove "test2") (make-dict (list (list "test1" (lo
caldb-get "test1")) (list "test2" (localdb-get "test2")) (list "got1" got1) (list "got2" got2) ) ) """) self.assertEqual(results['test1'], 444)
self.assertEqual(results['test2'], None) self.assertEqual(results['got1'], ':3') self.assertEqual(results['got2'], ':3') def test_global_storage(self): db = DummyStore() runtime = BotlangSystem.bot_instance().setup_global_storage(db) results = runtime.eval(""" (globaldb-put "test1" 444) (globaldb-put "test2" "miau") [define got1 (globaldb-get-or-else "test3" (function () ":3"))] [define got2 (globaldb-get-or-else "test3" (function () "3:"))] (globaldb-remove "test2") (make-dict (list (list "test1" (globaldb-get "test1")) (list "test2" (globaldb-get "test2")) (list "got1" got1) (list "got2" got2) ) ) """) self.assertEqual(results['test1'], 444) self.assertEqual(results['test2'], None) self.assertEqual(results['got1'], ':3') self.assertEqual(results['got2'], ':3') def test_cache(self): cache = DummyStore() runtime = BotlangSystem.bot_instance().setup_cache_extension(cache) results = runtime.eval(""" (cache-put "test1" 444) (cache-put "test2" "miau") [define got1 (cache-get-or-else "test3" (function () ":3"))] [define got2 (cache-get-or-else "test3" (function () "3:"))] (cache-remove "test2") (make-dict (list (list "test1" (cache-get "test1")) (list "test2" (cache-get "test2")) (list "got1" got1) (list "got2" got2) ) ) """) self.assertEqual(results['test1'], 444) self.assertEqual(results['test2'], None) self.assertEqual(results['got1'], ':3') self.assertEqual(results['got2'], ':3')
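The DummyStore used by the tests is itself a complete StorageApi implementation; exercising it directly shows the contract the storage extensions expect:

store = DummyStore()
store.put('answer', 42)
assert store.get('answer') == 42
store.remove('answer')
assert store.get('answer') is None  # dict.get returns None after removal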
drivendata/countable-care-3rd-place
src/generate_feature9.py
Python
mit
2,602
0.001922
#!/usr/bin/env python from scipy import sparse from sklearn.datasets import dump_svmlight_file from sklearn.preprocessing import LabelEncoder import argparse import logging import numpy as np import os import pandas as pd from kaggler.util import encode_categorical_features, normalize_numerical_feature logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG) def generate_feature(train_file, label_file, test_file, feature_dir, feature_name): # Load data files logging.info('Loading training and test data') trn = pd.read_csv(train_file, index_col=0) tst = pd.read_csv(test_file, index_col=0) label = pd.read_csv(label_file, index_col=0) n_trn = trn.shape[0] n_tst = tst.shape[0] lbl_enc = LabelEncoder() trn['release'] = lbl_enc.fit_transform(trn.release.values) tst['release'] = lbl_enc.fit_transform(tst.release.values) logging.info('Combining training and test data') df = pd.concat([trn, tst], ignore_index=True) df.fillna(-1, inplace=True) cols = list(df.columns) cat_cols = [x for x in cols if x[0] == 'c'] # One-Hot-Encoding for categorical variables logging.info('One-hot-encoding categorical columns') for col in cat_cols: df[col] = lbl_enc.fit_transform(df[col].values) logging.info('Saving features into {}'.format(feature_dir)) for i in range(label.shape[1]): train_feature_file = os.path.join(feature_dir, '{}.trn{:02d}.sps'.format(feature_name, i)) test_feature_file = os.path.join(feature_dir, '{}.tst{:02d}.sps'.format(feature_name, i)) dump_svmlight_file(df.values[:n_trn], label.ix[:, i], train_feature_file,
zero_based=False)
dump_svmlight_file(df.values[n_trn:], np.zeros((n_tst,)), test_feature_file, zero_based=False) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--train-file', required=True, dest='train') parser.add_argument('--label-file', required=True, dest='label') parser.add_argument('--test-file', required=True, dest='test') parser.add_argument('--feature-dir', required=True, dest='feature_dir') parser.add_argument('--feature-name', required=True, dest='feature_name') args = parser.parse_args() generate_feature(train_file=args.train, label_file=args.label, test_file=args.test, feature_dir=args.feature_dir, feature_name=args.feature_name)
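generate_feature can also be called directly, bypassing argparse; the paths and feature name below are placeholders:

generate_feature(train_file='input/train.csv',
                 label_file='input/label.csv',
                 test_file='input/test.csv',
                 feature_dir='build/feature',
                 feature_name='feature9')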
JQIamo/artiq
artiq/test/lit/local_access/parallel.py
Python
lgpl-3.0
121
0
# RUN: %python -m artiq
.compiler.testbench.signature %s >%t with interleave: delay(1.0) t0 = no
w_mu() print(t0)
elewis33/doorstop
doorstop/core/vcs/veracity.py
Python
lgpl-3.0
959
0
"""Plug-in module to store requirements in a Veracity repository.""" from doorstop import common from doorstop.core.vcs.base import BaseWorkingCopy log = common.logger(__name__) class WorkingCopy(BaseWorkingCopy): """Veracity working copy.""" DIRECTORY = '.sgdrawer' IGNORES = ('.sgignores', '.vvignores') def lock(self, path): # track: http://veracity-scm.com/qa/questions/2034 log.debug("`vv` does not support scripted locking: {}".format(path)) self.call('vv',
'pull') self.call('vv', 'update') def edit(self, path): log.info("`vv` adds all changes") def add(self, path): self.call('vv', 'add', path) def delete(self, path):
self.call('vv', 'remove', path) def commit(self, message=None): message = message or input("Commit message: ") # pylint: disable=W0141 self.call('vv', 'commit', '--message', message) self.call('vv', 'push')
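The wrapper shells out to the vv CLI, so driving it is just method calls; a sketch that assumes BaseWorkingCopy takes the checkout path (an assumption, since the base class is defined elsewhere):

wc = WorkingCopy('/path/to/checkout')  # constructor arg is an assumption
wc.add('reqs/REQ001.yml')
wc.commit(message='add REQ001')       # commits, then pushes via `vv push`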
resmo/ansible
lib/ansible/modules/cloud/google/gcp_spanner_instance_info.py
Python
gpl-3.0
5,026
0.004377
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_spanner_instance_info description: - Gather info for GCP Instance - This module was called C(gcp_spanner_instance_facts) before Ansible 2.9. The usage has not changed. short_description: Gather info for GCP Instance version_added: 2.8 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: {} extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: get info on an instance gcp_spanner_instance_info: project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" ''' RETURN = ''' resources: description: List of resources returned: always type: complex contains: name: description: - A unique identifier for the instance, which cannot be changed after the instance is created. The name must be between 6 and 30 characters in length. returned: success type: str config: description: - The name of the instance's configuration (similar but not quite the same as a region) which defines defines the geographic placement and replication of your databases in this instance. It determines where your data is stored. Values are typically of the form `regional-europe-west1` , `us-central` etc. - In order to obtain a valid list please consult the [Configuration section of the docs](U(https://cloud.google.com/spanner/docs/instances)). returned: success type: str displayName: description: - The descriptive name for this instance as it appears in UIs. Must be unique per
project and between 4 and 30 characters in length. returned: success type: str nodeCount: description: - The number of nodes allocated to this instance.
returned: success type: int labels: description: - 'An object containing a list of "key": value pairs.' - 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.' returned: success type: dict ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest import json ################################################################################ # Main ################################################################################ def main(): module = GcpModule(argument_spec=dict()) if module._name == 'gcp_spanner_instance_facts': module.deprecate("The 'gcp_spanner_instance_facts' module has been renamed to 'gcp_spanner_instance_info'", version='2.13') if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin'] return_value = {'resources': fetch_list(module, collection(module))} module.exit_json(**return_value) def collection(module): return "https://spanner.googleapis.com/v1/projects/{project}/instances".format(**module.params) def fetch_list(module, link): auth = GcpSession(module, 'spanner') return auth.list(link, return_if_object, array_name='instances') def return_if_object(module, response): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result if __name__ == "__main__": main()
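`return_if_object` above leans on `navigate_hash` from `ansible.module_utils.gcp_utils` to walk nested response keys. A hypothetical re-implementation of that helper, for illustration only:

def navigate_hash(source, path, default=None):
    """Walk nested dict keys, e.g. navigate_hash(result, ['error', 'errors'])."""
    for key in path:
        if not isinstance(source, dict) or key not in source:
            return default
        source = source[key]
    return source

# Example: pulling the API error list out of a parsed JSON response.
resp = {'error': {'errors': [{'message': 'not found'}]}}
assert navigate_hash(resp, ['error', 'errors']) == [{'message': 'not found'}]
assert navigate_hash(resp, ['error', 'code'], 404) == 404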
WilmerLab/HTSOHM-dev
analysis/figure_ml_vs_vf.py
Python
mit
2,118
0.010387
#!/usr/bin/env python3

import click
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import pandas as pd

prop1range = [0.0, 1.0]    # VF
prop2range = [0.0, 800.0]  # ML

num_ch4_a3 = 2.69015E-05  # from methane-comparison.xlsx

fsl = fs = 8
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
# rc('text', usetex=True)


@click.command()
@click.argument('csv-path', type=click.File())
def figure_ml_vs_vf(csv_path):
    num_bins = 40

    # figure has to be a little "oversized" so that mpl makes it big enough to fill a 1-column fig.
    fig = plt.figure(figsize=(4.5, 4.5))

    # we only want 5 colors for ch4/uc, where each color is centered at 0,1,2,3,4 +-0.5.
    cm = matplotlib.cm.get_cmap("viridis", 5)

    points = pd.read_csv(csv_path)
    points['ch4_uc'] = points.absolute_volumetric_loading * (num_ch4_a3 * points.a * points.b * points.c)

    ax = fig.subplots(ncols=1)
    ax.set_xlim(prop1range[0], prop1range[1])
    ax.set_ylim(prop2range[0], prop2range[1])
    ax.set_xticks(prop1range[1] * np.array([0.0, 0.25, 0.5, 0.75, 1.0]))
    ax.set_yticks(prop2range[1] * np.array([0.0, 0.25, 0.5, 0.75, 1.0]))
    ax.set_xticks(prop1range[1] * np.array(range(0, num_bins + 1)) / num_bins, minor=True)
    ax.set_yticks(prop2range[1] * np.array(range(0, num_bins + 1)) / num_bins, minor=True)
    ax.tick_params(axis='x', which='major', labelsize=fs)
    ax.tick_params(axis='y', which='major', labelsize=fs)
    ax.grid(which='major', axis='both', linestyle='-', color='0.9', zorder=0)

    sc = ax.scatter(points.void_fraction_geo, points.absolute_volumetric_loading,
                    zorder=2, alpha=0.6, s=points.a, edgecolors=None, linewidths=0,
                    c=points.ch4_uc, cmap=cm, vmin=-0.5, vmax=4.5)

    ax.set_xlabel('Void Fraction', fontsize=fsl)
    ax.set_ylabel('Methane Loading [V/V]', fontsize=fsl)

    # cb = fig.colorbar(sc, ax=ax)
    # cb.ax.tick_params(labelsize=fs)

    output_path = "figure.png"
    fig.savefig(output_path, dpi=1200, bbox_inches='tight')
    plt.close(fig)


if __name__ == '__main__':
    figure_ml_vs_vf()
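A worked instance of the `ch4_uc` conversion above, with made-up cell dimensions (only `num_ch4_a3` comes from the script):

num_ch4_a3 = 2.69015E-05   # methane molecules per A^3 per unit of V/V loading
loading = 200.0            # hypothetical volumetric loading [V/V]
a = b = c = 25.0           # hypothetical unit-cell edges [Angstrom]
ch4_per_uc = loading * (num_ch4_a3 * a * b * c)
print(round(ch4_per_uc, 2))  # ~84.07 methane molecules per unit cell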
Meriipu/quodlibet
gdist/__init__.py
Python
gpl-2.0
7,826
0
# Copyright 2007 Joe Wreschnig # 2012-2016 Christoph Reiter # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """distutils extensions for GTK+/GObject/Unix This module contains a Distribution subclass (GDistribution) which implements build and install commands for operations related to Python GTK+ and GObject support. This includes installation of man pages and gettext support. Also supports setuptools but needs to be imported after setuptools (which does some monkey patching) """ import sys from distutils.core import setup from .shortcuts import build_shortcuts, install_shortcuts from .man import install_man from .po import build_mo, install_mo, po_stats, update_po, create_po, build_po from .icons import install_icons from .search_provider import install_search_provider from .dbus_services import build_dbus_services, install_dbus_services from .appdata import build_appdata, install_appdata from .coverage import coverage_cmd from .docs import build_sphinx from .scripts import build_scripts from .tests import quality_cmd, distcheck_cmd, test_cmd from .clean import clean from .bash_completions import install_bash_completions from .zsh_completions import install_zsh_completions from .util import get_dist_class, Distribution distutils_build = get_dist_class("build") class build(distutils_build): """Override the default build with new subcommands.""" sub_commands = distutils_build.sub_commands + [ ("build_mo", lambda self: self.distribution.has_po()), ("build_po", lambda self: self.distribution.has_po()), ("build_shortcuts", lambda self: self.distribution.has_shortcuts()), ("build_dbus_services", lambda self: self.distribution.has_dbus_services()), ("build_appdata", lambda self: self.distribution.has_appdata()), ] distutils_install = get_dist_class("install") class install(distutils_install): """Override the default install with new subcommands.""" user_options = distutils_install.user_options + [ ("mandir=", None, "destination directory for man pages. 
" "Defaults to $PREFIX/share/man"), ] sub_commands = distutils_install.sub_commands + [ ("install_shortcuts", lambda self: self.distribution.has_shortcuts()), ("install_man", lambda self: self.distribution.has_man_pages()), ("install_mo", lambda self: self.distribution.has_po()), ("install_icons", lambda self: self.distribution.need_icon_install()), ("install_search_provider", lambda self: self.distribution.need_search_provider()), ("install_dbus_services", lambda self: self.distribution.has_dbus_services()), ("install_appdata", lambda self: self.distribution.has_appdata()), ("install_bash_completions", lambda self: self.distribution.has_bash_completions()), ("install_zsh_completions", lambda self: self.distribution.has_zsh_completions()), ] def initialize_options(self): distutils_install.initialize_options(self) self.mandir = None is_osx = (sys.platform == "darwin") class GDistribution(Distribution): """A Distribution with support for GTK+-related options The GDistribution class adds a number of commads and parameters relate
d to GTK+ and GObje
ct Python programs and libraries. Parameters (to distutils.core.setup): po_directory -- directory where .po files are contained po_package -- package name for translation files shortcuts -- list of .desktop files to build/install dbus_services -- list of .service files to build/install man_pages -- list of man pages to install appdata -- list of appdata files to install Using the translation features requires gettext. Example: from distutils.core import setup from gdist import GDistribution setup(distclass=GDistribution, ...) """ shortcuts = [] appdata = [] dbus_services = [] po_directory = None man_pages = [] po_package = None search_provider = None coverage_options = {} bash_completions = [] zsh_completions = [] def __init__(self, *args, **kwargs): Distribution.__init__(self, *args, **kwargs) self.cmdclass.setdefault("build_po", build_po) self.cmdclass.setdefault("build_mo", build_mo) self.cmdclass.setdefault("build_shortcuts", build_shortcuts) self.cmdclass.setdefault("build_dbus_services", build_dbus_services) self.cmdclass.setdefault("build_appdata", build_appdata) self.cmdclass.setdefault("build_scripts", build_scripts) self.cmdclass.setdefault("install_icons", install_icons) self.cmdclass.setdefault("install_shortcuts", install_shortcuts) self.cmdclass.setdefault("install_dbus_services", install_dbus_services) self.cmdclass.setdefault("install_man", install_man) self.cmdclass.setdefault("install_mo", install_mo) self.cmdclass.setdefault("install_search_provider", install_search_provider) self.cmdclass.setdefault("install_appdata", install_appdata) self.cmdclass.setdefault( "install_bash_completions", install_bash_completions) self.cmdclass.setdefault( "install_zsh_completions", install_zsh_completions) self.cmdclass.setdefault("build", build) self.cmdclass.setdefault("install", install) self.cmdclass.setdefault("po_stats", po_stats) self.cmdclass.setdefault("update_po", update_po) self.cmdclass.setdefault("create_po", create_po) self.cmdclass.setdefault("coverage", coverage_cmd) self.cmdclass.setdefault("build_sphinx", build_sphinx) self.cmdclass.setdefault("quality", quality_cmd) self.cmdclass.setdefault("distcheck", distcheck_cmd) self.cmdclass.setdefault("test", test_cmd) self.cmdclass.setdefault("quality", quality_cmd) self.cmdclass.setdefault("clean", clean) def has_po(self): return bool(self.po_directory) def has_shortcuts(self): return not is_osx and bool(self.shortcuts) def has_appdata(self): return not is_osx and bool(self.appdata) def has_man_pages(self): return bool(self.man_pages) def has_dbus_services(self): return not is_osx and bool(self.dbus_services) def has_bash_completions(self): return bool(self.bash_completions) def has_zsh_completions(self): return bool(self.zsh_completions) def need_icon_install(self): return not is_osx def need_search_provider(self): return not is_osx __all__ = ["GDistribution", "setup"]
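Following the GDistribution docstring above, a minimal setup.py might look like this; the project name and file paths are placeholders, and only parameters the docstring documents are used:

from distutils.core import setup
from gdist import GDistribution

setup(distclass=GDistribution,
      name='example',
      version='1.0',
      po_directory='po',                   # gettext translations
      po_package='example',
      shortcuts=['data/example.desktop'],  # .desktop files
      man_pages=['man/example.1'])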
blueyed/coveragepy
tests/farm/html/src/main.py
Python
apache-2.0
257
0
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

import m1
import m2
import m3

a = 5
b = 6
assert m1.m1a == 1
assert m2.m2a == 1
assert m3.m3a == 1
wdcxc/blog
admin/views/base.py
Python
mit
789
0.014157
import importlib

from django.conf import settings
from django.views import View


class BaseView(View):
    """Base class for the admin backend views."""

    def __init__(self):
        self.context = {}
        self.context["path"] = {}

    def dispatch(self, request, *args, **kwargs):
        _path = request.path_info.split("/")[1:]
        self.context["path"]["app"] = _path[0]
        self.context["path"]["module"] = _path[1]
        self.context["path"]["action"] = _path[-1]
        imp_module_path = self.context["path"]["app"] + ".views." + self.context["path"]["module"]
        imp_module = importlib.import_module(imp_module_path)
        imp_cls = getattr(imp_module, self.context["path"]["module"].capitalize())
        return getattr(imp_cls, self.context["path"]["action"])(self, request)
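The dispatch above is plain resolve-by-name via importlib. The same pattern in isolation, with the standard json module standing in for the hypothetical "<app>.views.<module>" path:

import importlib

module = importlib.import_module("json")   # stand-in for "<app>.views.<module>"
action = getattr(module, "dumps")          # stand-in for the resolved action name
assert action({"a": 1}) == '{"a": 1}'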
nharraud/invenio
invenio/legacy/webstyle/templates.py
Python
gpl-2.0
27,002
0.004555
# This file is part of Invenio. # Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, # 2013, 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ WebStyle templates. Customize the look of pages of Invenio """ __revision__ = \ "$Id$" import time import cgi import traceback import urllib import sys import string from bs4 import BeautifulSoup from invenio.ext.template import render_template_to_string from invenio.config import \ CFG_SITE_LANG, \ CFG_SITE_NAME, \ CFG_SITE_NAME_INTL, \ CFG_SITE_SUPPORT_EMAIL, \ CFG_BASE_URL, \
CFG_SITE_URL from invenio_base.i18n import gettext_set_language, language_list_long from invenio.utils.url import make_canonical_urlargd, create_html_link from invenio.utils.date import convert_datecvs_to_datestruct from invenio_formatter import format_record from invenio.utils.html import get_mathjax_header c
lass Template: def tmpl_navtrailbox_body(self, ln, title, previous_links, separator, prolog, epilog): """Bootstrap friendly-Create navigation trail box body Parameters: - 'ln' *string* - The language to display - 'title' *string* - page title; - 'previous_links' *string* - the trail content from site title until current page (both ends exclusive) - 'prolog' *string* - HTML code to prefix the navtrail item with - 'epilog' *string* - HTML code to suffix the navtrail item with - 'separator' *string* - HTML code that separates two navtrail items Output: - text containing the navtrail Note: returns empty string for Home page. (guessed by title). """ # load the right message language _ = gettext_set_language(ln) if title == CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME): return "" # Breadcrumbs # breadcrumb objects should provide properties 'text' and 'url' # First element breadcrumbs = [dict(text=_("Home"), url=CFG_SITE_URL), ] # Decode previous elements if previous_links: soup = BeautifulSoup(previous_links) for link in soup.find_all('a'): breadcrumbs.append(dict( text=unicode(' '.join(link.contents)), url=link.get('href'))) # Add head if title: breadcrumbs.append(dict(text=title, url='#')) return render_template_to_string("breadcrumbs.html", breadcrumbs=breadcrumbs).encode('utf8') def tmpl_page(self, req, **kwargs): """Creates a complete page Parameters: - 'ln' *string* - The language to display - 'description' *string* - description goes to the metadata in the header of the HTML page, not yet escaped for HTML - 'keywords' *string* - keywords goes to the metadata in the header of the HTML page, not yet escaped for HTML - 'userinfobox' *string* - the HTML code for the user information box - 'useractivities_menu' *string* - the HTML code for the user activities menu - 'adminactivities_menu' *string* - the HTML code for the admin activities menu - 'navtrailbox' *string* - the HTML code for the navigation trail box - 'pageheaderadd' *string* - additional page header HTML code - 'boxlefttop' *string* - left-top box HTML code - 'boxlefttopadd' *string* - additional left-top box HTML code - 'boxleftbottom' *string* - left-bottom box HTML code - 'boxleftbottomadd' *string* - additional left-bottom box HTML code - 'boxrighttop' *string* - right-top box HTML code - 'boxrighttopadd' *string* - additional right-top box HTML code - 'boxrightbottom' *string* - right-bottom box HTML code - 'boxrightbottomadd' *string* - additional right-bottom box HTML code - 'title' *string* - the title of the page, not yet escaped for HTML - 'titleprologue' *string* - what to print before page title - 'titleepilogue' *string* - what to print after page title - 'body' *string* - the body of the page - 'lastupdated' *string* - when the page was last updated - 'uid' *int* - user ID - 'pagefooteradd' *string* - additional page footer HTML code - 'secure_page_p' *int* (0 or 1) - are we to use HTTPS friendly page elements or not? - 'navmenuid' *string* - the id of the navigation item to highlight for this page - 'metaheaderadd' *string* - list of further tags to add to the <HEAD></HEAD> part of the page - 'rssurl' *string* - the url of the RSS feed for this page - 'show_title_p' *int* (0 or 1) - do we display the page title in the body of the page? 
- 'body_css_classes' *list* - list of classes to add to the body tag - 'show_header' *boolean* - tells whether page header should be displayed or not - 'show_footer' *boolean* - tells whether page footer should be displayed or not Output: - HTML code of the page """ ctx = dict(ln=CFG_SITE_LANG, description="", keywords="", userinfobox="", useractivities_menu="", adminactivities_menu="", navtrailbox="", pageheaderadd="", boxlefttop="", boxlefttopadd="", boxleftbottom="", boxleftbottomadd="", boxrighttop="", boxrighttopadd="", boxrightbottom="", boxrightbottomadd="", titleprologue="", title="", titleepilogue="", body="", lastupdated=None, pagefooteradd="", uid=0, secure_page_p=0, navmenuid="", metaheaderadd="", rssurl=CFG_SITE_URL+"/rss", show_title_p=True, body_css_classes=None, show_header=True, show_footer=True) ctx.update(kwargs) return render_template_to_string("legacy_page.html", **ctx).encode('utf8') def tmpl_pageheader(self, req, **kwargs): """Creates a page header Parameters: - 'ln' *string* - The language to display - 'headertitle' *string* - the title of the HTML page, not yet escaped for HTML - 'description' *string* - description goes to the metadata in the header of the HTML page, not yet escaped for HTML - 'keywords' *string* - keywords goes to the metadata in the header of the HTML page, not yet escaped for HTML - 'userinfobox' *string* - the HTML code for the user information box - 'useractivities_menu' *string* - the HTML code for the user activities menu - 'adminactivities_menu' *string* - the HTML code for the admin activities menu - 'navtrailbox' *string* - the HTML code for the navigation trail box - 'pageheaderadd' *string* - additional page header HTML code - 'uid' *int* - user ID - 'secure_page_p' *int* (0 or 1) - are we to use HTTPS friendly page elements or not? - 'navmenuid' *string* - the id of the navigation item to highlight for this page - 'metaheaderadd' *string* - list of further tags to add to the <HEAD></HEAD> part of
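`tmpl_navtrailbox_body` above decodes the `previous_links` HTML with BeautifulSoup and rebuilds it as breadcrumb dicts. The same extraction step in isolation (the sample HTML and Home URL are made up; the original call omits the explicit parser argument):

from bs4 import BeautifulSoup

previous_links = '<a href="/collection/Articles">Articles</a>'
breadcrumbs = [dict(text='Home', url='http://example.org')]
soup = BeautifulSoup(previous_links, 'html.parser')
for link in soup.find_all('a'):
    breadcrumbs.append(dict(text=' '.join(link.contents), url=link.get('href')))
print(breadcrumbs)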
KirarinSnow/Google-Code-Jam
Round 3 2008/D1.py
Python
gpl-3.0
2,492
0.006019
#!/usr/bin/python
#
# Problem: Endless Knight
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
# Comments: OK for large, but may time out on small.

from itertools import *

MOD = 10007

# Precompute factorial table mod MOD
fact = [1] * MOD
for i in xrange(1, MOD):
    fact[i] = (fact[i-1] * i)

# n choose k -- using Lucas's theorem
def choose(n, k):
    if k > n:
        return 0
    elif n < MOD:
        return (fact[n]/fact[n-k]/fact[k])%MOD
    else:
        prod = 1
        while n > 0:
            prod *= choose(n%MOD, k%MOD)
            prod %= MOD
            n /= MOD
            k /= MOD
        return prod

def compute():
    h, w, r = map(int, raw_input().split())
    rocks = [map(int, raw_input().split()) for i in range(r)]

    if (h+w-2)%3 != 0:
        return 0

    # normalize rock coordinates
    h, w = h-1-(h+w-2)/3, w-1-(h+w-2)/3
    for i in range(r):
        row, col = rocks[i]
        if (row+col-2)%3 != 0:
            rocks[i] = None
        else:
            rocks[i] = [row-1-(row+col-2)/3, col-1-(row+col-2)/3]
            if rocks[i][0] < 0 or rocks[i][0] > h:
                rocks[i] = None
            elif rocks[i][1] < 0 or rocks[i][1] > w:
                rocks[i] = None

    total = 0
    for num in range(r+1):
        for perm in permutations(range(r), num):
            # verify increasing property of permutation
            inc = True
            for i in range(num):
                if rocks[perm[i]] == None:
                    inc = False
                    break
                if i > 0:
                    if rocks[perm[i]][0] < rocks[perm[i-1]][0]:
                        inc = False
                        break
                    if rocks[perm[i]][1] < rocks[perm[i-1]][1]:
                        inc = False
                        break
            if inc:
                points = [[0,0]] + [rocks[j] for j in perm] + [[h,w]]

                # number of paths going through all points
                prod = 1
                for j in range(1, len(points)):
                    dh = points[j][0] - points[j-1][0]
                    dw = points[j][1] - points[j-1][1]
                    prod *= choose(dh+dw, dw)
                    prod %= MOD

                # inclusion-exclusion
                total += (-1)**num * prod
                total %= MOD

    return total

for i in range(input()):
    print "Case #%d: %d" % (i+1, compute())
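`choose()` above is Lucas's theorem: C(n, k) mod p is the product of binomials of the base-p digits of n and k. A quick Python 3 check against exact binomials (not part of the contest code):

from math import comb

def lucas(n, k, p):
    prod = 1
    while n > 0 or k > 0:
        # comb() returns 0 when a base-p digit of k exceeds the digit of n
        prod = prod * comb(n % p, k % p) % p
        n //= p
        k //= p
    return prod

p = 10007
assert lucas(2 * p + 3, p + 1, p) == comb(2 * p + 3, p + 1) % p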
treytabner/quickly
quickly/shell.py
Python
gpl-3.0
2,887
0
""" Quickly, quickly deploy and manage cloud servers Copyright (C) 2014 Trey Tabner <trey@tabner.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import argparse import pkg_resources from prettytable import PrettyTable from quickly.deploy import DeploymentTool from quickly.manage import ManagementTool def main(): """ Determine which quickly command to execute """ version = pkg_
resources.require('quickly')[0].version parser = argparse.ArgumentParser( version=version, description="Quickly deploy and manage cloud servers") parser.add_argument( "-n", "--no-action", dest='action', action='store_false', help="Perform no actions other than listing details") subparsers = parser.add_subparsers(dest='mode') deploy_parser = subparsers.add_parser( 'deploy', help="D
eploy and configure one or more servers in parallel") deploy_parser.add_argument( 'plan', help="File containing deployment plan in YAML format") manage_parser = subparsers.add_parser( 'manage', help="Manage one or more servers by executing commands") manage_parser.add_argument( 'plan', help="Plan that determines servers to action against") manage_parser.add_argument('command', nargs=argparse.REMAINDER, help="Command to execute on specified servers") args = parser.parse_args() if args.mode == 'deploy': try: deploy = DeploymentTool(args.plan) except Exception as exc: print("Shell exception: %s" % exc) else: todo = PrettyTable(["Server Name", "Roles", "Image", "Size"]) todo.align = 'l' for d in deploy.deployments: todo.add_row([d.name, ', '.join(d.roles), d.image.name, d.size.name]) print(todo) if args.action: deploy.deploy() elif args.mode == 'manage': manage = ManagementTool(args.plan) todo = PrettyTable(["Server Name", "Access IP", "Device ID"]) todo.align = 'l' for s in manage.servers: todo.add_row([s.name, s.extra.get('access_ip'), s.id]) print(todo) if args.action: manage.execute(args.command) if __name__ == "__main__": main()
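The server summaries above are plain PrettyTable usage; the same rendering as a tiny standalone example with placeholder row values:

from prettytable import PrettyTable

todo = PrettyTable(["Server Name", "Roles", "Image", "Size"])
todo.align = 'l'
todo.add_row(["web01", "web, db", "ubuntu-14.04", "2GB"])
print(todo)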
KanoComputing/kano-feedback
kano_feedback/return_codes.py
Python
gpl-2.0
550
0
# return_codes.py
#
# Copyright (C) 2018 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Return codes of binaries used throughout this project.


class RC(object):
    """Return codes of binaries used throughout this project.

    See ``source`` for more details."""

    SUCCESS = 0
    INCORRECT_ARGS = 1
    NO_INTERNET = 2
    NO_KANO_WORLD_ACC = 3
    CANNOT_CREATE_FLAG = 4  # read-only fs?

    # kano-feedback-cli specific.
    ERROR_SEND_DATA = 10
    ERROR_COPY_ARCHIVE = 11
    ERROR_CREATE_FLAG = 12
plotly/python-api
packages/python/plotly/plotly/validators/scattermapbox/_hoverlabel.py
Python
mit
2,062
0.000485
import _plotly_utils.basevalidators


class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
    def __init__(self, plotly_name="hoverlabel", parent_name="scattermapbox", **kwargs):
        super(HoverlabelValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            align
                Sets the horizontal alignment of the text content
                within hover label box. Has an effect only if the
                hover label text spans two or more lines
            alignsrc
                Sets the source reference on Chart Studio Cloud
                for align .
            bgcolor
                Sets the background color of the hover labels for
                this trace
            bgcolorsrc
                Sets the source reference on Chart Studio Cloud
                for bgcolor .
            bordercolor
                Sets the border color of the hover labels for
                this trace.
            bordercolorsrc
                Sets the source reference on Chart Studio Cloud
                for bordercolor .
            font
                Sets the font used in hover labels.
            namelength
                Sets the default length (in number of characters)
                of the trace name in the hover labels for all
                traces. -1 shows the whole name regardless of
                length. 0-3 shows the first 0-3 characters, and
                an integer >3 will show the whole name if it is
                less than that many characters, but if it is
                longer, will truncate to `namelength - 3`
                characters and add an ellipsis.
            namelengthsrc
                Sets the source reference on Chart Studio Cloud
                for namelength .
""",
            ),
            **kwargs
        )
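What this validator governs in practice is the `hoverlabel` attribute on a scattermapbox trace. A small usage sketch (coordinates and colors are arbitrary):

import plotly.graph_objects as go

trace = go.Scattermapbox(
    lat=[45.5], lon=[-73.6], mode="markers",
    hoverlabel=dict(bgcolor="white", namelength=-1, align="left"),
)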
globocom/vault
identity/models.py
Python
apache-2.0
408
0
# -*- coding: utf-8 -*-

from django.db import models


class Project(models.Model):
    id = models.AutoField(primary_key=True)
    project = models.CharField(max_length=255)
    user = models.CharField(max_length=255)
    password = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'project'
        unique_together = (('project'),)
tszym/ansible
lib/ansible/modules/cloud/univention/udm_dns_zone.py
Python
gpl-3.0
7,158
0.00964
#!/usr/bin/python # -*- coding: UTF-8 -*- # Copyright (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: udm_dns_zone version_added: "2.2" author: "Tobias Rueetschi (@2-B)" short_description: Manage dns zones on a univention corporate server description: - "This module allows to manage dns zones on a univention corporate server (UCS). It uses the python API of the UCS to create a new object or edit it." requirements: - Python >= 2.6 options: state: required: false default: "present" choices: [ present, absent ] description: - Whether the dns zone is present or not. type: required: true choices: [ forward_zone, reverse_zone ] description: - Define if the zone is a forward or reverse DNS zone. zone: required: true description: - DNS zone name, e.g. C(example.com). nameserver: required: false description: - List of appropriate name servers. Required if C(state=present). interfaces: required: false description: - List of interface IP addresses, on which the server should response this zone. Required if C(state=present). refresh: required: false default: 3600 description: - Interval before the zone should be refreshed. retry: required: false default: 1800 description: - Interval that should elapse before a failed refresh should be retried. expire: required: false default: 604800 description: - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative. ttl: required: false default: 600 description: - Minimum TTL field that should be exported with any RR from this zone. contact: required: false default: '' description: - Contact person in the SOA record. mx: required: false default: [] description: - List of MX servers. (Must declared as A or AAAA records). 
''' EXAMPLES = ''' # Create a DNS zone on a UCS - udm_dns_zone: zone: example.com type: forward_zone nameserver: - ucs.example.com interfaces: - 192.0.2.1 ''' RETURN = '''# ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.univention_umc import ( umc_module_for_add, umc_module_for_edit, ldap_search, base_dn, ) def convert_time(time): """Convert a time in seconds into the biggest unit""" units = [ (24 * 60 * 60 , 'days'), (60 * 60 , 'hours'), (60 , 'minutes'), (1 , 'seconds'), ] if time == 0: return ('0', 'seconds') for unit in units: if time >= unit[0]: return ('{}'.format(time // unit[0]), unit[1]) def main(): module = AnsibleModule( argument_spec = dict( type = dict(required=True, type='str'), zone = dict(required=True, aliases=['name'], type='str'), nameserver = dict(default=[], type='list'), interfaces = dict(default=[], type='list'), refresh = dict(default=3600, type='int'), retry = dict(default=1800, type='int'), expire = dict(default=604800, type='int'), ttl = dict(default=600, type='int'), contact = dict(default='', type='str'), mx = dict(default=[], type='list'), state = dict(default='present', choices=['present', 'absent'], type='str') ), supports_check_mode=True, required_if = ([ ('state', 'present', ['nameserver', 'interfaces']) ]) ) type = module.params['type'] zone = module.params['zone'] nameserver = module.params['nameserver'] interfaces = module.params['interfaces'] refresh = module.params['refresh'] retry = module.params['retry'] expire = module.params['expire'] ttl = module.params['ttl'] contact = module.params['contact'] mx = module.params['mx'] state = module.params['state'] changed = False obj = list(ldap_search( '(&(objectClass=dNSZone)(zoneName={}))'.format(zone), attr=['dNSZone'] )) exists = bool(len(obj)) container = 'cn=dns,{}'.format(base_dn()) dn = 'zoneName={},{}'.format(zone, container) if contact == '': contact = 'root@{}.'.format(zone) if state == 'present': try: if not exists: obj = umc_module_for_add('dns/{}'.format(type), container) else: obj = umc_module_for_edit('dns/{}'.format(type), dn) obj['zone'] = zone obj['nameserver'] = nameserver obj['a'] = interfaces obj['refresh'] = convert_time(refresh) obj['retry'] = convert_time(retry) obj['expire'] = convert_time(expire) obj['ttl'] = convert_time(ttl) obj['contact'] = contact obj['mx'] = mx diff = obj.diff() if exists: for k in obj.keys(): if obj.hasChanged(k): changed = True else: changed = True if not module.check_mode: if not exists: obj.create() elif changed: obj.modify() except Exception as e: module.fail_json( msg='Creating/editing dns zone {} failed: {}'.format(zone, e) ) if state == 'absent' and exists: try: obj = umc_module_for_edit('dns/{}'.format(type), dn) if not module.check_mode: ob
j.remove() changed = True except Exception as e: module.fail_json( msg='Removing dns zone {} failed: {}'.format(zone, e) ) module.exit_json( changed=changed, diff=diff, zon
e=zone ) if __name__ == '__main__': main()
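`convert_time` above always reports the largest whole unit and truncates the remainder. Restated standalone so its behavior on the module defaults can be checked:

def convert_time(time):
    """Convert a time in seconds into the biggest unit (as in the module above)."""
    units = [(24 * 60 * 60, 'days'), (60 * 60, 'hours'), (60, 'minutes'), (1, 'seconds')]
    if time == 0:
        return ('0', 'seconds')
    for unit in units:
        if time >= unit[0]:
            return ('{}'.format(time // unit[0]), unit[1])

assert convert_time(604800) == ('7', 'days')   # default 'expire'
assert convert_time(3600) == ('1', 'hours')    # default 'refresh'
assert convert_time(90) == ('1', 'minutes')    # truncates rather than rounds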
ztop/psutil
psutil/_pslinux.py
Python
bsd-3-clause
40,128
0.0001
#!/usr/bin/env python # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Linux platform implementation.""" from __future__ import division import base64 import errno import os import re import socket import struct import sys import warnings from psutil import _common from psutil import _psposix from psutil._common import (isfile_strict, usage_percent, deprecated) from psutil._compat import PY3, xrange, namedtuple, wraps from psutil._error import AccessDenied, NoSuchProcess, TimeoutExpired import _psutil_linux as cext import _psutil_posix __extra__all__ = [ # io prio constants "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO
_CLASS_BE", "IOPRIO_CLASS_IDLE", # connection status constants "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1", "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT", "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", # other "phymem_buffers", "cached_phymem"] # --- constants HAS_PRLIMIT =
hasattr(cext, "linux_prlimit") # RLIMIT_* constants, not guaranteed to be present on all kernels if HAS_PRLIMIT: for name in dir(cext): if name.startswith('RLIM'): __extra__all__.append(name) # Number of clock ticks per second CLOCK_TICKS = os.sysconf("SC_CLK_TCK") PAGESIZE = os.sysconf("SC_PAGE_SIZE") BOOT_TIME = None # set later # ioprio_* constants http://linux.die.net/man/2/ioprio_get IOPRIO_CLASS_NONE = 0 IOPRIO_CLASS_RT = 1 IOPRIO_CLASS_BE = 2 IOPRIO_CLASS_IDLE = 3 # taken from /fs/proc/array.c PROC_STATUSES = { "R": _common.STATUS_RUNNING, "S": _common.STATUS_SLEEPING, "D": _common.STATUS_DISK_SLEEP, "T": _common.STATUS_STOPPED, "t": _common.STATUS_TRACING_STOP, "Z": _common.STATUS_ZOMBIE, "X": _common.STATUS_DEAD, "x": _common.STATUS_DEAD, "K": _common.STATUS_WAKE_KILL, "W": _common.STATUS_WAKING } # http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h TCP_STATUSES = { "01": _common.CONN_ESTABLISHED, "02": _common.CONN_SYN_SENT, "03": _common.CONN_SYN_RECV, "04": _common.CONN_FIN_WAIT1, "05": _common.CONN_FIN_WAIT2, "06": _common.CONN_TIME_WAIT, "07": _common.CONN_CLOSE, "08": _common.CONN_CLOSE_WAIT, "09": _common.CONN_LAST_ACK, "0A": _common.CONN_LISTEN, "0B": _common.CONN_CLOSING } # --- named tuples def _get_cputimes_fields(): """Return a namedtuple of variable fields depending on the CPU times available on this Linux kernel version which may be: (user, nice, system, idle, iowait, irq, softirq, [steal, [guest, [guest_nice]]]) """ f = open('/proc/stat', 'r') try: values = f.readline().split()[1:] finally: f.close() fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'] vlen = len(values) if vlen >= 8: # Linux >= 2.6.11 fields.append('steal') if vlen >= 9: # Linux >= 2.6.24 fields.append('guest') if vlen >= 10: # Linux >= 3.2.0 fields.append('guest_nice') return fields scputimes = namedtuple('scputimes', _get_cputimes_fields()) svmem = namedtuple( 'svmem', ['total', 'available', 'percent', 'used', 'free', 'active', 'inactive', 'buffers', 'cached']) pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty') pmmap_grouped = namedtuple( 'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty', 'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap']) pmmap_ext = namedtuple( 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) # --- system memory def virtual_memory(): total, free, buffers, shared, _, _ = cext.linux_sysinfo() cached = active = inactive = None f = open('/proc/meminfo', 'r') try: for line in f: if line.startswith('Cached:'): cached = int(line.split()[1]) * 1024 elif line.startswith('Active:'): active = int(line.split()[1]) * 1024 elif line.startswith('Inactive:'): inactive = int(line.split()[1]) * 1024 if (cached is not None and active is not None and inactive is not None): break else: # we might get here when dealing with exotic Linux flavors, see: # http://code.google.com/p/psutil/issues/detail?id=313 msg = "'cached', 'active' and 'inactive' memory stats couldn't " \ "be determined and were set to 0" warnings.warn(msg, RuntimeWarning) cached = active = inactive = 0 finally: f.close() avail = free + buffers + cached used = total - free percent = usage_percent((total - avail), total, _round=1) return svmem(total, avail, percent, used, free, active, inactive, buffers, cached) def swap_memory(): _, _, _, _, total, free = cext.linux_sysinfo() used = total - free percent = usage_percent(used, total, _round=1) # get pgin/pgouts f = open("/proc/vmstat", "r") sin = sout = None try: for line in 
f: # values are expressed in 4 kilo bytes, we want bytes instead if line.startswith('pswpin'): sin = int(line.split(' ')[1]) * 4 * 1024 elif line.startswith('pswpout'): sout = int(line.split(' ')[1]) * 4 * 1024 if sin is not None and sout is not None: break else: # we might get here when dealing with exotic Linux flavors, see: # http://code.google.com/p/psutil/issues/detail?id=313 msg = "'sin' and 'sout' swap memory stats couldn't " \ "be determined and were set to 0" warnings.warn(msg, RuntimeWarning) sin = sout = 0 finally: f.close() return _common.sswap(total, used, free, percent, sin, sout) @deprecated(replacement='psutil.virtual_memory().cached') def cached_phymem(): return virtual_memory().cached @deprecated(replacement='psutil.virtual_memory().buffers') def phymem_buffers(): return virtual_memory().buffers # --- CPUs def cpu_times(): """Return a named tuple representing the following system-wide CPU times: (user, nice, system, idle, iowait, irq, softirq [steal, [guest, [guest_nice]]]) Last 3 fields may not be available on all Linux kernel versions. """ f = open('/proc/stat', 'r') try: values = f.readline().split() finally: f.close() fields = values[1:len(scputimes._fields) + 1] fields = [float(x) / CLOCK_TICKS for x in fields] return scputimes(*fields) def per_cpu_times(): """Return a list of namedtuple representing the CPU times for every CPU available on the system. """ cpus = [] f = open('/proc/stat', 'r') try: # get rid of the first line which refers to system wide CPU stats f.readline() for line in f: if line.startswith('cpu'): values = line.split() fields = values[1:len(scputimes._fields) + 1] fields = [float(x) / CLOCK_TICKS for x in fields] entry = scputimes(*fields) cpus.append(entry) return cpus finally: f.close() def cpu_count_logical(): """Return the number of logical CPUs in the system.""" try: return os.sysconf("SC_NPROCESSORS_ONLN") except ValueError: # as a second fallback we try to parse /proc/cpuinfo num = 0 f = open('/proc/cpuinfo', 'r') try: lines = f.readlines() finally: f.close() for line in lines: if line.lower().startswith('processor'): num += 1 # unknown format (e.g. amrel/sparc architectures), see: # http://code.google.com/p/psutil/issues/detail?id=200 # try to parse /proc/stat as a last resort if
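The /proc/stat parsing shown in `cpu_times()` above reduces to a few lines: the aggregate "cpu" line holds per-mode jiffies, converted to seconds by dividing by the clock tick rate. A minimal Linux-only sketch:

import os

CLOCK_TICKS = os.sysconf("SC_CLK_TCK")

with open('/proc/stat') as f:
    values = f.readline().split()[1:]

fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
print(dict(zip(fields, (float(v) / CLOCK_TICKS for v in values))))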
oss/rutgers-repository-utils
setup.py
Python
gpl-2.0
858
0.039627
#!/usr/bin/env python

"""
Setup script for Rutgers Repository Utils.
"""

import distutils
import sys

from distutils.core import setup

setup(name = 'rutgers-repository-utils',
      version = '1.3',
      description = 'Python scripts for repository management',
      author = 'Open System Solutions',
      author_email = 'oss@oss.rutgers.edu',
      url = 'https://github.com/oss/rutgers-repository-utils',
      license = 'GPLv2+',
      platforms = ['linux'],
      long_description = """These Python scripts are based on yum-utils and
createrepo. Together, they create repository directories and do dependency
checking.""",
      packages = ['repoutils'],
      package_dir = {'repoutils' : 'lib'})
FiveEye/ml-notebook
dlp/ch3_3_boston_housing.py
Python
mit
1,900
0.007368
import numpy as np
import keras as ks
import matplotlib.pyplot as plt

from keras.datasets import boston_housing
from keras import models
from keras import layers
from keras.utils.np_utils import to_categorical

(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()

mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std

test_data -= mean
test_data /= std


def build_model():
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],)))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model


k = 4
num_val_samples = len(train_data) // k
num_epochs = 500
all_scores = []
all_mae_histories = []

for i in range(k):
    print('processing fold #', i)
    val_data = train_data[i * num_val_samples : (i+1) * num_val_samples]
    val_targets = train_targets[i * num_val_samples : (i+1) * num_val_samples]

    partial_train_data = np.concatenate(
        [train_data[: i * num_val_samples],
         train_data[(i+1) * num_val_samples:]], axis=0)
    partial_train_targets = np.concatenate(
        [train_targets[: i * num_val_samples],
         train_targets[(i+1) * num_val_samples:]], axis=0)

    model = build_model()
    history = model.fit(partial_train_data, partial_train_targets,
                        epochs=num_epochs, batch_size=1, verbose=0)
    val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
    all_mae_histories.append(history.history['mean_absolute_error'])
    all_scores.append(val_mae)

print(all_scores)

average_mae_history = [np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]

# plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.plot(average_mae_history[10:])
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
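The k-fold split above carves fold i out of the middle of the array and concatenates the remainder for training. The same index arithmetic checked on toy data:

import numpy as np

data = np.arange(8)               # stand-in for train_data
num_val_samples = len(data) // 4  # k = 4
i = 1                             # second fold
val = data[i * num_val_samples:(i + 1) * num_val_samples]
rest = np.concatenate([data[:i * num_val_samples], data[(i + 1) * num_val_samples:]])
assert list(val) == [2, 3]
assert list(rest) == [0, 1, 4, 5, 6, 7]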
l-vincent-l/APITaxi
APITaxi/commands/warm_up_redis.py
Python
agpl-3.0
1,795
0.002786
from . import manager


def warm_up_redis_func(app=None, db=None, user_model=None, redis_store=None):
    not_available = set()
    available = set()
    cur = db.session.connection().connection.cursor()
    cur.execute("""
        SELECT taxi.id AS taxi_id, vd.status, vd.added_by FROM taxi
        LEFT OUTER JOIN vehicle ON vehicle.id = taxi.vehicle_id
        LEFT OUTER JOIN vehicle_description AS vd ON vehicle.id = vd.vehicle_id
    """)
    users = {u.id: u.email for u in user_model.query.all()}
    for taxi_id, status, added_by in cur.fetchall():
        user = users.get(added_by)
        taxi_id_operator = "{}:{}".format(taxi_id, user)
        if status == 'free':
            available.add(taxi_id_operator)
        else:
            not_available.add(taxi_id_operator)

    to_remove = list()
    if redis_store.type(app.config['REDIS_NOT_AVAILABLE']) != 'zset':
        redis_store.delete(app.config['REDIS_NOT_AVAILABLE'])
    else:
        cursor, keys = redis_store.zscan(app.config['REDIS_NOT_AVAILABLE'], 0)
        keys = set([k[0] for k in keys])
        while cursor != 0:
            to_remove.extend(keys.intersection(available))
            not_available.difference_update(keys)
            cursor, keys = redis_store.zscan(app.config['REDIS_NOT_AVAILABLE'], cursor)
            keys = set([k[0] for k in keys])

    if len(to_remove) > 0:
        redis_store.zrem(app.config['REDIS_NOT_AVAILABLE'], to_remove)
    if len(not_available) > 0:
        redis_store.zadd(app.config['REDIS_NOT_AVAILABLE'],
                         **{k: 0 for k in not_available})


@manager.command
def warm_up_redis():
    from flask import current_app
    import APITaxi_models as models
    from APITaxi.extensions import redis_store
    warm_up_redis_func(current_app, models.db, models.User, redis_store)
wmizzi/tn2capstone
ServerScript/recievestore.py
Python
bsd-2-clause
1,705
0.017009
# created by Angus Clark 9/2/17 updated 27/2/17
# ToDo: implement traceroute function into this
# Perhaps get rid of unnecessary intermediate temp file

import socket
import os
import json
import my_traceroute

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '130.56.253.43'
#print host
port = 5201  # Change port (must enable security settings of server)
s.bind((host, port))
s.listen(5)

MAX_HOPS = 30  # max hops for traceroute

while True:
    c, addr = s.accept()  # accept incoming connection
    f = open('temp.json', 'wb')  # open blank binary to dump incoming data
    #print addr[0]
    l = c.recv(1024)
    while(l):  # dump data into temp file and get next chunk of data
        f.write(l)
        l = c.recv(1024)
    f.close()
    c.close()

    tempfile = open('temp.json', 'rb')
    info = json.load(tempfile)
    info["UserInfo"]["ip"] = addr[0]  # store ip address of sender

    last_addr = '0.0.0.0'  # placeholder for first iteration
    for hop in range(1, MAX_HOPS):
        result = my_traceroute.traceroute(hop, info["UserInfo"]["ip"])
        #print result
        if result == -1:
            break
        if result[1] == last_addr:
            break
        info["TRACEROUTE"][str(result[0])] = {}
        info["TRACEROUTE"][str(result[0])].update({'node': result[1], 'rtt': result[2]})
        last_addr = result[1]

    id = info["UserInfo"]["user id"]
    timestamp = info["UserInfo"]["timestamp"]
    os.system('mkdir /home/ubuntu/data/' + id)
    path = "/home/ubuntu/data/" + id + "/"
    filename = timestamp + '.json'
    savefile = open(path + filename, 'w+')
    savefile.write(json.dumps(info))
    savefile.close()
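A matching client sketch for the server above: connect, stream the JSON payload, then close so the server's recv loop sees EOF. The host and port are the ones hard-coded in the script; the payload shape is assumed from the fields the server reads:

import json
import socket

payload = {"UserInfo": {"user id": "demo", "timestamp": "0", "ip": ""},
           "TRACEROUTE": {}}

c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c.connect(('130.56.253.43', 5201))
c.sendall(json.dumps(payload).encode())
c.close()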
matcatc/Test_Parser
src/TestParser/View/Tkinter/__init__.py
Python
gpl-3.0
714
0.002801
'''
@date Aug 28, 2010
@author: Matthew A. Todd

This file is part of Test Parser by Matthew A. Todd

Test Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Test Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Test Parser. If not, see <http://www.gnu.org/licenses/>.
'''
kvesteri/validators
tests/test_between.py
Python
mit
863
0
# -*- coding: utf-8 -*-
import pytest

import validators


@pytest.mark.parametrize(('value', 'min', 'max'), [
    (12, 11, 13),
    (12, None, 14),
    (12, 11, None),
    (12, 12, 12)
])
def test_returns_true_on_valid_range(value, min, max):
    assert validators.between(value, min=min, max=max)


@pytest.mark.parametrize(('value', 'min', 'max'), [
    (12, 13, 12),
    (12, None, None),
])
def test_raises_assertion_error_for_invalid_args(value, min, max):
    with pytest.raises(AssertionError):
        assert validators.between(value, min=min, max=max)


@pytest.mark.parametrize(('value', 'min', 'max'), [
    (12, 13, 14),
    (12, None, 11),
    (12, 13, None)
])
def test_returns_failed_validation_on_invalid_range(value, min, max):
    result = validators.between(value, min=min, max=max)
    assert isinstance(result, validators.ValidationFailure)
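The behavior these tests pin down, at the prompt: a passing check is truthy, while an out-of-range check returns a falsy ValidationFailure rather than raising.

import validators

assert validators.between(12, min=11, max=13)
result = validators.between(12, min=13, max=14)          # out of range
assert isinstance(result, validators.ValidationFailure)  # and bool(result) is False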
dlab-berkeley/collaboratool-archive
bsd2/vagrant-ansible/ansible/lib/ansible/playbook/play.py
Python
apache-2.0
27,767
0.005258
# (c) 2012-2013, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# from ansible.utils.template import template from ansible import utils from ansible import errors from ansible.playbook.task import Task import pipes import shlex import os class Play(object): __slots__ = [ 'hosts', 'name', 'vars', 'default_vars', 'vars_prompt', 'vars_files', 'handlers', 'remote_user', 'remote_port', 'included_roles', 'accelerate', 'accelerate_port', 'sudo', 'sudo_user', 'transport', 'playbook', 'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks', 'basedir', 'any_errors_fatal', 'roles', 'max_fail_pct' ] # to catch typos and so forth -- these are userland names # and don't line up 1:1 with how they are stored VALID_KEYS = [ 'hosts', 'name', 'vars', 'vars_prompt', 'vars_files', 'tasks', 'handlers', 'user', 'port', 'include', 'accelerate', 'accelerate_port', 'sudo', 'sudo_user', 'connection', 'tags', 'gather_facts', 'serial', 'any_errors_fatal', 'roles', 'pre_tasks', 'post_tasks', 'max_fail_percentage' ] # ************************************************* def __init__(self, playbook, ds, basedir): ''' constructor loads from a play datastructure ''' for x in ds.keys(): if not x in Play.VALID_KEYS: raise errors.AnsibleError("%s is not a legal parameter in an Ansible Playbook" % x) # allow all playbook keys to be set by --extra-vars self.vars = ds.get('vars', {}) self.vars_prompt = ds.get('vars_prompt', {}) self.playbook = playbook self.vars = self._get_vars() self.basedir = basedir self.roles = ds.get('roles', None) self.tags = ds.get('tags', None) if self.tags is None: self.tags = [] elif type(self.tags) in [ str, unicode ]: self.tags = self.tags.split(",") elif type(self.tags) != list: self.tags = [] # We first load the vars files from the datastructure # so we have the default variables to pass into the roles self.vars_files = ds.get('vars_files', []) self._update_vars_files_for_host(None) # now we load the roles into the datastructure self.included_roles = [] ds = self._load_roles(self.roles, ds) # and finally re-process the vars files as they may have # been updated by the included roles self.vars_files = ds.get('vars_files', []) self._update_vars_files_for_host(None) # template everything to be efficient, but do not pre-mature template # tasks/handlers as they may have inventory scope overrides _tasks = ds.pop('tasks', []) _handlers = ds.pop('handlers', []) ds = template(basedir, ds, self.vars) ds['tasks'] = _tasks ds['handlers'] = _handlers self._ds = ds hosts = ds.get('hosts') if hosts is None: raise errors.AnsibleError('hosts declaration is required') elif isinstance(hosts, list): hosts = ';'.join(hosts) self.serial = int(ds.get('serial', 0)) self.hosts = hosts self.name = ds.get('name', self.hosts) self._tasks = ds.get('tasks', []) self._handlers = ds.get('handlers', []) self.remote_user = ds.get('user', 
self.playbook.remote_user) self.remote_port = ds.get('port', self.playbook.remote_port) self.sudo = ds.get('sudo', self.playbook.sudo) self.sudo_user = ds.get('sudo_user', self.playbook.sudo_user) self.transport = ds.get('connection', self.playbook.transport) self.gather_facts = ds.get('gather_facts', None) self.remote_port = self.remote_port self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false')) self.accelerate = utils.boolean(ds.get('accelerate', 'false')) self.accelerate_port = ds.get('accelerate_port', None) self.max_fail_pct = int(ds.get('max_fail_percentage', 100)) load_vars = {} if self.playbook.inventory.basedir() is not None: load_vars['inventory_dir'] = self.playbook.inventory.basedir() self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars) self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars) if self.sudo_user != 'root': self.sudo = True # ************************************************* def _get_role_path(self, role): """ Returns the path on disk to the directory containing the role directories like tasks, templates, etc. Also returns any variables that were included with the role """ orig_path = template(self.basedir,role,self.vars) role_vars = {} if type(orig_path) == dict: # what, not a path? role_name = orig_path.get('role', None) if role_name is None: raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path) role_vars = orig_path orig_path = rol
e_name path = utils.path_dwim(self.basedir, os.path.joi
n('roles', orig_path)) if not os.path.isdir(path) and not orig_path.startswith(".") and not orig_path.startswith("/"): path2 = utils.path_dwim(self.basedir, orig_path) if not os.path.isdir(path2): raise errors.AnsibleError("cannot find role in %s or %s" % (path, path2)) path = path2 elif not os.path.isdir(path): raise errors.AnsibleError("cannot find role in %s" % (path)) return (path, role_vars) def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0): # this number is arbitrary, but it seems sane if level > 20: raise errors.AnsibleError("too many levels of recursion while resolving role dependencies") for role in roles: role_path,role_vars = self._get_role_path(role) role_vars = utils.combine_vars(passed_vars, role_vars) vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))) vars_data = {} if os.path.isfile(vars): vars_data = utils.parse_yaml_from_file(vars) if vars_data: role_vars = utils.combine_vars(vars_data, role_vars) defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))) defaults_data = {} if os.path.isfile(defaults): defaults_data = utils.parse_yaml_from_file(defaults) # the meta directory contains the yaml that should # hold the list of dependencies (if any) meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))) if os.path.isfile(meta): data = utils.parse_yaml_from_file(meta) if data: dependencies = data.get('dependencies',[]) for dep in dependencies: allow_dupes = False (dep_path,dep_vars) = self._get_role_path(dep) meta = self._r
Thielak/program-y
src/programy/parser/template/maps/predecessor.py
Python
mit
1,445
0.00692
""" Copyright (c) 2016 Keith Sterling Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import logging from programy.parser.template.maps.map import TemplateMap class PredecessorMap(TemplateMap): NAME = "predecessor" def __init__(self): TemplateMap.__init__(self) def get_name(self): return PredecessorMap.NAME def map(self, v
alue): int_value = int(value) str_value = str(int_value - 1) return str_value
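Usage of the map defined above, assuming (as its no-argument `__init__` suggests) that TemplateMap needs nothing else to construct: it decrements an integer rendered as a string.

m = PredecessorMap()
assert m.map("5") == "4"
assert m.map("0") == "-1"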
xlcteam/scoreBoard
scorebrd/migrations/0009_auto__del_field_group_results.py
Python
bsd-3-clause
7,683
0.00807
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Deleting field 'Group.results'
        db.delete_column('scorebrd_group', 'results_id')

        # Adding M2M table for field results on 'Group'
        db.create_table('scorebrd_group_results', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('group', models.ForeignKey(orm['scorebrd.group'], null=False)),
            ('teamresult', models.ForeignKey(orm['scorebrd.teamresult'], null=False))
        ))
        db.create_unique('scorebrd_group_results', ['group_id', 'teamresult_id'])

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'Group.results'
        raise RuntimeError("Cannot reverse this migration. 'Group.results' and its values cannot be restored.")

        # Removing M2M table for field results on 'Group'
        db.delete_table('scorebrd_group_results')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'scorebrd.competition': {
            'Meta': {'object_name': 'Competition'},
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scorebrd.Group']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'scorebrd.event': {
            'Meta': {'object_name': 'Event'},
            'competitions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scorebrd.Competition']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'scorebrd.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'matches': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scorebrd.Match']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'results': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scorebrd.TeamResult']", 'symmetrical': 'False'}),
            'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['scorebrd.Team']", 'symmetrical': 'False'})
        },
        'scorebrd.match': {
            'Meta': {'object_name': 'Match'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'playing': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
            'referee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'scoreA': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'scoreB': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'teamA': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'homelanders'", 'to': "orm['scorebrd.Team']"}),
            'teamB': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'foreigners'", 'to': "orm['scorebrd.Team']"})
        },
        'scorebrd.team': {
            'Meta': {'object_name': 'Team'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'scorebrd.teamresult': {
            'Meta': {'object_name': 'TeamResult'},
            'draws': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'goal_diff': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'goal_shot': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'loses': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'matches_played': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scorebrd.Team']"}),
            'wins': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['scorebrd']
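
For context: the db.create_table/db.create_unique pair above hand-builds the join table that Django would otherwise manage itself for a ManyToManyField. A minimal sketch of the Group model implied by the frozen 'scorebrd.group' entry after this migration runs; this is reconstructed from the freeze for illustration and is not taken from the repo:

# Sketch of the post-migration model implied by the 'scorebrd.group' freeze above.
# With 'results' as a ManyToManyField, South/Django manages the
# scorebrd_group_results join table (including the unique group/teamresult pair).
from django.db import models

class Group(models.Model):
    name = models.CharField(max_length=200)
    matches = models.ManyToManyField('Match')
    teams = models.ManyToManyField('Team')
    results = models.ManyToManyField('TeamResult')  # previously a ForeignKey ('results_id' column)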
hoelsner/product-database
django_project/views.py
Python
mit
7,479
0.002139
import logging

import redis
from django.conf import settings
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import PasswordChangeView
from django.http import HttpResponseForbidden, JsonResponse, HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import redirect, render, render_to_response
from django.urls import reverse_lazy, reverse

from app.config.settings import AppSettings
from app.productdb.utils import login_required_if_login_only_mode
from django_project.celery import app as celery, TaskState, get_meta_data_for_task
from django_project import context_processors

logger = logging.getLogger("productdb")


def custom_page_not_found_view(request, exception):
    response = render(request, 'django_project/custom_404_page.html', {})
    response.status_code = 404
    return response


def custom_error_view(request):
    response = render(request, 'django_project/custom_500_page.html', {})
    response.status_code = 500
    return response


def custom_bad_request_view(request, exception):
    response = render(request, 'django_project/custom_400_page.html', {})
    response.status_code = 400
    return response


def custom_permission_denied_view(request, exception):
    response = render(request, 'django_project/custom_403_page.html', {})
    response.status_code = 403
    return response


def custom_csrf_failure_page(request, reason=""):
    context = {
        "message": "Form expired" if reason == "" else reason
    }
    return render_to_response('django_project/custom_csrf_failure_page.html', context)


class ChangePasswordView(LoginRequiredMixin, PasswordChangeView):
    template_name = "django_project/change_password.html"
    success_url = reverse_lazy("custom_password_change_done")

    def get(self, request, *args, **kwargs):
        if context_processors.is_ldap_authenticated_user(request)["IS_LDAP_ACCOUNT"]:
            return HttpResponseForbidden("You're not allowed to change your password in this application")
        return super().get(request, *args, **kwargs)


@login_required
def custom_password_change_done(request):
    """thank you page with link to homepage"""
    # check if the request comes from an LDAP account, if so, raise a PermissionDenied exception
    if context_processors.is_ldap_authenticated_user(request)["IS_LDAP_ACCOUNT"]:
        return HttpResponseForbidden("You're not allowed to change your password in this application")
    else:
        return render(request, "django_project/password_change_done.html", context={})


def login_user(request):
    """login user

    :param request:
    :return:
    """
    app_config = AppSettings()
    context = {
        "login_only_mode": app_config.is_login_only_mode()
    }
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse("productdb:home"))

    if request.GET:
        context["next"] = request.GET['next']
    else:
        context["next"] = None

    if request.method == 'POST':
        # authenticate user
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            if user.is_active:
                login(request, user)
                if context["next"] and not context["next"].startswith("/productdb/login"):
                    return HttpResponseRedirect(context["next"])
                else:
                    return HttpResponseRedirect(reverse("productdb:home"))
            else:
                context["message"] = "User account was disabled.<br>Please contact the administrator."
        else:
            context["message"] = "Login failed, invalid credentials"

    return render(request, "django_project/login.html", context=context)


@login_required
def logout_user(request):
    """logout user

    :param request:
    :return:
    """
    if request.user.is_authenticated:
        logout(request)
    return redirect(reverse("login"))


def task_progress_view(request, task_id):
    """Progress view for an asynchronous task"""
    if login_required_if_login_only_mode(request):
        return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))

    default_title = "Please wait..."
    redirect_default = reverse("productdb:home")
    meta_data = get_meta_data_for_task(task_id)

    # title of the progress view
    if "title" in meta_data.keys():
        title = meta_data["title"]
    else:
        title = default_title

    # redirect after task is completed
    if "redirect_to" in meta_data.keys():
        redirect_to = meta_data["redirect_to"]
        auto_redirect = meta_data.get("auto_redirect", False)
    else:
        logger.warning("Cannot find redirect link to task meta data, use homepage")
        redirect_to = redirect_default
        auto_redirect = False

    context = {
        "task_id": task_id,
        "title": title,
        "redirect_to": redirect_to,
        "auto_redirect": auto_redirect
    }

    return render(request, "django_project/task_progress_view.html", context=context)


def task_status_ajax(request, task_id):
    """returns a JSON representation of the task state"""
    if settings.DEBUG:
        # show results for task in debug mode
        valid_request = True
    else:
        valid_request = request.is_ajax()

    if valid_request:
        try:
            task = celery.AsyncResult(task_id)
            if task.state == TaskState.PENDING:
                response = {
                    "state": "pending",
                    "status_message": "try to start task"
                }
            elif task.state == TaskState.STARTED or task.state.lower() == TaskState.PROCESSING:
                response = {
                    "state": "processing",
                    "status_message": task.info.get("status_message", "")
                }
            elif task.state == TaskState.SUCCESS:
                response = {
                    "state": "success",
                    "status_message": task.info.get("status_message", "")
                }
                if "error_message" in task.info:
                    response["error_message"] = task.info["error_message"]
                if "data" in task.info:
                    response["data"] = task.info["data"]
            else:
                # something went wrong within the task
                response = {
                    "state": "failed",
                    "error_message": str(task.info),  # this is the exception that was raised
                }
        except redis.ConnectionError:
            logger.error("cannot get task update", exc_info=True)
            response = {
                "state": "failed",
                "error_message": "A server process (redis) is not running, please contact the administrator"
            }
        except Exception:  # catch any exception
            logger.error("cannot get task update", exc_info=True)
            response = {
                "state": "failed",
                "error_message": "Unknown error: " + str(task.info),  # this is the exception raised
            }

        logger.debug("task state for %s is\n%s" % (task_id, str(response)))
        return JsonResponse(response)
    else:
        return HttpResponse("Bad Request", status=400)
neurobin/test
test1.py
Python
mit
876
0.001142
#!/usr/bin/env python
"""@package letsacme

################ letsacme ###################
This script automates the process of getting a signed TLS/SSL certificate
from Let's Encrypt using the ACME protocol. It will need to be run on your
server and have access to your private account key. It gets both the
certificate and the chain (CABUNDLE) and prints them on stdout unless
specified otherwise.
"""
import argparse    # argument parser
import subprocess  # Popen
import json        # json.loads
import os          # os.path
import sys         # sys.exit
import base64      # b64encode
import binascii    # unhexlify
import time        # time
import hashlib     # sha256
import re          # regex operation
import copy        # deepcopy
import textwrap    # wrap and dedent
import logging     # Logger
import errno       # EEXIST
import shutil      # rmtree
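
The base64/hashlib/binascii imports hint at the JWS plumbing every ACME client needs: the protocol encodes all JWS fields as unpadded URL-safe base64. A sketch of that helper as it is commonly written in such clients (the name _b64 is illustrative and not necessarily this script's):

import base64
import hashlib

def _b64(data):
    """ACME/JWS base64url: URL-safe alphabet with the '=' padding stripped."""
    return base64.urlsafe_b64encode(data).decode("utf8").rstrip("=")

# e.g. the SHA-256 digest used when building an ACME challenge key authorization
token = b"example-token"
print(_b64(hashlib.sha256(token).digest()))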
jawilson/home-assistant
homeassistant/components/edl21/sensor.py
Python
apache-2.0
11,383
0.000264
"""Support for EDL21 Smart Meters.""" from __future__ import annotations from datetime import timedelta import logging from sml import SmlGetListResponse from sml.asyncio import SmlProtocol import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity from homeassistant.const import CONF_NAME from homeassistant.core import HomeAssistant, callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.entity_registry import async_get_registry from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from homeassistant.util.dt import utcnow _LOGGER = logging.getLogger(__name__) DOMAIN = "edl21" CONF_SERIAL_PORT = "serial_port" ICON_POWER = "mdi:flash" MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60) SIGNAL_EDL21_TELEGRAM = "edl21_telegram" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_SERIAL_PORT): cv.string, vol.Optional(CONF_NAME, default=""): cv.string, }, ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the EDL21 sensor.""" hass.data[DOMAIN] = EDL21(hass, config, async_add_entities) await hass.data[DOMAIN].connect() class EDL21: """EDL21 handles telegrams sent by a compatible smart meter.""" # OBIS format: A-B:C.D.E*F _OBIS_NAMES = { # A=1: Electricity # C=0: General purpose objects # D=0: Free ID-numbers for utilities "1-0:0.0.9*255": "Electricity ID", # D=2: Program entries "1-0:0.2.0*0": "Configuration program version number", "1-0:0.2.0*1": "Firmware version number", # C=1: Active power + # D=8: Time integral 1 # E=0: Total "1-0:1.8.0*255": "Positive active energy total", # E=1: Rate 1 "1-0:1.8.1*255": "Positive active energy in tariff T1", # E=2: Rate 2 "1-0:1.8.2*255": "Positive active energy in tariff T2", # D=17: Time integral 7 # E=0: Total "1-0:1.17.0*255": "Last signed positive active energy total", # C=2: Active power - # D=8: Time integral 1 # E=0: Total "1-0:2.8.0*255": "Negative active energy total", # E=1: Rate 1 "1-0:2.8.1*255": "Negative active energy in tariff T1", # E=2: Rate 2 "1-0:2.8.2*255": "Negative active energy in tariff T2", # C=14: Supply frequency # D=7: Instantaneous value # E=0: Total "1-0:14.7.0*255": "Supply frequency", # C=15: Active power absolute # D=7: Instantaneous value # E=0: Total "1-0:15.7.0*255": "Absolute active instantaneous power", # C=16: Active power sum # D=7: Instantaneous value # E=0: Total "1-0:16.7.0*255": "Sum active instantaneous power", # C=31: Active amperage L1 # D=7: Instantaneous value # E=0: Total "1-0:31.7.0*255": "L1 active instantaneous amperage", # C=32: Active voltage L1 # D=7: Instantaneous value # E=0: Total "1-0:32.7.0*255": "L1 active instantaneous voltage", # C=36: Active power L1 # D=7: Instantaneous value # E=0: Total "1-0:36.7.0*255": "L1 active instantaneous power", # C=51: Active amperage L2 # D=7: Instantaneous value # E=0: Total "1-0:51.7.0*255": "L2 active instantaneous amperage", # C=52: Active voltage L2 # D=7: Instantaneous value # E=0: Total "1-0:52.7.0*255": "L2 active instantaneous voltage", # C=56: Active power L2 # D=7: Instantaneous value # E=0: Total "1-0:56.7.0*255": "L2 active instantaneous power", # C=71: Active amperage L3 # D=7: Instantaneous value # E=0: Total "1-0:71.7.0*255": "L3 active instantaneous 
amperage", # C=72: Active voltage L3 # D=7: Instantaneous value # E=0: Total "1-0:72.7.0*255": "L3 active instantaneous voltage", # C=76: Active power L3 # D=7: Instantaneous value # E=0: Total "1-0:76.7.0*255": "L3 active instantaneous power", # C=81: Angles # D=7: Instantaneous value # E=4: U(L1) x I(L1) # E=15: U(L2) x I(L2) # E=26: U(L3) x I(L3) "1-0:81.7.4*255": "U(L1)/I(L1) phase angle", "1-0:81.7.15*255": "U(L2)/I(L2) phase angle", "1-0:81.7.26*255": "U(L3)/I(L3) phase angle", # C=96: Electricity-related service entries "1-0:96.1.0*255": "Metering point ID 1", "1-0:96.5.0*255": "Internal operating status", } _OBIS_BLACKLIST = { # C=96: Electricity-related service entries "1-0:96.50.1*1", # Manufacturer specific "1-0:96.90.2*1", # Manufacturer specific "1-0:96.90.2*2", # Manufacturer specific # A=129: Manufacturer specific "129-129:199.130.3*255", # Iskraemeco: Manufacturer "129-129:199.130.5*255", # Iskraemeco: Public Key } def __init__(self, hass, config, async_add_entities) -> None: """Initialize an EDL21 object.""" self._registered_obis: set[tuple[str, str]] = set() self._hass = hass self._async_add_entities = async_add_entities self._name = config[CONF_NAME] self._proto = SmlProtocol(config[CONF_SERIAL_PORT]) self._proto.add_listener(self.event, ["SmlGetListResponse"]) async def connect(self): """Connect to an EDL21 reader.""" await self._proto.connect(self._hass.loop) def event(self, message_body) -> None: """Handle events from pysml.""" assert isinstance(message_body, SmlGetListResponse) electricity_id = None for telegram in message_body.get("valList", []): if telegram.get("objName") in ("1-0:0.0.9*255", "1-0:96.1.0*255"): electricity_id = telegram.get("value") break if electricity_id is None: return electricity_id = electricity_id.replace(" ", "") new_entities = [] for telegram in message_body.get("valList", []): if not (obis := telegram.get("objName")): continue if (electricity_id, obis) in self._registered_obis: async_dispatcher_send( self._hass, SIGNAL_EDL21_TELEGRAM, electricity_id, telegram ) else: if name := self._OBIS_NAMES.get(obis): if self._name: name = f"{self._name}: {name}" new_entities.append( EDL21Entity(electricity_id, obis, name, telegram) ) self._registered_obis.add((electricity_id, obis)) elif obis
not in self._OBIS_BLACKLIST: _LOGGER.warning( "Unhandled sensor %s detected. Please report at " 'https://github.com/home-assistant/core/issues?q=is%%3Aissue+label%%3A"integration%%3A+edl21"+', obis, ) self._OBIS_BLACKLIST.add(obis) if new_entities: self._hass.loop.create_task(self.add_entities(new_entities)) as
ync def add_entities(self, new_entities) -> None: """Migrate old unique IDs, then add entities to hass.""" registry = await async_get_registry(self._hass) for entity in new_entities: old_entity_id = registry.async_get_entity_id( "sensor", DOMAIN, entity.old_unique_id ) if old_entity_id is not None: _LOGGER.debug( "Migrating unique_id from [%s] to [%s]", entity.old_unique_id, entity.unique_id, )
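
The _OBIS_NAMES table above keys everything off the OBIS notation A-B:C.D.E*F documented in the inline comments. A small standalone parser for that notation, for illustration only; the field names follow the usual OBIS reading of the A..F groups, and the integration itself just does dictionary lookups:

import re

# A-B:C.D.E*F, e.g. "1-0:1.8.0*255": medium 1 (electricity), channel 0,
# measurand 1 (active power +), measurement 8 (time integral), tariff 0,
# and 255 meaning "no specific billing period".
OBIS_RE = re.compile(r"^(\d+)-(\d+):(\d+)\.(\d+)\.(\d+)\*(\d+)$")

def parse_obis(code):
    """Split an OBIS code string into its six numeric groups."""
    m = OBIS_RE.match(code)
    if not m:
        raise ValueError("not an OBIS code: %r" % code)
    a, b, c, d, e, f = (int(g) for g in m.groups())
    return {"medium": a, "channel": b, "measurand": c,
            "measurement": d, "tariff": e, "billing": f}

assert parse_obis("1-0:1.8.0*255")["measurand"] == 1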
sahlinet/fastapp
fastapp/migrations/0011_thread_updated.py
Python
mit
451
0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('fastapp', '0010_auto_20150910_2010'),
    ]

    operations = [
        migrations.AddField(
            model_name='thread',
            name='updated',
            field=models.DateTimeField(auto_now=True, null=True),
            preserve_default=True,
        ),
    ]
renanalencar/congrefor
congrefor/wsgi.py
Python
mit
296
0.013514
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "congrefor.settings")

from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

#from django.core.wsgi import get_wsgi_application
#from dj_static import Cling

#application = Cling(get_wsgi_application())
pasko-evg/Python-2014
Lecture03/lecture_03.py
Python
unlicense
321
0
# coding=utf-8
# Lecture http://uneex.ru/LecturesCMC/PythonIntro2014/03_DataTypes
import decimal
import random

# print decimal.Decimal(1.1) + decimal.Decimal(1.1)
# print decimal.Decimal("1.1") + decimal.Decimal("1.1")
# print dir(random)

a = []
for j in range(0, 10):
    a.append(random.randrange(100))
print a