text stringlengths 4 1.02M | meta dict |
|---|---|
from header_common import *
from header_operations import *
| {
"content_hash": "4f805f60b857c69d9e702f915f845669",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 31,
"avg_line_length": 30,
"alnum_prop": 0.8,
"repo_name": "qt911025/pw_module_system",
"id": "ec0634445ffa1e5372dee1d0937d780c23ca9ada",
"size": "276",
"binary": false,
"copies": "6",
"ref": "refs/heads/pw",
"path": "header_tableau_materials.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import collections
import logging
import re
from king_phisher import ipaddress
from king_phisher import its
from king_phisher.constants import SPFResult
import dns.exception
import dns.name
import dns.query
import dns.rdtypes.ANY.TXT
import dns.resolver
import smoke_zephyr.utilities
MACRO_REGEX = re.compile(r'%\{([slodipvh])(\d*)([r]?)(.?)\}')
"""A regular expression which matches SPF record macros."""
MAX_QUERIES = 10
"""
The maximum number of DNS queries allowed to take place during evaluation as
defined within section 4.6.4 of :rfc:`7208`.
"""
MAX_QUERIES_VOID = float('inf')
"""
The maximum number of DNS queries allowed to either return with rcode 0 and no
answers or rcode 3 (Name Error) as defined within section 4.6.4 of :rfc:`7208`.
"""
DEFAULT_DNS_TIMEOUT = 10
"""
The default number of seconds to wait for a query response from the DNS server.
"""
QUALIFIERS = {
'+': SPFResult.PASS,
'-': SPFResult.FAIL,
'~': SPFResult.SOFT_FAIL,
'?': SPFResult.NEUTRAL
}
"""A dict object keyed with the qualifier symbols to their readable values."""
SPFMatch = collections.namedtuple('SPFMatch', ('record', 'directive'))
"""A simple container to associate a matched directive with it's record."""
class SPFDirective(object):
    """
    Represents a single directive (qualifier, mechanism and optional rvalue)
    contained within a sender policy framework record.
    """
    __slots__ = ('mechanism', 'qualifier', 'rvalue')

    def __init__(self, mechanism, qualifier, rvalue=None):
        """
        :param str mechanism: The SPF mechanism that this directive uses.
        :param str qualifier: The qualifier value of the directive in its single character format.
        :param str rvalue: The optional rvalue for directives which use them.
        """
        if qualifier not in QUALIFIERS:
            raise ValueError('invalid qualifier: ' + qualifier)
        self.mechanism = mechanism
        self.qualifier = qualifier
        self.rvalue = rvalue

    def __repr__(self):
        return "<{0} '{1}' >".format(self.__class__.__name__, str(self))

    def __str__(self):
        # Rebuild the textual directive; '+' is the implicit default
        # qualifier so it is omitted from the string form.
        parts = []
        if self.qualifier != '+':
            parts.append(self.qualifier)
        parts.append(self.mechanism)
        if self.rvalue:
            parts.append(':' + self.rvalue)
        return ''.join(parts)

    @classmethod
    def from_string(cls, directive):
        """
        Parse an SPF directive from a string and return its class
        representation.

        :param str directive: The SPF directive to parse.
        """
        if ':' in directive:
            mechanism, rvalue = directive.split(':', 1)
        else:
            mechanism, rvalue = directive, None
        mechanism = mechanism.lower()
        # An explicit qualifier prefix overrides the implicit '+' default.
        qualifier = '+'
        if mechanism[0] in QUALIFIERS:
            qualifier, mechanism = mechanism[0], mechanism[1:]
        return cls(mechanism, qualifier, rvalue)
class SPFRecord(object):
    """
    The parsed representation of a Sender Policy Framework record along with
    all of the directives that it contains.
    """
    __slots__ = ('domain', 'directives')

    def __init__(self, directives, domain=None):
        """
        :param list directives: A list of :py:class:`.SPFDirective` instances.
        :param str domain: The domain with which this record is associated with.
        """
        self.directives = directives
        self.domain = domain

    def __repr__(self):
        return "<{0} '{1}' >".format(self.__class__.__name__, str(self))

    def __str__(self):
        # Render the record back into its "v=spf1 ..." textual form.
        return 'v=spf1 ' + ' '.join(map(str, self.directives))
class SPFError(Exception):
    """Base exception for errors raised by this module."""
    def __init__(self, message):
        """
        :param str message: A human readable description of the error.
        """
        # Forward to Exception so str(err) and err.args carry the message;
        # the original implementation skipped this, leaving str(err) empty.
        super(SPFError, self).__init__(message)
        self.message = message

    def __repr__(self):
        return "<{0} message='{1}' >".format(self.__class__.__name__, self.message)
class SPFPermError(SPFError):
    """
    Raised when the domain's published records can not be correctly
    interpreted. Described in section 2.6.7 of :rfc:`7208`.
    """
    pass
class SPFParseError(SPFPermError):
    """
    Raised when the domain's published records can not be correctly
    parsed.
    """
    pass
class SPFTempError(SPFError):
    """
    Raised when the verification process hits a transient (generally DNS)
    error while performing the check. Described in section 2.6.6 of
    :rfc:`7208`.
    """
    pass
class SPFTimeOutError(SPFTempError):
    """
    Raised when a DNS query times out. This typically happens when the
    client is unable to communicate with the DNS server at all.
    """
    pass
@smoke_zephyr.utilities.Cache('3m')
def check_host(ip, domain, sender=None, timeout=DEFAULT_DNS_TIMEOUT):
    """
    Analyze the Sender Policy Framework of a domain by creating a
    :py:class:`.SenderPolicyFramework` instance and returning the result of
    :py:meth:`.SenderPolicyFramework.check_host`.

    :param ip: The IP address of the host sending the message.
    :type ip: str, :py:class:`ipaddress.IPv4Address`, :py:class:`ipaddress.IPv6Address`
    :param str domain: The domain to check the SPF policy of.
    :param str sender: The "MAIL FROM" identity of the message being sent.
    :param int timeout: The timeout for DNS queries.
    :return: The result of the SPF policy if one can be found or None.
    :rtype: None, str
    """
    # Results are memoized by the Cache decorator for three minutes.
    framework = SenderPolicyFramework(ip, domain, sender=sender, timeout=timeout)
    return framework.check_host()
def validate_record(ip, domain, sender=None):
    """
    Check if an SPF record exists for the domain and can be parsed by this
    module.

    :return: Whether the record exists and is parsable or not.
    :rtype: bool
    """
    try:
        policy = check_host(ip, domain, sender)
    except SPFPermError:
        # Unparsable or otherwise permanently broken records are not valid.
        return False
    # check_host returns None when no SPF record exists for the domain.
    return isinstance(policy, str)
# http://tools.ietf.org/html/rfc7208
class SenderPolicyFramework(object):
    """
    Analyze the Sender Policy Framework configuration for a domain to determine
    if an IP address is authorized to send messages on its behalf. The exp
    modifier defined in section 6.2 of the RFC is not supported.
    """
    def __init__(self, ip, domain, sender=None, timeout=DEFAULT_DNS_TIMEOUT):
        """
        :param ip: The IP address of the host sending the message.
        :type ip: str, :py:class:`ipaddress.IPv4Address`, :py:class:`ipaddress.IPv6Address`
        :param str domain: The domain to check the SPF policy of.
        :param str sender: The "MAIL FROM" identity of the message being sent.
        :param int timeout: The timeout for DNS queries.
        """
        if isinstance(ip, str):
            ip = ipaddress.ip_address(ip)
        self.ip_address = ip
        self.domain = domain
        self.helo_domain = 'unknown'
        # default to postmaster@<domain> when no mailbox part is supplied
        sender = (sender or 'postmaster')
        if not '@' in sender:
            sender = sender + '@' + self.domain
        self.sender = sender
        self.records = collections.OrderedDict()
        """
        A :py:class:`collections.OrderedDict` of all the SPF records that were
        resolved. This would be any records resolved due to an "include"
        directive in addition to the top level domain.
        """
        self.matches = []
        """
        A list of :py:class:`.SPFMatch` instances showing the path traversed to
        identify a matching directive. Multiple entries in this list are
        present when include directives are used and a match is found within
        the body of one. The list is ordered from the top level domain to the
        matching record.
        """
        # dns lookup limit per https://tools.ietf.org/html/rfc7208#section-4.6.4
        self.query_limit = MAX_QUERIES
        self.query_limit_void = MAX_QUERIES_VOID
        self.policy = None
        self.timeout = timeout
        """
        The human readable policy result, one of the
        :py:class:`.SPFResult` constants`.
        """
        # NOTE(review): the bare-string docstring above follows self.timeout
        # but describes self.policy -- confirm and relocate if desired.
        self._policy_checked = False
        self.logger = logging.getLogger('KingPhisher.SPF.SenderPolicyFramework')

    def __repr__(self):
        return "<{0} ip='{1}' domain='{2}' sender='{3}' >".format(self.__class__.__name__, self.ip_address, self.domain, self.sender)

    def __str__(self):
        # the policy result as a string, or '' when no policy was found
        return self.check_host() or ''

    def check_host(self):
        """
        Check the SPF policy described by the object. The string representing the
        matched policy is returned if an SPF policy exists, otherwise None will
        be returned if no policy is defined.

        :return: The result of the SPF policy described by the object.
        :rtype: None, str
        """
        # the result is cached so the evaluation only happens once per instance
        if not self._policy_checked:
            self.policy = self._check_host(self.ip_address, self.domain, self.sender)
            self._policy_checked = True
        return self.policy

    def _check_host(self, ip, domain, sender, top_level=True):
        # Resolve and evaluate the SPF record for *domain*, recursing for
        # include and redirect directives (top_level=False on recursion).
        try:
            answers, _ = self._dns_query(domain, 'TXT')
        except SPFTimeOutError:
            raise
        except SPFTempError:
            # transient failures on recursive lookups propagate; at the top
            # level they are treated as "no record found"
            if not top_level:
                raise
            answers = []
        # keep only TXT rdata whose first string chunk marks an SPF v1 record
        answers = list(part for part in answers if isinstance(part, dns.rdtypes.ANY.TXT.TXT))
        answers = [part for part in answers if part.strings[0].decode('utf-8').startswith('v=spf1 ')]
        if len(answers) == 0:
            return
        # TXT records may be split into multiple strings; join them back up
        record = ''.join([part.decode('utf-8') for part in answers[0].strings])
        if not record.startswith('v=spf1 '):
            raise SPFParseError('invalid record header')
        raw_directives = record[7:].split(' ')
        raw_directives = tuple(directive for directive in raw_directives if len(directive))
        self.logger.debug("parsing {0:,} directives for domain: {1}".format(len(raw_directives), domain))
        if not len(raw_directives):
            raise SPFParseError('no directives were found')
        directives = []
        for directive in raw_directives:
            if directive.startswith('redirect='):
                if len([r for r in raw_directives if r.endswith('all')]):
                    # ignore redirects when all is present per https://tools.ietf.org/html/rfc7208#section-6.1
                    self.logger.warning("ignoring redirect modifier to: {0} due to an existing 'all' mechanism".format(domain))
                    continue
                directive = directive[9:]
                domain = self.expand_macros(directive, self.ip_address, domain, self.sender)
                self.logger.debug("following redirect modifier to: {0}".format(domain))
                if top_level and len(directives) == 0:
                    # treat a single redirect as a new top level
                    return self._check_host(ip, domain, sender, top_level=True)
                else:
                    result = self._check_host(ip, domain, sender, top_level=False)
                    self.logger.debug("top check found matching spf record from redirect to: {0}".format(domain))
                    return result
            directive = SPFDirective.from_string(directive)
            if directive.mechanism not in ('a', 'all', 'exists', 'include', 'ip4', 'ip6', 'mx', 'ptr'):
                raise SPFParseError("unknown mechanism type: '{0}'".format(directive.mechanism))
            directives.append(directive)
        record = SPFRecord(directives, domain=domain)
        self.records[domain] = record
        for directive in directives:
            # 'all' in an included record does not terminate evaluation,
            # see https://tools.ietf.org/html/rfc7208#section-5.1
            if not top_level and directive.mechanism == 'all':
                break
            if self._evaluate_mechanism(ip, domain, sender, directive.mechanism, directive.rvalue):
                # record the match path from top level down to this directive
                self.matches.insert(0, SPFMatch(record, directive))
                self.logger.debug("{0} check found matching spf directive: '{1}'".format(('top' if top_level else 'recursive'), directive))
                return QUALIFIERS[directive.qualifier]
        self.logger.debug('no directives matched, returning default policy of neutral')
        # default result per https://tools.ietf.org/html/rfc7208#section-4.7
        return SPFResult.NEUTRAL

    def _dns_query(self, qname, qtype):
        # querys all system dns servers
        # returns (answers, additional)
        # Enforces the RFC 7208 section 4.6.4 query limits; raises
        # SPFPermError once either limit is exhausted.
        self.query_limit -= 1
        if self.query_limit < 0:
            raise SPFPermError('DNS query limit reached')
        nameserver = dns.resolver.get_default_resolver().nameservers[0]
        # NOTE(review): dns.message and dns.rcode are not imported directly at
        # the top of this file; presumably they are pulled in transitively by
        # the dns.query / dns.resolver imports -- confirm with dnspython.
        query = dns.message.make_query(qname, qtype)
        # Only query first DNS server https://www.rfc-editor.org/rfc/rfc7208.txt (page 19 last paragraph)
        self.logger.debug("resolving {0:<3} record for {1} using nameserver {2} (remaining queries: {3})".format(qtype, qname, nameserver, self.query_limit))
        try:
            response = dns.query.udp(query, nameserver, self.timeout)
        except dns.exception.Timeout:
            self.logger.warning("dns timeout reached, unable to query: {0} (type: {1}, nameserver: {2})".format(qname, qtype, nameserver))
            raise SPFTimeOutError("DNS timeout reached, unable to query: {0} (type: {1}, nameserver: {2})".format(qname, qtype, nameserver))
        except dns.exception.DNSException:
            self.logger.warning("dns resolution error for: {0} (type: {1}, nameserver: {2})".format(qname, qtype, nameserver))
            raise SPFTempError("DNS resolution error for: {0} (type: {1}, nameserver: {2})".format(qname, qtype, nameserver))
        rcode = response.rcode()
        # check for error codes per https://tools.ietf.org/html/rfc7208#section-5
        if rcode not in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN):
            self.logger.warning("dns resolution error for: {0} (type: {1} rcode: {2})".format(qname, qtype, rcode))
            raise SPFTempError("DNS resolution error for: {0} (type: {1} rcode: {2})".format(qname, qtype, rcode))
        answers = []
        # a "void lookup" (no answers or NXDOMAIN) counts against its own limit
        if len(response.answer) == 0 or rcode == dns.rcode.NXDOMAIN:
            self.logger.debug("resolving {0:<3} record for {1} using nameserver {2} resulted in a void lookup".format(qtype, qname, nameserver))
            self.query_limit_void -= 1
            if self.query_limit_void < 0:
                raise SPFPermError('DNS query void lookup limit reached')
        for answer in response.answer:
            answers.extend(answer.items)
        return answers, response.additional

    def _evaluate_mechanism(self, ip, domain, sender, mechanism, rvalue):
        # Evaluate a single SPF mechanism and return True on a match.
        # The rvalue defaults to the current domain and has macros expanded.
        if rvalue is None:
            rvalue = domain
        else:
            rvalue = self.expand_macros(rvalue, ip, domain, sender)
        if mechanism == 'a':
            if self._hostname_matches_ip(ip, rvalue):
                return True
        elif mechanism == 'all':
            return True
        elif mechanism == 'exists':
            # matches when the name resolves to any A record at all
            answers, _ = self._dns_query(rvalue, 'A')
            if len(answers):
                return True
        elif mechanism == 'include':
            # pass results in match per https://tools.ietf.org/html/rfc7208#section-5.2
            return self._check_host(ip, rvalue, sender, top_level=False) == SPFResult.PASS
        elif mechanism == 'ip4':
            try:
                if its.py_v2 and isinstance(rvalue, str):
                    rvalue = rvalue.decode('utf-8')
                ip_network = ipaddress.IPv4Network(rvalue, strict=False)
            except ipaddress.AddressValueError:
                raise SPFParseError('invalid IPv4 network: ' + rvalue)
            if ip in ip_network:
                return True
        elif mechanism == 'ip6':
            try:
                if its.py_v2 and isinstance(rvalue, str):
                    rvalue = rvalue.decode('utf-8')
                ip_network = ipaddress.IPv6Network(rvalue, strict=False)
            except ipaddress.AddressValueError:
                raise SPFParseError('invalid IPv6 network: ' + rvalue)
            if ip in ip_network:
                return True
        elif mechanism == 'mx':
            answers, additional = self._dns_query(rvalue, 'MX')
            for answer in answers:
                hostname = None
                if answer.rdtype == dns.rdatatype.MX:
                    hostname = answer.exchange
                elif answer.rdtype == dns.rdatatype.CNAME:
                    hostname = answer.target
                else:
                    raise ValueError('answer is not an MX or CNAME record')
                hostname = str(hostname).rstrip('.')
                # prefer address data from the additional section to avoid
                # an extra DNS query when the server supplied it
                found, matches = self._hostname_matches_additional(ip, hostname, additional)
                if matches:
                    return True
                if not found and self._hostname_matches_ip(ip, hostname):
                    return True
        elif mechanism == 'ptr':
            # build the reverse-DNS name (in-addr.arpa / ip6.arpa) for ip
            if isinstance(ip, ipaddress.IPv4Address):
                ip = str(ip)
                suffix = 'in-addr'
            else:
                ip = '.'.join(ip.exploded.replace(':', ''))
                suffix = 'ip6'
            ptr_domain = (rvalue or domain)
            ip = ip.split('.')
            ip.reverse()
            ip = '.'.join(ip)
            answers, _ = self._dns_query(ip + '.' + suffix + '.arpa', 'PTR')
            for ptr_record in answers:
                ptr_record = str(ptr_record.target).rstrip('.')
                # match the target domain itself or any of its subdomains
                if ptr_domain == ptr_record or ptr_domain.endswith('.' + ptr_record):
                    return True
        else:
            raise SPFPermError("unsupported mechanism type: '{0}'".format(mechanism))
        return False

    def _hostname_matches_additional(self, ip, name, additional):
        """
        Search for *name* in *additional* and if it is found, check that it
        includes *ip*.

        :param ip: The IP address to search for.
        :type ip: :py:class:`ipaddress.IPv4Address`, :py:class:`ipaddress.IPv6Address`
        :param str name: The name to search for.
        :param tuple additional: The additional data returned from a dns query to search in.
        :return: The first value is whether or not *name* was found in *additional*, the second is if *ip* was also found.
        :rtype: tuple
        """
        # rdtype 1 is A, rdtype 28 is AAAA
        rdtype = (1 if isinstance(ip, ipaddress.IPv4Address) else 28)
        ip = str(ip)
        additional = (entry for entry in additional if entry.rdtype == rdtype)
        # str(entry.name) carries a trailing dot which [:-1] strips
        entry = next((entry for entry in additional if str(entry.name)[:-1] == name), None)
        if entry is None:
            return False, None
        item = next((item for item in entry.items if item.address == ip), None)
        return True, item is not None

    def _hostname_matches_ip(self, ip, name):
        # Resolve *name* (A or AAAA as appropriate) and check whether any
        # returned address equals *ip*.
        qtype = ('A' if isinstance(ip, ipaddress.IPv4Address) else 'AAAA')
        answers, _ = self._dns_query(name, qtype)
        return str(ip) in tuple(a.address for a in answers)

    def expand_macros(self, value, ip, domain, sender):
        """
        Expand a string based on the macros it contains as specified by section
        7 of :rfc:`7208`.

        :param str value: The string containing macros to expand.
        :param ip: The IP address to use when expanding macros.
        :type ip: str, :py:class:`ipaddress.IPv4Address`, :py:class:`ipaddress.IPv6Address`
        :param str domain: The domain name to use when expanding macros.
        :param str sender: The email address of the sender to use when expanding macros.
        :return: The string with the interpreted macros replaced within it.
        :rtype: str
        """
        if isinstance(ip, str):
            ip = ipaddress.ip_address(ip)
        # macro letters to their replacement values; 'p' (validated domain)
        # is intentionally unsupported
        macro_table = {
            's': sender,
            'l': sender.split('@', 1)[0],
            'o': sender.split('@', 1)[1],
            'd': domain,
            'i': (str(ip) if isinstance(ip, ipaddress.IPv4Address) else '.'.join(ip.exploded.replace(':', ''))),
            #'p'
            'v': ('in-addr' if isinstance(ip, ipaddress.IPv4Address) else 'ip6'),
            'h': self.helo_domain
        }
        # literal escapes defined by the RFC: %% -> %, %- -> %20, %_ -> space
        for escape in (('%%', '%'), ('%-', '%20'), ('%_', ' ')):
            value = value.replace(*escape)
        end = 0
        result = ''
        for match in MACRO_REGEX.finditer(value):
            result += value[end:match.start()]
            macro_type = match.group(1)
            # no digit means keep all parts (128 exceeds any realistic count)
            macro_digit = int(match.group(2) or 128)
            macro_reverse = (match.group(3) == 'r')
            macro_delimiter = (match.group(4) or '.')
            if not macro_type in macro_table:
                raise SPFPermError("unsupported macro type: '{0}'".format(macro_type))
            macro_value = macro_table[macro_type]
            macro_value = macro_value.split(macro_delimiter)
            if macro_reverse:
                macro_value.reverse()
            # keep only the right-most macro_digit parts, joined with dots
            macro_value = macro_value[-macro_digit:]
            macro_value = '.'.join(macro_value)
            result += macro_value
            end = match.end()
        result += value[end:]
        return result

    @property
    def match(self):
        # The deepest (most specific) match found, or None before evaluation
        # or when no directive matched.
        if not self.matches:
            return None
        return self.matches[-1]
| {
"content_hash": "3f8adc8aab1d9a27efb16fb2a9d11e1a",
"timestamp": "",
"source": "github",
"line_count": 507,
"max_line_length": 151,
"avg_line_length": 36.009861932938854,
"alnum_prop": 0.6950210878019389,
"repo_name": "guitarmanj/king-phisher",
"id": "6f616ad75de9f20d8f5a0611b230535b5f33e350",
"size": "19829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "king_phisher/spf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9631"
},
{
"name": "HTML",
"bytes": "552"
},
{
"name": "JavaScript",
"bytes": "1328"
},
{
"name": "Jupyter Notebook",
"bytes": "10497"
},
{
"name": "Mako",
"bytes": "574"
},
{
"name": "Python",
"bytes": "707829"
},
{
"name": "Ruby",
"bytes": "6757"
}
],
"symlink_target": ""
} |
from trove.common import cfg
from trove.common.strategies.cluster import base
from trove.guestagent import api as guest_api
from trove.openstack.common import log as logging
# Module-level logger and the global trove configuration object.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VerticaGuestAgentStrategy(base.BaseGuestAgentStrategy):
    """Cluster guest agent strategy for Vertica datastores."""

    @property
    def guest_client_class(self):
        # The RPC client class used to communicate with Vertica guest agents.
        return VerticaGuestAgentAPI
class VerticaGuestAgentAPI(guest_api.API):
    """Synchronous RPC calls to the Vertica guest agent for cluster setup."""

    def get_public_keys(self, user):
        """Return the SSH public keys for *user* from the guest."""
        # Pass the argument lazily instead of eagerly formatting with '%',
        # so the string is only built when debug logging is enabled.
        LOG.debug("Getting public keys for user: %s.", user)
        return self._call("get_public_keys", guest_api.AGENT_HIGH_TIMEOUT,
                          self.version_cap, user=user)

    def authorize_public_keys(self, user, public_keys):
        """Install *public_keys* as authorized keys for *user* on the guest."""
        LOG.debug("Authorizing public keys for user: %s.", user)
        return self._call("authorize_public_keys",
                          guest_api.AGENT_HIGH_TIMEOUT, self.version_cap,
                          user=user, public_keys=public_keys)

    def install_cluster(self, members):
        """Trigger Vertica cluster installation across *members*."""
        # Cluster installation is long-running, so it uses the (configurable)
        # cluster usage timeout rather than the standard agent timeout.
        LOG.debug("Installing Vertica cluster on members: %s.", members)
        return self._call("install_cluster", CONF.cluster_usage_timeout,
                          self.version_cap, members=members)

    def cluster_complete(self):
        """Notify the guest that cluster installation has finished."""
        LOG.debug("Notifying cluster install completion.")
        return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT,
                          self.version_cap)
| {
"content_hash": "6b3bca8b110a424f68c490d8645d8430",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 75,
"avg_line_length": 36.43589743589744,
"alnum_prop": 0.6537649542575651,
"repo_name": "CMSS-BCRDB/RDSV1.0",
"id": "14b35df982f6af299d24ce29f74caa429d881927",
"size": "2013",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "trove/common/strategies/cluster/experimental/vertica/guestagent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60526"
},
{
"name": "Python",
"bytes": "2844169"
},
{
"name": "Shell",
"bytes": "4771"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
} |
from .jobscheduler import JobExistError
from .jobscheduler import JobScheduler
from .jobscheduler import NextFireTimeError
from .jobscheduler import get_next_fire_time
# Public API of the jobscheduler package.
__all__ = [
    'JobExistError',
    'JobScheduler',
    'NextFireTimeError',
    'get_next_fire_time',
]
| {
"content_hash": "3e35852fef4be3d0019a9934c3ab2f65",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 44,
"avg_line_length": 25,
"alnum_prop": 0.7454545454545455,
"repo_name": "baishancloud/pykit",
"id": "f87ba9e5003b2aa4c23aa427b8503627fe222af8",
"size": "275",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jobscheduler/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "666985"
},
{
"name": "Shell",
"bytes": "32314"
}
],
"symlink_target": ""
} |
import json
import sys
def partialDOIMatch(d1, d2):
    """
    Assumes d1 is a "full DOI", like '10.1145/1166253.1166292', and d2
    is a partial DOI, like '1166292' or '1166253.1166292'. Returns true
    if they match and false otherwise.

    Note that in the previous case, a partial like '292' would be a
    negative match. The partial must contain full subsections.
    """
    if '.' in d2:
        # A dotted partial must equal everything after the registrant prefix.
        return d1.split('/')[-1] == d2
    # Otherwise compare only against the final dot-separated section.
    return d1.split('.')[-1] == d2
if __name__ == "__main__":
    # NOTE(review): Python 2 only -- uses print statements and dict.iteritems.
    if len(sys.argv) != 3:
        print "Usage: python chi-to-json.py <papers.tsv> <citation_counts.json>"
        exit (1);
    input_file = open(sys.argv[1])
    papers = {}
    # First pass: parse one paper per tab-separated line of the TSV file.
    for line in input_file.readlines():
        # Drop whitespace at the end, and split on tabs.
        vals = line.rstrip().split('\t')
        # Build a new dictionary with the values for the paper.
        paper = {}
        paper['conference'] = vals[0]
        paper['year'] = vals[1]
        paper['title'] = vals[2]
        paper['abstract'] = vals[3]
        paper['authors'] = vals[4].split('~')
        # paper['doi'] = vals[5]
        paper['references'] = vals[6:]
        paper['citations'] = []
        paper['citation_count'] = 0 # All papers have a 0 CC by default
        # Index papers by doi to set up for building citations.
        papers[vals[5]] = paper
    input_file.close()
    # Once we have a dictionary with all papers, go through them again
    # building the citations
    for doi, paper in papers.iteritems():
        for ref in paper['references']:
            try:
                papers[ref]['citations'].append(doi)
            except KeyError:
                # Skip this one, there's no paper with that doi in our dataset.
                #print "Not found " + ref
                pass
    # For debugging: number of references and citations in the whole
    # dataset. If the dataset is self-contained, these numbers should
    # be the same.
    #print len(papers)
    #print reduce(lambda x,y: x+y, [len(p['references']) for p in papers.values()])
    #print reduce(lambda x,y: x+y, [len(p['citations']) for p in papers.values()])
    ccs = None
    with open(sys.argv[2]) as citation_counts_json:
        ccs = json.loads(citation_counts_json.read())
    # The following process adds citation count information to every
    # paper that can be found on the specified file. This is a slow
    # process because we don't use the hash function in dict, but it
    # shouldn't have to run frequently.
    for d2 in ccs.keys():
        matches = [d1 for d1 in papers.keys() if partialDOIMatch(d1, d2)]
        if matches:
            papers[matches[0]]['citation_count'] = ccs[d2]['citation_count']
    # Write out a JSON object with everything in it.
    print json.dumps({'papers': papers})
| {
"content_hash": "5ba2942b4d0b1b7e2fbcc2960bab8607",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 83,
"avg_line_length": 36.1375,
"alnum_prop": 0.5876859218263577,
"repo_name": "drasnop/PaperQuest",
"id": "479d3dc75560e8458a404ebfbc1628deee545fce",
"size": "3567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/chi-to-json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14803"
},
{
"name": "HTML",
"bytes": "14335"
},
{
"name": "JavaScript",
"bytes": "118070"
},
{
"name": "Python",
"bytes": "13156"
}
],
"symlink_target": ""
} |
import requests
import json
import urllib
def getBuild(logURL):
    """
    Fetch the Jenkins job summary JSON at *logURL* and return the number of
    the last successful build, or None when the request or parse fails.
    """
    try:
        data = requests.get(logURL).json()
    # Narrowed from a bare except (which also swallowed KeyboardInterrupt):
    # RequestException covers network failures, ValueError a non-JSON body.
    except (requests.exceptions.RequestException, ValueError) as err:
        print('Error calling %s: %s' % (logURL, err))
        # Explicitly signal failure; the old code fell through returning None.
        return None
    return data['lastSuccessfulBuild']['number']
def getCommit(commitURL):
    """
    Fetch the Jenkins build git JSON at *commitURL* and return the SHA1 of
    the revision built from origin/master, or None when the request or
    parse fails.
    """
    try:
        data = requests.get(commitURL).json()
    # Narrowed from a bare except (which also swallowed KeyboardInterrupt):
    # RequestException covers network failures, ValueError a non-JSON body.
    except (requests.exceptions.RequestException, ValueError) as err:
        print('Error calling %s: %s' % (commitURL, err))
        # Explicitly signal failure; the old code fell through returning None.
        return None
    return data['buildsByBranchName']['refs/remotes/origin/master']['revision']['SHA1']
# Jenkins endpoints: the job summary, then the git details of the latest build.
logURL = "http://spoc-jenkins01.spoc.linux/job/Kolla_CI/api/json?pretty=true"
# NOTE(review): if getBuild fails it returns None, making latestBuild the
# literal string 'None' and producing a bogus URL below -- confirm intent.
latestBuild = str(getBuild(logURL))
commitURL = "http://spoc-jenkins01.spoc.linux/job/Kolla_CI/"+latestBuild+"/git/api/json?pretty=true"
commitID = getCommit(commitURL)
print commitID
| {
"content_hash": "403cbb00d20f90eb335a6e8e92b0dfba",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 100,
"avg_line_length": 25.43243243243243,
"alnum_prop": 0.6780021253985122,
"repo_name": "lukepatrick/os-helm-aio-installer",
"id": "26fe10efe3a3a741b1ea1d1e2eac05baa06e0297",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getCommitInfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "964"
},
{
"name": "Shell",
"bytes": "25944"
}
],
"symlink_target": ""
} |
# Path to the master index document.
INDEX = "config/index.rst"
# presumably the on-disk location the rendered index is stored under -- TODO confirm
INDEX_PATH = '.index'
# Path to directives the user wants to include
DIRECTIVES = "config/directives.rst"
# Directory containing rst source files
SRC = "src"
| {
"content_hash": "acf65d82c43e5880a9738907e5710478",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 46,
"avg_line_length": 23,
"alnum_prop": 0.7336956521739131,
"repo_name": "kidaa/Encyclopedia",
"id": "8aad33422444d54ce53484cd76d77fda043cc808",
"size": "184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8233"
},
{
"name": "HTML",
"bytes": "2935"
},
{
"name": "JavaScript",
"bytes": "71351"
},
{
"name": "Makefile",
"bytes": "876"
},
{
"name": "Python",
"bytes": "13458"
},
{
"name": "Ruby",
"bytes": "1297"
}
],
"symlink_target": ""
} |
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.utils.annotations import override, PublicAPI
# Sentinel string signalling an asynchronous env reset; presumably consumed by
# the remote env implementations -- confirm against remote_vector_env usage.
ASYNC_RESET_RETURN = "async_reset_return"
@PublicAPI
class BaseEnv:
    """The lowest-level env interface used by RLlib for sampling.

    BaseEnv models multiple agents executing asynchronously in multiple
    environments. A call to poll() returns observations from ready agents
    keyed by their environment and agent ids, and actions for those agents
    can be sent back via send_actions().

    All other env types can be adapted to BaseEnv. RLlib handles these
    conversions internally in RolloutWorker, for example:

        gym.Env => rllib.VectorEnv => rllib.BaseEnv
        rllib.MultiAgentEnv => rllib.BaseEnv
        rllib.ExternalEnv => rllib.BaseEnv

    Attributes:
        action_space (gym.Space): Action space. This must be defined for
            single-agent envs. Multi-agent envs can set this to None.
        observation_space (gym.Space): Observation space. This must be defined
            for single-agent envs. Multi-agent envs can set this to None.

    Examples:
        >>> env = MyBaseEnv()
        >>> obs, rewards, dones, infos, off_policy_actions = env.poll()
        >>> print(obs)
        {
            "env_0": {
                "car_0": [2.4, 1.6],
                "car_1": [3.4, -3.2],
            },
            "env_1": {
                "car_0": [8.0, 4.1],
            },
            "env_2": {
                "car_0": [2.3, 3.3],
                "car_1": [1.4, -0.2],
                "car_3": [1.2, 0.1],
            },
        }
        >>> env.send_actions(
            actions={
                "env_0": {
                    "car_0": 0,
                    "car_1": 1,
                }, ...
            })
        >>> obs, rewards, dones, infos, off_policy_actions = env.poll()
        >>> print(obs)
        {
            "env_0": {
                "car_0": [4.1, 1.7],
                "car_1": [3.2, -4.2],
            }, ...
        }
        >>> print(dones)
        {
            "env_0": {
                "__all__": False,
                "car_0": False,
                "car_1": True,
            }, ...
        }
    """

    @staticmethod
    def to_base_env(env,
                    make_env=None,
                    num_envs=1,
                    remote_envs=False,
                    remote_env_batch_wait_ms=0):
        """Wraps any env type as needed to expose the async interface."""
        # imported here to avoid a circular import at module load time
        from ray.rllib.env.remote_vector_env import RemoteVectorEnv
        if remote_envs and num_envs == 1:
            raise ValueError(
                "Remote envs only make sense to use if num_envs > 1 "
                "(i.e. vectorization is enabled).")
        # dispatch on the concrete env type; anything already a BaseEnv
        # passes through unchanged
        if not isinstance(env, BaseEnv):
            if isinstance(env, MultiAgentEnv):
                if remote_envs:
                    env = RemoteVectorEnv(
                        make_env,
                        num_envs,
                        multiagent=True,
                        remote_env_batch_wait_ms=remote_env_batch_wait_ms)
                else:
                    env = _MultiAgentEnvToBaseEnv(
                        make_env=make_env,
                        existing_envs=[env],
                        num_envs=num_envs)
            elif isinstance(env, ExternalMultiAgentEnv):
                if num_envs != 1:
                    raise ValueError(
                        "ExternalMultiAgentEnv does not currently support "
                        "num_envs > 1.")
                env = _ExternalEnvToBaseEnv(env, multiagent=True)
            elif isinstance(env, ExternalEnv):
                if num_envs != 1:
                    raise ValueError(
                        "ExternalEnv does not currently support num_envs > 1.")
                env = _ExternalEnvToBaseEnv(env)
            elif isinstance(env, VectorEnv):
                env = _VectorEnvToBaseEnv(env)
            else:
                # plain (e.g. gym) env: vectorize first, then adapt
                if remote_envs:
                    env = RemoteVectorEnv(
                        make_env,
                        num_envs,
                        multiagent=False,
                        remote_env_batch_wait_ms=remote_env_batch_wait_ms)
                else:
                    env = VectorEnv.wrap(
                        make_env=make_env,
                        existing_envs=[env],
                        num_envs=num_envs,
                        action_space=env.action_space,
                        observation_space=env.observation_space)
                    env = _VectorEnvToBaseEnv(env)
        assert isinstance(env, BaseEnv), env
        return env

    @PublicAPI
    def poll(self):
        """Returns observations from ready agents.

        The returns are two-level dicts mapping from env_id to a dict of
        agent_id to values. The number of agents and envs can vary over time.

        Returns
        -------
            obs (dict): New observations for each ready agent.
            rewards (dict): Reward values for each ready agent. If the
                episode is just started, the value will be None.
            dones (dict): Done values for each ready agent. The special key
                "__all__" is used to indicate env termination.
            infos (dict): Info values for each ready agent.
            off_policy_actions (dict): Agents may take off-policy actions. When
                that happens, there will be an entry in this dict that contains
                the taken action. There is no need to send_actions() for agents
                that have already chosen off-policy actions.
        """
        raise NotImplementedError

    @PublicAPI
    def send_actions(self, action_dict):
        """Called to send actions back to running agents in this env.

        Actions should be sent for each ready agent that returned observations
        in the previous poll() call.

        Arguments:
            action_dict (dict): Actions values keyed by env_id and agent_id.
        """
        raise NotImplementedError

    @PublicAPI
    def try_reset(self, env_id):
        """Attempt to reset the env with the given id.

        If the environment does not support synchronous reset, None can be
        returned here.

        Returns:
            obs (dict|None): Resetted observation or None if not supported.
        """
        return None

    @PublicAPI
    def get_unwrapped(self):
        """Return a reference to the underlying gym envs, if any.

        Returns:
            envs (list): Underlying gym envs or [].
        """
        return []

    @PublicAPI
    def stop(self):
        """Releases all resources used."""
        # best-effort close of each underlying env that supports it
        for env in self.get_unwrapped():
            if hasattr(env, "close"):
                env.close()
# Fixed agent identifier when there is only the single agent in the env
_DUMMY_AGENT_ID = "agent0"
def _with_dummy_agent_id(env_id_to_values, dummy_id=_DUMMY_AGENT_ID):
return {k: {dummy_id: v} for (k, v) in env_id_to_values.items()}
class _ExternalEnvToBaseEnv(BaseEnv):
    """Internal adapter of ExternalEnv to BaseEnv."""
    def __init__(self, external_env, preprocessor=None, multiagent=False):
        # external_env: the ExternalEnv whose serving thread produces episode
        # data; preprocessor: optional observation preprocessor whose
        # observation_space replaces the env's raw space when given.
        self.external_env = external_env
        self.prep = preprocessor
        self.multiagent = multiagent
        self.action_space = external_env.action_space
        if preprocessor:
            self.observation_space = preprocessor.observation_space
        else:
            self.observation_space = external_env.observation_space
        external_env.start()
    @override(BaseEnv)
    def poll(self):
        # Block on the env's condition variable until at least one episode
        # has produced observations.
        with self.external_env._results_avail_condition:
            results = self._poll()
            while len(results[0]) == 0:
                self.external_env._results_avail_condition.wait()
                results = self._poll()
                # NOTE(review): isAlive() is the pre-3.9 alias of
                # threading.Thread.is_alive(); it was removed in Python 3.9 --
                # confirm the supported Python versions.
                if not self.external_env.isAlive():
                    raise Exception("Serving thread has stopped.")
        limit = self.external_env._max_concurrent_episodes
        assert len(results[0]) < limit, \
            ("Too many concurrent episodes, were some leaked? This "
             "ExternalEnv was created with max_concurrent={}".format(limit))
        return results
    @override(BaseEnv)
    def send_actions(self, action_dict):
        # Route actions into the per-episode queues consumed by the serving
        # thread; for single-agent envs, unwrap the dummy agent id first.
        if self.multiagent:
            for env_id, actions in action_dict.items():
                self.external_env._episodes[env_id].action_queue.put(actions)
        else:
            for env_id, action in action_dict.items():
                self.external_env._episodes[env_id].action_queue.put(
                    action[_DUMMY_AGENT_ID])
    def _poll(self):
        # Non-blocking drain of data from all live episodes; finished
        # episodes are removed from the env's registry here.
        all_obs, all_rewards, all_dones, all_infos = {}, {}, {}, {}
        off_policy_actions = {}
        for eid, episode in self.external_env._episodes.copy().items():
            data = episode.get_data()
            cur_done = episode.cur_done_dict[
                "__all__"] if self.multiagent else episode.cur_done
            if cur_done:
                del self.external_env._episodes[eid]
            if data:
                if self.prep:
                    all_obs[eid] = self.prep.transform(data["obs"])
                else:
                    all_obs[eid] = data["obs"]
                all_rewards[eid] = data["reward"]
                all_dones[eid] = data["done"]
                all_infos[eid] = data["info"]
                if "off_policy_action" in data:
                    off_policy_actions[eid] = data["off_policy_action"]
        if self.multiagent:
            # ensure a consistent set of keys
            # rely on all_obs having all possible keys for now
            for eid, eid_dict in all_obs.items():
                for agent_id in eid_dict.keys():
                    # fix() is invoked immediately inside the loop, so
                    # closing over the loop variable agent_id is safe here.
                    def fix(d, zero_val):
                        if agent_id not in d[eid]:
                            d[eid][agent_id] = zero_val
                    fix(all_rewards, 0.0)
                    fix(all_dones, False)
                    fix(all_infos, {})
            return (all_obs, all_rewards, all_dones, all_infos,
                    off_policy_actions)
        else:
            return _with_dummy_agent_id(all_obs), \
                _with_dummy_agent_id(all_rewards), \
                _with_dummy_agent_id(all_dones, "__all__"), \
                _with_dummy_agent_id(all_infos), \
                _with_dummy_agent_id(off_policy_actions)
class _VectorEnvToBaseEnv(BaseEnv):
    """Internal adapter of VectorEnv to BaseEnv.

    We assume the caller will always send the full vector of actions in each
    call to send_actions(), and that they call reset_at() on all completed
    environments before calling send_actions().
    """

    def __init__(self, vector_env):
        self.vector_env = vector_env
        self.action_space = vector_env.action_space
        self.observation_space = vector_env.observation_space
        self.num_envs = vector_env.num_envs
        # Observations are fetched lazily on the first poll().
        self.new_obs = None
        self.cur_rewards = [None] * self.num_envs
        self.cur_dones = [False] * self.num_envs
        self.cur_infos = [None] * self.num_envs

    @override(BaseEnv)
    def poll(self):
        if self.new_obs is None:
            self.new_obs = self.vector_env.vector_reset()
        obs = {i: v for i, v in enumerate(self.new_obs)}
        rewards = {i: v for i, v in enumerate(self.cur_rewards)}
        dones = {i: v for i, v in enumerate(self.cur_dones)}
        infos = {i: v for i, v in enumerate(self.cur_infos)}
        # Clear the buffers; the next send_actions() call refills them.
        self.new_obs = []
        self.cur_rewards = []
        self.cur_dones = []
        self.cur_infos = []
        return (_with_dummy_agent_id(obs),
                _with_dummy_agent_id(rewards),
                _with_dummy_agent_id(dones, "__all__"),
                _with_dummy_agent_id(infos),
                {})

    @override(BaseEnv)
    def send_actions(self, action_dict):
        actions = [
            action_dict[env_id][_DUMMY_AGENT_ID]
            for env_id in range(self.num_envs)
        ]
        (self.new_obs, self.cur_rewards, self.cur_dones,
         self.cur_infos) = self.vector_env.vector_step(actions)

    @override(BaseEnv)
    def try_reset(self, env_id):
        return {_DUMMY_AGENT_ID: self.vector_env.reset_at(env_id)}

    @override(BaseEnv)
    def get_unwrapped(self):
        return self.vector_env.get_unwrapped()
class _MultiAgentEnvToBaseEnv(BaseEnv):
    """Internal adapter of MultiAgentEnv to BaseEnv.
    This also supports vectorization if num_envs > 1.
    """
    def __init__(self, make_env, existing_envs, num_envs):
        """Wrap existing multi-agent envs.
        Arguments:
            make_env (func|None): Factory that produces a new multiagent env.
                Must be defined if the number of existing envs is less than
                num_envs.
            existing_envs (list): List of existing multiagent envs.
            num_envs (int): Desired num multiagent envs to keep total.
        """
        self.make_env = make_env
        self.envs = existing_envs
        self.num_envs = num_envs
        self.dones = set()
        while len(self.envs) < self.num_envs:
            self.envs.append(self.make_env(len(self.envs)))
        for env in self.envs:
            assert isinstance(env, MultiAgentEnv)
        self.env_states = [_MultiAgentEnvState(env) for env in self.envs]
    @override(BaseEnv)
    def poll(self):
        """Collect the buffered transition from every sub-env."""
        obs, rewards, dones, infos = {}, {}, {}, {}
        for i, env_state in enumerate(self.env_states):
            obs[i], rewards[i], dones[i], infos[i] = env_state.poll()
        return obs, rewards, dones, infos, {}
    @override(BaseEnv)
    def send_actions(self, action_dict):
        """Step each referenced sub-env and validate its return contract.
        Raises:
            ValueError: if an env is already done or its step() return
                violates the multi-agent contract.
        """
        for env_id, agent_dict in action_dict.items():
            if env_id in self.dones:
                raise ValueError("Env {} is already done".format(env_id))
            env = self.envs[env_id]
            obs, rewards, dones, infos = env.step(agent_dict)
            assert isinstance(obs, dict), "Not a multi-agent obs"
            assert isinstance(rewards, dict), "Not a multi-agent reward"
            assert isinstance(dones, dict), "Not a multi-agent return"
            assert isinstance(infos, dict), "Not a multi-agent info"
            if set(obs.keys()) != set(rewards.keys()):
                raise ValueError(
                    "Key set for obs and rewards must be the same: "
                    "{} vs {}".format(obs.keys(), rewards.keys()))
            if set(infos).difference(set(obs)):
                raise ValueError("Key set for infos must be a subset of obs: "
                                 "{} vs {}".format(infos.keys(), obs.keys()))
            if "__all__" not in dones:
                raise ValueError(
                    "In multi-agent environments, '__all__': True|False must "
                    "be included in the 'done' dict: got {}.".format(dones))
            if dones["__all__"]:
                self.dones.add(env_id)
            self.env_states[env_id].observe(obs, rewards, dones, infos)
    @override(BaseEnv)
    def try_reset(self, env_id):
        """Reset the sub-env with the given id.
        Returns:
            obs (dict|None): Reset observation, or None if the env does not
                support synchronous reset.
        """
        obs = self.env_states[env_id].reset()
        # Bugfix: only validate and clear the done flag when the env really
        # did reset. A None return (reset unsupported) previously tripped
        # the isinstance assert before the None check could run.
        if obs is not None:
            assert isinstance(obs, dict), "Not a multi-agent obs"
            if env_id in self.dones:
                self.dones.remove(env_id)
        return obs
    @override(BaseEnv)
    def get_unwrapped(self):
        """Return the wrapped MultiAgentEnv instances."""
        return [state.env for state in self.env_states]
class _MultiAgentEnvState:
def __init__(self, env):
assert isinstance(env, MultiAgentEnv)
self.env = env
self.initialized = False
def poll(self):
if not self.initialized:
self.reset()
self.initialized = True
obs, rew, dones, info = (self.last_obs, self.last_rewards,
self.last_dones, self.last_infos)
self.last_obs = {}
self.last_rewards = {}
self.last_dones = {"__all__": False}
self.last_infos = {}
return obs, rew, dones, info
def observe(self, obs, rewards, dones, infos):
self.last_obs = obs
self.last_rewards = rewards
self.last_dones = dones
self.last_infos = infos
def reset(self):
self.last_obs = self.env.reset()
self.last_rewards = {
agent_id: None
for agent_id in self.last_obs.keys()
}
self.last_dones = {
agent_id: False
for agent_id in self.last_obs.keys()
}
self.last_infos = {agent_id: {} for agent_id in self.last_obs.keys()}
self.last_dones["__all__"] = False
return self.last_obs
| {
"content_hash": "a7033117c0e01fff5e231bd9ed75848e",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 79,
"avg_line_length": 37.6420581655481,
"alnum_prop": 0.544098419113277,
"repo_name": "stephanie-wang/ray",
"id": "29c2e3a9cf43ec6049d14017498a7981d14eceb6",
"size": "16826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/env/base_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29882"
},
{
"name": "C++",
"bytes": "2149909"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "5499"
},
{
"name": "Go",
"bytes": "28481"
},
{
"name": "HTML",
"bytes": "30435"
},
{
"name": "Java",
"bytes": "738348"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "4058862"
},
{
"name": "Shell",
"bytes": "88736"
},
{
"name": "Starlark",
"bytes": "121207"
},
{
"name": "TypeScript",
"bytes": "64161"
}
],
"symlink_target": ""
} |
"""Tests for Configuration objects."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from confpy.core import config
from confpy.core import namespace
def test_config_instance_namespace_setting():
    """Namespaces given at init become attributes of the Configuration."""
    test_ns = namespace.Namespace(description="test")
    cfg = config.Configuration(test=test_ns)
    assert cfg.test is test_ns
    assert cfg.test.description == "test"
def test_config_subclasses_are_not_affected_by_parent():
    """Subclassing Configuration must not inherit parent instance namespaces."""
    modified_ns = namespace.Namespace(description="modified")

    class TestConfiguration(config.Configuration):
        _NAMESPACES = {}

    parent_conf = config.Configuration(modified=modified_ns)
    child_conf = TestConfiguration()
    assert parent_conf.modified is modified_ns
    with pytest.raises(AttributeError):
        child_conf.modified
| {
"content_hash": "63a1562caeb25353660b5ffbda1c01b1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.7254305977710233,
"repo_name": "kevinconway/confpy",
"id": "3b83fea80b59b4f414fee734a1802e89c34ea849",
"size": "987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/test_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62011"
}
],
"symlink_target": ""
} |
import json
import os
import unittest
from pymatgen.analysis.structure_prediction.substitutor import Substitutor
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Species
from pymatgen.util.testing import PymatgenTest
def get_table():
    """
    Load a lightweight lambda table for unit tests.

    Keeps initialization fast and makes the tests insensitive to changes in
    the default lambda table shipped with the package.
    """
    table_path = os.path.join(
        PymatgenTest.TEST_FILES_DIR, "struct_predictor", "test_lambda.json"
    )
    with open(table_path) as handle:
        return json.load(handle)
class SubstitutorTest(PymatgenTest):
    def setUp(self):
        self.s = Substitutor(threshold=1e-3, lambda_table=get_table(), alpha=-5.0)

    def test_substitutor(self):
        """Predictions from species lists, compositions, and structures."""
        species_list = [Species("O", -2), Species("Li", 1)]
        preds = self.s.pred_from_list(species_list)
        self.assertEqual(len(preds), 4, "incorrect number of substitutions")
        comp = Composition({"O2-": 1, "Li1+": 2})
        preds = self.s.pred_from_comp(comp)
        self.assertEqual(len(preds), 4, "incorrect number of substitutions")
        known_structures = [
            {"structure": PymatgenTest.get_structure("Li2O"), "id": "pmgtest"}
        ]
        preds = self.s.pred_from_structures(["Na+", "O2-"], known_structures)
        self.assertEqual(preds[0].formula, "Na2 O1")

    def test_as_dict(self):
        # Round-trip through the dict representation must not raise.
        Substitutor.from_dict(self.s.as_dict())
# Allow running this test module directly with the plain interpreter.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "260cca0f4416d83c7066ee742847e378",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 89,
"avg_line_length": 32.6875,
"alnum_prop": 0.6583811344805609,
"repo_name": "fraricci/pymatgen",
"id": "5e6a2a88645c034acc833a56337af7013fd86d9e",
"size": "1664",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/structure_prediction/tests/test_substitutor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "38793"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "OpenEdge ABL",
"bytes": "312"
},
{
"name": "Python",
"bytes": "9195124"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12027"
}
],
"symlink_target": ""
} |
from ..common import is_optimizer_op, OP_ROLE_KEY, OpRole, is_update_op
from paddle.framework import core
from paddle.utils import unique_name
__all__ = []
class PlaceType:
    """Integer place codes, kept in sync with the memcpy op's
    dst_place_type attribute (maybe not a good design)."""
    CPU = 0
    CUDA = 1
    CUDA_PINNED = 2
    XPU = 3  # unsupported for now
    NPU = 4
    NPU_PINNED = 5

    @staticmethod
    def default_device():
        """Device place for the current build (CUDA > NPU > CPU)."""
        if core.is_compiled_with_cuda():
            return PlaceType.CUDA
        if core.is_compiled_with_npu():
            return PlaceType.NPU
        return PlaceType.CPU

    @staticmethod
    def default_pinned():
        """Pinned-memory place matching default_device()."""
        if core.is_compiled_with_cuda():
            return PlaceType.CUDA_PINNED
        if core.is_compiled_with_npu():
            return PlaceType.NPU_PINNED
        return PlaceType.CPU
class OffloadHelper:
    """In-place program rewriter that (a) keeps fp16 copies of fp32 params
    for forward/backward and (b) optionally offloads fp32 master params and
    optimizer states to pinned host memory, prefetching them around the
    optimizer ops. All methods mutate the given blocks directly."""
    # Kept for reference/compat; equals PlaceType.CPU.
    cpu_place_type = 0
    cuda_place_type = PlaceType.default_device()
    cuda_pinned_place_type = PlaceType.default_pinned()
    def __init__(self, mp_ring_id=None, dp_ring_id=None):
        # Communication ring ids used to broadcast params in the startup
        # program (model-parallel and data-parallel groups); None disables
        # the corresponding broadcast.
        self.mp_ring_id = mp_ring_id
        self.dp_ring_id = dp_ring_id
    def _insert_cast_op(self, block, idx, src_name, dst_name):
        """Insert `dst = cast(src)` to FP16 at position idx, creating the
        persistable fp16 dst var if it does not exist yet."""
        src_var = block.var(src_name)
        if not block.has_var(dst_name):
            block.create_var(
                name=dst_name,
                shape=src_var.shape,
                dtype=core.VarDesc.VarType.FP16,
                persistable=True,
            )
        dst_var = block.var(dst_name)
        assert dst_var.dtype == core.VarDesc.VarType.FP16
        block._insert_op_without_sync(
            idx,
            type='cast',
            inputs={'X': src_var},
            outputs={'Out': dst_var},
            attrs={
                'in_dtype': src_var.dtype,
                'out_dtype': dst_var.dtype,
                OP_ROLE_KEY: OpRole.Optimize,
            },
        )
    def _insert_broadcast_op(self, block, idx, param_name):
        """Insert c_broadcast ops (root rank 0) for param_name so all ranks
        start from identical parameter values."""
        rings = []
        if self.dp_ring_id is not None:
            rings.append(self.dp_ring_id)
        # need sync non distributed param in mp group
        if self.mp_ring_id is not None:
            param = block.var(param_name)
            if not hasattr(param, 'is_distributed') or not param.is_distributed:
                rings.append(self.mp_ring_id)
        # the insert op order is: mp, dp
        for ring in rings:
            block._insert_op_without_sync(
                idx,
                type="c_broadcast",
                inputs={'X': param_name},
                outputs={'Out': param_name},
                attrs={
                    'ring_id': ring,
                    'root': 0,
                    'use_calc_stream': True,
                    OP_ROLE_KEY: OpRole.Forward,
                },
            )
    def _insert_memcpy_op(self, block, idx, src_name, dst_name, dst_place_type):
        """Insert a memcpy op copying src to dst on the given place type."""
        src_var = block.var(src_name)
        dst_var = block.var(dst_name)
        block._insert_op_without_sync(
            idx,
            type='memcpy',
            inputs={'X': src_var},
            outputs={'Out': dst_var},
            attrs={
                'dst_place_type': dst_place_type,
                OP_ROLE_KEY: OpRole.Optimize,
            },
        )
    def _insert_fetch_op(self, block, idx, src_name, dst_name):
        """Copy src back onto the compute device (prefetch before use)."""
        self._insert_memcpy_op(
            block, idx, src_name, dst_name, OffloadHelper.cuda_place_type
        )
    def _insert_offload_op(self, block, idx, src_name, dst_name):
        """Copy src out to pinned host memory."""
        self._insert_memcpy_op(
            block, idx, src_name, dst_name, OffloadHelper.cuda_pinned_place_type
        )
    def _get_offload_var_name(self, name):
        """Return a unique '<name>@offload' variable name."""
        return unique_name.generate(name + '@offload')
    def _create_offload_var(self, var_name, offload_var_name, blocks):
        """Create the persistable offload var in every given block and mark
        the original var non-persistable so its device buffer can be freed."""
        for block in blocks:
            var = block.var(var_name)
            var.persistable = False
            offload_var = block.create_var(
                name=offload_var_name,
                shape=var.shape,
                dtype=var.dtype,
                persistable=True,
            )
    def offload_fp32param(self, block, startup_block, offload=True):
        """
        (p_fp16) = cast(p)
        (p_fp16_recompute) = cast(p)
        (pout,) = adam(p)
        ===========================>
        rename(p_fp16_recompute, p_fp16)
        (p,) = prefetch(p@offload)
        (pout,) = adam(p)
        (p_fp16) = cast(p)
        (p@offload) = memcpy(p)

        Args:
            block: main program global block, modified in place.
            startup_block: startup program global block, modified in place.
            offload: when False only the fp16-cast rewrite is applied; no
                host-memory prefetch/offload ops are inserted.
        """
        param_to_idx = dict()
        param_to_fp16 = dict()
        # recompute_var which need rename to fp16_param
        fp16_param_to_recompute = dict()
        recompute_to_fp16 = dict()
        def remove_param(input_name):
            # Drop a param from all bookkeeping maps (it cannot be handled).
            param_to_idx.pop(input_name)
            if input_name in param_to_fp16:
                fp16_param = param_to_fp16.pop(input_name)
                if fp16_param in fp16_param_to_recompute:
                    recompute = fp16_param_to_recompute.pop(fp16_param)
                    recompute_to_fp16.pop(recompute)
        # step1: record param
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_update_op(op):
                param = op.desc.input("Param")[0]
                param_to_idx[param] = idx
        # step2: remove param which can't offload and
        # record param->fp16param, fp16param->recompute_var
        for idx, op in enumerate(block.ops):
            if is_optimizer_op(op):
                break
            # TODO (Yuang Liu): tmp solution for fuse_grad_merge + optimize_cast
            if not offload and op.type == 'coalesce_tensor':
                continue
            for input_name in op.desc.input_arg_names():
                if input_name not in param_to_idx:
                    continue
                # param which will be used by fp32 op
                if op.type != 'cast':
                    remove_param(input_name)
                    continue
                # param is only used by cast op,
                # which to cast fp32_param to fp16_param
                output_name = op.output_arg_names[0]
                if 'cast_fp16' not in output_name:
                    remove_param(input_name)
                    continue
                if 'subprog' not in output_name:
                    assert output_name == input_name + '.cast_fp16'
                    assert (
                        input_name not in param_to_fp16
                    ), "There must be only one cast op from fp32 param to fp16 param."
                    param_to_fp16[input_name] = output_name
                else:
                    # fp16-->recompute_var
                    assert (
                        input_name in param_to_fp16
                    ), "param must first be cast to fp16"
                    fp16_param = param_to_fp16[input_name]
                    fp16_param_to_recompute[fp16_param] = output_name
                    recompute_to_fp16[output_name] = fp16_param
        param_name_to_offload_name = dict()
        # step3: main_block add offload, cast op
        # change recompute to fp16, remove cast(param) to fp16
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_update_op(op):
                param = op.desc.input("Param")[0]
                if param not in param_to_idx:
                    continue
                # step3.1: create offload_var
                offload_var_name = self._get_offload_var_name(param)
                param_name_to_offload_name[param] = offload_var_name
                if offload:
                    self._create_offload_var(
                        param, offload_var_name, [block, startup_block]
                    )
                    # step3.2: insert cast op and offload op
                    self._insert_offload_op(
                        block, idx + 1, param, offload_var_name
                    )
                assert param in param_to_fp16
                fp16_param_name = param_to_fp16[param]
                fp16_param_var = block.var(fp16_param_name)
                fp16_param_var.persistable = True
                self._insert_cast_op(
                    block, idx + 1, param, param_to_fp16[param]
                )
                if offload:
                    # step3.3: insert fetch op
                    self._insert_fetch_op(block, idx, offload_var_name, param)
                continue
            # step3.4: remove cast op
            if op.type == 'cast':
                input_name = op.desc.input_arg_names()[0]
                if input_name in param_to_idx:
                    block._remove_op(idx, sync=False)
                    continue
            # step3.5: change recompute_param to fp16_param
            for input_name in op.desc.input_arg_names():
                if input_name in recompute_to_fp16:
                    op._rename_input(input_name, recompute_to_fp16[input_name])
            for output_name in op.desc.output_arg_names():
                if output_name in recompute_to_fp16:
                    op._rename_output(
                        output_name, recompute_to_fp16[output_name]
                    )
        # step4: remove recompute_param
        for name in recompute_to_fp16.keys():
            block._remove_var(name, sync=False)
        # step5: startup_block add offload
        visited_vars = set()
        # FIXME(wangxi): should insert in idx, need move comm init to the head.
        insert_idx = len(startup_block.ops)
        for idx, op in reversed(list(enumerate(startup_block.ops))):
            for out_name in op.output_arg_names:
                if out_name in visited_vars:
                    continue
                if out_name in param_name_to_offload_name:
                    var_name = out_name
                    if offload:
                        offload_var_name = param_name_to_offload_name[var_name]
                        self._insert_offload_op(
                            startup_block,
                            insert_idx,
                            var_name,
                            offload_var_name,
                        )
                    self._insert_cast_op(
                        startup_block,
                        insert_idx,
                        var_name,
                        param_to_fp16[var_name],
                    )
                    # NOTE(wangxi): cast and offload should insert after broadcast param.
                    # the insert op order is: {mp, dp}broadcast, cast, offload
                    self._insert_broadcast_op(
                        startup_block, insert_idx, var_name
                    )
                visited_vars.add(out_name)
        block._sync_with_cpp()
        startup_block._sync_with_cpp()
    def cast_fp32param_in_optimize(self, block, startup_block):
        """
        (p_fp16) = cast(p)
        (p_fp16_recompute) = cast(p)
        (pout,) = adam(p)
        ===========================>
        rename(p_fp16_recompute, p_fp16)
        (pout,) = adam(p)
        (p_fp16) = cast(p)
        """
        # Same rewrite as offload_fp32param, but without host offloading.
        self.offload_fp32param(block, startup_block, offload=False)
    def offload(self, block, startup_block):
        """
        (m1, m2) = prefetch(m1@offload, m2@offload)
        (m1out, m2out, pout) = adam(m1, m2, p)
        (m1@offload, m2@offload) = memcpy(m1, m2)

        Only adam/adamw moments are offloaded; momentum/lars/lamb states
        are deliberately left on-device (see the no-op branches below).
        """
        vars_name_to_offload_name = dict()
        # main_block add offload
        for idx, op in reversed(list(enumerate(block.ops))):
            if not is_optimizer_op(op):
                break
            vars_name = []
            if op.type == "adam" or op.type == "adamw":
                # {Moment1Out = [''], Moment2Out = [''], ParamOut = ['']} =
                # adam(inputs={Moment1 = [''], Moment2 = [''], Param = ['']})
                vars_name.append(op.desc.input("Moment1")[0])
                vars_name.append(op.desc.input("Moment2")[0])
            elif op.type == 'momentum':
                pass
            elif op.type == 'lars':
                pass
            elif op.type == 'lamb':
                pass
            # step1: create and init offload_var
            for var_name in vars_name:
                assert var_name not in vars_name_to_offload_name
                offload_var_name = self._get_offload_var_name(var_name)
                vars_name_to_offload_name[var_name] = offload_var_name
                self._create_offload_var(
                    var_name, offload_var_name, [block, startup_block]
                )
            # step2: insert offload op
            for var_name in vars_name:
                offload_var_name = vars_name_to_offload_name[var_name]
                self._insert_offload_op(
                    block, idx + 1, var_name, offload_var_name
                )
            # step3: insert fetch op
            for var_name in vars_name:
                offload_var_name = vars_name_to_offload_name[var_name]
                self._insert_fetch_op(block, idx, offload_var_name, var_name)
        # startup_block add offload
        visited_vars = set()
        for idx, op in reversed(list(enumerate(startup_block.ops))):
            for out_name in op.output_arg_names:
                if out_name in visited_vars:
                    continue
                if out_name in vars_name_to_offload_name:
                    var_name = out_name
                    offload_var_name = vars_name_to_offload_name[var_name]
                    # insert offload op after var is generated
                    self._insert_offload_op(
                        startup_block, idx + 1, var_name, offload_var_name
                    )
                visited_vars.add(out_name)
        block._sync_with_cpp()
        startup_block._sync_with_cpp()
    def opt_sharding_cast_fp32param(
        self, block, startup_block, params, offload=False
    ):
        """
        (p_fp16) = cast(p)
        (p_fp16_recompute) = cast(p)
        (pout,) = adam(p)
        ===========================>
        rename(p_fp16_recompute, p_fp16)
        (pout,) = adam(p)
        (p_fp16) = cast(p)
        broadcast(p_fp16)

        Args:
            block / startup_block: program blocks, modified in place.
            params: parameter names across the whole sharding group; params
                not updated locally are removed from the main block.
            offload: also move fp32 master params to pinned host memory.
        """
        global_params = set()
        local_params = set()
        param_to_fp16 = dict()
        # recompute_var which need rename to fp16_param
        fp16_param_to_recompute = dict()
        recompute_to_fp16 = dict()
        def remove_param(input_name):
            # Drop a param from all bookkeeping sets/maps.
            global_params.remove(input_name)
            if input_name in local_params:
                local_params.remove(input_name)
            if input_name in param_to_fp16:
                fp16_param = param_to_fp16.pop(input_name)
                if fp16_param in fp16_param_to_recompute:
                    recompute = fp16_param_to_recompute.pop(fp16_param)
                    recompute_to_fp16.pop(recompute)
        # step1: record param
        global_params = set(params)
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_update_op(op):
                param = op.desc.input("Param")[0]
                local_params.add(param)
        # step2: remove param which can't offload and
        # record param->fp16param, fp16param->recompute_var
        for idx, op in enumerate(block.ops):
            if is_optimizer_op(op):
                break
            # TODO (Yuang Liu): tmp solution for fuse_grad_merge + optimize_cast
            if op.type == 'coalesce_tensor':
                continue
            for input_name in op.desc.input_arg_names():
                if input_name not in global_params:
                    continue
                # param which will be used by fp32 op
                if op.type != 'cast':
                    remove_param(input_name)
                    continue
                # param is only used by cast op,
                # which to cast fp32_param to fp16_param
                output_name = op.output_arg_names[0]
                if 'cast_fp16' not in output_name:
                    remove_param(input_name)
                    continue
                if 'subprog' not in output_name:
                    assert output_name == input_name + '.cast_fp16'
                    assert (
                        input_name not in param_to_fp16
                    ), "There must be only one cast op from fp32 param to fp16 param."
                    param_to_fp16[input_name] = output_name
                else:
                    # fp16-->recompute_var
                    assert (
                        input_name in param_to_fp16
                    ), "param must first be cast to fp16"
                    fp16_param = param_to_fp16[input_name]
                    fp16_param_to_recompute[fp16_param] = output_name
                    recompute_to_fp16[output_name] = fp16_param
        param_name_to_offload_name = dict()
        # step3: main_block add offload, cast op
        # change recompute to fp16, remove cast(param) to fp16
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_update_op(op):
                param = op.desc.input("Param")[0]
                if param not in global_params:
                    continue
                # step3.1: create offload_var
                offload_var_name = self._get_offload_var_name(param)
                param_name_to_offload_name[param] = offload_var_name
                if offload:
                    self._create_offload_var(
                        param, offload_var_name, [block, startup_block]
                    )
                    # step3.2: insert cast op and offload op
                    self._insert_offload_op(
                        block, idx + 1, param, offload_var_name
                    )
                assert param in param_to_fp16
                fp16_param_name = param_to_fp16[param]
                fp16_param_var = block.var(fp16_param_name)
                fp16_param_var.persistable = True
                self._insert_cast_op(
                    block, idx + 1, param, param_to_fp16[param]
                )
                if offload:
                    # step3.3: insert fetch op
                    self._insert_fetch_op(block, idx, offload_var_name, param)
                continue
            # step3.4: remove cast op
            if op.type == 'cast':
                input_name = op.desc.input_arg_names()[0]
                if input_name in global_params:
                    block._remove_op(idx, sync=False)
                    continue
            # step3.5: change recompute_param to fp16_param
            for input_name in op.desc.input_arg_names():
                if input_name in recompute_to_fp16:
                    op._rename_input(input_name, recompute_to_fp16[input_name])
            for output_name in op.desc.output_arg_names():
                if output_name in recompute_to_fp16:
                    op._rename_output(
                        output_name, recompute_to_fp16[output_name]
                    )
        # step4: remove recompute_param
        for name in recompute_to_fp16.keys():
            block._remove_var(name, sync=False)
        # step5: remove fp32 param which not need
        for idx, op in enumerate(block.ops):
            if op.type not in ['coalesce_tensor', 'c_broadcast']:
                continue
            for input_name in op.desc.input_arg_names():
                if input_name in param_to_fp16:
                    op._rename_input(input_name, param_to_fp16[input_name])
            for output_name in op.desc.output_arg_names():
                if output_name in param_to_fp16:
                    op._rename_output(output_name, param_to_fp16[output_name])
        for param in global_params:
            assert param in param_to_fp16
            fp16_param_name = param_to_fp16[param]
            fp16_param_var = block.var(fp16_param_name)
            fp16_param_var.persistable = True
            if param not in local_params:
                block._remove_var(param, sync=False)
        # step6: startup_block add offload
        visited_vars = set()
        insert_idx = len(startup_block.ops)
        for idx, op in reversed(list(enumerate(startup_block.ops))):
            for out_name in op.output_arg_names:
                if out_name in visited_vars:
                    continue
                if out_name in param_to_fp16:
                    var_name = out_name
                    if offload:
                        self._insert_offload_op(
                            startup_block,
                            idx + 1,
                            var_name,
                            param_name_to_offload_name[var_name],
                        )
                    self._insert_cast_op(
                        startup_block,
                        insert_idx,
                        var_name,
                        param_to_fp16[var_name],
                    )
                    # NOTE(wangxi): cast and offload should insert after broadcast param.
                    # the insert op order is: {mp, dp}broadcast, cast, offload
                    self._insert_broadcast_op(
                        startup_block, insert_idx, var_name
                    )
                    if var_name not in local_params:
                        param = startup_block.var(out_name)
                        param.persistable = False
                visited_vars.add(out_name)
        block._sync_with_cpp()
        startup_block._sync_with_cpp()
| {
"content_hash": "131772d92b49908b3b0612c0624e9a9e",
"timestamp": "",
"source": "github",
"line_count": 566,
"max_line_length": 89,
"avg_line_length": 37.91696113074205,
"alnum_prop": 0.4932668561576814,
"repo_name": "PaddlePaddle/Paddle",
"id": "058b2adc8e18518501b9a3c371244a62ee4640de",
"size": "22072",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/distributed/fleet/meta_optimizers/sharding/offload_helper.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
import ldap3
import ssl
import re
import ast
import datetime
import pytz
from classes.ldapexceptions import LdapErrorException
from ldap3.core.exceptions import LDAPSSLConfigurationError
from classes.ldapdn import LdapDn
from ldap3.extend.microsoft.modifyPassword import ad_modify_password
def str_list(list):
    """Return a new list with every element converted via str()."""
    return [str(element) for element in list]
import logging
logger = logging.getLogger('etd.sys.ldapconnection')
class LdapNamingContexts(object):
    """Collection of the naming contexts advertised by an LDAP server."""

    def __init__(self, server):
        """Build the context list from the server's advertised contexts.

        :param server: A bound ldap3 Server whose .info is populated.
        """
        self.contexts = [LdapDn(context) for context in server.info.naming_contexts]

    def has(self, context):
        """Return True if context is one of the server's naming contexts."""
        # Simplified from the old if/return True/return False chain.
        return context in self.contexts

    def __iter__(self):
        return iter(self.contexts)

    def __repr__(self):
        return ";".join(str(context) for context in self.contexts)
class LdapResult(object):
    """Outcome of a non-search LDAP operation plus its timing.

    Attributes:
        last_error: Last error string reported by the connection.
        result: Raw ldap3 result dict of the operation.
        query_time: datetime.timedelta the operation took (may be None).
        type: Operation type string (e.g. "addResponse").
        status: Numeric LDAP result code (0 means success).
    """

    def __init__(self, connection, query_time=None):
        # Removed the pointless bare `return` that ended the old __init__.
        self.last_error = connection.last_error
        self.result = connection.result
        self.query_time = query_time
        self.type = self.result['type']
        self.status = self.result['result']

    def __repr__(self):
        # NOTE: .microseconds is only the sub-second component of the
        # timedelta, and the label says "ms" -- kept as-is for output
        # compatibility, but operations over one second under-report here.
        return "{} {}ms, status: {}".format(
            self.result['type'],
            self.query_time.microseconds,
            self.result['description']
        )
class LdapSearchResult(LdapResult):
    """Search-operation result wrapping the lazy paged-search generator."""
    def __init__(self, connection, entry_generator, query_time=None):
        # entry_generator: lazy generator of entries from paged_search.
        super().__init__(connection, query_time)
        self.entry_generator = entry_generator
    def is_empty(self):
        # NOTE(review): self.entries is never assigned in this class or its
        # parent; unless a caller sets it, this raises AttributeError.
        # Presumably a leftover from the pre-generator implementation --
        # confirm and port to entry_generator.
        if len(self.entries) > 0:
            return False
        else:
            return True
    def __repr__(self):
        # NOTE(review): also depends on the never-assigned self.entries
        # (same leftover as in is_empty) -- confirm before relying on repr.
        r = "{} records in {}ms, status: {}".format(
            len(self.entries),
            self.query_time.microseconds,
            self.result['description']
        )
        return r
class LdapConnection(object):
def __init__(self, host, admin_user, password,
use_ssl=False, ca_certs_file=False, raise_exceptions=True,
ca_certs_path=False, valid_names=list(), read_only=False, verifyssl=True):
self.host = host
self.admin_user = admin_user
self.password = password
self.raise_exceptions = raise_exceptions
self.use_ssl = use_ssl
self.verifyssl = verifyssl
self.ca_certs_file = None
if ca_certs_file:
self.ca_certs_file = ca_certs_file.path()
self.ca_certs_path = None
if ca_certs_path:
self.ca_certs_path = ca_certs_path.path()
self.valid_names = valid_names
self.read_only = read_only
self.tls = self.__prepare_tls()
self.server = self.__prepare_server()
self.connection = self.__connection()
self.naming_contexts = LdapNamingContexts(self.server)
if not self.use_ssl:
return
cipher = self.connection.socket.cipher()
logger.info("SSL Cipher: {}: {}".format(str(self.connection.server), ",".join(str_list(cipher))))
return
    def __prepare_tls(self):
        """Build the ldap3.Tls configuration for this connection.

        :return: An ldap3.Tls object.
        :raises LdapErrorException: on invalid SSL configuration.
        """
        tls = ldap3.Tls(validate=ssl.CERT_NONE)
        if self.use_ssl:
            if self.verifyssl:
                validate = ssl.CERT_REQUIRED
            else:
                validate = ssl.CERT_NONE
            try:
                # NOTE(review): when neither ca_certs_file nor ca_certs_path
                # is set, `validate` is never used, so the connection keeps
                # CERT_NONE even with verifyssl=True -- confirm this is the
                # intended behavior.
                if self.ca_certs_file or self.ca_certs_path:
                    tls = ldap3.Tls(
                        validate=validate,
                        ca_certs_file=self.ca_certs_file,
                        ca_certs_path=self.ca_certs_path,
                        valid_names=self.valid_names
                    )
            except LDAPSSLConfigurationError as e:
                raise LdapErrorException(e)
        return tls
def __prepare_server(self):
server = ldap3.Server(
host=self.host,
use_ssl=self.use_ssl,
tls=self.tls,
get_info=ldap3.ALL)
return server
    def __connection(self):
        """Bind a synchronous ldap3 Connection against the prepared server.

        :return: A bound ldap3.Connection.
        :raises LdapErrorException: wrapping any bind/socket/session error.
        """
        try:
            connection = ldap3.Connection(
                self.server,
                self.admin_user,
                password=self.password,
                auto_bind=True,
                client_strategy=ldap3.SYNC,
                read_only=self.read_only,
                return_empty_attributes=False,
                raise_exceptions=self.raise_exceptions
            )
        except ldap3.core.exceptions.LDAPBindError as e:
            raise LdapErrorException(e)
        except ldap3.core.exceptions.LDAPSocketOpenError as e:
            # Try to surface a clearer message for certificate problems.
            # NOTE(review): __parse_certificate_error is not defined in the
            # visible part of this class -- confirm it exists further down.
            cert_error = self.__parse_certificate_error(e)
            if cert_error:
                raise LdapErrorException(cert_error)
            raise LdapErrorException(e)
        except ldap3.core.exceptions.LDAPSessionTerminatedByServerError as e:
            raise LdapErrorException(e)
        return connection
    def has_naming_context(self, context):
        """Return True if the server advertises the given naming context."""
        return self.naming_contexts.has(context)
    def naming_contexts_as_string(self):
        """Return all naming contexts joined with ';'."""
        return str(self.naming_contexts)
def search(self, base_dn, search_filter=None, size_limit=0):
base_dn = str(base_dn)
filter_string = "(objectclass=*)"
if search_filter:
filter_string = str(search_filter)
try:
ts_start = datetime.datetime.now(pytz.utc)
"""self.connection.search(search_base=base_dn,
search_filter=filter_string,
dereference_aliases=ldap3.DEREF_NEVER,
search_scope=ldap3.SUBTREE,
size_limit=size_limit,
attributes=["*"])
"""
entry_generator = self.connection.extend.standard.paged_search(
search_base=base_dn,
search_filter=filter_string,
dereference_aliases=ldap3.DEREF_NEVER,
search_scope=ldap3.SUBTREE,
attributes=["*"],
generator = True)
ts_end = datetime.datetime.now(pytz.utc)
ts_delta = ts_end - ts_start
except (ldap3.core.exceptions.LDAPInvalidFilterError,
ldap3.core.exceptions.LDAPObjectClassError) as e:
raise LdapErrorException(e)
# TODO: New error handling?
"""
r = LdapSearchResult(
connection=self.connection,
query_time=ts_delta
)
return r
"""
r = LdapSearchResult(
entry_generator=entry_generator,
connection=self.connection,
query_time=ts_delta
)
return r
def add(self,dn,object_class=None,attributes=None,controls=None):
dn = str(dn)
ts_start = datetime.datetime.now(pytz.utc)
self.connection.add(dn,object_class,attributes,controls)
ts_end = datetime.datetime.now(pytz.utc)
ts_delta = ts_end - ts_start
r = LdapResult(
connection=self.connection,
query_time=ts_delta
)
return r
def modify(self,dn,changes,controls=None):
dn = str(dn)
ts_start = datetime.datetime.now(pytz.utc)
self.connection.modify(dn, changes, controls)
ts_end = datetime.datetime.now(pytz.utc)
ts_delta = ts_end - ts_start
r = LdapResult(
connection=self.connection,
query_time=ts_delta
)
return r
def modify_dn(self, dn, relative_dn, delete_old_dn=True, new_superior=None, controls=None):
dn = str(dn)
ts_start = datetime.datetime.now(pytz.utc)
self.connection.modify_dn(dn, relative_dn, delete_old_dn, new_superior, controls)
ts_end = datetime.datetime.now(pytz.utc)
ts_delta = ts_end - ts_start
r = LdapResult(
connection=self.connection,
query_time=ts_delta
)
return r
    def set_password(self, dn, password):
        """Set the password for dn.

        NOTE(review): the actual password-modify call is commented out below,
        so this method currently only measures elapsed time and returns a
        successful LdapResult without touching the directory — confirm
        whether this no-op is intentional.
        """
        try:
            ts_start = datetime.datetime.now(pytz.utc)
            #ad_modify_password(
            #  self.connection, dn, password, None, controls=None)
            ts_end = datetime.datetime.now(pytz.utc)
            ts_delta = ts_end - ts_start
        except Exception as e:
            raise LdapErrorException('set password failed: ' + str(e))
        r = LdapResult(
            connection=self.connection,
            query_time=ts_delta
        )
        return r
@staticmethod
def __parse_certificate_error(exception):
message = str(exception)
match = re.match(r"socket ssl wrapping error: certificate {(.*)} .* in \[(.*)\]",
message,
re.IGNORECASE)
if match:
cert_str = "{" + match.group(1) + "}"
cert = ast.literal_eval(cert_str)
cert_subject = str(cert['subject'])
names_str = "[" + match.group(2) + "]"
names = ast.literal_eval(names_str)
names_joined = ",".join(names)
msg = "certificate error: subject does not match hostnames: {} subject: {}".format(names_joined,
cert_subject)
return msg
else:
return False
| {
"content_hash": "888a534e1ae50ce1a6be3a8156268aab",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 108,
"avg_line_length": 32.361486486486484,
"alnum_prop": 0.546507986219856,
"repo_name": "edushare-at/py-etd",
"id": "0804c5b1583214c533f3452b6927f89e087435c9",
"size": "9579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/ldapconnection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133969"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
'''
Created on Mar 28, 2014
Provides functions for constructing the input file for STP.
@author: stefan
'''
import itertools
def blockCharacteristic(stpfile, characteristic, wordsize):
    """
    Excludes this characteristic from being found.
    """
    # Only add state words (x, y, s)
    # TODO: extend for other ciphers
    prefixes = ('x', 'y', 's', 'v')
    state_words = {name: value for name, value in
                   characteristic.characteristic_data.items()
                   if name.startswith(prefixes)}
    terms = "".join("BVXOR({}, {}) | ".format(name, value)
                    for name, value in state_words.items())
    statement = "ASSERT(NOT(" + terms[:-2]
    statement += ") = 0hex{});".format("0" * (wordsize // 4))
    stpfile.write(statement)
    return
def setupQuery(stpfile):
    """
    Adds the query and printing of counterexample to the stp stpfile.
    """
    stpfile.writelines(["QUERY(FALSE);\n", "COUNTEREXAMPLE;\n"])
    return
def setupVariables(stpfile, variables, wordsize):
    """
    Adds a list of variables to the stp stpfile.
    """
    declaration = getStringForVariables(variables, wordsize)
    stpfile.write(declaration + '\n')
    return
def assertVariableValue(stpfile, a, b):
    """
    Adds an assert that a = b to the stp stpfile.
    """
    statement = "ASSERT({0} = {1});\n".format(a, b)
    stpfile.write(statement)
    return
def getStringForVariables(variables, wordsize):
    """
    Takes as input the variable name, number of variables and the wordsize
    and constructs for instance a string of the form:
    x00, x01, ..., x30: BITVECTOR(wordsize);
    """
    names = ",".join(variables)
    return "{0}: BITVECTOR({1});".format(names, wordsize)
def assertNonZero(stpfile, variables, wordsize):
    """Write the constraint forbidding the all-zero characteristic."""
    statement = getStringForNonZero(variables, wordsize)
    stpfile.write(statement + '\n')
    return
def getStringForNonZero(variables, wordsize):
    """
    Asserts that no all-zero characteristic is allowed
    """
    prefix = "ASSERT(NOT(("
    body = "|".join(variables)
    if not variables:
        # Mirror the original's unconditional trailing-character strip.
        prefix = prefix[:-1]
    return prefix + body + ") = 0bin{}));".format("0" * wordsize)
def limitWeight(stpfile, weight, p, wordsize, ignoreMSBs=0):
    """
    Adds the weight computation and assertion to the stp stpfile.
    """
    stpfile.write("limitWeight: BITVECTOR(16);\n")
    weight_expr = getWeightString(p, wordsize, ignoreMSBs, "limitWeight")
    stpfile.write(weight_expr + "\n")
    stpfile.write("ASSERT(BVLE(limitWeight, {0:#018b}));\n".format(weight))
    return
def setupWeightComputationSum(stpfile, weight, p, wordsize, ignoreMSBs=0):
    """
    Assert that weight is equal to the sum of p.
    """
    stpfile.write("weight: BITVECTOR(16);\n")
    summands = ",".join(p)
    if len(p) > 1:
        stpfile.write("ASSERT(weight = BVPLUS({},{}));\n".format(16, summands))
    else:
        stpfile.write("ASSERT(weight = {});\n".format(summands))
    stpfile.write("ASSERT(weight = {0:#018b});\n".format(weight))
    return
def setupWeightComputation(stpfile, weight, p, wordsize, ignoreMSBs=0):
    """
    Assert that weight is equal to the sum of the hamming weight of p.
    """
    header = "weight: BITVECTOR(16);\n"
    stpfile.write(header)
    stpfile.write(getWeightString(p, wordsize, ignoreMSBs) + "\n")
    stpfile.write("ASSERT(weight = {0:#018b});\n".format(weight))
    # Alternative (upper bound instead of exact weight):
    # stpfile.write("ASSERT(BVLE(weight, {0:#018b}));\n".format(weight))
    return
def getWeightString(variables, wordsize, ignoreMSBs=0, weightVariable="weight"):
    """
    Asserts that the weight is equal to the hamming weight of the
    given variables.
    """
    bit_count = wordsize - ignoreMSBs
    terms = []
    for var in variables:
        # Zero-extend each remaining bit of var to 8 bits and sum them
        # (the per-word hamming weight); MSBs that do not contribute to
        # the probability of the characteristic are skipped.
        bits = ["0bin0000000@({0}[{1}:{1}])".format(var, bit)
                for bit in range(bit_count)]
        if bit_count == 1:
            # BVPLUS needs at least two operands; pad with zero.
            bits.append("0bin0")
        terms.append("0b00000000@(BVPLUS(8, " + ",".join(bits) + "))")
    if variables:
        terms.append("0bin0000000000000000")
    prefix = "ASSERT(({} = BVPLUS(16,".format(weightVariable)
    if not terms:
        return prefix[:-1] + ")));"
    return prefix + ",".join(terms) + ")));"
def getStringEq(a, b, c):
    """STP term encoding the 'eq' condition over a, b and c."""
    template = "(BVXOR(~{0}, {1}) & BVXOR(~{0}, {2}))"
    return template.format(a, b, c)
def getStringAdd(a, b, c, wordsize):
    """STP constraint for a valid XOR-differential through modular addition."""
    msb = wordsize - 1
    parts = [
        "(((BVXOR((~{0} << 1)[{3}:0], ({1} << 1)[{3}:0])".format(a, b, c, msb),
        "& BVXOR((~{0} << 1)[{3}:0], ({2} << 1)[{3}:0]))".format(a, b, c, msb),
        " & BVXOR({0}, BVXOR({1}, BVXOR({2}, ({1} << 1)[{3}:0]))))".format(a, b, c, msb),
        " = 0bin{})".format("0" * wordsize),
    ]
    return "".join(parts)
def getStringForAndDifferential(a, b, c):
    """
    AND = valid(x,y,out) = (x and out) or (y and out) or (not out)
    """
    pattern = "(({0} & {2}) | ({1} & {2}) | (~{2}))"
    return pattern.format(a, b, c)
def getStringLeftRotate(value, rotation, wordsize):
    """STP term rotating value left by rotation (mod wordsize) bits."""
    shift = rotation % wordsize
    if shift == 0:
        return "{0}".format(value)
    return "((({0} << {1})[{2}:0]) | (({0} >> {3})[{2}:0]))".format(
        value, shift, wordsize - 1, (wordsize - rotation) % wordsize)
def getStringRightRotate(value, rotation, wordsize):
    """STP term rotating value right by rotation (mod wordsize) bits."""
    shift = rotation % wordsize
    if shift == 0:
        return "{0}".format(value)
    return "((({0} >> {1})[{2}:0]) | (({0} << {3})[{2}:0]))".format(
        value, shift, wordsize - 1, (wordsize - rotation) % wordsize)
def add4bitSbox(sbox, variables):
    """
    Adds the constraints for the S-box and the weight
    for the differential transition.

    sbox is a list representing the S-box.

    variables should be a list containing the input and
    output variables of the S-box and the weight variables.

    S(x) = y
    The probability of the transitions is
    2^-{hw(w0||w1||w2||w3)}
    w ... hamming weight from the DDT table

    Performance fix: valid trails are kept in a set of tuples so the
    2^12 membership tests below are O(1) instead of scanning a list;
    the generated CNF string is unchanged.
    """
    assert(len(sbox) == 16)
    assert(len(variables) == 12)
    # First compute the DDT
    DDT = [[0]*16 for i in range(16)]
    for a in range(16):
        for b in range(16):
            DDT[a ^ b][sbox[a] ^ sbox[b]] += 1
    # Map a DDT entry to 4 weight bits whose hamming weight is -log2(prob).
    # Entries not in this table (e.g. odd counts) get no weight bits, which —
    # as in the original — makes those transitions unsatisfiable.
    weight_bits = {2: (0, 1, 1, 1),   # 2^-3
                   4: (0, 0, 1, 1),   # 2^-2
                   8: (0, 0, 0, 1),   # 2^-1
                   16: (0, 0, 0, 0)}  # probability 1
    # Collect all valid trails: input diff bits + output diff bits + weight bits.
    trails = set()
    for input_diff in range(16):
        for output_diff in range(16):
            count = DDT[input_diff][output_diff]
            if count != 0:
                bits = [(input_diff >> i) & 1 for i in (3, 2, 1, 0)]
                bits += [(output_diff >> i) & 1 for i in (3, 2, 1, 0)]
                extra = weight_bits.get(count)
                if extra is not None:
                    bits += extra
                trails.add(tuple(bits))
    # Build CNF excluding every invalid assignment of the 12 variables.
    cnf = ""
    for prod in itertools.product([0, 1], repeat=12):
        # Trail is not valid
        if prod not in trails:
            expr = ["~" if x == 1 else "" for x in prod]
            clause = ""
            for literal in range(12):
                clause += "{0}{1} | ".format(expr[literal], variables[literal])
            cnf += "({}) &".format(clause[:-2])
    return "ASSERT({} = 0bin1);\n".format(cnf[:-2])
| {
"content_hash": "b20b34a3c5c7a2c5c436db09946239e2",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 85,
"avg_line_length": 31.71814671814672,
"alnum_prop": 0.5638466220328667,
"repo_name": "kste/cryptosmt",
"id": "6f24b323d1cee2b9548a4cca206305b6d6eebc5f",
"size": "8215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parser/stpcommands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1122"
},
{
"name": "Python",
"bytes": "418582"
}
],
"symlink_target": ""
} |
"""Test configuration for the Matchlight SDK."""
import time
import uuid
import pytest
import matchlight
@pytest.fixture
def access_key():
    """Provides a fake access key in the form of a UUID4."""
    return uuid.uuid4().hex
@pytest.fixture
def secret_key():
    """Provides a fake secret key in the form of a UUID4."""
    return uuid.uuid4().hex
@pytest.fixture
def connection(access_key, secret_key):
    """A connection object initialized with a fake access and secret key."""
    return matchlight.Matchlight(access_key=access_key, secret_key=secret_key)
@pytest.fixture(scope='function')
def id():
    """Provides a fake id in the form of a UUID4."""
    # NOTE(review): this fixture name shadows the builtin ``id`` inside
    # tests that request it.
    return str(uuid.uuid4())
@pytest.fixture(scope='function')
def number_of_records():
    """A record count fixture."""
    return 10
@pytest.fixture(scope='function')
def number_of_unseen_alerts():
    """An unseen alerts count fixture."""
    return 10
@pytest.fixture(scope='function')
def document(request):
    """A document mapping fixture."""
    # ctime/mtime are float POSIX timestamps.
    return {
        'id': uuid.uuid4().hex,
        'name': 'Document record',
        'description': '',
        'ctime': time.time(),
        'mtime': time.time(),
        'metadata': {},
    }
@pytest.fixture(scope='function')
def document_record(document):
    """A document record fixture (matchlight.Record built from document)."""
    return matchlight.Record(**document)
@pytest.fixture(scope='function')
def project_name():
    """A project name fixture."""
    return 'Test Project'
@pytest.fixture(scope='function', params=[
    'bulk_pii',
    'document',
    'pii',
    'source_code',
])
def project_type(request):
    """A parametrized project type fixture."""
    return request.param
@pytest.fixture(scope='function')
def upload_token():
    """An upload token (dash-separated UUID4) fixture."""
    return str(uuid.uuid4())
@pytest.fixture(scope='function')
def project_payload(project_name, project_type, upload_token,
                    number_of_records, number_of_unseen_alerts):
    """A project payload artifact, parametrized by project type."""
    return {
        'project_name': project_name,
        'project_type': project_type,
        'project_upload_token': upload_token,
        'last_date_modified': time.time(),
        'number_of_records': number_of_records,
        'number_of_unseen_alerts': number_of_unseen_alerts,
    }
@pytest.fixture
def project(project_payload):
    """A project instance fixture, parametrized by project type."""
    return matchlight.Project.from_mapping(project_payload)
@pytest.fixture(scope='function')
def alert_payload(id, upload_token):
    """An alert payload artifact."""
    # NOTE(review): 'seen'/'archived'/'tor_only' are string booleans here,
    # matching the wire format this fixture imitates.
    return {
        'id': id,
        'alert_number': 10,
        'type': 'pii',
        'url': 'https://terbiumlabs.com/matchlight.html',
        'url_metadata': {
            'description': 'Matchlight provides intelligence on your most imp',
            'tor_only': 'false'
        },
        'ctime': time.time(),
        'mtime': time.time(),
        'seen': 'true',
        'archived': 'true',
        'upload_token': upload_token,
        'details': {
            'pii': {
                'fields': ['phone']
            }
        },
        'asset_name': 'Example Record',
        'project_name': 'Sample Project',
    }
@pytest.fixture(scope='function')
def alert_details_pii_payload():
    """An alert details payload artifact for a pii alert."""
    return {
        'details': {
            'pii': [
                {
                    'email': 'o****@gmail.com',
                    'first': 'a****',
                    'last': 'b****',
                    'record_id': 'd3c59d38c4054f62876a2a7a3dca41ca'
                }
            ]
        },
        'notes': '',
        'type': 'pii'
    }
@pytest.fixture
def alert(alert_payload):
    """An alert instance fixture."""
    return matchlight.Alert.from_mapping(alert_payload)
@pytest.fixture(scope='function')
def pii_search_email_only_results():
    """PII search results for only the email field."""
    # NOTE(review): 'ts' deliberately mixes ISO strings, ints and numeric
    # strings — presumably to exercise timestamp normalization; confirm.
    return [
        {
            'fields': ['email'],
            'ts': '2018-07-25T20:00:44',
            'source': 'Exactis Breach June 2018'
        },
        {
            'fields': ['email'],
            'ts': '2017-01-25T02:35:04',
            'source': 'https://pastebin.com/raw.php?i=1DgbtSZc'
        },
        {
            'fields': ['email'],
            'ts': 1556221970,
            'source': 'Zoosk Breach Nov 2016'
        },
        {
            'fields': ['email'],
            'ts': '1558333205',
            'source': 'https://www.reddit.com/r/AskReddit/comments/3oqj4a'
        },
    ]
@pytest.fixture(scope='function')
def search_email_only_results():
    """Fingerprint (cwid) search results for only the email field."""
    return [
        {
            'cwid': 'fff33cbe7ed54f5ebfccd09c3d24999c',
            'score': 800,
            'ts': 1453298293,
            'urls': [
                [
                    1453298293,
                    (
                        'http://blockchainbdgpzk.onion/tx/4f4097992b89156'
                        '690817556fc3f540535bdfadde06661c9cae21d500943f970'
                    )
                ]
            ]
        },
        {
            'cwid': 'ffedf0a2b775cfd50383603ff827d702',
            'score': 400,
            'ts': 1438870917,
            'urls': [
                [
                    1438870917,
                    (
                        'http://nqigfqrxnkwcqmiq.onion/'
                        'wiki/index.php#Whistleblowing'
                    )
                ]
            ]
        },
    ]
| {
"content_hash": "82bcac1f9c7ab6bb4bbb8654ef455a7f",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 79,
"avg_line_length": 25.616438356164384,
"alnum_prop": 0.5365418894830659,
"repo_name": "TerbiumLabs/python-matchlightsdk",
"id": "ea3008d6f5b0d48456c19d3dc4bc37363f7f8efa",
"size": "5610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "830"
},
{
"name": "Python",
"bytes": "115720"
},
{
"name": "Shell",
"bytes": "1171"
}
],
"symlink_target": ""
} |
import psutil
from os3.core.item import Os3Item
class Process(psutil.Process, Os3Item):
    # Combines psutil's process-inspection API with the Os3Item interface;
    # no additional behavior is defined here.
    pass
| {
"content_hash": "19ae4d86068e79e1c8e7d6b924f7ee40",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 14.285714285714286,
"alnum_prop": 0.76,
"repo_name": "Nekmo/gradale",
"id": "dfd9e0fc484f87bc906c0e0e0e7659f04c91c0be",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "os3/ps/process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33594"
}
],
"symlink_target": ""
} |
from oslo_policy import policy
# Reusable check strings referenced by policies elsewhere in the package.
RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
ROLE_ADMIN = 'role:admin'
# Empty check string: the rule is unprotected (always allowed).
UNPROTECTED = ''
# Base policy rules registered with oslo.policy.
rules = [
    policy.RuleDefault(
        name='context_is_admin',
        check_str='role:admin'),
    policy.RuleDefault(
        name='admin_or_owner',
        check_str='is_admin:True or tenant:%(tenant_id)s'),
    policy.RuleDefault(
        name='default',
        check_str=UNPROTECTED)
]
def list_rules():
    """Return the base policy rules (oslo.policy entry point)."""
    return rules
| {
"content_hash": "8c5eb1d9fd554d84c23355910deed0ba",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 59,
"avg_line_length": 21.571428571428573,
"alnum_prop": 0.6247240618101545,
"repo_name": "stackforge/cloudkitty",
"id": "66c0d38e80f206e5f59f3f5d88692f7da02da2cd",
"size": "1052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkitty/common/policies/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1648"
},
{
"name": "Python",
"bytes": "452298"
},
{
"name": "Shell",
"bytes": "12116"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from awl.admintools import make_admin_obj_mixin, fancy_modeladmin
from awl.tests.models import (Link, Author, Book, Chapter, Driver,
VehicleMake, VehicleModel, Dealer)
from awl.rankedmodel.admintools import (admin_link_move_up,
admin_link_move_down, admin_move_links)
# ============================================================================
# Waelsteng Admin Models
# ============================================================================
@admin.register(Link)
class LinkAdmin(admin.ModelAdmin):
    """Admin used by the waelsteng tests; exposes a rendered link column."""
    list_display = ('url', 'text', 'visit_me')
    def visit_me(self, obj):
        # NOTE(review): returns raw HTML; modern Django auto-escapes
        # list_display output unless wrapped in format_html/mark_safe —
        # presumably fine for these tests, but confirm the rendering.
        return '<a href="%s">%s</a>' % (obj.url, obj.text)
# ============================================================================
# Admintools Admin Models
# ============================================================================
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
    list_display = ('name', )
# Mixin adding a link column to the related Author on the Book admin.
base = make_admin_obj_mixin('BookMixin')
base.add_obj_link('show_author', 'author')
@admin.register(Book)
class BookAdmin(admin.ModelAdmin, base):
    list_display = ('name', 'show_author')
# Mixin for Chapter: clickable link columns and read-only reference
# columns for the related Book and its Author.
base = make_admin_obj_mixin('ChapterMixin')
base.add_obj_link('show_author', 'book__author')
base.add_obj_link('show_book', 'book', 'My Book',
    '{{obj.classname}}.id={{obj.id}}')
base.add_obj_ref('readonly_author', 'book__author')
base.add_obj_ref('readonly_book', 'book', 'Readonly Book',
    'RO {{obj.classname}}.id={{obj.id}}')
@admin.register(Chapter)
class ChapterAdmin(admin.ModelAdmin, base):
    list_display = ('name', 'show_author', 'show_book', 'readonly_author',
        'readonly_book')
# ----------------------------------------------------------------------------
# Driver admin built with the fancy_modeladmin factory: link columns,
# read-only object columns, plus formatted and templated fields.
base = fancy_modeladmin('id')
base.add_displays('name')
base.add_link('vehiclemodel__vehiclemake')
base.add_link('vehiclemodel', 'My Vehicle Model',
    '{{obj.fullname}} id={{obj.id}}', empty='<i>no model</i>')
base.add_object('vehiclemodel__vehiclemake')
base.add_object('vehiclemodel', 'RO Vehicle Model',
    'RO {{obj.fullname}} id={{obj.id}}')
base.add_formatted_field('rating', '%0.1f')
base.add_templated_field('rating', '{{row.name}} {{field}}')
@admin.register(Driver)
class DriverAdmin(base):
    pass
@admin.register(VehicleMake)
class VehicleMakeAdmin(admin.ModelAdmin):
    pass
# VehicleModel admin: reverse-FK link columns to the Driver set.
base = fancy_modeladmin('id')
base.add_display('name')
base.add_display('year', 'YEAR TITLE', empty='<i>no year</i>')
base.add_fk_link('driver_set', Driver, 'vehiclemodel')
base.add_fk_link('driver_set', Driver, 'vehiclemodel', 'Driver Title',
    '{{row.name}} {{count}} {{title}}', empty='<i>no drivers</i>')
@admin.register(VehicleModel)
class VehicleModelAdmin(base):
    pass
# Dealer admin: many-to-many link columns to the sold vehicle models.
base = fancy_modeladmin('id', 'name')
base.add_m2m_link('vehicle_models')
base.add_m2m_link('vehicle_models', 'Models Sold')
base.add_m2m_link('vehicle_models', 'Models Sold', '{{count}}',
    '<i>no models</i>')
@admin.register(Dealer)
class DealerAdmin(base):
    pass
# ============================================================================
# RankedModel Admin Models
# ============================================================================
class RankAdmin(admin.ModelAdmin):
    """Admin for RankedModel test models with up/down re-ranking links."""
    list_display = ('name', 'move_up', 'move_down', 'move_both')
    def move_up(self, obj):
        """Column rendering a link that moves obj up one rank."""
        return admin_link_move_up(obj)
    move_up.short_description = 'Move Up Rank'
    def move_down(self, obj):
        """Column rendering a link that moves obj down one rank."""
        return admin_link_move_down(obj)
    # Bug fix: was labelled 'Move Up Rank' (copy/paste error).
    move_down.short_description = 'Move Down Rank'
    def move_both(self, obj):
        """Column rendering both move links."""
        return admin_move_links(obj)
    move_both.short_description = 'Move Both'
| {
"content_hash": "39ca62753716af168352b57a39ba4437",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 32.526785714285715,
"alnum_prop": 0.5758989843535548,
"repo_name": "cltrudeau/django-awl",
"id": "d8348b3812c07b354af32bf410dd6c9fc8d42f52",
"size": "3643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awl/tests/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17"
},
{
"name": "Python",
"bytes": "155336"
},
{
"name": "Shell",
"bytes": "961"
}
],
"symlink_target": ""
} |
import numpy as np
import galsim
import os
from .galaxymaker import GalaxyMaker
from .great3_cosmos_gals.galaxies import COSMOSGalaxyBuilder
from .great3_cosmos_gals.noise import PlaceholderNoiseBuilder
from .. import utils
from ..observation import Observation
class GREAT3COSMOSGalaxyMaker(GalaxyMaker):
    """
    Returns COSMOS galaxies as if viewed from the ground a la GREAT3

    See the GREAT3 challenge docs for details. Code was pulled from the
    GREAT3 simulations code base.

    You will need the COSMOS data, which can be downloaded from

    http://great3.jb.man.ac.uk/leaderboard/data/public/COSMOS_23.5_training_sample.tar.gz
    http://great3.jb.man.ac.uk/leaderboard/data/public/great3_galaxy_selection_files.tar.gz

    Once the data is unpacked, place all files in a single directory and feed
    this path to the code as 'cosmos_data' in the 'galaxymaker' config dict
    (defaults to $GREAT3DATA).

    Examples:

        atmos_seeing = 0.55
        seed = 12345
        cgm = GREAT3COSMOSGalaxyMaker(seed,
                                      galaxymaker={'cosmos_data': '/path/to/data'})

        # build a catalog for a given seeing
        cgm.build_catalog_for_seeing(atmos_seeing)

        # now draw pre-PSF galaxies from it
        for i in xrange(10):
            galaxy,galinfo = cgm.get_prepsf_galaxy(atmos_seeing,n_epochs,max_size,pixel_scale)

        # you can also just draw galaxies at random, but then the catalog is
        # rebuilt each time, which is slow.  With save_catalog=True the
        # catalog is built once internally and reused.
        galaxy,galinfo = cgm.get_prepsf_galaxy(atmos_seeing,n_epochs,max_size,pixel_scale,
                                               save_catalog=True)

        # the above gets you a galaxy with no PSF.
        # one should then add in PSF and pixel effects and noise if wanted;
        # try these methods

        # great3 clone
        cgm.apply_psf_and_noise_whiten_ala_great3(...)

        # like great3, but no extra noise added
        cgm.apply_psf_and_noise_whiten(...)
    """
    def __init__(self,seed=None,**kw):
        """Set up RNGs and the COSMOS galaxy builder.

        Options are read from kw['galaxymaker']: 'cosmos_data' (path,
        defaults to $GREAT3DATA), 'real_galaxy' (bool, default True),
        'preload' (bool, default False).
        """
        assert seed is not None,"Random seed must be given in cosmos galaxy maker!"
        self.noise_mult = 1.0
        self.rng = galsim.UniformDeviate(seed)
        # Derive a numpy RNG from the galsim RNG so catalog draws are
        # reproducible from the single seed.
        self.rng_np = np.random.RandomState(int(self.rng() * 1000000))
        self.cosmos_data = kw['galaxymaker'].get('cosmos_data',os.environ['GREAT3DATA'])
        self.real_galaxy = kw['galaxymaker'].get('real_galaxy',True)
        self.preload = kw['galaxymaker'].get('preload',False)
        self.cosmosgb = COSMOSGalaxyBuilder(self.real_galaxy,self.cosmos_data,preload=self.preload)
        self.catalog_dtype = self.cosmosgb.generateSubfieldParameters()['schema']
        self.catalog_dtype.append(('weight','f8'))
        self.catalog_dtype.append(('n_epochs','i4'))
        # Catalogs are cached per seeing value; see build_catalog_for_seeing.
        self.catalogs = {}
        self.conf = {}
        self.conf.update(kw)
    def get_galaxy_from_info(self,record_in,seeing,n_epochs,max_size,pixel_scale):
        """
        Get a COSMOS galaxy from a specific row in table.

        Returns (galaxy, galinfo) where galinfo carries the noise builder,
        its parameters, the (possibly flux-rescaled) record, the noise sigma
        and stamp-size bookkeeping.
        """
        record = record_in.copy()
        if record['n_epochs'] != n_epochs:
            # Re-split the total flux across the requested number of epochs.
            rat = float(record['n_epochs']/n_epochs)
            record['n_epochs'] = n_epochs
            for tag in ["bulge_flux","disk_flux","flux_rescale"]:
                if tag in record.dtype.names:
                    record[tag] *= rat
        nb = PlaceholderNoiseBuilder()
        nb_params = nb.generateEpochParameters(self.rng,record['n_epochs'],seeing,self.noise_mult)
        galaxy = self.cosmosgb.makeGalSimObject(record, max_size, max_size, pixel_scale, self.rng)
        galinfo = {}
        galinfo['noise_builder'] = nb
        galinfo['noise_builder_params'] = nb_params
        galinfo['info'] = record
        galinfo['seeing'] = seeing
        galinfo['noise'] = np.sqrt(galinfo['noise_builder_params']['variance'])
        # 0.03 arcsec/pixel is the native COSMOS HST pixel scale; the two
        # attribute paths cover real vs. parametric galaxy objects.
        try:
            galinfo['orig_stamp_size_arcsec'] = galaxy.original.original_gal.image.array.shape[0]*0.03
        except:
            galinfo['orig_stamp_size_arcsec'] = galaxy.original.gal_image.array.shape[0]*0.03
        galinfo['max_size'] = max_size
        return galaxy,galinfo
    def build_catalog_for_seeing(self,seeing,verbose=False,randomly_rotate=True):
        """
        Build a galaxy catalog a specific seeing value.

        If you build a catalog and then get galaxies with the same seeing value,
        the code will skip subsequent building steps.
        """
        nb = PlaceholderNoiseBuilder()
        nb_params = nb.generateEpochParameters(self.rng,1,seeing,self.noise_mult)
        # NOTE
        # typical_variance is for SE by definition, so just make a typical gal for the seeing and one epoch
        # will handle increased variance for multiple epochs below
        # also will rescale flux comps below
        self.catalogs[seeing] = {'cat':self.cosmosgb.generateCatalog(self.rng,None,None,nb.typical_variance, \
                                                                     self.noise_mult,seeing=seeing,verbose=verbose, \
                                                                     randomly_rotate=randomly_rotate),
                                 'typical_var':nb.typical_variance}
        # Normalize draw weights so they form a probability distribution.
        self.catalogs[seeing]['cat']['weight'] /= np.sum(self.catalogs[seeing]['cat']['weight'])
def get_catalog_for_seeing(self,seeing,verbose=False,randomly_rotate=True):
"""
Get a catalog for a specific seeing value.
"""
if seeing not in self.catalogs:
self.build_catalog_for_seeing(seeing,verbose=verbose,randomly_rotate=randomly_rotate)
return self.catalogs[seeing]['cat'].copy()
    def get_prepsf_galaxy(self,seeing,n_epochs,max_size,pixel_scale,verbose=False, \
                          randomly_rotate=True,save_catalog=False):
        """
        Get a galaxy from COSMOS postage stamp a la GREAT3.

        In GREAT3, seeing was set to atmospheric PSF FWHM.

        With save_catalog=True (or a catalog already cached for this seeing)
        a weighted random record is drawn from the cached catalog; otherwise
        a single fresh record is generated, which is slower.
        """
        if save_catalog or seeing in self.catalogs:
            if seeing not in self.catalogs:
                self.build_catalog_for_seeing(seeing,verbose=verbose,randomly_rotate=randomly_rotate)
            #now get catalog
            catalog = self.catalogs[seeing]['cat']
            Ncosmos = len(catalog)
            #now draw at random with weights
            # seed numpy.random to get predictable behavior
            randind = self.rng_np.choice(Ncosmos,replace=True,p=catalog['weight'])
            """
            while True:
                randind = self.rng_np.choice(Ncosmos,replace=True)
                if self.rng_np.uniform() < catalog['weight'][randind]:
                    break
            """
            record = catalog[randind].copy()
            record['n_epochs'] = n_epochs
            # Split the total flux evenly across the requested epochs.
            for tag in ["bulge_flux","disk_flux","flux_rescale"]:
                if tag in record.dtype.names:
                    record[tag] /= n_epochs
            nb = PlaceholderNoiseBuilder()
            nb_params = nb.generateEpochParameters(self.rng,record['n_epochs'],seeing,self.noise_mult)
            assert nb.typical_variance == self.catalogs[seeing]['typical_var']
        else:
            # No cached catalog: generate parameters for a single record.
            record = np.zeros(1,dtype=self.catalog_dtype)[0]
            record['n_epochs'] = n_epochs
            nb = PlaceholderNoiseBuilder()
            nb_params = nb.generateEpochParameters(self.rng,record['n_epochs'],seeing,self.noise_mult)
            self.cosmosgb.generateCatalog(self.rng,[record],None,nb.typical_variance,self.noise_mult,seeing=seeing, \
                                          verbose=verbose,randomly_rotate=randomly_rotate)
        galaxy = self.cosmosgb.makeGalSimObject(record, max_size, max_size, pixel_scale, self.rng)
        galinfo = {}
        galinfo['noise_builder'] = nb
        galinfo['noise_builder_params'] = nb_params
        galinfo['info'] = record.copy()
        galinfo['seeing'] = seeing
        galinfo['noise'] = np.sqrt(galinfo['noise_builder_params']['variance'])
        # 0.03 arcsec/pixel is the native COSMOS HST pixel scale.
        try:
            galinfo['orig_stamp_size_arcsec'] = galaxy.original.original_gal.image.array.shape[0]*0.03
        except:
            galinfo['orig_stamp_size_arcsec'] = galaxy.original.gal_image.array.shape[0]*0.03
        galinfo['max_size'] = max_size
        return galaxy,galinfo
    def _get_sub_image(self,galim,max_size):
        """Return a centered square sub-image no larger than max_size per side.

        NOTE(review): the sizes computed here omit the ``+ 1`` used in
        _get_sub_bounds for inclusive bounds; this only shifts the trigger
        condition below by one pixel — confirm this is intended.
        """
        curr_bounds = galim.getBounds()
        curr_xsize = curr_bounds.getXMax() - curr_bounds.getXMin()
        curr_ysize = curr_bounds.getYMax() - curr_bounds.getYMin()
        if curr_xsize > max_size or curr_ysize > max_size or curr_ysize != curr_xsize:
            sub_bounds = self._get_sub_bounds(curr_bounds,max_size)
            sub_galim = galim.subImage(sub_bounds)
            return sub_galim
        else:
            return galim
def _get_sub_bounds(self,curr_bounds,max_size):
xmin = curr_bounds.getXMin()
xmax = curr_bounds.getXMax()
curr_xsize = xmax - xmin + 1
ymin = curr_bounds.getYMin()
ymax = curr_bounds.getYMax()
curr_ysize = ymax - ymin + 1
final_size = np.min((curr_xsize,curr_ysize,max_size))
offx = curr_xsize - final_size
if offx > 0:
offx = offx//2
sub_xmin = xmin+offx
sub_xmax = xmin+offx+final_size - 1
else:
sub_xmin = xmin
sub_xmax = xmax
offy = curr_ysize - final_size
if offy > 0:
offy = offy//2
sub_ymin = ymin+offy
sub_ymax = ymin+offy+final_size - 1
else:
sub_ymin = ymin
sub_ymax = ymax
sub_bounds = galsim.BoundsI(sub_xmin,sub_xmax,sub_ymin,sub_ymax)
return sub_bounds
def _get_final_size(self,orig_size,max_size,min_size,sizes):
if min_size is None:
min_size = 0
if max_size is None:
max_size = np.inf
if orig_size > max_size:
size = max_size
elif orig_size < min_size:
size = min_size
else:
size = orig_size
if sizes is not None and not ((min_size > np.max(sizes)) or (max_size < np.min(sizes))):
sizes = np.array(sorted(sizes))
q, = np.where((sizes >= size) & (sizes <= max_size) & (sizes >= min_size))
assert len(q) > 0,"No possible stamp size given!"
psizes = sizes[q]
q = np.argmin(np.abs(orig_size-psizes))
size = psizes[q]
return size
    def apply_psf_and_noise_whiten(self,galaxy,galinfo,pixel,psf=None,max_size=None,min_size=None,sizes=None,use_great3_noise=False):
        """
        Automates finishing of galaxies for a psf and pixel.

        Add the great3 noise level for ground observations w/ use_great3_noise = True.

        Shear should be applied already if wanted.

        Noise can then be added to the image via (for example)

            noise_to_add = np.sqrt(total_noise**2 - current_var)
            noise = galsim.GaussianNoise(rng, sigma=noise_to_add)
            noise.applyTo(final_galim)

        Returns (final_galim, variance) where variance is the whitened
        current variance (or the great3 target variance when
        use_great3_noise is set).
        """
        # cut to orig postage stamp in range
        if max_size is None:
            max_sz = galinfo['max_size']
        else:
            max_sz = np.min((max_size,galinfo['max_size']))
        orig_size = int(np.ceil(galinfo['orig_stamp_size_arcsec']/pixel.getScale()))
        size = self._get_final_size(orig_size,max_sz,min_size,sizes)
        # great3 did it like this
        # final_galaxy = galsim.Convolve([psf, pixel, galaxy])
        # galim = final_galaxy.draw(scale=pixel.getScale())
        # using newer galsim APIs
        if psf is not None:
            final_galaxy = galsim.Convolve([psf, pixel, galaxy])
        else:
            final_galaxy = galsim.Convolve([pixel, galaxy])
        galinfo['galsim_object'] = final_galaxy
        galim = final_galaxy.drawImage(scale=pixel.getScale(),method='no_pixel')
        # Whiten correlated noise carried by real-galaxy profiles, if any.
        if hasattr(final_galaxy,'noise'):
            #current_var = final_galaxy.noise.applyWhiteningTo(galim)
            current_var = final_galaxy.noise.whitenImage(galim)
        else:
            current_var = 0.0
        final_galim = self._get_sub_image(galim,size)
        im = final_galim.array.copy()
        wt = np.zeros_like(im)
        if current_var > 0:
            wt[:,:] = 1.0/current_var
        else:
            wt[:,:] = 1.0
        galinfo['center'] = utils.get_image_center(im,wt,rng=self.rng_np)
        if use_great3_noise:
            galinfo['noise_builder'].addNoise(self.rng,galinfo['noise_builder_params'],final_galim,current_var)
            return final_galim, galinfo['noise_builder_params']['variance']
        else:
            return final_galim, current_var
def get_extra_data_dtype(self):
return [('cosmos_id','i8'),('g1_intrinsic','f8'),('g2_intrinsic','f8')]
def get_extra_percutout_data_dtype(self):
return [('variance','f8')]
    def get_galaxy(self,psf=None,g=None,n_epochs=None,pixel_scale=None,seeing=None,**kwargs):
        """
        Draw a sheared pre-PSF galaxy, convolve/whiten it, and package the
        result as an Observation with image, weight map and metadata.

        Any of n_epochs / pixel_scale / g / seeing not passed explicitly is
        pulled from self.conf (asserted to be present there).

        NOTE(review): `psf` is indexed as a dict below, so despite the
        psf=None default a dict appears to be required -- confirm callers
        always pass one.
        """
        if n_epochs is None:
            n_epochs = self.conf.get('n_epochs',1)
        if pixel_scale is None:
            key = 'pixel_scale'
            assert key in self.conf,"You must specify '%s' for stamps!" % key
            pixel_scale = self.conf['pixel_scale']
        if g is None:
            key = 'g'
            assert key in self.conf,"You must specify '%s' for stamps!" % key
            g = self.conf['g']
        if seeing is None:
            key = 'seeing'
            assert key in self.conf,"You must specify '%s' for stamps!" % key
            seeing = self.conf.get(key)
        # stamp sizing options are mandatory
        for key in ['max_size','min_size','sizes']:
            assert key in self.conf,"You must specify '%s' for stamps!" % key
        # ensure the key exists; defaults to None (no PSF convolution)
        psf['galsim_object'] = psf.get('galsim_object',None)
        galaxy,galinfo = self.get_prepsf_galaxy(seeing,n_epochs,self.conf['max_size'],pixel_scale, \
                                                verbose=self.conf['galaxymaker'].get('verbose',False), \
                                                randomly_rotate=self.conf['galaxymaker'].get('randomly_rotate',True), \
                                                save_catalog=self.conf['galaxymaker'].get('save_catalog',False))
        pixel = galsim.Pixel(scale=pixel_scale)
        # apply the requested reduced shear (g1, g2) before convolution
        galaxy = galaxy.shear(g1=g[0], g2=g[1])
        final_gal_image,variance = self.apply_psf_and_noise_whiten(galaxy,galinfo,pixel,psf=psf['galsim_object'], \
                                                                   max_size=self.conf['max_size'], \
                                                                   min_size=self.conf['min_size'], \
                                                                   sizes=self.conf['sizes'], \
                                                                   use_great3_noise=self.conf['galaxymaker'].get('use_great3_noise',False))
        o = Observation()
        o.image = final_gal_image.array.copy()
        # uniform inverse-variance weight map
        wt = np.zeros_like(o.image)
        wt[:,:] = 1.0/variance
        o.weight = wt
        row,col = galinfo['center']
        # copy all per-galaxy metadata, then record the center explicitly
        o.update(galinfo)
        o['row'] = row
        o['col'] = col
        o['variance'] = variance
        o['pixel_scale'] = pixel_scale
        o['g'] = g
        o['n_epochs'] = n_epochs
        o.psf = psf
        o['galsim_image'] = final_gal_image
        o['prepsf_galsim_object'] = galaxy
        o['extra_percutout_data'] = {'variance':[variance]}
        # pull intrinsic-shape columns out of the catalog row for this galaxy
        gi = np.array([galinfo['info']],dtype=self.catalog_dtype)
        o['extra_data'] = dict(cosmos_id=gi['cosmos_ident'][0], \
                               g1_intrinsic=gi['g1_intrinsic'][0], \
                               g2_intrinsic=gi['g2_intrinsic'][0])
        return o
| {
"content_hash": "59c6be096791ef21069df5b761b05a89",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 139,
"avg_line_length": 42.58510638297872,
"alnum_prop": 0.566637521858606,
"repo_name": "kstory8/egret",
"id": "c5c3de150c877e4fe0f180a21c807ecd38d2a8d1",
"size": "16012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "egret/galaxymakers/cosmosgalaxymaker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "176802"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from six.moves.urllib.parse import urlencode
from sentry.models import User, UserAvatar
from sentry.utils.avatar import get_email_avatar, get_gravatar_url, get_letter_avatar
register = template.Library()
# Adapted from http://en.gravatar.com/site/implement/images/django/
# The "mm" default is for the grey, "mystery man" icon. See:
# http://en.gravatar.com/site/implement/images/
@register.simple_tag(takes_context=True)
def gravatar_url(context, email, size, default="mm"):
    """Template tag: build the Gravatar URL for *email* at *size* pixels."""
    url = get_gravatar_url(email, size, default)
    return url
@register.simple_tag(takes_context=True)
def letter_avatar_svg(context, display_name, identifier, size=None):
    """Template tag: render an SVG letter avatar for *display_name*."""
    svg = get_letter_avatar(display_name, identifier, size=size)
    return svg
@register.simple_tag(takes_context=True)
def profile_photo_url(context, user_id, size=None):
    """Return the absolute URL of the user's uploaded avatar, or None if
    the user has not set one."""
    try:
        user_avatar = UserAvatar.objects.get_from_cache(user=user_id)
    except UserAvatar.DoesNotExist:
        return
    path = reverse("sentry-user-avatar-url", args=[user_avatar.ident])
    query = "?" + urlencode({"s": size}) if size else ""
    return settings.SENTRY_URL_PREFIX + path + query
# Don't use this in any situations where you're rendering more
# than 1-2 avatars. It will make a request for every user!
@register.simple_tag(takes_context=True)
def email_avatar(context, display_name, identifier, size=None, try_gravatar=True):
    """Template tag: render an avatar for an email identifier (may issue a
    remote Gravatar request when try_gravatar is True)."""
    rendered = get_email_avatar(display_name, identifier, size, try_gravatar)
    return rendered
@register.inclusion_tag("sentry/partial/avatar.html")
def avatar(user, size=36):
    """Render the avatar partial for a User or an OrganizationMember."""
    if isinstance(user, User):
        user_id = user.id
        email = user.email
    else:
        # OrganizationMember: prefer the linked user's email when a
        # user_id is present, otherwise fall back to the member's own
        user_id = user.user_id
        email = user.user.email if user_id else user.email
    context = {
        "size": size,
        "email": email,
        "user_id": user_id,
        "avatar_type": user.get_avatar_type(),
        "display_name": user.get_display_name(),
        "label": user.get_label(),
    }
    return context
@register.inclusion_tag("sentry/partial/avatar.html")
def avatar_for_email(user, size=36):
    """Render the avatar partial (email variant) for a User or an
    OrganizationMember."""
    if isinstance(user, User):
        user_id = user.id
        email = user.email
    else:
        # OrganizationMember: prefer the linked user's email when a
        # user_id is present, otherwise fall back to the member's own
        user_id = user.user_id
        email = user.user.email if user_id else user.email
    context = {
        "for_email": True,
        "size": size,
        "email": email,
        "user_id": user_id,
        "avatar_type": user.get_avatar_type(),
        "display_name": user.get_display_name(),
        "label": user.get_label(),
    }
    return context
| {
"content_hash": "3a5190d80c06156d649eab39d1a5874e",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 85,
"avg_line_length": 31.63953488372093,
"alnum_prop": 0.6622565233370085,
"repo_name": "mvaled/sentry",
"id": "dd38a5bb5ae4bac617197f65c88f0ca82dee2f90",
"size": "2721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/templatetags/sentry_avatars.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``histogram2dcontour.colorbar.title.side``."""

    def __init__(
        self,
        plotly_name="side",
        parent_name="histogram2dcontour.colorbar.title",
        **kwargs,
    ):
        # Allow callers to override the edit type and the permitted values;
        # otherwise fall back to the schema defaults.
        edit_type = kwargs.pop("edit_type", "colorbars")
        allowed_values = kwargs.pop("values", ["right", "top", "bottom"])
        super(SideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs,
        )
| {
"content_hash": "ed2d9bd7d6039acb9085f7b715b2c379",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 31.235294117647058,
"alnum_prop": 0.576271186440678,
"repo_name": "plotly/plotly.py",
"id": "0348d814c6d6e3b62413f8cbd191f5fa25bf1ce0",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/title/_side.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
__title__ = 'events.contrib.themes.bootstrap3'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('default_app_config', 'UID',)
# Dotted path to this theme's app configuration (Django's
# `default_app_config` convention).
default_app_config = 'events.contrib.themes.bootstrap3.apps.Config'
# Identifier under which this theme is referenced.
UID = 'bootstrap3'
| {
"content_hash": "5aa7bd56cdcef49fd2d517b8a916bd9d",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 35.111111111111114,
"alnum_prop": 0.689873417721519,
"repo_name": "mansonul/events",
"id": "110c855b50fa8daad5a31bf878021f448fcbfd36",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/contrib/themes/bootstrap3/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "90251"
},
{
"name": "HTML",
"bytes": "186225"
},
{
"name": "JavaScript",
"bytes": "43221"
},
{
"name": "Python",
"bytes": "804726"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
} |
from caps.io import reader as db

# For every halo-catalog epoch of the simulation, write the ids of the
# main halos to a progenitors_a<aexp>.dat file in the halo-catalog dir.
simulation = "L500_NR_tracers"
mt = db.Simulation(simulation)
simdir, hcdir, profilesdir = mt.get_directories()
aexp_list = mt.get_halo_epochs()
for aexp in aexp_list:
    clusters = mt.get_halo_ids(aexp=aexp, main_halos_only=True)
    aexp_str = "%0.4f" % aexp
    # 'with' guarantees the file is closed even if a write fails; using
    # write() instead of the Python-2-only 'print >>output' statement also
    # makes the script runnable under Python 3.
    with open(simdir + "/" + hcdir + "/progenitors_a" + aexp_str + ".dat", "w") as output:
        for cluster in clusters:
            output.write("%s\n" % cluster)
| {
"content_hash": "110ad4b008f3db5655c36850444e83c3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 72,
"avg_line_length": 24.36842105263158,
"alnum_prop": 0.6544276457883369,
"repo_name": "cavestruz/L500analysis",
"id": "5832a09c01f3da902b457b963f282cccb6be3db3",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caps/mergertree/write_progenitor_files.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "509320"
}
],
"symlink_target": ""
} |
import json

import tweepy
from tweepy import OAuthHandler

import commonfunctions as cf
from commonfunctions import mysecrets

# Fetch a user's recent tweets page-by-page and dump them to a JSON file.
# API credentials come from a local secrets module; if you're using your
# own login details, you can just replace them here.
sec = mysecrets.Secrets()
consumer_key = sec.consumer_key
consumer_secret = sec.consumer_secret
access_token = sec.access_token
access_secret = sec.access_secret

# This is some code that tweepy / the Twitter API needs to start a session
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)

# Number of pages to fetch; each page holds up to 200 tweets.
numberOfRuns = 16
allStatuses = []
username = 'HillaryClinton'

# Find the newest tweet; pagination walks backwards (older) from its id.
statuses = api.user_timeline(username, count=1)
maxId = cf.list_to_item(statuses).id

# range(), not the Python-2-only xrange(), so this runs on Python 3 too.
for run in range(numberOfRuns):
    statuses = api.user_timeline(username, max_id=maxId, count=200)
    if not statuses:
        # no older tweets left -- stop instead of crashing on statuses[-1]
        break
    allStatuses.extend(statuses)
    # BUG FIX: max_id is inclusive, so step one below the oldest id already
    # fetched; otherwise each page re-fetches (and re-saves) its boundary
    # tweet, producing duplicates in the output.
    maxId = statuses[-1].id - 1

# Save them all to <username>Tweets.json
with open(username + 'Tweets.json', 'w') as f:
    json.dump([status._json for status in allStatuses], f)
"content_hash": "e0a1ff19b7f91cdca537bc4e97f69a17",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 28.555555555555557,
"alnum_prop": 0.7486381322957198,
"repo_name": "keelanfh/electionary",
"id": "8b6c81402b9214ee37367e1dc359d260c81447b6",
"size": "1360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitter/twitter-fetch-data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106552"
}
],
"symlink_target": ""
} |
"""
This modules provides a function to transparently load the stylesheets
with the correct rc file.
"""
import logging
import platform
import os
__version__ = "2.1"
def _logger():
return logging.getLogger('skinstyle')
def load_stylesheet(theme, pyside=True):
    """
    Loads the stylesheet for *theme*. Takes care of importing the rc module
    (importing it registers the theme's Qt resources as a side effect).

    :param theme: theme directory name, e.g. 'darkstyle' or 'robotstyle'
    :param pyside: True to load the pyside rc file, False to load the PyQt rc file.
        NOTE(review): only PySide rc modules are ever imported below, so this
        flag currently has no effect -- confirm before relying on it.
    :return the stylesheet string, or "" when the qss file cannot be found
    """
    if theme == 'darkstyle':
        import themes.darkstyle.pyside_style_rc
    elif theme == 'robotstyle':
        import themes.robotstyle.pyside_style_rc
    from PySide.QtCore import QFile, QTextStream
    # resolve style.qss relative to this package, not the process cwd
    basedir = os.path.abspath(os.path.dirname(__file__))
    localPath = "%s/style.qss" % theme
    f = QFile( os.path.join(basedir, localPath))
    if not f.exists():
        _logger().error("Unable to load stylesheet, file not found in "
                        "resources")
        return ""
    else:
        f.open(QFile.ReadOnly | QFile.Text)
        ts = QTextStream(f)
        stylesheet = ts.readAll()
        # if platform.system().lower() == 'darwin':  # see issue #12 on github
        #     mac_fix = '''
        #     QDockWidget::title
        #     {
        #         background-color: #353434;
        #         text-align: center;
        #         height: 10px;
        #     }
        #     '''
        #     stylesheet += mac_fix
        return stylesheet
# def load_stylesheet_pyqt5():
# """
# Loads the stylesheet for use in a pyqt5 application.
# :param pyside: True to load the pyside rc file, False to load the PyQt rc file
# :return the stylesheet string
# """
# # Smart import of the rc file
# import qdarkstyle.pyqt5_style_rc
# # Load the stylesheet content from resources
# from PyQt5.QtCore import QFile, QTextStream
# f = QFile(":qdarkstyle/style.qss")
# if not f.exists():
# _logger().error("Unable to load stylesheet, file not found in "
# "resources")
# return ""
# else:
# f.open(QFile.ReadOnly | QFile.Text)
# ts = QTextStream(f)
# stylesheet = ts.readAll()
# if platform.system().lower() == 'darwin': # see issue #12 on github
# mac_fix = '''
# QDockWidget::title
# {
# background-color: #353434;
# text-align: center;
# height: 12px;
# }
# '''
# stylesheet += mac_fix
# return stylesheet
| {
"content_hash": "e5252d304820293165d0df882877ce78",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 84,
"avg_line_length": 29.102272727272727,
"alnum_prop": 0.5579851620460757,
"repo_name": "cloudteampro/juma-editor",
"id": "f86cb7cc4aadc1f2a9b8a6146f071daf4cf45044",
"size": "3718",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "editor/lib/themes/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "490405"
},
{
"name": "C++",
"bytes": "15076"
},
{
"name": "Lua",
"bytes": "223218"
},
{
"name": "Makefile",
"bytes": "6088"
},
{
"name": "Objective-C",
"bytes": "25470"
},
{
"name": "Python",
"bytes": "1033362"
},
{
"name": "Shell",
"bytes": "2792"
}
],
"symlink_target": ""
} |
from qiime2.plugin import SemanticType
from q2_types.sample_data import SampleData
from q2_types.feature_data import FeatureData
# Per-sample prediction outputs, registered as SampleData variants.
ClassifierPredictions = SemanticType(
    'ClassifierPredictions', variant_of=SampleData.field['type'])
RegressorPredictions = SemanticType(
    'RegressorPredictions', variant_of=SampleData.field['type'])
# Parameterised estimator type with a 'type' field; Classifier and
# Regressor are registered as its variants.
SampleEstimator = SemanticType('SampleEstimator', field_names='type')
Classifier = SemanticType(
    'Classifier', variant_of=SampleEstimator.field['type'])
Regressor = SemanticType(
    'Regressor', variant_of=SampleEstimator.field['type'])
# Per-sample boolean values (SampleData variant).
BooleanSeries = SemanticType(
    'BooleanSeries', variant_of=SampleData.field['type'])
# Per-feature importance scores (FeatureData variant).
Importance = SemanticType(
    'Importance', variant_of=FeatureData.field['type'])
| {
"content_hash": "16de54b3b8fb9c6caaaa82d929f58c46",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 41.666666666666664,
"alnum_prop": 0.7773333333333333,
"repo_name": "nbokulich/q2-sample-classifier",
"id": "0e202b5fb6f68301803735a3c2536a01aa4e3c5c",
"size": "1100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "q2_sample_classifier/_type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1029"
},
{
"name": "Makefile",
"bytes": "277"
},
{
"name": "Python",
"bytes": "246308"
},
{
"name": "TeX",
"bytes": "11290"
}
],
"symlink_target": ""
} |
from .adapter_base import AdapterBase
from graph_tool import Graph, Vertex, Edge
import os.path
class GraphAdapter(AdapterBase):
    """
    Adapter that records an evolutionary run as a graph-tool Graph:
    one vertex per individual, one labelled edge per relation.
    """

    # Default property schemas. Resolved in __init__ so no mutable dict
    # default argument is shared between instances/calls.
    DEFAULT_VERTEX_SCHEMA = {
        'gene'    : 'vector<bool>',
        'gen'     : 'int',
        'fitness' : 'vector<long>',
        'score'   : 'long'
    }
    DEFAULT_EDGE_SCHEMA = {
        'label' : 'string',
        'gen'   : 'int'
    }

    def __init__(self, seed_str, name, file_extension='gml',
                 vertex_schema=None, edge_schema=None):
        """
        :param seed_str: seed string stored in the graph's `labels` property
        :param name: graph name, also used as the output file name
        :param file_extension: serialization suffix (default 'gml')
        :param vertex_schema: mapping of vertex property name -> graph-tool
            type string; defaults to DEFAULT_VERTEX_SCHEMA
        :param edge_schema: mapping of edge property name -> graph-tool
            type string; defaults to DEFAULT_EDGE_SCHEMA
        """
        if vertex_schema is None:
            vertex_schema = self.DEFAULT_VERTEX_SCHEMA
        if edge_schema is None:
            edge_schema = self.DEFAULT_EDGE_SCHEMA
        self.seed = seed_str
        self.name = name
        self.file_extension = file_extension
        self.graph = Graph()

        # Create graph-level properties
        self.graph.gp.labels = self.graph.new_gp('vector<string>')
        self.graph.gp.labels = [seed_str]
        self.graph.gp.name = self.graph.new_gp('string')
        self.graph.gp.name = self.name

        # Create vertex properties
        for key in vertex_schema:
            self.graph.vp[key] = self.graph.new_vp(vertex_schema[key])

        # Create edge properties
        for key in edge_schema:
            self.graph.ep[key] = self.graph.new_ep(edge_schema[key])

    def add_node(self, gene, gen=0, attrs=None):
        """Add a vertex for *gene* born at generation *gen*; any extra
        vertex properties go in *attrs*. Returns the new vertex index."""
        v = self.graph.add_vertex()
        self.graph.vp.gene[v] = gene
        self.graph.vp.gen[v] = gen
        self.set_props(v, attrs or {})
        return self.graph.vertex_index[v]

    def add_edge(self, TAG, srcID, destID, attrs=None):
        """Add a TAG-labelled edge from srcID to destID; any extra edge
        properties go in *attrs*. Returns the new edge index."""
        e = self.graph.add_edge(srcID, destID)
        self.graph.ep.label[e] = TAG
        attrs = attrs or {}
        for key in attrs:
            self.graph.ep[key][e] = attrs[key]
        return self.graph.edge_index[e]

    def getNode(self, nodeID):
        """Return the Vertex descriptor for *nodeID*."""
        return self.graph.vertex(nodeID)

    def getEdge(self, edgeID):
        """Return the Edge descriptor for *edgeID*."""
        return self.graph.edge(edgeID)

    def fetchIndividual(self, individual):
        """
        Return the vertex index of the most recent node whose 'gene'
        property equals *individual*, or None when no such node exists.
        """
        # BUG FIX: the original referenced graph_tool.util.find_vertex, but
        # this module only imports names *from* graph_tool (never the
        # package itself), so the call raised NameError at runtime.
        from graph_tool.util import find_vertex
        targets = find_vertex(self.graph, self.graph.vp.gene, individual)
        # find the last node, the one with highest `gen`
        if targets:
            # guaranteed to be in order!!
            return self.graph.vertex_index[targets[-1]]
        else:
            return None

    def walk_edge(self, TAG, startID):
        # Not implemented yet; kept for interface compatibility.
        pass

    def update_fitness(self, nodeID, fitness):
        """Store the fitness vector on the vertex identified by *nodeID*."""
        v = self.graph.vertex(nodeID)
        self.set_props(v, {'fitness' : fitness})

    def update_score(self, nodeID, score):
        """Store the scalar score on the vertex identified by *nodeID*."""
        v = self.graph.vertex(nodeID)
        self.set_props(v, {'score' : score})

    def set_props(self, v, attrs):
        """Copy every key/value in *attrs* onto vertex *v*'s properties."""
        for key in attrs:
            self.graph.vp[key][v] = attrs[key]

    def save(self):
        """Serialize the graph to graphs/<name>.<ext> and return the path."""
        filename = os.path.join('graphs', self.name) + '.' + self.file_extension
        self.graph.save(filename)
        return filename

    def numNodes(self):
        """Return the current number of vertices in the graph."""
        return self.graph.num_vertices()
| {
"content_hash": "72414406361bee0525da471d4bf6d0c0",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 89,
"avg_line_length": 30.6,
"alnum_prop": 0.5664488017429193,
"repo_name": "arrow-/PEAviz",
"id": "57873eb66faa6058b3b2b094a668de9e2dacb966",
"size": "2754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peaviz/adapters/graph_adapter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41213"
}
],
"symlink_target": ""
} |
import re
import logging
import datetime
logger = logging.getLogger(__name__)
EMAIL_REGEX = re.compile(r'[^@]+@[^@]+\.[^@]+')
def verify_identity(prenom: str, nom: str) -> bool:
    """Return True when both first and last name are non-empty.

    The original returned the last evaluated operand ('' or None for a
    missing name) instead of a strict bool; callers only use the result in
    boolean context, so normalising to bool is backward compatible.
    """
    return bool(prenom) and bool(nom)
def verify_email(courriel: str) -> bool:
    """Return True when *courriel* is a non-empty string matching EMAIL_REGEX.

    The original returned '' / None / a Match object depending on the input;
    callers only test the result's truthiness, so a strict bool is
    backward compatible.
    """
    return bool(courriel) and EMAIL_REGEX.match(courriel) is not None
def verify_promotion(promotion: int) -> bool:
    """Return True when *promotion* is a graduation year between the current
    year and three years from now (inclusive).

    Fixes two small issues: datetime.now() was evaluated twice (could
    straddle a year boundary), and a falsy *promotion* leaked 0/None out
    instead of False.
    """
    if not promotion:
        return False
    current_year = datetime.datetime.now().year
    return current_year <= promotion <= current_year + 3
def verify_genre(genre: str) -> bool:
    """Return True when *genre* is one of the four recognised categories."""
    recognised = ('personnel', 'professionnel', 'ancien', 'etudiant')
    return genre in recognised
def verify_all(prenom: str, nom: str, courriel: str, genre: str, promotion, accompagnateurs: list):
    """Validate the whole registration: identity, email, genre, promotion
    (students only) and every companion's first/last name."""
    # every companion must carry a non-empty first and last name
    if not all(a_prenom and a_nom for a_prenom, a_nom in accompagnateurs):
        return False
    promotion_ok = verify_promotion(promotion) or genre != 'etudiant'
    return (verify_identity(prenom, nom)
            and verify_email(courriel)
            and verify_genre(genre)
            and promotion_ok)
| {
"content_hash": "baf24017b8739c446a2baae2e162bdfb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 102,
"avg_line_length": 32.67741935483871,
"alnum_prop": 0.6781836130306022,
"repo_name": "emeric254/gala-stri-website",
"id": "c69bc24e89c899958dd0fa57281f320f4bba51cd",
"size": "1038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tools/VerifyFields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10646"
},
{
"name": "JavaScript",
"bytes": "31092"
},
{
"name": "Python",
"bytes": "24212"
}
],
"symlink_target": ""
} |
import math
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.rlib.rfloat import isinf, isnan, INFINITY, NAN
class MathTests:
    """JIT tests for math.sqrt: verifies which inputs let the call be
    recorded as a pure call (call_pure_f=1) and which leave a residual
    non-pure call (call_pure_f=0) in the operation history."""
    def test_math_sqrt(self):
        def f(x):
            # mirror math.sqrt, but map the ValueError raised for negative
            # arguments to -inf so the traced function never raises
            try:
                return math.sqrt(x)
            except ValueError:
                return -INFINITY
        # finite, non-negative inputs: the sqrt call is treated as pure
        res = self.interp_operations(f, [0.0])
        assert res == 0.0
        self.check_operations_history(call_pure_f=1)
        #
        res = self.interp_operations(f, [25.0])
        assert res == 5.0
        self.check_operations_history(call_pure_f=1)
        #
        res = self.interp_operations(f, [-0.0])
        assert str(res) == '-0.0'
        self.check_operations_history(call_pure_f=1)
        #
        res = self.interp_operations(f, [1000000.0])
        assert res == 1000.0
        self.check_operations_history(call_pure_f=1)
        #
        # negative, infinite and NaN inputs: no pure call is recorded
        res = self.interp_operations(f, [-1.0])
        assert res == -INFINITY
        self.check_operations_history(call_pure_f=0)
        #
        res = self.interp_operations(f, [INFINITY])
        assert isinf(res) and not isnan(res) and res > 0.0
        self.check_operations_history(call_pure_f=0)
        #
        res = self.interp_operations(f, [NAN])
        assert isnan(res) and not isinf(res)
        self.check_operations_history(call_pure_f=0)
class TestLLtype(MathTests, LLJitMixin):
    # Runs the shared MathTests suite on the lltype backend via LLJitMixin.
    pass
| {
"content_hash": "b3089c4468fcd40cf7ead5eeb6224f1c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 59,
"avg_line_length": 31.65909090909091,
"alnum_prop": 0.5829145728643216,
"repo_name": "oblique-labs/pyVM",
"id": "26d94fa5dcee93f4dbba9d9943f831ba63c6dba9",
"size": "1393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpython/jit/metainterp/test/test_math.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Awk",
"bytes": "271"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "771638"
},
{
"name": "C++",
"bytes": "12850"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "M4",
"bytes": "12737"
},
{
"name": "Makefile",
"bytes": "35222"
},
{
"name": "Objective-C",
"bytes": "2224"
},
{
"name": "Python",
"bytes": "18329219"
},
{
"name": "Shell",
"bytes": "15396"
},
{
"name": "Vim script",
"bytes": "1107"
}
],
"symlink_target": ""
} |
import numpy as np
import mahotas as mh

# Build a single comparison figure with two examples side-by-side: the
# text image on the left, the scene image on the right, separated by a
# 128-pixel-wide black gap.
text_img = mh.imread("../SimpleImageDataset/text21.jpg")
scene_img = mh.imread("../SimpleImageDataset/scene00.jpg")
height, width, _ = text_img.shape
combined = np.zeros((height, 2 * width + 128, 3), np.uint8)
combined[:, :width] = text_img
combined[:, -width:] = scene_img
# Downsample by 4 in each dimension before saving.
mh.imsave('../1400OS_10_10+.jpg', combined[::4, ::4])
| {
"content_hash": "29b2018130e3168e4c54ee1e6ed8d717",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 31.307692307692307,
"alnum_prop": 0.6683046683046683,
"repo_name": "xho95/BuildingMachineLearningSystemsWithPython",
"id": "058b1069182729d98529cfe623fc3803a6c90dfa",
"size": "624",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ch10/figure10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "215909"
},
{
"name": "Shell",
"bytes": "743"
}
],
"symlink_target": ""
} |
from juriscraper.opinions.united_states.state import pa
import re
class Site(pa.Site):
    """Scraper for Pennsylvania Superior Court opinions (RSS feed)."""

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        self.url = "http://www.pacourts.us/assets/rss/SuperiorOpinionsRss.ashx"
        # two capture groups, one per line of a feed entry's text
        self.regex = re.compile("(.*)\n(.*)", re.M)

    def _get_judges(self):
        # Judges for this feed are provided as obscure numbers, so none
        # are reported.
        return None
| {
"content_hash": "a1a7f1f97db5c1e692e8a04ba926513e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 33.285714285714285,
"alnum_prop": 0.6180257510729614,
"repo_name": "Andr3iC/juriscraper",
"id": "81a64938bb50f272cfe35694b09f5f90fc82afe9",
"size": "622",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "opinions/united_states/state/pasuperct.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "25075213"
},
{
"name": "Python",
"bytes": "599952"
}
],
"symlink_target": ""
} |
from interface_methods import bootstrap, mcmc, mapestimate, diagnostics, asir
def set_seed(value):
    # Seed the underlying swignifit RNG, presumably to make the stochastic
    # routines (bootstrap/mcmc/asir) reproducible.
    # NOTE(review): `swignifit_raw` is not imported in this module's visible
    # imports -- confirm it is brought into scope elsewhere (e.g. a
    # package-level import), otherwise this raises NameError when called.
    swignifit_raw.setSeed(value)
| {
"content_hash": "4eec6d4dadf7164a2d842dab286c48e3",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 77,
"avg_line_length": 33.25,
"alnum_prop": 0.7819548872180451,
"repo_name": "esc/Psignifit-3.x",
"id": "2ed2c1d122b46f010bcc90363765037806ccc032",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swignifit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1835"
},
{
"name": "C++",
"bytes": "385427"
},
{
"name": "Inno Setup",
"bytes": "7857"
},
{
"name": "Makefile",
"bytes": "14574"
},
{
"name": "Matlab",
"bytes": "46120"
},
{
"name": "Python",
"bytes": "408054"
},
{
"name": "R",
"bytes": "42383"
},
{
"name": "Shell",
"bytes": "787"
},
{
"name": "TeX",
"bytes": "24029"
}
],
"symlink_target": ""
} |
from openprocurement.api.utils import (
get_file,
upload_file,
update_file_content_type,
json_view,
context_unpack,
APIResource,
)
from openprocurement.api.validation import (
validate_file_update,
validate_file_upload,
validate_patch_document_data,
)
from openprocurement.tender.core.utils import (
save_tender, optendersresource, apply_patch,
)
@optendersresource(name='belowThreshold:Tender Cancellation Documents',
                   collection_path='/tenders/{tender_id}/cancellations/{cancellation_id}/documents',
                   path='/tenders/{tender_id}/cancellations/{cancellation_id}/documents/{document_id}',
                   procurementMethodType='belowThreshold',
                   description="Tender cancellation documents")
class TenderCancellationDocumentResource(APIResource):
    """Collection and item views for documents attached to a tender
    cancellation (list/upload/read/replace/patch)."""

    # Tender statuses in which cancellation documents may no longer be
    # added or modified.
    TERMINAL_STATUSES = ('complete', 'cancelled', 'unsuccessful')

    def _forbid_in_terminal_status(self, operation):
        """Record a 403 error and return True when the tender is in a
        terminal status, so the document *operation* ('add' or 'update')
        must be refused; otherwise return False.

        Centralises the guard previously copy-pasted into
        collection_post/put/patch.
        """
        status = self.request.validated['tender_status']
        if status not in self.TERMINAL_STATUSES:
            return False
        self.request.errors.add(
            'body', 'data',
            'Can\'t {} document in current ({}) tender status'.format(operation, status))
        self.request.errors.status = 403
        return True

    @json_view(permission='view_tender')
    def collection_get(self):
        """Tender Cancellation Documents List"""
        if self.request.params.get('all', ''):
            # ?all=true: every stored version of every document
            collection_data = [i.serialize("view") for i in self.context.documents]
        else:
            # keep only the latest version per document id, ordered by date
            collection_data = sorted(dict([
                (i.id, i.serialize("view"))
                for i in self.context.documents
            ]).values(), key=lambda i: i['dateModified'])
        return {'data': collection_data}

    @json_view(validators=(validate_file_upload,), permission='edit_tender')
    def collection_post(self):
        """Tender Cancellation Document Upload"""
        if self._forbid_in_terminal_status('add'):
            return
        document = upload_file(self.request)
        self.context.documents.append(document)
        if save_tender(self.request):
            self.LOGGER.info('Created tender cancellation document {}'.format(document.id),
                             extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_cancellation_document_create'}, {'document_id': document.id}))
            self.request.response.status = 201
            document_route = self.request.matched_route.name.replace("collection_", "")
            self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})
            return {'data': document.serialize("view")}

    @json_view(permission='view_tender')
    def get(self):
        """Tender Cancellation Document Read"""
        if self.request.params.get('download'):
            return get_file(self.request)
        document = self.request.validated['document']
        document_data = document.serialize("view")
        # expose all earlier versions of the same logical document
        document_data['previousVersions'] = [
            i.serialize("view")
            for i in self.request.validated['documents']
            if i.url != document.url
        ]
        return {'data': document_data}

    @json_view(validators=(validate_file_update,), permission='edit_tender')
    def put(self):
        """Tender Cancellation Document Update (upload a new version)"""
        if self._forbid_in_terminal_status('update'):
            return
        document = upload_file(self.request)
        self.request.validated['cancellation'].documents.append(document)
        if save_tender(self.request):
            self.LOGGER.info('Updated tender cancellation document {}'.format(self.request.context.id),
                             extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_cancellation_document_put'}))
            return {'data': document.serialize("view")}

    @json_view(content_type="application/json", validators=(validate_patch_document_data,), permission='edit_tender')
    def patch(self):
        """Tender Cancellation Document Update (patch metadata in place)"""
        if self._forbid_in_terminal_status('update'):
            return
        if apply_patch(self.request, src=self.request.context.serialize()):
            update_file_content_type(self.request)
            self.LOGGER.info('Updated tender cancellation document {}'.format(self.request.context.id),
                             extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_cancellation_document_patch'}))
            return {'data': self.request.context.serialize("view")}
| {
"content_hash": "15edf9efc08ca73da11e9e0fa71da037",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 155,
"avg_line_length": 51.31578947368421,
"alnum_prop": 0.6428717948717949,
"repo_name": "yarsanich/openprocurement.tender.belowthreshold",
"id": "2710471bf9caa97c4a9fcc6ba4bd40db08358a18",
"size": "4899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openprocurement/tender/belowthreshold/views/cancellation_document.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "940647"
}
],
"symlink_target": ""
} |
from django import forms
import models
from usuarios import models as usuarios
from Piscix.middleware import get_current_user
from django_autocomplete.widgets import AutocompleteWidget
from exile_ui.widgets import DatePickerWidget
from django_select2.forms import Select2Widget
class SeguimientoForm(forms.ModelForm):
    """ModelForm for Seguimiento that stamps the logged-in user on save."""

    class Meta:
        model = models.Seguimiento
        exclude = ('usuario', )
        widgets = {
            #"fecha_proxima": DatePickerWidget(attrs={'class': 'date'}, format="%m/%d/%Y
            "inicio": Select2Widget
        }
    # end class

    def save(self, commit=True):
        """Attach the current user, then honour *commit*.

        BUG FIX: the original ignored *commit* and always wrote to the
        database; now ``save(commit=False)`` returns the unsaved instance,
        matching Django's ModelForm contract.
        """
        seguimiento = super(SeguimientoForm, self).save(commit=False)
        seguimiento.usuario = get_current_user()
        if commit:
            seguimiento.save()
        return seguimiento
    # end def
# end class
class InicioSeguimientoForm(forms.ModelForm):
    """ModelForm for InicioSeguimiento that stamps the logged-in user on save."""

    class Meta:
        model = models.InicioSeguimiento
        exclude = ('usuario', )
        widgets = {
            # "fecha_proxima": DatePickerWidget(attrs={'class': 'date'}, format="%m/%d/%Y"),
            "cliente": Select2Widget
        }
    # end class

    def __init__(self, *args, **kwargs):
        super(InicioSeguimientoForm, self).__init__(*args, **kwargs)
        # accept dates typed as day/month/year
        self.fields['fecha_proxima'].input_formats = ['%d/%m/%Y']
    # end def

    def save(self, commit=True):
        """Attach the current user, then honour *commit*.

        BUG FIX: the original ignored *commit* and always wrote to the
        database; now ``save(commit=False)`` returns the unsaved instance,
        matching Django's ModelForm contract.
        """
        seguimiento = super(InicioSeguimientoForm, self).save(commit=False)
        seguimiento.usuario = get_current_user()
        if commit:
            seguimiento.save()
        return seguimiento
    # end def
# end class
| {
"content_hash": "500d7e86540795cb5e87662c6556a7b7",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 92,
"avg_line_length": 30.28301886792453,
"alnum_prop": 0.638006230529595,
"repo_name": "exildev/Piscix",
"id": "0c456694e83ba22532129f701ffe574f97e284d7",
"size": "1605",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gestion_cartera/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58048"
},
{
"name": "HTML",
"bytes": "34644"
},
{
"name": "JavaScript",
"bytes": "94296"
},
{
"name": "Python",
"bytes": "120220"
},
{
"name": "Shell",
"bytes": "205"
}
],
"symlink_target": ""
} |
import sublime
import sublime_plugin
import re
try:
from QuickSearchEnhanced.quick_search import panels
except ImportError as error:
sublime.error_message("Dependency import failed; please read readme for " +
"GotoLineEnhanced plugin for installation instructions; to disable this " +
"message remove this plugin; message: " + str(error))
raise error
class PromptGotoLine(sublime_plugin.TextCommand):
  """Open a quick-search panel that prompts for a line number to jump to."""
  def run(self, edit, start_with_current = False):
    label = 'Tab for goto; Esc for cancel'
    current = ''
    # pre-fill the prompt with the caret's 1-based line when requested
    if start_with_current and len(self.view.sel()) > 0:
      current, _ = self.view.rowcol(self.view.sel()[0].a)
      current += 1
    panels.create([label], None, self._close, None, str(current),
      [['goto_line', True]], self._create).show()
    # remembered so a cancelled prompt can restore the original scroll
    self.initial_viewport_position = self.view.viewport_position()
  def _close(self, panel):
    # reveal the caret; if the jump was not confirmed (no 'success' flag
    # set by GotoLineComplete), scroll back to where the user started
    self.view.show(self.view.sel()[0])
    if not getattr(panel, 'success', False):
      self.view.set_viewport_position(self.initial_viewport_position)
    # erase the plugin's highlight regions slightly later, once the panel
    # has fully closed
    callback = lambda: panel.get_opener().erase_regions('goto_line_enhanced')
    sublime.set_timeout(callback, 100)
  def _create(self, panel):
    # place the caret at the start of the input and enable overwrite mode
    view = panel.get_panel()
    view.sel().clear()
    view.sel().add(sublime.Region(0, 0))
    view.set_overwrite_status(True)
def get_panel():
    """Return the active quick-search panel if it is a goto_line panel, else None."""
    panel = panels.get_current()
    if panel is None:
        return None
    if panel.get_caller('goto_line') is None:
        return None
    return panel
def shift(view, point):
    """Advance *point* past the leading whitespace of its line."""
    line_text = view.substr(view.line(point))
    first_content = re.search(r'(?<!\n)(\S|$)', line_text)
    if first_content is not None:
        point += first_content.start(1)
    return point
def convert_query_to_point(view, query, options=None):
    """Translate a line-number query string into a text point in *view*.

    Returns None when *query* does not start with a digit.  The requested
    line is clamped to the last line of the view.  With
    ``options={'position': 'end'}`` the point lands at the end of the line;
    otherwise it is shifted past leading whitespace.
    """
    # Fixed: the original used a mutable default argument (options={});
    # None-with-fallback keeps call sites unchanged.
    if options is None:
        options = {}
    if re.match(r'\d+', query) is None:
        return
    query = re.sub(r'\D', '', query)
    max_line, _ = view.rowcol(view.size())
    line = min(int(query) - 1, max_line)  # clamp to the view's last line
    point = view.text_point(line, 0)
    if options.get('position') == 'end':
        point = view.line(point).b
    else:
        point = shift(view, point)
    return point
def convert_query_to_region(view, query, options=None):
    """Translate a line-number query into a sublime.Region in *view*.

    Returns None when the query is not a line number.  With
    ``options={'select': True}`` the region extends from the current
    selection start to the target line; otherwise it is an empty (caret)
    region at the target point.
    """
    # Fixed: mutable default argument ({}) replaced with None fallback.
    if options is None:
        options = {}
    start = end = convert_query_to_point(view, query, options)
    if start is None or end is None:
        return None
    if options.get('select'):
        start = view.sel()[0].begin()
        # Without an explicit position, extend to line end when moving down,
        # or to the first non-space character when moving up.
        if options.get('position') is None:
            if end > start:
                end = view.line(end).b
            else:
                end = shift(view, view.line(end).a)
    return sublime.Region(start, end)
class GotoLineComplete(sublime_plugin.TextCommand):
    """Finish the goto interaction: move the caret/selection and close the panel."""

    def run(self, edit, position=None, select=False):
        panel = get_panel()
        if panel is None:
            return
        opener = panel.get_opener()
        options = {'position': position, 'select': select}
        region = convert_query_to_region(opener, panel.get_current_text(), options)
        selection = opener.sel()
        selection.clear()
        selection.add(region)
        panel.close(None, False)
        opener.show(region)
        # Tells PromptGotoLine._close not to restore the old viewport.
        panel.success = True
class GotoLineInsertZero(sublime_plugin.TextCommand):
    """Insert a literal '0' at the very beginning of the panel buffer."""
    def run(self, edit):
        self.view.insert(edit, 0, '0')
class GotoLineFilterQuery(sublime_plugin.TextCommand):
    """Sanitize the goto panel's query text.

    Maps home-row keys to digits, strips every non-digit, and truncates the
    query to the width of the largest valid line number in the opener view.
    """

    def run(self, edit):
        panel = get_panel()
        if panel is None:
            return
        view = panel.get_opener()
        # Renamed from `all`, which shadowed the builtin; also removed the
        # unused local `saved_selection` that the original assigned.
        whole_region = sublime.Region(0, self.view.size())
        query = self.view.substr(whole_region)
        new_query = self._filter_line(view, query)
        if new_query != query:
            self.view.replace(edit, whole_region, new_query)
        if len(self.view.sel()) == 1:
            sel = self.view.sel()[0]
            max_line, _ = view.rowcol(view.size())
            # When the caret sits at the end of a query that is already as
            # wide as the largest line number, reset it to the beginning so
            # the next keystroke starts a fresh number.
            should_goto_beginning = (sel.a == sel.b and sel.a == self.view.size() and
                sel.a == len(str(max_line)))
            if should_goto_beginning:
                self.view.sel().clear()
                self.view.sel().add(sublime.Region(0, 0))

    def _filter_line(self, view, line):
        """Return *line* reduced to a digit string no wider than the view's line count."""
        replace = [('n', '0'), ('m', '1'), (',', '2'), ('.', '3'), ('j', '4'),
            ('k', '5'), ('l', '6'), ('u', '7'), ('i', '8'), ('o', '9')]
        for character, number in replace:
            line = line.replace(character, number)
        line = re.sub(r'[^\d]', '', line)
        if line == '':
            return ''
        max_line, _ = view.rowcol(view.size())
        max_line += 1  # rowcol() is zero-based; line numbers start at 1
        if len(line) > len(str(max_line)):
            line = line[0:len(str(max_line))]
        return line
class InputHelper(sublime_plugin.EventListener):
    """Re-filters the goto panel on every edit and previews the target line."""

    def on_modified_async(self, view):
        panel = get_panel()
        if panel is None or panel.get_panel().id() != view.id():
            return
        view.run_command('goto_line_filter_query')
        opener = panel.get_opener()
        query = panel.get_current_text()
        region = convert_query_to_region(opener, query)
        if region is None:
            return None
        # Highlight the whole target line in the opener and scroll it into view.
        region = opener.full_line(region)
        opener.add_regions('goto_line_enhanced', [region], 'string')
        opener.show(region)
"content_hash": "0e02ecc8bb9071495e08a0ca867255a5",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 28.566473988439306,
"alnum_prop": 0.6244435451234318,
"repo_name": "shagabutdinov/sublime-goto-line-enhanced",
"id": "c70d937f0f4ec16a11c16dc7b016e458528d6876",
"size": "4942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2113"
},
{
"name": "Python",
"bytes": "9128"
}
],
"symlink_target": ""
} |
from message.models import *
from posts.models import Post, Status
from datetime import date
def update_requests():
    """Close out expired requests.

    For every request whose end_date has passed: if it was 'Accepted', reset
    the related Post ("thingy") to status id 1 — presumably "available",
    verify against the Status table — then mark the request 'Ended'.
    """
    all_requests = Request.objects.all()
    today = date.today()
    for elem in all_requests:
        if elem.end_date < today:
            if elem.status == 'Accepted':
                # Update the Thingy status
                # NOTE(review): Q is not imported explicitly here; it appears
                # to arrive via the wildcard `message.models` import — confirm.
                post = Post.objects.get(Q(id=elem.thingy.id))
                post.status = Status.objects.get(Q(id=1))
                post.save()
            # Update the Request status
            elem.status = 'Ended'
            elem.save()


# Executed at import time: this module acts as a one-shot maintenance script.
update_requests()
| {
"content_hash": "acfb3c484ccea4f7b65f19b07b16c8c6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 61,
"avg_line_length": 29.095238095238095,
"alnum_prop": 0.563011456628478,
"repo_name": "Guillaume-Docquier/Thingy",
"id": "2f82d006327d4e777fed44378b5f4456b8871316",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "message/autoupdate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24435"
},
{
"name": "HTML",
"bytes": "67318"
},
{
"name": "JavaScript",
"bytes": "103866"
},
{
"name": "Python",
"bytes": "80116"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import typing
import logging
import dataclasses
import pyuavcan
import pyuavcan.transport.serial
from pyuavcan.transport import Trace, TransferTrace, Capture, AlienSessionSpecifier, AlienTransferMetadata
from pyuavcan.transport import AlienTransfer, TransferFrom, Timestamp
from pyuavcan.transport.commons.high_overhead_transport import AlienTransferReassembler, TransferReassembler
from ._frame import SerialFrame
from ._stream_parser import StreamParser
_logger = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class SerialCapture(pyuavcan.transport.Capture):
    """
    A single sniffed chunk of the raw serial byte stream.

    UAVCAN/serial runs over an unstructured L1 link, so there is no native
    framing: a capture is just a timestamped fragment of bytes plus the
    direction it traveled in.  Captures taken from a live interface are
    guaranteed by this library to hold at most one frame along with its
    delimiter bytes (at least the last byte of the fragment is zero).
    When replaying from a file, split the data on the zero-byte frame
    separators to obtain the same shape.
    """

    fragment: memoryview

    own: bool
    """
    True if the captured fragment was sent by the local transport instance.
    False if it was received from the port.
    """

    def __repr__(self) -> str:
        """
        Captures that contain large fragments are truncated and appended with an ellipsis.
        """
        max_shown = 64
        size = len(self.fragment)
        if size <= max_shown:
            shown = bytes(self.fragment).hex()
        else:
            shown = bytes(self.fragment[:max_shown]).hex() + f"...<+{size - max_shown}B>..."
        return pyuavcan.util.repr_attributes(self, "tx" if self.own else "rx", shown)

    @staticmethod
    def get_transport_type() -> typing.Type[pyuavcan.transport.serial.SerialTransport]:
        return pyuavcan.transport.serial.SerialTransport
@dataclasses.dataclass(frozen=True)
class SerialErrorTrace(pyuavcan.transport.ErrorTrace):
    """A transfer reassembly error detected in the captured serial stream."""
    error: TransferReassembler.Error
@dataclasses.dataclass(frozen=True)
class SerialOutOfBandTrace(pyuavcan.transport.ErrorTrace):
    """
    Out-of-band data or a malformed frame received. See :class:`pyuavcan.serial.StreamParser`.
    """
    # Raw bytes that could not be parsed as a valid frame.
    data: memoryview
class SerialTracer(pyuavcan.transport.Tracer):
    """
    This tracer does not differentiate between input and output traces,
    but it keeps separate parsers for input and output captures such that there is no RX/TX state conflict.
    If necessary, the user can distinguish RX/TX traces by checking :attr:`SerialCapture.own`
    before invoking :meth:`update`.
    Return types from :meth:`update`:
    - :class:`pyuavcan.transport.TransferTrace`
    - :class:`SerialErrorTrace`
    - :class:`SerialOutOfBandTrace`
    """

    _MTU = 2 ** 32
    """Effectively unlimited."""

    def __init__(self) -> None:
        # Two parsers indexed by bool(capture.own): [0] for RX, [1] for TX,
        # so received and transmitted streams never share parser state.
        self._parsers = [
            StreamParser(self._on_parsed, self._MTU),
            StreamParser(self._on_parsed, self._MTU),
        ]
        # One-slot mailbox filled synchronously by _on_parsed() during
        # process_next_chunk(); consumed and cleared by update().
        self._parser_output: typing.Optional[typing.Tuple[Timestamp, typing.Union[SerialFrame, memoryview]]] = None
        self._sessions: typing.Dict[AlienSessionSpecifier, _AlienSession] = {}

    def update(self, cap: Capture) -> typing.Optional[Trace]:
        """
        If the capture encapsulates more than one serialized frame, a :class:`ValueError` will be raised.
        To avoid this, always ensure that the captured fragments are split on the frame delimiters
        (which are simply zero bytes).
        Captures provided by PyUAVCAN are always fragmented correctly, but you may need to implement fragmentation
        manually when reading data from an external file.
        """
        if not isinstance(cap, SerialCapture):
            return None  # Not a serial capture; some other tracer may handle it.
        self._parsers[cap.own].process_next_chunk(cap.fragment, cap.timestamp)
        if self._parser_output is None:
            return None
        timestamp, item = self._parser_output
        self._parser_output = None
        if isinstance(item, memoryview):
            # Unstructured bytes: out-of-band data or a malformed frame.
            return SerialOutOfBandTrace(timestamp, item)
        if isinstance(item, SerialFrame):
            spec = AlienSessionSpecifier(
                source_node_id=item.source_node_id,
                destination_node_id=item.destination_node_id,
                data_specifier=item.data_specifier,
            )
            return self._get_session(spec).update(timestamp, item)
        assert False

    def _get_session(self, specifier: AlienSessionSpecifier) -> _AlienSession:
        # Lazily create one reassembly session per alien session specifier.
        try:
            return self._sessions[specifier]
        except KeyError:
            self._sessions[specifier] = _AlienSession(specifier)
            return self._sessions[specifier]

    def _on_parsed(self, timestamp: Timestamp, data: memoryview, frame: typing.Optional[SerialFrame]) -> None:
        _logger.debug(
            "Stream parser output (conflict: %s): %s <%d bytes> %s",
            bool(self._parser_output),
            timestamp,
            len(data),
            frame,
        )
        if self._parser_output is None:
            self._parser_output = timestamp, (data if frame is None else frame)
        else:
            # A second parser result from one capture means the caller did not
            # split the stream at the frame delimiters; reset state and reject.
            self._parser_output = None
            raise ValueError(
                f"The supplied serial capture object contains more than one serialized entity. "
                f"Such arrangement cannot be processed correctly by this implementation. "
                f"Please update the caller code to always fragment the input byte stream at the frame delimiters, "
                f"which are simply zero bytes. "
                f"The timestamp of the offending capture is {timestamp}."
            )
class _AlienSession:
    """Per-session reassembly state for transfers sniffed off the wire."""

    def __init__(self, specifier: AlienSessionSpecifier) -> None:
        self._specifier = specifier
        src = specifier.source_node_id
        # Anonymous sessions (no source node-ID) carry single-frame transfers
        # only and need no stateful reassembler.
        self._reassembler = AlienTransferReassembler(src) if src is not None else None

    def update(self, timestamp: Timestamp, frame: SerialFrame) -> typing.Optional[Trace]:
        outcome: typing.Union[TransferFrom, TransferReassembler.Error, None]
        if self._reassembler is None:
            tid_timeout = 0.0
            outcome = TransferReassembler.construct_anonymous_transfer(timestamp, frame)
        else:
            tid_timeout = self._reassembler.transfer_id_timeout
            outcome = self._reassembler.process_frame(timestamp, frame)
        if isinstance(outcome, TransferReassembler.Error):
            return SerialErrorTrace(timestamp=timestamp, error=outcome)
        if isinstance(outcome, TransferFrom):
            meta = AlienTransferMetadata(outcome.priority, outcome.transfer_id, self._specifier)
            return TransferTrace(timestamp, AlienTransfer(meta, outcome.fragmented_payload), tid_timeout)
        assert outcome is None
        return None
# ---------------------------------------- TESTS GO BELOW THIS LINE ----------------------------------------
def _unittest_serial_tracer() -> None:
    """Exercise SerialTracer: OOB data, valid transfers, errors, bad fragmentation."""
    from pytest import raises, approx
    from pyuavcan.transport import Priority, MessageDataSpecifier
    from pyuavcan.transport.serial import SerialTransport

    tr = SerialTransport.make_tracer()
    ts = Timestamp.now()

    def tx(x: typing.Union[bytes, bytearray, memoryview]) -> typing.Optional[Trace]:
        return tr.update(SerialCapture(ts, memoryview(x), own=True))

    def rx(x: typing.Union[bytes, bytearray, memoryview]) -> typing.Optional[Trace]:
        return tr.update(SerialCapture(ts, memoryview(x), own=False))

    # A valid single-frame transfer, deliberately fed in two fragments.
    buf = SerialFrame(
        priority=Priority.SLOW,
        transfer_id=1234567890,
        index=0,
        end_of_transfer=True,
        payload=memoryview(b"abc"),
        source_node_id=1111,
        destination_node_id=None,
        data_specifier=MessageDataSpecifier(6666),
    ).compile_into(bytearray(100))
    head, tail = buf[:10], buf[10:]
    assert None is tx(head)  # Semi-complete.
    trace = tx(head)  # Double-head invalidates the previous one.
    assert isinstance(trace, SerialOutOfBandTrace)
    assert trace.timestamp == ts
    assert trace.data.tobytes().strip(b"\0") == head.tobytes().strip(b"\0")
    trace = tx(tail)
    assert isinstance(trace, TransferTrace)
    assert trace.timestamp == ts
    assert trace.transfer_id_timeout == approx(2.0)  # Initial value.
    assert trace.transfer.metadata.transfer_id == 1234567890
    assert trace.transfer.metadata.priority == Priority.SLOW
    assert trace.transfer.metadata.session_specifier.source_node_id == 1111
    assert trace.transfer.metadata.session_specifier.destination_node_id is None
    assert trace.transfer.metadata.session_specifier.data_specifier == MessageDataSpecifier(6666)
    assert trace.transfer.fragmented_payload == [memoryview(b"abc")]

    # An anonymous (no source node-ID) single-frame transfer.
    buf = SerialFrame(
        priority=Priority.SLOW,
        transfer_id=1234567890,
        index=0,
        end_of_transfer=True,
        payload=memoryview(b"abc"),
        source_node_id=None,
        destination_node_id=None,
        data_specifier=MessageDataSpecifier(6666),
    ).compile_into(bytearray(100))
    trace = rx(buf)
    assert isinstance(trace, TransferTrace)
    assert trace.timestamp == ts
    assert trace.transfer.metadata.transfer_id == 1234567890
    assert trace.transfer.metadata.session_specifier.source_node_id is None
    assert trace.transfer.metadata.session_specifier.destination_node_id is None

    assert None is tr.update(pyuavcan.transport.Capture(ts))  # Wrong type, ignore.

    # Multi-frame transfer delivered with a broken first frame -> integrity error.
    trace = tx(
        SerialFrame(
            priority=Priority.SLOW,
            transfer_id=1234567890,
            index=0,
            end_of_transfer=False,
            payload=memoryview(bytes(range(256))),
            source_node_id=3333,
            destination_node_id=None,
            data_specifier=MessageDataSpecifier(6666),
        ).compile_into(bytearray(10_000))
    )
    assert trace is None
    trace = tx(
        SerialFrame(
            priority=Priority.SLOW,
            transfer_id=1234567890,
            index=1,
            end_of_transfer=True,
            payload=memoryview(bytes(range(256))),
            source_node_id=3333,
            destination_node_id=None,
            data_specifier=MessageDataSpecifier(6666),
        ).compile_into(bytearray(10_000))
    )
    assert isinstance(trace, SerialErrorTrace)
    assert trace.error == TransferReassembler.Error.MULTIFRAME_INTEGRITY_ERROR

    # Two frames inside one capture must be rejected.
    with raises(ValueError, match=".*delimiters.*"):
        rx(b"".join([buf, buf]))
| {
"content_hash": "fdbefadd28a7d4e4dae734c2c09feabf",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 115,
"avg_line_length": 38.82846715328467,
"alnum_prop": 0.6635022088542156,
"repo_name": "UAVCAN/pyuavcan",
"id": "d3018b5f0dd13a8da2681b87de46926904c4e28a",
"size": "10790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyuavcan/transport/serial/_tracer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "36883"
},
{
"name": "Python",
"bytes": "1371788"
}
],
"symlink_target": ""
} |
from flask import render_template, request, flash,abort,redirect,url_for
from flask.ext.login import login_required, current_user
from . import main
from .. import db
from sqlalchemy import and_,desc,or_
from app.models import users,backhosts,customers,backarchives,config,backfailed,count_day_status,count_mon_status
from config import Config
import os,json,string,datetime
from random import choice
import py_compile
def GenPassword(length=8, chars=string.ascii_letters + string.digits):
    """Return a random password of *length* characters drawn from *chars*.

    NOTE(review): uses the non-cryptographic ``random`` module; for
    security-sensitive credentials a CSPRNG (e.g. ``os.urandom``-based
    generation) would be preferable.
    """
    return ''.join(choice(chars) for _ in range(length))
@main.route('/',methods=['GET', 'POST'])
@login_required
def index():
    """Dashboard: yesterday's backup counters plus 12-month trend chart data."""
    yesterday = str(datetime.date.today()-datetime.timedelta(days=1))
    day_count = count_day_status.query.filter_by(count_date=yesterday).first()
    if day_count:
        back_success_file = day_count.back_file_success
        back_customers_success = day_count.back_customers_success
        back_file_failed = day_count.back_file_failed
        back_customers_failed = day_count.back_customers_failed
    else:
        # No counters recorded for yesterday yet; show zeros.
        back_success_file = 0
        back_customers_success = 0
        back_file_failed = 0
        back_customers_failed = 0
    mon_count = count_mon_status.query.order_by(desc(count_mon_status.id)).limit(12).all()
    customer_count = []
    customer_count_customer = []
    customer_count_file = []
    # Build the three chart data sets consumed by the template's JS charts.
    for mon_item in mon_count:
        mon_dict = {'y':str(mon_item.count_date),'a':str(mon_item.back_customers),'b':str(mon_item.back_customers_stop)}
        mon_dict_customer = {'period':str(mon_item.count_date),'platform':str(mon_item.back_customers)}
        mon_dict_file = {'period':str(mon_item.count_date),'file':str(mon_item.back_file)}
        customer_count.append(mon_dict)
        customer_count_customer.append(mon_dict_customer)
        customer_count_file.append(mon_dict_file)
    # The query returned newest-first; the charts want oldest-first.
    customer_count = customer_count[::-1]
    customer_count_customer = customer_count_customer[::-1]
    customer_count_file = customer_count_file[::-1]
    yes_backfailed = backfailed.query.filter_by(count_date=yesterday).order_by(desc(backfailed.back_failed)).limit(4).all()
    # Pad the "top failed" list with zeros so the template always gets 4 rows.
    for i in range(0,int(4-len(yes_backfailed))):
        yes_backfailed.append(0)
    return render_template('index.html',back_success_file=back_success_file,back_customers_success=back_customers_success,
                           back_file_failed=back_file_failed,back_customers_failed=back_customers_failed,
                           customer_count=str(customer_count),customer_count_customer=str(customer_count_customer),
                           customer_count_file=str(customer_count_file),yes_backfailed=yes_backfailed)
@main.route('/build_config/',methods=['GET', 'POST'])
@login_required
def build_config():
    """Create a customer record and generate its backup script from a template.

    On POST: validate uniqueness of name/short-name/database, persist the
    customer with a random password, render the backup-script template with
    the customer's settings, byte-compile it, and redirect to the .pyc file.
    On GET: show the creation form.

    NOTE(review): the generated script embeds plaintext DB and FTP
    credentials on disk — confirm the scripts directory is access-controlled.
    """
    if request.method == 'POST':
        customers_name = request.form['customers_name']
        customers_short = request.form['customers_short']
        mysqldump_path = request.form['mysqldump_path']
        local_back_dir = request.form['local_back_dir']
        local_save = request.form['local_save']
        db_ip = request.form['db_ip']
        db_port = request.form['db_port']
        db_user = request.form['db_user']
        db_pass = request.form['db_pass']
        db_name = request.form['db_name']
        # Reject duplicates on customer name, short name or database name.
        customer = customers.query.filter(or_(customers.customers_name==customers_name,customers.customers_short==customers_short
                                              ,customers.db_name==db_name)).all()
        if customer:
            flash(u'%s平台 添加记录失败,客户名称/数据库名称已存在!' %customers_name)
            backhost = backhosts.query.all()
            return render_template('customeradd.html',back_hosts=backhost)
        backhost_id = int(request.form['backhost_id'])
        # The short name doubles as the API user; the password is random.
        random_pass = GenPassword()
        customer = customers(customers_name=customers_name,customers_short=customers_short,customers_user=customers_short,
                             customers_pass=random_pass,mysqldump_path=mysqldump_path,local_back_dir=local_back_dir,
                             db_ip=db_ip,db_port=db_port,db_user=db_user,db_pass=db_pass,db_name=db_name,
                             backhost_id=backhost_id,local_save=local_save)
        db.session.add(customer)
        db.session.commit()
        # Pull the global API / WeChat settings for the script template.
        config_url = config.query.filter_by(key='apiurl').first()
        config_apipath = config.query.filter_by(key='apipath').first()
        config_apiport = config.query.filter_by(key='apiport').first()
        config_CorpID = config.query.filter_by(key='CorpID').first()
        config_Secret = config.query.filter_by(key='Secret').first()
        config_dict = {'apiurl':config_url.value,'apipath':config_apipath.value,'apiport':int(config_apiport.value),
                       'mysql_dump':mysqldump_path,'mysql_host':db_ip,'mysql_port':db_port,
                       'mysql_user':db_user,'mysql_pass':db_pass,'database_name':db_name,
                       'backup_dir':local_back_dir,'customers_user':customers_short,'customers_pass':random_pass,
                       'CorpID':config_CorpID.value,'Secret':config_Secret.value}
        print config_dict
        # Substitute the settings into the backup-script template.
        back_code = open(Config.back_script).read()
        code_string = string.Template(back_code)
        pro_code = code_string.substitute(config_dict)
        scripts_dir = Config.scripts_dir
        print scripts_dir,customers_short
        config_file_dir = os.path.join(scripts_dir,customers_short)
        if not os.path.exists(config_file_dir):
            os.mkdir(config_file_dir)
        customers_back_file = customers_short + '_Backup_Mysql.py'
        customers_back_file = os.path.join(config_file_dir,customers_back_file)
        output = open(customers_back_file,'w')
        output.write(pro_code)
        output.close()
        # Byte-compile so the download link below points at the .pyc file.
        py_compile.compile(customers_back_file)
        return redirect(request.url_root+str(customers_back_file+'c').split('BackManage')[1])
    else:
        backhost = backhosts.query.all()
        return render_template('customeradd.html',back_hosts=backhost)
@main.route('/set_config/',methods=['GET', 'POST'])
@login_required
def set_config():
    """View and update the global API/WeChat settings stored in the config table."""
    if request.method == 'POST':
        try:
            apiurl = request.form['apiurl']
            apipath = request.form['apipath']
            apiport = request.form['apiport']
            CorpID = request.form['CorpID']
            Secret = request.form['Secret']
            # Each setting is one key/value row in the config table.
            apiurl_obj = config.query.filter_by(key='apiurl').first()
            apiurl_obj.value = apiurl
            apipath_obj = config.query.filter_by(key='apipath').first()
            apipath_obj.value = apipath
            apiport_obj = config.query.filter_by(key='apiport').first()
            apiport_obj.value = apiport
            CorpID_obj = config.query.filter_by(key='CorpID').first()
            CorpID_obj.value = CorpID
            Secret_obj = config.query.filter_by(key='Secret').first()
            Secret_obj.value = Secret
            db.session.add(apiurl_obj)
            db.session.add(apipath_obj)
            db.session.add(apiport_obj)
            db.session.add(CorpID_obj)
            db.session.add(Secret_obj)
            db.session.commit()
        except Exception as e:
            print e.message
            flash(u'系统设置更新失败!')
            return redirect(url_for('main.set_config'))
        flash(u'系统设置更新成功!')
        return redirect(url_for('main.set_config'))
    else:
        try:
            config_url = config.query.filter_by(key='apiurl').first().value
            config_apipath = config.query.filter_by(key='apipath').first().value
            config_apiport = config.query.filter_by(key='apiport').first().value
            config_CorpID = config.query.filter_by(key='CorpID').first().value
            config_Secret = config.query.filter_by(key='Secret').first().value
            return render_template('config.html',apiurl=config_url,apipath=config_apipath,apiport=config_apiport,
                                   CorpID=config_CorpID,Secret=config_Secret)
        except Exception as e:
            # Settings rows missing (e.g. first run): render an empty form.
            print e.message
            return render_template('config.html')
@main.route('/api/', methods=['GET', 'POST'])
def api():
    """JSON API consumed by the generated backup scripts.

    POST operations:
      - 'auth': validate customer credentials and return FTP upload details.
      - 'upload_info': record a completed backup upload.
      - anything else: simple liveness acknowledgement.

    NOTE(review): a failed 'auth' (no matching active customer) and plain GET
    requests fall through and implicitly return None, which Flask reports as
    an error — confirm whether that is intended.
    """
    if request.method == 'POST':
        operation = request.form['operation']
        if operation == 'auth':
            auth_user = request.form['user']
            auth_pass = request.form['pass']
            # customers_status == 0 means backups are enabled for the customer.
            customer = customers.query.filter(and_(customers.customers_user==auth_user,customers.customers_pass==auth_pass,customers.customers_status==0)).first()
            if customer:
                back_host = backhosts.query.filter_by(id=customer.backhost_id).first()
                data = {'auth':'ok','customer_name':customer.customers_name,'ftp_ip':back_host.ftp_ip,'ftp_port':int(back_host.ftp_port),'local_save':customer.local_save,
                        'ftp_user':back_host.ftp_user,'ftp_pass':back_host.ftp_pass,'ftp_dir':customer.customers_short}
                return json.dumps(data)
        elif operation == 'upload_info':
            md5 = request.form['md5']
            customer_short = request.form['name']
            upload_ip = request.form['upload_ip']
            upload_path = request.form['upload_path']
            upload_name = request.form['upload_name']
            upload_time = request.form['upload_time']
            upload_size = request.form['upload_file_size']
            customer = customers.query.filter_by(customers_short=customer_short).first()
            # Persist one archive record per successful upload.
            backarchive = backarchives(customer_id=customer.id,back_name=upload_name,back_ip=upload_ip,back_path=upload_path,
                                       back_time=upload_time,back_md5=md5,back_size=upload_size)
            db.session.add(backarchive)
            db.session.commit()
            data = {'backup_info':'ok'}
            return json.dumps(data)
        else:
            data = {'status':'ok'}
            return json.dumps(data)
@main.route('/add_backnode/',methods=['GET', 'POST'])
@login_required
def add_backnode():
    """Add a backup (FTP) node unless its name or IP is already registered."""
    if request.method == 'POST':
        node_name = request.form['node_name']
        ftp_ip = request.form['ftp_ip']
        ftp_port = request.form['ftp_port']
        ftp_user = request.form['ftp_user']
        ftp_pass = request.form['ftp_pass']
        # Reject duplicates by node name or FTP IP.
        back_node = backhosts.query.filter(or_(backhosts.host_node==node_name,backhosts.ftp_ip==ftp_ip)).all()
        if back_node:
            flash(u'%s 节点已经存在,请勿重复添加!' %node_name)
            return render_template('addbacknode.html')
        backhost = backhosts(host_node=node_name,ftp_ip=ftp_ip,ftp_port=ftp_port,ftp_user=ftp_user,ftp_pass=ftp_pass)
        db.session.add(backhost)
        db.session.commit()
        flash(u'%s 节点添加成功!' %node_name)
        return render_template('addbacknode.html')
    else:
        return render_template('addbacknode.html')
@main.route('/backnode/',methods=['GET', 'POST'])
def backnode():
    """List the registered backup nodes.

    NOTE(review): the POST branch is unimplemented and implicitly returns
    None (a Flask error) — confirm POST is never issued to this route.
    """
    if request.method == 'POST':
        pass
    else:
        backnodes = backhosts.query.all()
        return render_template('backnode.html',backnodes=backnodes)
@main.route('/backmanage/',methods=['GET', 'POST'])
@login_required
def backmanage():
    """List all backup archives, newest first.

    NOTE(review): the POST branch is unimplemented and implicitly returns
    None (a Flask error) — confirm POST is never issued to this route.
    """
    if request.method == 'POST':
        pass
    else:
        backarchive_all = backarchives.query.order_by(desc(backarchives.id)).all()
        return render_template('backarchives.html',backarchives=backarchive_all)
@main.route('/customer/',methods=['GET', 'POST'])
@login_required
def customer():
    """List customers (GET) or toggle a customer's backup status (POST)."""
    if request.method == 'POST':
        try:
            customer_id = request.form['customer_id']
            customer_oper = request.form['customer_oper']
            customer = customers.query.filter_by(id=customer_id).first()
            # customers_status: 1 = backups stopped, 0 = backups enabled.
            if customer_oper == 'stop_back':
                customer.customers_status = 1
            else:
                customer.customers_status = 0
            db.session.add(customer)
            db.session.commit()
            return u"更新状态成功!"
        except Exception, e:
            print e
            return u"更新状态失败!"
    else:
        customer_all = customers.query.all()
        return render_template('customers.html',customers=customer_all)
@main.route('/failed_customer/',methods=['GET', 'POST'])
@login_required
def failed_customer():
    """List backup failures, most recent date first."""
    backfaileds = backfailed.query.order_by(desc(backfailed.count_date)).all()
    return render_template('backfailed.html',backfaileds=backfaileds)
@main.route('/help/',methods=['GET', 'POST'])
@login_required
def help():
    """Render the static help page.

    NOTE(review): shadows the builtin ``help``; harmless here since the name
    is only used as a Flask view, but renaming would be cleaner.
    """
    return render_template('help.html')
"content_hash": "2215b0f789c22eb1241e4e2e8e2c7a38",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 169,
"avg_line_length": 47.83458646616541,
"alnum_prop": 0.6147437912606099,
"repo_name": "linuxyan/BackManager",
"id": "341c9eba0762d38d517d439e95f02672cfe49104",
"size": "12917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19070"
},
{
"name": "HTML",
"bytes": "33455"
},
{
"name": "JavaScript",
"bytes": "44647"
},
{
"name": "Python",
"bytes": "51600"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_network_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vultr_network_info) instead.
short_description: Gather facts about the Vultr networks available.
description:
- Gather facts about networks available in Vultr.
version_added: "2.7"
author: "Yanis Guenane (@Spredzy)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr networks facts
local_action:
module: vultr_network_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_network_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_network_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
"vultr_network_facts": [
{
"date_created": "2018-08-02 11:18:49",
"id": "net5b62e8991adfg",
"name": "mynet",
"region": "Amsterdam",
"v4_subnet": "192.168.42.0",
"v4_subnet_mask": 24
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrNetworkFacts(Vultr):
    """Thin wrapper over the Vultr API for listing private networks."""

    def __init__(self, module):
        super(AnsibleVultrNetworkFacts, self).__init__(module, "vultr_network_facts")
        # Maps raw Vultr API fields to the fact names exposed to the user;
        # `key` renames, `transform`/`convert_to` post-process the value.
        self.returns = {
            'DCID': dict(key='region', transform=self._get_region_name),
            'NETWORKID': dict(key='id'),
            'date_created': dict(),
            'description': dict(key='name'),
            'v4_subnet': dict(),
            'v4_subnet_mask': dict(convert_to='int'),
        }

    def _get_region_name(self, region):
        # Resolve a numeric DCID into its human-readable region name.
        return self.query_resource_by_key(
            key='DCID',
            value=region,
            resource='regions',
            use_cache=True
        )['name']

    def get_networks(self):
        """Return the raw /v1/network/list API response."""
        return self.api_query(path="/v1/network/list")
def parse_network_list(network_list):
    """Normalize the raw /v1/network/list response into a list of networks.

    The Vultr API returns an empty JSON list when no networks exist and a
    dict keyed by network ID otherwise; only the dict case carries data.
    """
    if isinstance(network_list, list):
        return []
    # Fixed: the original comprehension bound a loop variable named `id`,
    # shadowing the builtin; .values() is equivalent and clearer.
    return list(network_list.values())
def main():
    """Module entry point: query Vultr networks and expose them as Ansible facts."""
    argument_spec = vultr_argument_spec()

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    network_facts = AnsibleVultrNetworkFacts(module)
    result = network_facts.get_result(parse_network_list(network_facts.get_networks()))
    # Facts modules return their payload under ansible_facts as well.
    ansible_facts = {
        'vultr_network_facts': result['vultr_network_facts']
    }
    module.exit_json(ansible_facts=ansible_facts, **result)


if __name__ == '__main__':
    main()
| {
"content_hash": "fd47d556bf7b1941d199ac5118929f65",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 99,
"avg_line_length": 26.27857142857143,
"alnum_prop": 0.6287034520250068,
"repo_name": "thaim/ansible",
"id": "71b3193a725a98553df90b2dd60e1df2e04d5a40",
"size": "3847",
"binary": false,
"copies": "7",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/vultr/_vultr_network_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import yaml
import time
import random
import threading
import traceback
import subprocess
from rackattack import clientfactory
from rackattack.physical import config
from rackattack.api import Requirement, AllocationInfo
from rackattack.physical.tests.integration.main import useFakeGeneralConfiguration
class RackattackTestClients(threading.Thread):
    """Background thread that exercises a RackAttack server with random
    allocate/free load."""
    # (min, max) number of hosts to request per load scenario.
    SCENARIOS = dict(few=(1, 4), moreThanFew=(5, 9), many=(10, 30))
    # Probability of picking each scenario; must sum to 1 (checked in __init__).
    SCENARIOS_PROBABILITIES = dict(few=0.7, moreThanFew=0.2, many=0.1)
def __init__(self, nodeBaseName="node"):
    """Set up the RackAttack client and read the osmosis server address
    from the physical configuration file."""
    assert(sum(self.SCENARIOS_PROBABILITIES.values()) == 1)
    super(RackattackTestClients, self).__init__()
    self._nodeBaseName = nodeBaseName
    self._client = clientfactory.factory()
    with open(config.CONFIGURATION_FILE) as f:
        # NOTE(review): yaml.load without an explicit Loader executes
        # arbitrary tags; fine for a trusted local config, but yaml.safe_load
        # would be safer.
        conf = yaml.load(f.read())
    self._osmosisServerIP = conf["OSMOSIS_SERVER_IP"]
    self._label = self._generateLabelName()
    self._nrHosts = self._getNrHosts()
    self._nrAllocatedHosts = 0
    self._profiledAllocation = None
    self._allocations = set()
    self._stop = False
def run(self):
    """Thread main loop: randomly allocate and free until stop() is called."""
    while True:
        if self._stop:
            # Drain every outstanding allocation before exiting.
            while self._allocations:
                allocation = self._allocations.pop()
                allocation.free()
            return
        self._updateNrAllocatedHosts()
        if self._nrAllocatedHosts == self._nrHosts:
            # Fully allocated: the only possible move is to free.
            self._free()
        elif not self._allocations:
            self._allocateForBackground()
        elif self._nrAllocatedHosts <= self._nrHosts:
            self._performRandomLoadAction()
        else:
            assert(False)
        # Pause 0.5-1.7 seconds between actions.
        interval = 0.5 + random.random() * 1.2
        time.sleep(interval)
def stop(self):
    """Signal the run() loop to free all allocations and exit."""
    self._stop = True
def _updateNrAllocatedHosts(self):
    """Recount allocated hosts, discarding allocations that have died."""
    alive = set()
    count = 0
    for allocation in self._allocations:
        # dead() returns None while the allocation is still alive.
        if allocation.dead() is not None:
            continue
        count += len(allocation._requirements)
        alive.add(allocation)
    self._nrAllocatedHosts = count
    self._allocations = alive
def _generateLabelName(self):
    """Pick the first osmosis label on the server to use as the image label.

    NOTE(review): the command is built by string interpolation and run with
    shell=True; the IP comes from the local config file (trusted input),
    but a list argv with shell=False would still be preferable.
    """
    cmd = "osmosis listlabels --objectStores=%(osmosisServerIP)s:1010 star | head -n 1" % \
        dict(osmosisServerIP=self._osmosisServerIP)
    print "Running %(cmd)s" % dict(cmd=cmd)
    labelName = subprocess.check_output(cmd, shell=True)
    labelName = labelName.strip()
    return labelName
def _performRandomLoadAction(self):
    """Steer toward a 65% allocation ratio: with 90% probability take the
    action that moves the ratio toward the target, otherwise the opposite."""
    wantedAllocationRatio = 0.65
    allocationRatio = self._nrAllocatedHosts / float(self._nrHosts)
    print "allocationRatio: {}, nrAllocated: {}, nrHosts: {}".format(allocationRatio,
                                                                     self._nrAllocatedHosts,
                                                                     self._nrHosts)
    if allocationRatio < wantedAllocationRatio:
        print "Will most likeliy allocate now..."
        majorityAction = self._allocateForBackground
        minorityAction = self._free
    else:
        print "Reached the wanted ratio..."
        time.sleep(0.5)
        print "Will most likeliy free now..."
        majorityAction = self._free
        minorityAction = self._allocateForBackground
    withinWhatRange = random.random()
    if withinWhatRange < 0.9:
        majorityAction()
    else:
        minorityAction()
    def _generateRequirements(self, nrHosts, pool, serverIDWildcard):
        """Build a {nodeName: Requirement} dict for nrHosts hosts.

        Node names are self._nodeBaseName plus a running index; every node
        uses the image label fetched from Osmosis at startup.
        """
        requirements = dict([("{}{}".format(self._nodeBaseName, nodeIdx),
                              Requirement(imageLabel=self._label,
                                          imageHint=self._label,
                                          hardwareConstraints=None,
                                          pool=pool,
                                          serverIDWildcard=serverIDWildcard))
                             for nodeIdx in xrange(nrHosts)])
        return requirements
def _generateAllocationInfo(self):
allocationInfo = AllocationInfo(user="johabab", purpose="loadTests")
return allocationInfo
    def allocate(self, nrHosts, pool="default", serverIDWildcard=""):
        """Public entry point: allocate nrHosts hosts after refreshing state."""
        self._updateNrAllocatedHosts()
        self._allocate(nrHosts, pool, serverIDWildcard)
def _allocateForBackground(self):
nrHosts = self._getRandomNrHosts()
self._allocate(nrHosts)
    def _allocate(self, nrHostsToAllocate, pool="default", serverIDWildcard=""):
        """Try to allocate hosts from the rackattack server.

        Returns the Allocation object on success, or None if the allocation
        failed (the failure is printed, not raised).
        """
        requirements = self._generateRequirements(nrHostsToAllocate,
                                                  pool=pool,
                                                  serverIDWildcard=serverIDWildcard)
        allocationInfo = self._generateAllocationInfo()
        print "Trying to allocate %(nrHosts)s hosts from %(pool)s" % dict(nrHosts=len(requirements),
                                                                          pool=pool)
        allocation = None
        try:
            allocation = self._client.allocate(requirements, allocationInfo)
            self._allocations.add(allocation)
            print "Allocation succeeded"
        except Exception as e:
            # "not enough machines" is an expected condition during a load
            # test, so it gets a friendlier message than other failures.
            if 'not enough machines' in str(e):
                print "Allocation failed: not enough machines"
            else:
                print str(e)
        return allocation
    def _getRandomNrHosts(self):
        """Pick a host count by sampling a scenario from SCENARIOS.

        SCENARIOS_PROBABILITIES is treated as a discrete distribution over
        the (sorted) scenario names; the chosen scenario's (min, max) range
        feeds random.randint. Assumes the probabilities sum to >= 1.0 -
        TODO confirm against the class constants (defined outside this view).
        """
        # Python 2: dict.keys() returns a sortable list.
        scenarioNames = self.SCENARIOS.keys()
        scenarioNames.sort()
        withinWhichRange = random.random()
        rangeBound = 0
        chosenScenarioName = None
        for scenarioName in scenarioNames:
            rangeBound += self.SCENARIOS_PROBABILITIES[scenarioName]
            if withinWhichRange <= rangeBound:
                chosenScenarioName = scenarioName
                break
        assert chosenScenarioName is not None
        nrHosts = random.randint(*self.SCENARIOS[chosenScenarioName])
        return nrHosts
    def free(self):
        """Public entry point: free one allocation after refreshing state."""
        self._updateNrAllocatedHosts()
        self._free()
def _free(self):
allocation = self._allocations.pop()
print "Trying to free an allocation..."
try:
allocation.free()
except Exception as e:
print "Failed freeing allocation: {}".format(str(e))
print "Allocation freed."
    def _getNrHosts(self):
        # Ask the rackattack server how many physical hosts exist in total.
        status = self._client.call("admin__queryStatus")
        return len(status["hosts"])
backgroundStressTestClient = None
profilingTestClient = None
def bgStress(mode):
    """CLI command: turn the background stress client on ("on") or off ("off").

    Any other mode string is silently ignored.
    """
    if mode == "on":
        print "Starting test clients..."
        backgroundStressTestClient.start()
    elif mode == "off":
        print "Stopping test clients..."
        backgroundStressTestClient.stop()
def allocate(nrHosts, pool="default", serverIDWildcard=""):
    """CLI command: allocate nrHosts hosts via the profiling client.

    Also raises the module-level profilingAllocation flag, which main()
    initializes and tracks.
    """
    # Bug fix: without the `global` declaration the assignment below created
    # a function-local that was discarded, so the module-level
    # profilingAllocation flag (set up in main()) was never updated.
    global profilingAllocation
    nrHosts = int(nrHosts)
    profilingTestClient.allocate(nrHosts, pool=pool, serverIDWildcard=serverIDWildcard)
    profilingAllocation = True
def free():
    # CLI command: free the profiling client's current allocation.
    profilingTestClient.free()
def main():
print """Available commands:
bgstress on/off
\tRuns allocations (and frees them) in the background.
allocate nrHosts [pool=default]
\tAllocates the given number of hosts from the given pool.
free
\tFrees the current allocation (which was created with the 'allocate' command, if such allocation
exists."""
useFakeGeneralConfiguration()
import pdb
pdb.set_trace()
global backgroundStressTestClient, profilingTestClient, profilingAllocation
backgroundStressTestClient = RackattackTestClients("background-stress")
profilingTestClient = RackattackTestClients("profiling")
client = clientfactory.factory()
profilingAllocation = False
commands = dict(bgstress=bgStress, allocate=allocate, free=free)
while True:
cmdline = raw_input()
cmdline = cmdline.strip()
if not cmdline:
continue
cmdline = cmdline.split(" ")
cmdline = [item.strip() for item in cmdline]
commandName = cmdline[0]
args = cmdline[1:]
if commandName not in commands:
print "Invalid command: %(commandName)s" % dict(commandName=commandName)
continue
command = commands[commandName]
try:
command(*args)
except Exception as e:
print e.message
traceback.print_exc()
continue
if __name__ == '__main__':
main()
| {
"content_hash": "d5bab91b80726ff17920f8722d9d19a0",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 105,
"avg_line_length": 37.493449781659386,
"alnum_prop": 0.5948054973212206,
"repo_name": "Stratoscale/rackattack-physical",
"id": "0748b97f22e5a9da6e84ef4adc57ff8d01e7097e",
"size": "8586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rackattack/physical/tests/integration/main_faketestclients.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1851"
},
{
"name": "M4",
"bytes": "688"
},
{
"name": "Makefile",
"bytes": "8407"
},
{
"name": "Python",
"bytes": "232666"
},
{
"name": "Shell",
"bytes": "6319"
}
],
"symlink_target": ""
} |
"""
Module with functions related to image coordinates and coordinate conversions.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez, Valentin Christiaens'
__all__ = ['dist',
'dist_matrix',
'frame_center',
'cart_to_pol',
'pol_to_cart',
'pol_to_eq',
'QU_to_QUphi']
import math
from matplotlib.pyplot import xlim, ylim, axes, gca, show
import matplotlib.pyplot as plt
import numpy as np
def dist(yc, xc, y1, x1):
    """
    Euclidean distance between (yc, xc) and (y1, x1).

    Either pair may be an array of positions, in which case the distances
    are computed element-wise.
    """
    dy = yc - y1
    dx = xc - x1
    return np.sqrt(dy ** 2 + dx ** 2)
def dist_matrix(n, cx=None, cy=None):
    """
    Build an (n, n) matrix of Euclidean distances from the point (cx, cy).

    Parameters
    ----------
    n : int
        Output image shape is (n, n).
    cx, cy : float, optional
        Reference point; defaults to the image center.

    Returns
    -------
    im : ndarray with shape (n, n)

    Notes
    -----
    This is a replacement for ANDROMEDA's DISTC.
    """
    center = (n - 1) / 2
    cx = center if cx is None else cx
    cy = center if cy is None else cy
    # Open grid keeps memory at O(n) until broadcasting in the final line.
    yy, xx = np.ogrid[:n, :n]
    return np.sqrt((yy - cy) ** 2 + (xx - cx) ** 2)
def frame_center(array, verbose=False):
    """
    Return the y, x coordinates of the frame(s) center.

    Odd-sized dimensions give dim/2 - 0.5, even ones dim/2; the result is
    truncated to int.

    Parameters
    ----------
    array : 2d/3d/4d numpy ndarray
        Frame or cube; for cubes the spatial shape of one frame is used.
    verbose : bool optional
        If True the center coordinates are printed out.

    Returns
    -------
    cy, cx : int
        Coordinates of the center.
    """
    if array.ndim not in (2, 3, 4):
        raise ValueError('`array` is not a 2d, 3d or 4d array')
    # The last two axes are always the spatial (y, x) dimensions.
    ny, nx = array.shape[-2], array.shape[-1]
    cy = ny / 2 - 0.5 * (ny % 2)
    cx = nx / 2 - 0.5 * (nx % 2)
    if verbose:
        print('Center px coordinates at x,y = ({}, {})'.format(cx, cy))
    return int(cy), int(cx)
def cart_to_pol(x, y, cx=0, cy=0, astro_convention=False):
    """
    Convert cartesian (x, y) positions into polar (r, theta) coordinates.

    Parameters
    ----------
    x, y : float or numpy ndarray
        Cartesian coordinates with respect to the image origin.
    cx, cy : float or numpy ndarray
        Coordinates of the center to measure from.
    astro_convention: bool
        Whether to use angles measured from North up/East left (True), or
        measured from the positive x axis (False).

    Returns
    -------
    r, theta: floats or numpy ndarrays
        Radial separations and polar angles (in degrees) of the inputs.
    """
    dx = x - cx
    dy = y - cy
    r = np.sqrt(dy ** 2 + dx ** 2)
    theta = np.rad2deg(np.arctan2(dy, dx))
    # Astronomical convention measures angles from North (up), i.e. rotated
    # -90 degrees from the positive x axis.
    if astro_convention:
        theta -= 90
    return r, theta
def pol_to_cart(r, theta, r_err=0, theta_err=0, cx=0, cy=0,
                astro_convention=False):
    """
    Convert polar (r, theta) positions to cartesian (x, y), propagating
    uncertainties when given.

    Parameters
    ----------
    r, theta : float or numpy ndarray
        Radii and position angles (degrees) to convert.
    r_err : float, optional
        Error on the radial separation. Default is 0.
    theta_err : float, optional
        Error on the position angle, in degrees. Default is 0.
    cx, cy : float or numpy ndarray
        Coordinates of the center the polar coordinates are measured from.
    astro_convention: bool
        Whether angles are measured from North up/East left (True) or from
        the positive x axis (False). If True, the x axis is reversed so the
        positive axis points East (left).

    Returns
    -------
    x, y: floats or numpy ndarrays
        Cartesian positions. If either input error is non-zero, the
        propagated uncertainties dx, dy are appended to the return value.
    """
    if astro_convention:
        theta += 90
        sign = -1
    else:
        sign = 1
    theta = np.deg2rad(theta)
    theta_err = np.deg2rad(theta_err)
    x = cx + sign * r * np.cos(theta)
    y = cy + r * np.sin(theta)
    # First-order error propagation:
    # dx^2 = (dx/dr)^2 dr^2 + (dx/dtheta)^2 dtheta^2, same for dy.
    cos2 = np.cos(theta) ** 2
    sin2 = np.sin(theta) ** 2
    dx_err = np.sqrt(cos2 * r_err ** 2 + r ** 2 * sin2 * theta_err ** 2)
    dy_err = np.sqrt(sin2 * r_err ** 2 + r ** 2 * cos2 * theta_err ** 2)
    if r_err != 0 or theta_err != 0:
        return x, y, dx_err, dy_err
    return x, y
def pol_to_eq(r, t, rError=0, tError=0, astro_convention=False, plot=False):
    r"""
    Convert a polar position (r, t) into :math:`\Delta` RA and
    :math:`\Delta` DEC (equatorial offsets), with error propagation.

    Regardless of the convention assumed for the input angle t (see
    `astro_convention`), the output RA is counted positive towards left.

    Parameters
    ----------
    r: float
        The radial coordinate.
    t: float
        The angular coordinate in degrees.
    rError: float, optional
        The error bar related to r.
    tError: float, optional
        The error bar related to t, in degrees.
    astro_convention: bool, optional
        Whether t is measured from North up/East left (True) or from the
        positive x axis (False).
    plot: boolean, optional
        If True, a figure illustrating the error ellipse is displayed.

    Returns
    -------
    out : tuple
        ((RA, RA error), (DEC, DEC error))
    """
    if not astro_convention:
        # Re-measure the angle from North up so both conventions share the
        # formulas below.
        t -= 90
    t_rad = math.radians(t)
    ra = r * np.sin(t_rad)
    dec = r * np.cos(t_rad)
    # Orientation of the error ellipse (major axis along the radial direction).
    nu = np.mod(np.pi / 2 - t_rad, 2 * np.pi)
    # Ellipse semi-axes: radial and tangential uncertainties.
    a = rError
    b = r * np.sin(math.radians(tError))
    # Sample the ellipse and take the extreme excursions along RA and DEC.
    beta = np.linspace(0, 2 * np.pi, 5000)
    x = ra + (a * np.cos(beta) * np.cos(nu) - b * np.sin(beta) * np.sin(nu))
    y = dec + (b * np.sin(beta) * np.cos(nu) + a * np.cos(beta) * np.sin(nu))
    raErrorInf = ra - np.amin(x)
    raErrorSup = np.amax(x) - ra
    decErrorInf = dec - np.amin(y)
    decErrorSup = np.amax(y) - dec
    if plot:
        plt.plot(ra, dec, 'ks', x, y, 'r')
        plt.plot((r + rError) * np.cos(nu), (r + rError) * np.sin(nu), 'ob',
                 (r - rError) * np.cos(nu), (r - rError) * np.sin(nu), 'ob')
        plt.plot(r * np.cos(nu + math.radians(tError)),
                 r * np.sin(nu + math.radians(tError)), 'ok')
        plt.plot(r * np.cos(nu - math.radians(tError)),
                 r * np.sin(nu - math.radians(tError)), 'ok')
        plt.plot(0, 0, 'og', np.cos(np.linspace(0, 2 * np.pi, 10000)) * r,
                 np.sin(np.linspace(0, 2 * np.pi, 10000)) * r, 'y')
        plt.plot([0, r * np.cos(nu + math.radians(tError * 0))],
                 [0, r * np.sin(nu + math.radians(tError * 0))], 'k')
        axes().set_aspect('equal')
        lim = np.amax([a, b]) * 2.
        xlim([ra - lim, ra + lim])
        ylim([dec - lim, dec + lim])
        gca().invert_xaxis()
        show()
    return ((ra, np.mean([raErrorInf, raErrorSup])),
            (dec, np.mean([decErrorInf, decErrorSup])))
def QU_to_QUphi(Q, U, delta_x=0, delta_y=0, scale_r2=False,
                north_convention=False):
    """
    Return the azimuthal Stokes images Qphi and Uphi from input Q and U.

    Parameters
    ----------
    Q: numpy ndarray
        2d numpy array containing the Q component of polarisation.
    U: numpy ndarray
        2d numpy array containing the U component of polarisation. Should
        have the same dimensions as Q.
    delta_x, delta_y: float, opt
        Offsets of the star from the image center along x and y, resp.
    scale_r2: bool, opt
        Whether to scale by r^2 during conversion.
    north_convention: bool, opt
        Whether to use angles measured from North up/East left (True), or
        measured from the positive x axis (False).

    Returns
    -------
    Qphi, Uphi: numpy ndarrays
        Qphi and Uphi images.
    """
    cy, cx = frame_center(Q)
    Qphi = np.zeros_like(Q)
    Uphi = np.zeros_like(U)
    for ii in range(Q.shape[1]):
        for jj in range(Q.shape[0]):
            x = float(ii - cx - delta_x)
            y = float(jj - cy - delta_y)
            # Bug fix: cart_to_pol's keyword is `astro_convention`; the
            # original passed `north_convention=`, raising TypeError on
            # every call.
            rho, phi = cart_to_pol(x, y, astro_convention=north_convention)
            phi = np.deg2rad(phi)
            if scale_r2:
                Qphi[jj, ii] = (Q[jj, ii] * np.cos(2 * phi) +
                                U[jj, ii] * np.sin(2 * phi)) * rho ** 2
                Uphi[jj, ii] = (-Q[jj, ii] * np.sin(2 * phi) +
                                U[jj, ii] * np.cos(2 * phi)) * rho ** 2
            else:
                Qphi[jj, ii] = Q[jj, ii] * np.cos(2 * phi) + U[jj, ii] * np.sin(2 * phi)
                Uphi[jj, ii] = -Q[jj, ii] * np.sin(2 * phi) + U[jj, ii] * np.cos(2 * phi)
    return Qphi, Uphi
return Qphi, Uphi | {
"content_hash": "ff19a439b7a96653f821f384ee9ede2e",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 80,
"avg_line_length": 30.2655737704918,
"alnum_prop": 0.5619109522261944,
"repo_name": "henry-ngo/VIP",
"id": "0f6d2876c435f546a5280853dd83df65a27c11f7",
"size": "9255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vip_hci/var/coords.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "905"
},
{
"name": "Python",
"bytes": "1309524"
},
{
"name": "TeX",
"bytes": "38879"
}
],
"symlink_target": ""
} |
import unittest
from voluptuous import All, Length
from wtforms.validators import ValidationError
from datatypes.core import DictionaryValidator, SingleValueValidator
from datatypes.exceptions import *
class WtfTestDataType(SingleValueValidator):
    """Single-value validator fixture: unicode strings of at most 3 chars."""
    def __init__(self):
        super(self.__class__, self).__init__()
    def define_schema(self):
        # Value must be unicode and no longer than 3 characters.
        return All(unicode, Length(max=3))
    def define_error_message(self):
        # Message reported when validation fails.
        return "egg"
class TestDictValidator(DictionaryValidator):
    """Dictionary validator fixture: 'foo' max 3 chars, 'bar' max 1 char."""
    def __init__(self):
        super(self.__class__, self).__init__()
    def define_schema(self):
        # Per-field constraints checked by validate().
        return {
            'foo': Length(max=3),
            'bar': Length(max=1)
        }
    def define_error_dictionary(self):
        # Custom message for 'foo' only; 'bar' presumably falls back to the
        # schema library's default message - see the assertions below.
        return {
            'foo': 'sausages'
        }
class TestValidationCore(unittest.TestCase):
    """Tests for the validator base classes: input cleaning, error
    reporting and WTForms integration."""
    def test_can_filter_none_and_self_from_dictionary(self):
        """clean_input drops None values and the 'self' entry."""
        validator = TestDictValidator()
        dictionary = {u'a': 1, u'b': u'2', u'c': None, u'self': self}
        result = validator.clean_input(dictionary)
        self.assertFalse(None in result.itervalues(),
                         "Result should not contain None: " + repr(result))
        self.assertFalse(u'c' in result.iterkeys(),
                         "Result should not contain key 'c' as this is set to None: " + repr(result))
    def test_can_filter_none_from_nested_dictionary(self):
        """clean_input removes None values recursively in nested dicts."""
        validator = TestDictValidator()
        dictionary = {u'a': None, u'b': {u'c': None, u'd': u'd'}}
        result = validator.clean_input(dictionary)
        self.assertFalse(u'a' in result.iterkeys())
        sub_dictionary = result[u'b']
        self.assertFalse(u'c' in sub_dictionary.iterkeys())
    def test_single_value_validator_raises_correct_error_messages(self):
        """A failing single-value validator raises its defined message."""
        class TestDatType(SingleValueValidator):
            def __init__(self):
                super(self.__class__, self).__init__()
            def define_schema(self):
                return All(Length(max=3))
            def define_error_message(self):
                return "foo"
        try:
            TestDatType().validate("1234")
            self.fail("Should have throw exception")
        except DataDoesNotMatchSchemaException as exception:
            self.assertEqual(exception.message, "foo")
            self.assertEqual(repr(exception), str(exception))
    def test_dictionary_validator_raises_correct_error_messages(self):
        """Custom messages apply where defined; others use the default."""
        validator = TestDictValidator()
        try:
            validator.validate({u'foo': u'1234', u'bar': u'12'})
            self.fail("exception should have been thrown")
        except DataDoesNotMatchSchemaException as exception:
            self.assertEqual(exception.field_errors['foo'], 'sausages')
            self.assertEqual(exception.field_errors['bar'], 'length of value must be at most 1')
    def test_raises_error_if_schema_not_defined(self):
        """Instantiating a validator without a schema raises NoSchemaException."""
        class TestDataType(DictionaryValidator):
            def __init__(self):
                super(self.__class__, self).__init__()
            def define_error_dictionary(self):
                pass
        self.assertRaises(NoSchemaException, TestDataType)
    def test_raises_error_if_error_message_not_defined(self):
        """A single-value validator must define an error message."""
        class TestDataType(SingleValueValidator):
            def __init__(self):
                super(self.__class__, self).__init__()
            def define_schema(self):
                pass
        self.assertRaises(ErrorMessageNotDefined, TestDataType)
    def test_raises_error_if_error_dictionary_is_not_defined(self):
        """A dictionary validator must define an error dictionary."""
        class TestDataType(DictionaryValidator):
            def __init__(self):
                super(self.__class__, self).__init__()
            def define_schema(self):
                pass
        self.assertRaises(NoErrorDictionaryDefined, TestDataType)
    def test_single_value_wtform_error_handling(self):
        """wtform_validator uses the given message, or the validator's own."""
        validator = WtfTestDataType()
        class FakeField(object):
            data = "1234"
        try:
            wtvalidator = validator.wtform_validator(message="sausages")
            wtvalidator(field=FakeField())
            self.fail("Should have thrown an exception")
        except ValidationError as exception:
            self.assertEqual(exception.message, "sausages")
        try:
            wtvalidator = validator.wtform_validator()
            wtvalidator(field=FakeField())
            self.fail("Should have thrown exception")
        except ValidationError as exception:
            self.assertEqual(exception.message, "egg")
    def test_can_validate_single_field_in_wtf(self):
        """A valid field value passes the WTForms-wrapped validator."""
        class FakeField(object):
            data = u"ab"
        try:
            validator = WtfTestDataType().wtform_validator()
            validator(field=FakeField())
        except ValidationError as exception:
            self.fail("Should not have thrown exception " + repr(exception))
| {
"content_hash": "6f657f8f31dcde50fcae41040eae519c",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 101,
"avg_line_length": 33.93055555555556,
"alnum_prop": 0.6082685223086369,
"repo_name": "LandRegistry/datatypes-alpha",
"id": "3609d7b585f03d772a7105ad39da959f9753e90d",
"size": "4886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69250"
},
{
"name": "Shell",
"bytes": "0"
}
],
"symlink_target": ""
} |
from flask import render_template, Blueprint, abort, send_file
from flask_jwt import verify_jwt
from flask.ext.jwt import current_user
main = Blueprint('main', __name__, template_folder='templates', static_folder='static/gen', static_url_path='/static')
@main.route('/templates/<path:partial>')
def render_partial(partial=None):
    """Render an HTML partial template by name.

    Rejects path traversal attempts ('..' segments or absolute paths),
    consistent with send_lang_json/send_flag below. Jinja's loader likely
    also refuses escaping paths, but the explicit check keeps the handlers
    uniform and defense-in-depth.
    """
    if '..' in partial or partial.startswith('/'):
        abort(404)
    return render_template(partial + '.html')
@main.route('/languages/<path:lang>')
def send_lang_json(lang=None):
    """Serve a language JSON file, rejecting path traversal attempts."""
    unsafe = lang.startswith('/') or '..' in lang
    if unsafe:
        abort(404)
    return send_file('languages/' + lang)
@main.route('/static/flags/<path:flag>')
def send_flag(flag=None):
    """Serve a flag icon from the bundled flag-icon-css assets."""
    unsafe = flag.startswith('/') or '..' in flag
    if unsafe:
        abort(404)
    return send_file('static/bower_components/flag-icon-css/flags/' + flag)
@main.route('/favicon.ico')
def send_favicon():
    # Serve the site favicon from the static directory.
    return send_file('static/favicon.ico')
@main.route('/')
@main.route('/<path:path>')
def index(path=None):
    # Catch-all route: always render the SPA shell; client-side routing
    # handles the actual path.
    return render_template('index.html')
| {
"content_hash": "c6146f7c7d2878bf1f086712dd5e9027",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 118,
"avg_line_length": 26.88888888888889,
"alnum_prop": 0.6787190082644629,
"repo_name": "timesqueezer/mdfork",
"id": "94819b969ab7e10f695e07d2ee1d88e241407e28",
"size": "968",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mooddiary/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10284"
},
{
"name": "HTML",
"bytes": "41842"
},
{
"name": "JavaScript",
"bytes": "35484"
},
{
"name": "Makefile",
"bytes": "427"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "37303"
},
{
"name": "Shell",
"bytes": "535"
}
],
"symlink_target": ""
} |
"""
sentry.plugins.sentry_urls.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sentry
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class UrlsPlugin(TagPlugin):
    """
    Automatically adds the 'url' tag from events containing interface data
    from ``sentry.interfaces.Http``.
    """
    slug = 'urls'
    title = _('URLs')
    version = sentry.VERSION
    author = "Sentry Team"
    author_url = "https://github.com/getsentry/sentry"
    tag = 'url'
    tag_label = _('URL')
    def get_tag_values(self, event):
        # Events without HTTP request data, or without a URL on it,
        # contribute no tag values.
        http = event.interfaces.get('sentry.interfaces.Http')
        if not http:
            return []
        if not http.url:
            return []
        return [http.url]
register(UrlsPlugin)
| {
"content_hash": "93bc838daf81c3fbab55b151f7f20a5b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 25.43243243243243,
"alnum_prop": 0.6301806588735388,
"repo_name": "simmetria/sentry",
"id": "6c45663a1743b8c1cba6adb0b4a6610875a07edc",
"size": "941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/plugins/sentry_urls/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from operator import attrgetter
from django.core.exceptions import FieldError, ValidationError
from django.db import connection, models
from django.test import SimpleTestCase, TestCase
from django.test.utils import CaptureQueriesContext, isolate_apps
from .models import (
Base, Chef, CommonInfo, GrandChild, GrandParent, ItalianRestaurant,
MixinModel, Parent, ParkingLot, Place, Post, Restaurant, Student, SubBase,
Supplier, Title, Worker,
)
class ModelInheritanceTests(TestCase):
    """Tests for abstract and multi-table model inheritance behavior."""
    def test_abstract(self):
        # The Student and Worker models both have 'name' and 'age' fields on
        # them and inherit the __str__() method, just as with normal Python
        # subclassing. This is useful if you want to factor out common
        # information for programming purposes, but still completely
        # independent separate models at the database level.
        w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker")
        Worker.objects.create(name="Barney", age=34, job="Quarry worker")
        s = Student.objects.create(name="Pebbles", age=5, school_class="1B")
        self.assertEqual(str(w1), "Worker Fred")
        self.assertEqual(str(s), "Student Pebbles")
        # The children inherit the Meta class of their parents (if they don't
        # specify their own).
        self.assertSequenceEqual(
            Worker.objects.values("name"), [
                {"name": "Barney"},
                {"name": "Fred"},
            ],
        )
        # Since Student does not subclass CommonInfo's Meta, it has the effect
        # of completely overriding it. So ordering by name doesn't take place
        # for Students.
        self.assertEqual(Student._meta.ordering, [])
        # However, the CommonInfo class cannot be used as a normal model (it
        # doesn't exist as a model).
        with self.assertRaisesMessage(AttributeError, "'CommonInfo' has no attribute 'objects'"):
            CommonInfo.objects.all()
    def test_reverse_relation_for_different_hierarchy_tree(self):
        # Even though p.supplier for a Place 'p' (a parent of a Supplier), a
        # Restaurant object cannot access that reverse relation, since it's not
        # part of the Place-Supplier Hierarchy.
        self.assertQuerysetEqual(Place.objects.filter(supplier__name="foo"), [])
        msg = (
            "Cannot resolve keyword 'supplier' into field. Choices are: "
            "address, chef, chef_id, id, italianrestaurant, lot, name, "
            "place_ptr, place_ptr_id, provider, rating, serves_hot_dogs, serves_pizza"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Restaurant.objects.filter(supplier__name="foo")
    def test_model_with_distinct_accessors(self):
        # The Post model has distinct accessors for the Comment and Link models.
        post = Post.objects.create(title="Lorem Ipsum")
        post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True)
        post.attached_link_set.create(
            content="The Web framework for perfections with deadlines.",
            url="http://www.djangoproject.com/"
        )
        # The Post model doesn't have an attribute called
        # 'attached_%(class)s_set'.
        msg = "'Post' object has no attribute 'attached_%(class)s_set'"
        with self.assertRaisesMessage(AttributeError, msg):
            getattr(post, "attached_%(class)s_set")
    def test_model_with_distinct_related_query_name(self):
        self.assertQuerysetEqual(Post.objects.filter(attached_model_inheritance_comments__is_spam=True), [])
        # The Post model doesn't have a related query accessor based on
        # related_name (attached_comment_set).
        msg = "Cannot resolve keyword 'attached_comment_set' into field."
        with self.assertRaisesMessage(FieldError, msg):
            Post.objects.filter(attached_comment_set__is_spam=True)
    def test_meta_fields_and_ordering(self):
        # Make sure Restaurant and ItalianRestaurant have the right fields in
        # the right order.
        self.assertEqual(
            [f.name for f in Restaurant._meta.fields],
            ["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs",
             "serves_pizza", "chef"]
        )
        self.assertEqual(
            [f.name for f in ItalianRestaurant._meta.fields],
            ["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs",
             "serves_pizza", "chef", "restaurant_ptr", "serves_gnocchi"],
        )
        self.assertEqual(Restaurant._meta.ordering, ["-rating"])
    def test_custompk_m2m(self):
        """M2M relations on a child with a custom-pk parent use the child pk."""
        b = Base.objects.create()
        b.titles.add(Title.objects.create(title="foof"))
        s = SubBase.objects.create(sub_id=b.id)
        b = Base.objects.get(pk=s.id)
        self.assertNotEqual(b.pk, s.pk)
        # Low-level test for related_val
        self.assertEqual(s.titles.related_val, (s.id,))
        # Higher level test for correct query values (title foof not
        # accidentally found).
        self.assertQuerysetEqual(s.titles.all(), [])
    def test_update_parent_filtering(self):
        """
        Updating a field of a model subclass doesn't issue an UPDATE
        query constrained by an inner query (#10399).
        """
        supplier = Supplier.objects.create(
            name='Central market',
            address='610 some street',
        )
        # Capture the expected query in a database agnostic way
        with CaptureQueriesContext(connection) as captured_queries:
            Place.objects.filter(pk=supplier.pk).update(name=supplier.name)
        expected_sql = captured_queries[0]['sql']
        # Capture the queries executed when a subclassed model instance is saved.
        with CaptureQueriesContext(connection) as captured_queries:
            supplier.save(update_fields=('name',))
        for query in captured_queries:
            sql = query['sql']
            if 'UPDATE' in sql:
                self.assertEqual(expected_sql, sql)
    def test_create_child_no_update(self):
        """Creating a child with non-abstract parents only issues INSERTs."""
        def a():
            GrandChild.objects.create(
                email='grand_parent@example.com',
                first_name='grand',
                last_name='parent',
            )
        def b():
            GrandChild().save()
        for i, test in enumerate([a, b]):
            with self.subTest(i=i), self.assertNumQueries(4), CaptureQueriesContext(connection) as queries:
                test()
                for query in queries:
                    sql = query['sql']
                    self.assertIn('INSERT INTO', sql, sql)
    def test_eq(self):
        # Equality doesn't transfer in multitable inheritance.
        self.assertNotEqual(Place(id=1), Restaurant(id=1))
        self.assertNotEqual(Restaurant(id=1), Place(id=1))
    def test_mixin_init(self):
        """Plain-Python mixin __init__ runs for model instances."""
        m = MixinModel()
        self.assertEqual(m.other_attr, 1)
    @isolate_apps('model_inheritance')
    def test_abstract_parent_link(self):
        """An explicit parent_link declared on an abstract intermediary is
        used as the parent link of the concrete child."""
        class A(models.Model):
            pass
        class B(A):
            a = models.OneToOneField('A', parent_link=True, on_delete=models.CASCADE)
            class Meta:
                abstract = True
        class C(B):
            pass
        self.assertIs(C._meta.parents[A], C._meta.get_field('a'))
    @isolate_apps('model_inheritance')
    def test_init_subclass(self):
        """__init_subclass__ keyword arguments are forwarded to model bases."""
        saved_kwargs = {}
        class A(models.Model):
            def __init_subclass__(cls, **kwargs):
                super().__init_subclass__()
                saved_kwargs.update(kwargs)
        kwargs = {'x': 1, 'y': 2, 'z': 3}
        class B(A, **kwargs):
            pass
        self.assertEqual(saved_kwargs, kwargs)
    @isolate_apps('model_inheritance')
    def test_set_name(self):
        """__set_name__ fires exactly once for class attributes on models."""
        class ClassAttr:
            called = None
            def __set_name__(self_, owner, name):
                self.assertIsNone(self_.called)
                self_.called = (owner, name)
        class A(models.Model):
            attr = ClassAttr()
        self.assertEqual(A.attr.called, (A, 'attr'))
    def test_inherited_ordering_pk_desc(self):
        """Meta.ordering by '-pk' on a child orders by the parent link column."""
        p1 = Parent.objects.create(first_name='Joe', email='joe@email.com')
        p2 = Parent.objects.create(first_name='Jon', email='jon@email.com')
        expected_order_by_sql = 'ORDER BY %s.%s DESC' % (
            connection.ops.quote_name(Parent._meta.db_table),
            connection.ops.quote_name(
                Parent._meta.get_field('grandparent_ptr').column
            ),
        )
        qs = Parent.objects.all()
        self.assertSequenceEqual(qs, [p2, p1])
        self.assertIn(expected_order_by_sql, str(qs.query))
    def test_queryset_class_getitem(self):
        """QuerySet class subscription (PEP 585 style) returns QuerySet itself."""
        self.assertIs(models.QuerySet[Post], models.QuerySet)
        self.assertIs(models.QuerySet[Post, Post], models.QuerySet)
        self.assertIs(models.QuerySet[Post, int, str], models.QuerySet)
class ModelInheritanceDataTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # One plain Restaurant and one ItalianRestaurant (a Restaurant
        # subclass) shared by all tests in this class.
        cls.restaurant = Restaurant.objects.create(
            name="Demon Dogs",
            address="944 W. Fullerton",
            serves_hot_dogs=True,
            serves_pizza=False,
            rating=2,
        )
        chef = Chef.objects.create(name="Albert")
        cls.italian_restaurant = ItalianRestaurant.objects.create(
            name="Ristorante Miron",
            address="1234 W. Ash",
            serves_hot_dogs=False,
            serves_pizza=False,
            serves_gnocchi=True,
            rating=4,
            chef=chef,
        )
    def test_filter_inherited_model(self):
        """Child models can be filtered on fields inherited from the parent."""
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Ash"), [
                "Ristorante Miron",
            ],
            attrgetter("name")
        )
    def test_update_inherited_model(self):
        """Saving a child updates inherited (parent-table) fields."""
        self.italian_restaurant.address = "1234 W. Elm"
        self.italian_restaurant.save()
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
                "Ristorante Miron",
            ],
            attrgetter("name")
        )
    def test_parent_fields_available_for_filtering_in_child_model(self):
        # Parent fields can be used directly in filters on the child model.
        self.assertQuerysetEqual(
            Restaurant.objects.filter(name="Demon Dogs"), [
                "Demon Dogs",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Ash"), [
                "Ristorante Miron",
            ],
            attrgetter("name")
        )
    def test_filter_on_parent_returns_object_of_parent_type(self):
        # Filters against the parent model return objects of the parent's type.
        p = Place.objects.get(name="Demon Dogs")
        self.assertIs(type(p), Place)
    def test_parent_child_one_to_one_link(self):
        # Since the parent and child are linked by an automatically created
        # OneToOneField, you can get from the parent to the child by using the
        # child's name.
        self.assertEqual(
            Place.objects.get(name="Demon Dogs").restaurant,
            Restaurant.objects.get(name="Demon Dogs")
        )
        self.assertEqual(
            Place.objects.get(name="Ristorante Miron").restaurant.italianrestaurant,
            ItalianRestaurant.objects.get(name="Ristorante Miron")
        )
        self.assertEqual(
            Restaurant.objects.get(name="Ristorante Miron").italianrestaurant,
            ItalianRestaurant.objects.get(name="Ristorante Miron")
        )
    def test_parent_child_one_to_one_link_on_nonrelated_objects(self):
        # This won't work because the Demon Dogs restaurant is not an Italian
        # restaurant.
        with self.assertRaises(ItalianRestaurant.DoesNotExist):
            Place.objects.get(name="Demon Dogs").restaurant.italianrestaurant
    def test_inherited_does_not_exist_exception(self):
        # An ItalianRestaurant which does not exist is also a Place which does
        # not exist.
        with self.assertRaises(Place.DoesNotExist):
            ItalianRestaurant.objects.get(name="The Noodle Void")
    def test_inherited_multiple_objects_returned_exception(self):
        # MultipleObjectsReturned is also inherited.
        with self.assertRaises(Place.MultipleObjectsReturned):
            Restaurant.objects.get()
    def test_related_objects_for_inherited_models(self):
        # Related objects work just as they normally do.
        s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St")
        s1.customers.set([self.restaurant, self.italian_restaurant])
        s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St")
        s2.customers.set([self.italian_restaurant])
        # This won't work because the Place we select is not a Restaurant (it's
        # a Supplier).
        p = Place.objects.get(name="Joe's Chickens")
        with self.assertRaises(Restaurant.DoesNotExist):
            p.restaurant
        self.assertEqual(p.supplier, s1)
        # Reverse M2M traversal through the inherited relation.
        self.assertQuerysetEqual(
            self.italian_restaurant.provider.order_by("-name"), [
                "Luigi's Pasta",
                "Joe's Chickens"
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            Restaurant.objects.filter(provider__name__contains="Chickens"), [
                "Ristorante Miron",
                "Demon Dogs",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(provider__name__contains="Chickens"), [
                "Ristorante Miron",
            ],
            attrgetter("name"),
        )
        # FK lookups spanning the inheritance chain also work.
        ParkingLot.objects.create(
            name="Main St", address="111 Main St", main_site=s1
        )
        ParkingLot.objects.create(
            name="Well Lit", address="124 Sesame St", main_site=self.italian_restaurant
        )
        self.assertEqual(
            Restaurant.objects.get(lot__name="Well Lit").name,
            "Ristorante Miron"
        )
    def test_update_works_on_parent_and_child_models_at_once(self):
        # The update() command can update fields in parent and child classes at
        # once (although it executed multiple SQL queries to do so).
        rows = Restaurant.objects.filter(
            serves_hot_dogs=True, name__contains="D"
        ).update(
            name="Demon Puppies", serves_hot_dogs=False
        )
        self.assertEqual(rows, 1)
        r1 = Restaurant.objects.get(pk=self.restaurant.pk)
        self.assertFalse(r1.serves_hot_dogs)
        self.assertEqual(r1.name, "Demon Puppies")
    def test_values_works_on_parent_model_fields(self):
        # The values() command also works on fields from parent models.
        # "name" and "rating" are defined on ancestor models, yet values()
        # resolves both from the ItalianRestaurant queryset.
        self.assertSequenceEqual(
            ItalianRestaurant.objects.values("name", "rating"), [
                {"rating": 4, "name": "Ristorante Miron"},
            ],
        )
    def test_select_related_works_on_parent_model_fields(self):
        # select_related works with fields from the parent object as if they
        # were a normal part of the model.
        # Without select_related: one query for the row, one for the chef FK.
        self.assertNumQueries(
            2, lambda: ItalianRestaurant.objects.all()[0].chef
        )
        # With select_related the chef is fetched by the initial query.
        self.assertNumQueries(
            1, lambda: ItalianRestaurant.objects.select_related("chef")[0].chef
        )
    def test_select_related_defer(self):
        """
        #23370 - Should be able to defer child fields when using
        select_related() from parent to child.
        """
        qs = (Restaurant.objects.select_related("italianrestaurant")
              .defer("italianrestaurant__serves_gnocchi").order_by("rating"))
        # The field was actually deferred
        # (the second query is issued when the deferred field is accessed).
        with self.assertNumQueries(2):
            objs = list(qs.all())
            self.assertTrue(objs[1].italianrestaurant.serves_gnocchi)
        # Model fields where assigned correct values
        self.assertEqual(qs[0].name, 'Demon Dogs')
        self.assertEqual(qs[0].rating, 2)
        self.assertEqual(qs[1].italianrestaurant.name, 'Ristorante Miron')
        self.assertEqual(qs[1].italianrestaurant.rating, 4)
    def test_parent_cache_reuse(self):
        place = Place.objects.create()
        GrandChild.objects.create(place=place)
        grand_parent = GrandParent.objects.latest('pk')
        # Fetching the FK target costs one query the first time...
        with self.assertNumQueries(1):
            self.assertEqual(grand_parent.place, place)
        parent = grand_parent.parent
        # ...and the cached value is reused down the whole inheritance
        # chain, so no further queries are issued from descendant instances.
        with self.assertNumQueries(0):
            self.assertEqual(parent.place, place)
        child = parent.child
        with self.assertNumQueries(0):
            self.assertEqual(child.place, place)
        grandchild = child.grandchild
        with self.assertNumQueries(0):
            self.assertEqual(grandchild.place, place)
    def test_update_query_counts(self):
        """
        Update queries do not generate unnecessary queries (#18304).
        """
        # Three queries: presumably one UPDATE per table in the
        # Place -> Restaurant -> ItalianRestaurant chain, and nothing extra.
        with self.assertNumQueries(3):
            self.italian_restaurant.save()
    def test_filter_inherited_on_null(self):
        # Refs #12567
        # isnull filtering on a child relation selects (or rejects) Places
        # that have a corresponding Supplier row.
        Supplier.objects.create(
            name="Central market",
            address="610 some street",
        )
        self.assertQuerysetEqual(
            Place.objects.filter(supplier__isnull=False), [
                "Central market",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            Place.objects.filter(supplier__isnull=True).order_by("name"), [
                "Demon Dogs",
                "Ristorante Miron",
            ],
            attrgetter("name")
        )
    def test_exclude_inherited_on_null(self):
        # Refs #12567
        # exclude() is the mirror image of the filter() case above: excluding
        # isnull=False leaves only Places without a Supplier row, and vice versa.
        Supplier.objects.create(
            name="Central market",
            address="610 some street",
        )
        self.assertQuerysetEqual(
            Place.objects.exclude(supplier__isnull=False).order_by("name"), [
                "Demon Dogs",
                "Ristorante Miron",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            Place.objects.exclude(supplier__isnull=True), [
                "Central market",
            ],
            attrgetter("name")
        )
@isolate_apps('model_inheritance', 'model_inheritance.tests')
class InheritanceSameModelNameTests(SimpleTestCase):
    def test_abstract_fk_related_name(self):
        # Two concrete models, both named "Referent" but living in different
        # apps, inherit the same abstract FK. The %(app_label)s/%(class)s
        # placeholders must expand into distinct reverse accessors on
        # Referenced.
        related_name = '%(app_label)s_%(class)s_references'
        class Referenced(models.Model):
            class Meta:
                app_label = 'model_inheritance'
        class AbstractReferent(models.Model):
            reference = models.ForeignKey(Referenced, models.CASCADE, related_name=related_name)
            class Meta:
                app_label = 'model_inheritance'
                abstract = True
        class Referent(AbstractReferent):
            class Meta:
                app_label = 'model_inheritance'
        LocalReferent = Referent
        class Referent(AbstractReferent):
            class Meta:
                app_label = 'tests'
        ForeignReferent = Referent
        # The raw (unexpanded) template string must not appear as an attribute.
        self.assertFalse(hasattr(Referenced, related_name))
        self.assertIs(Referenced.model_inheritance_referent_references.field.model, LocalReferent)
        self.assertIs(Referenced.tests_referent_references.field.model, ForeignReferent)
class InheritanceUniqueTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # One GrandParent row whose unique email and (first_name, last_name)
        # pair the tests below deliberately collide with.
        cls.grand_parent = GrandParent.objects.create(
            email='grand_parent@example.com',
            first_name='grand',
            last_name='parent',
        )
    def test_unique(self):
        # A descendant instance duplicating an ancestor's unique field must
        # fail validation, with the ancestor's verbose name in the message.
        grand_child = GrandChild(
            email=self.grand_parent.email,
            first_name='grand',
            last_name='child',
        )
        msg = 'Grand parent with this Email already exists.'
        with self.assertRaisesMessage(ValidationError, msg):
            grand_child.validate_unique()
    def test_unique_together(self):
        # Same, but for the inherited unique-together constraint on
        # (first_name, last_name).
        grand_child = GrandChild(
            email='grand_child@example.com',
            first_name=self.grand_parent.first_name,
            last_name=self.grand_parent.last_name,
        )
        msg = 'Grand parent with this First name and Last name already exists.'
        with self.assertRaisesMessage(ValidationError, msg):
            grand_child.validate_unique()
| {
"content_hash": "787ef9325e0f3196a3c07976280eee6b",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 108,
"avg_line_length": 38.04612546125461,
"alnum_prop": 0.6041899034964356,
"repo_name": "elena/django",
"id": "1ab0e15eeeda0e25f91b8fe2299268d5f1f99ea6",
"size": "20621",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/model_inheritance/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43253"
},
{
"name": "HTML",
"bytes": "171768"
},
{
"name": "JavaScript",
"bytes": "105066"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11016010"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from lxml import etree
import pytz
import xmltodict
from .ts_writer import TimeSeriesWriter
DNS = 'http://www.wldelft.nl/fews/PI'  # default namespace
XSI = 'http://www.w3.org/2001/XMLSchema-instance'  # XML Schema instance namespace
XSD = 'http://fews.wldelft.nl/schemas/version1.0/pi-schemas/pi_timeseries.xsd'  # schema location for PI timeseries
nsmap = {None: DNS, 'xsi': XSI}  # lxml namespace map: default ns plus the xsi prefix
DATE_FMT = '%Y-%m-%d'  # date format used for PI-XML date attributes
TIME_FMT = '%H:%M:%S'  # time format used for PI-XML time attributes
def set_datetime(md, df):
    """Stamp the PI header's start/end date from the frame's datetime index.

    Mutates ``md`` in place: the '@date'/'@time' attributes of
    ``md['header']['startDate']`` and ``md['header']['endDate']`` are filled
    from the first and last entries of ``df``'s index.
    """
    header = md['header']
    for key, stamp in (('startDate', df.index[0]), ('endDate', df.index[-1])):
        header[key]['@date'] = stamp.strftime(DATE_FMT)
        header[key]['@time'] = stamp.strftime(TIME_FMT)
class PiXmlWriter(TimeSeriesWriter):
    """Write pandas time series as Delft-FEWS PI-XML (schema version 1.2)."""
    def __init__(self, offset_in_hours=0.0):
        """Build the root ``TimeSeries`` element.

        Keyword arguments:
        offset_in_hours -- fixed offset in hours from UTC (default 0.0)

        Most time zones are offset from UTC by a whole number of hours,
        but a few are offset by 30 or 45 minutes. Pass `None` to omit
        the optional timeZone element in the resulting xml.
        """
        self.root = etree.Element('TimeSeries', nsmap=nsmap)
        self.root.attrib['{%s}schemaLocation' % XSI] = "%s %s" % (DNS, XSD)
        self.root.attrib['version'] = '1.2'
        # Always define self.tz: previously it was only set when
        # offset_in_hours was not None, so set_series() crashed with
        # AttributeError for writers constructed with offset_in_hours=None.
        self.tz = None
        if offset_in_hours is not None:
            etree.SubElement(self.root, 'timeZone').text = str(offset_in_hours)
            self.tz = pytz.FixedOffset(offset_in_hours * 60)
    def set_series(self, metadata, dataframe):
        """Append a <series> element built from ``metadata`` and ``dataframe``.

        ``metadata`` is an xmltodict-style nested dict for the series header;
        its start/end dates are overwritten from the frame's index when the
        frame is non-empty. Each row becomes an <event> element whose extra
        attributes are the frame's columns, stringified.
        """
        series = etree.SubElement(self.root, 'series')
        if not dataframe.empty:
            if self.tz is not None:
                # tz_convert returns a new object (it never mutates in
                # place); the original code discarded the result, so the
                # timestamps were emitted unconverted.
                dataframe = dataframe.tz_convert(self.tz, copy=False)
            set_datetime(metadata, dataframe)
        header = xmltodict.unparse(metadata)
        header = bytes(bytearray(header, encoding='utf-8'))
        header = etree.XML(header)
        series.append(header)
        if dataframe.empty:
            return
        for idx, row in dataframe.iterrows():
            event = etree.SubElement(series, 'event')
            event.attrib['date'] = idx.strftime(DATE_FMT)
            event.attrib['time'] = idx.strftime(TIME_FMT)
            for col in dataframe.columns.tolist():
                event.attrib[col] = str(row[col])
    def write(self, out, pretty_print=True):
        """Serialize the document to the binary file-like object ``out``."""
        out.write(etree.tostring(self.root, pretty_print=pretty_print))
| {
"content_hash": "2ce7613e7a0852b1692427c3060508d4",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 34.056338028169016,
"alnum_prop": 0.6149710504549214,
"repo_name": "nens/tslib",
"id": "dca05f6a741f802ca7ebb3a2a006dfa658852071",
"size": "2418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tslib/writers/pi_xml_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30541"
}
],
"symlink_target": ""
} |
"""Utilities for loading a pretrained distance model.
"""
import jax
import ml_collections
from scenic.projects.func_dist import model as scenic_model
from scenic.projects.func_dist import train_utils
from scenic.train_lib_deprecated import pretrain_utils
def restore_model(config: ml_collections.ConfigDict, ckpt_path: str):
  """Restore model definition, weights and config from a checkpoint path."""
  model_cls = scenic_model.get_model_cls(config.model_name)
  # The dataset is only loaded for its metadata (feeds the model builder).
  data_rng, _ = jax.random.split(jax.random.PRNGKey(0))
  dataset = train_utils.get_dataset(config, data_rng)
  train_state = pretrain_utils.restore_pretrained_checkpoint(ckpt_path)
  return model_cls(config, dataset.meta_data), train_state, config
| {
"content_hash": "bab309f291f9dd88d09118d7a229fcff",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 35.22727272727273,
"alnum_prop": 0.7638709677419355,
"repo_name": "google-research/scenic",
"id": "922f6e91a7fdb9c62c87d185557d1b9373cda033",
"size": "775",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scenic/projects/func_dist/pretrain_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1717873"
},
{
"name": "Python",
"bytes": "3692184"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
from django.db import migrations, models
import wagtailvideos.models
class Migration(migrations.Migration):
    """Make VideoTranscode.file optional (blank/null allowed)."""
    dependencies = [
        ('wagtailvideos', '0003_auto_20160705_1646'),
    ]
    operations = [
        migrations.AlterField(
            model_name='videotranscode',
            name='file',
            # Upload path is still computed dynamically by get_upload_to.
            field=models.FileField(blank=True, null=True, upload_to=wagtailvideos.models.get_upload_to, verbose_name='file'),
        ),
    ]
| {
"content_hash": "bde87094082c5bd761cb86e478a23c71",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 125,
"avg_line_length": 26.35,
"alnum_prop": 0.6565464895635673,
"repo_name": "takeflight/wagtailvideos",
"id": "afc325998b98a8251d4cc90e423e40f22adf31f4",
"size": "599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtailvideos/migrations/0004_auto_20160706_1153.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "86"
},
{
"name": "HTML",
"bytes": "21965"
},
{
"name": "JavaScript",
"bytes": "10023"
},
{
"name": "Python",
"bytes": "88794"
}
],
"symlink_target": ""
} |
''' Tool for sorting imports alphabetically, and automatically separated into sections.
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import os
import re
import sys
from concurrent.futures import ProcessPoolExecutor
import functools
import setuptools
from isort import SortImports, __version__
from isort.settings import DEFAULT_SECTIONS, default, from_path, should_skip
from .pie_slice import itemsview
INTRO = r"""
/#######################################################################\
`sMMy`
.yyyy- `
##soos## ./o.
` ``..-..` ``...`.`` ` ```` ``-ssso```
.s:-y- .+osssssso/. ./ossss+:so+:` :+o-`/osso:+sssssssso/
.s::y- osss+.``.`` -ssss+-.`-ossso` ssssso/::..::+ssss:::.
.s::y- /ssss+//:-.` `ssss+ `ssss+ sssso` :ssss`
.s::y- `-/+oossssso/ `ssss/ sssso ssss/ :ssss`
.y-/y- ````:ssss` ossso. :ssss: ssss/ :ssss.
`/so:` `-//::/osss+ `+ssss+-/ossso: /sso- `osssso/.
\/ `-/oooo++/- .:/++:/++/-` .. `://++/.
isort your Python imports for you so you don't have to
VERSION {0}
\########################################################################/
""".format(__version__)
# Matches a Python shebang (e.g. b"#!/usr/bin/env python3") on a file's first line.
shebang_re = re.compile(br'^#!.*\bpython[23w]?\b')
def is_python_file(path):
    """Heuristically decide whether *path* is Python source.

    A ``.py`` extension is accepted outright; otherwise the first line of
    the file (up to 100 bytes) is checked for a python shebang. Unreadable
    or missing files are reported as not-Python.
    """
    if path.endswith('.py'):
        return True
    try:
        with open(path, 'rb') as source:
            first_line = source.readline(100)
    except IOError:
        return False
    return bool(shebang_re.match(first_line))
class SortAttempt(object):
    """Lightweight result record for one SortImports run on a file."""

    def __init__(self, incorrectly_sorted, skipped):
        # Whether the file's imports were found out of order, and whether
        # the file was skipped entirely.
        self.skipped = skipped
        self.incorrectly_sorted = incorrectly_sorted
def sort_imports(file_name, **arguments):
    """Sort one file and return a SortAttempt, or None if it can't be read."""
    try:
        result = SortImports(file_name, **arguments)
    except IOError as e:
        print("WARNING: Unable to parse file {0} due to {1}".format(file_name, e))
        return None
    return SortAttempt(result.incorrectly_sorted, result.skipped)
def iter_source_code(paths, config, skipped):
    """Iterate over all Python source files defined in paths.

    Non-directory paths are yielded as-is. Directories are walked
    recursively; anything matching the skip configuration is appended to
    ``skipped`` (and pruned from the walk) instead of being yielded.
    """
    for path in paths:
        if not os.path.isdir(path):
            yield path
            continue
        if should_skip(path, config, os.getcwd()):
            skipped.append(path)
            continue
        for dirpath, dirnames, filenames in os.walk(path, topdown=True):
            # Prune skipped directories in place so os.walk won't descend.
            for dirname in list(dirnames):
                if should_skip(dirname, config, dirpath):
                    skipped.append(dirname)
                    dirnames.remove(dirname)
            for filename in filenames:
                if not is_python_file(os.path.join(dirpath, filename)):
                    continue
                if should_skip(filename, config, dirpath):
                    skipped.append(filename)
                else:
                    yield os.path.join(dirpath, filename)
class ISortCommand(setuptools.Command):
    """The :class:`ISortCommand` class is used by setuptools to perform
    imports checks on registered modules.
    """
    description = "Run isort on modules registered in setuptools"
    user_options = []
    def initialize_options(self):
        """Seed every default isort setting as an attribute on the command."""
        default_settings = default.copy()
        for (key, value) in itemsview(default_settings):
            setattr(self, key, value)
    def finalize_options(self):
        "Get options from config files."
        self.arguments = {}
        computed_settings = from_path(os.getcwd())
        for (key, value) in itemsview(computed_settings):
            self.arguments[key] = value
    def distribution_files(self):
        """Find distribution packages."""
        # This is verbatim from flake8
        if self.distribution.packages:
            package_dirs = self.distribution.package_dir or {}
            for package in self.distribution.packages:
                pkg_dir = package
                if package in package_dirs:
                    pkg_dir = package_dirs[package]
                elif '' in package_dirs:
                    pkg_dir = package_dirs[''] + os.path.sep + pkg_dir
                yield pkg_dir.replace('.', os.path.sep)
        if self.distribution.py_modules:
            for filename in self.distribution.py_modules:
                yield "%s.py" % filename
        # Don't miss the setup.py file itself
        yield "setup.py"
    def run(self):
        """Check every distribution file; exit(1) if any has unsorted imports."""
        arguments = self.arguments
        wrong_sorted_files = False
        arguments['check'] = True
        for path in self.distribution_files():
            for python_file in glob.iglob(os.path.join(path, '*.py')):
                try:
                    incorrectly_sorted = SortImports(python_file, **arguments).incorrectly_sorted
                    if incorrectly_sorted:
                        wrong_sorted_files = True
                except IOError as e:
                    print("WARNING: Unable to parse file {0} due to {1}".format(python_file, e))
        if wrong_sorted_files:
            exit(1)
def create_parser():
    """Build the isort CLI parser, parse sys.argv, and return the options.

    Despite the name, this returns the *parsed* arguments as a dict, with
    falsy values dropped so that file-level configuration can fill the gaps.
    """
    parser = argparse.ArgumentParser(description='Sort Python import definitions alphabetically '
                                                 'within logical sections.')
    inline_args_group = parser.add_mutually_exclusive_group()
    parser.add_argument('-a', '--add-import', dest='add_imports', action='append',
                        help='Adds the specified import line to all files, '
                             'automatically determining correct placement.')
    parser.add_argument('-ac', '--atomic', dest='atomic', action='store_true',
                        help="Ensures the output doesn't save if the resulting file contains syntax errors.")
    parser.add_argument('-af', '--force-adds', dest='force_adds', action='store_true',
                        help='Forces import adds even if the original file is empty.')
    parser.add_argument('-b', '--builtin', dest='known_standard_library', action='append',
                        help='Force sortImports to recognize a module as part of the python standard library.')
    parser.add_argument('-c', '--check-only', action='store_true', dest="check",
                        help='Checks the file for unsorted / unformatted imports and prints them to the '
                             'command line without modifying the file.')
    parser.add_argument('-ca', '--combine-as', dest='combine_as_imports', action='store_true',
                        help="Combines as imports on the same line.")
    parser.add_argument('-cs', '--combine-star', dest='combine_star', action='store_true',
                        help="Ensures that if a star import is present, nothing else is imported from that namespace.")
    parser.add_argument('-d', '--stdout', help='Force resulting output to stdout, instead of in-place.',
                        dest='write_to_stdout', action='store_true')
    parser.add_argument('-df', '--diff', dest='show_diff', action='store_true',
                        help="Prints a diff of all the changes isort would make to a file, instead of "
                             "changing it in place")
    parser.add_argument('-ds', '--no-sections', help='Put all imports into the same section bucket', dest='no_sections',
                        action='store_true')
    parser.add_argument('-dt', '--dont-order-by-type', dest='dont_order_by_type',
                        action='store_true', help='Only order imports alphabetically, do not attempt type ordering')
    parser.add_argument('-e', '--balanced', dest='balanced_wrapping', action='store_true',
                        help='Balances wrapping to produce the most consistent line length possible')
    parser.add_argument('-f', '--future', dest='known_future_library', action='append',
                        help='Force sortImports to recognize a module as part of the future compatibility libraries.')
    parser.add_argument('-fas', '--force-alphabetical-sort', action='store_true', dest="force_alphabetical_sort",
                        help='Force all imports to be sorted as a single section')
    parser.add_argument('-fass', '--force-alphabetical-sort-within-sections', action='store_true',
                        dest="force_alphabetical_sort", help='Force all imports to be sorted alphabetically within a '
                                                             'section')
    # NOTE(review): '-ff' has no action='store_true', so unlike the other
    # flags it consumes a value from the command line; confirm against
    # upstream isort before changing.
    parser.add_argument('-ff', '--from-first', dest='from_first',
                        help="Switches the typical ordering preference, showing from imports first then straight ones.")
    parser.add_argument('-fgw', '--force-grid-wrap', nargs='?', const=2, type=int, dest="force_grid_wrap",
                        help='Force number of from imports (defaults to 2) to be grid wrapped regardless of line '
                             'length')
    parser.add_argument('-fss', '--force-sort-within-sections', action='store_true', dest="force_sort_within_sections",
                        help='Force imports to be sorted by module, independent of import_type')
    parser.add_argument('-i', '--indent', help='String to place for indents defaults to "    " (4 spaces).',
                        dest='indent', type=str)
    parser.add_argument('-j', '--jobs', help='Number of files to process in parallel.',
                        dest='jobs', type=int)
    parser.add_argument('-k', '--keep-direct-and-as', dest='keep_direct_and_as_imports', action='store_true',
                        help="Turns off default behavior that removes direct imports when as imports exist.")
    parser.add_argument('-l', '--lines', help='[Deprecated] The max length of an import line (used for wrapping '
                                              'long imports).',
                        dest='line_length', type=int)
    parser.add_argument('-lai', '--lines-after-imports', dest='lines_after_imports', type=int)
    parser.add_argument('-lbt', '--lines-between-types', dest='lines_between_types', type=int)
    parser.add_argument('-le', '--line-ending', dest='line_ending',
                        help="Forces line endings to the specified value. If not set, values will be guessed per-file.")
    parser.add_argument('-ls', '--length-sort', help='Sort imports by their string length.',
                        dest='length_sort', action='store_true')
    parser.add_argument('-m', '--multi-line', dest='multi_line_output', type=int, choices=[0, 1, 2, 3, 4, 5],
                        help='Multi line output (0-grid, 1-vertical, 2-hanging, 3-vert-hanging, 4-vert-grid, '
                             '5-vert-grid-grouped, 6-vert-grid-grouped-no-comma).')
    inline_args_group.add_argument('-nis', '--no-inline-sort', dest='no_inline_sort', action='store_true',
                                   help='Leaves `from` imports with multiple imports \'as-is\' (e.g. `from foo import a, c ,b`).')
    parser.add_argument('-nlb', '--no-lines-before', help='Sections which should not be split with previous by empty lines',
                        dest='no_lines_before', action='append')
    parser.add_argument('-ns', '--dont-skip', help='Files that sort imports should never skip over.',
                        dest='not_skip', action='append')
    parser.add_argument('-o', '--thirdparty', dest='known_third_party', action='append',
                        help='Force sortImports to recognize a module as being part of a third party library.')
    parser.add_argument('-ot', '--order-by-type', dest='order_by_type',
                        action='store_true', help='Order imports by type in addition to alphabetically')
    parser.add_argument('-p', '--project', dest='known_first_party', action='append',
                        help='Force sortImports to recognize a module as being part of the current python project.')
    parser.add_argument('-q', '--quiet', action='store_true', dest="quiet",
                        help='Shows extra quiet output, only errors are outputted.')
    parser.add_argument('-r', '--remove-import', dest='remove_imports', action='append',
                        help='Removes the specified import from all files.')
    parser.add_argument('-rc', '--recursive', dest='recursive', action='store_true',
                        help='Recursively look for Python files of which to sort imports')
    parser.add_argument('-s', '--skip', help='Files that sort imports should skip over. If you want to skip multiple '
                                             'files you should specify twice: --skip file1 --skip file2.', dest='skip', action='append')
    parser.add_argument('-sd', '--section-default', dest='default_section',
                        help='Sets the default section for imports (by default FIRSTPARTY) options: ' +
                             str(DEFAULT_SECTIONS))
    parser.add_argument('-sg', '--skip-glob', help='Files that sort imports should skip over.', dest='skip_glob',
                        action='append')
    inline_args_group.add_argument('-sl', '--force-single-line-imports', dest='force_single_line', action='store_true',
                                   help='Forces all from imports to appear on their own line')
    parser.add_argument('-sp', '--settings-path', dest="settings_path",
                        help='Explicitly set the settings path instead of auto determining based on file location.')
    parser.add_argument('-t', '--top', help='Force specific imports to the top of their appropriate section.',
                        dest='force_to_top', action='append')
    parser.add_argument('-tc', '--trailing-comma', dest='include_trailing_comma', action='store_true',
                        help='Includes a trailing comma on multi line imports that include parentheses.')
    parser.add_argument('-up', '--use-parentheses', dest='use_parentheses', action='store_true',
                        help='Use parenthesis for line continuation on length limit instead of slashes.')
    parser.add_argument('-v', '--version', action='store_true', dest='show_version')
    parser.add_argument('-vb', '--verbose', action='store_true', dest="verbose",
                        help='Shows verbose output, such as when files are skipped or when a check is successful.')
    parser.add_argument('--virtual-env', dest='virtual_env',
                        help='Virtual environment to use for determining whether a package is third-party')
    parser.add_argument('-vn', '--version-number', action='version', version=__version__,
                        help='Returns just the current version number without the logo')
    parser.add_argument('-w', '--line-width', help='The max length of an import line (used for wrapping long imports).',
                        dest='line_length', type=int)
    # NOTE(review): '-wl' is parsed as a string (no type=int), unlike the
    # other length options — verify downstream handling before changing.
    parser.add_argument('-wl', '--wrap-length', dest='wrap_length',
                        help="Specifies how long lines that are wrapped should be, if not set line_length is used.")
    parser.add_argument('-ws', '--ignore-whitespace', action='store_true', dest="ignore_whitespace",
                        help='Tells isort to ignore whitespace differences when --check-only is being used.')
    parser.add_argument('-y', '--apply', dest='apply', action='store_true',
                        help='Tells isort to apply changes recursively without asking')
    parser.add_argument('files', nargs='*', help='One or more Python source files that need their imports sorted.')
    # Drop falsy values so file-level configuration can supply defaults.
    arguments = {key: value for key, value in itemsview(vars(parser.parse_args())) if value}
    if 'dont_order_by_type' in arguments:
        arguments['order_by_type'] = False
    return arguments
def main():
    """Command-line entry point: parse options and sort the requested files."""
    arguments = create_parser()
    if arguments.get('show_version'):
        print(INTRO)
        return
    # Normalize settings_path to an existing directory (a file path is
    # replaced by its containing directory).
    if 'settings_path' in arguments:
        sp = arguments['settings_path']
        arguments['settings_path'] = os.path.abspath(sp) if os.path.isdir(sp) else os.path.dirname(os.path.abspath(sp))
        if not os.path.isdir(arguments['settings_path']):
            print("WARNING: settings_path dir does not exist: {0}".format(arguments['settings_path']))
    if 'virtual_env' in arguments:
        venv = arguments['virtual_env']
        arguments['virtual_env'] = os.path.abspath(venv)
        if not os.path.isdir(arguments['virtual_env']):
            print("WARNING: virtual_env dir does not exist: {0}".format(arguments['virtual_env']))
    file_names = arguments.pop('files', [])
    # "-" means filter stdin to stdout instead of editing files.
    if file_names == ['-']:
        SortImports(file_contents=sys.stdin.read(), write_to_stdout=True, **arguments)
    else:
        if not file_names:
            # No files given: recurse from the current directory, asking
            # before applying unless --apply was passed.
            file_names = ['.']
            arguments['recursive'] = True
            if not arguments.get('apply', False):
                arguments['ask_to_apply'] = True
        config = from_path(os.path.abspath(file_names[0]) or os.getcwd()).copy()
        config.update(arguments)
        wrong_sorted_files = False
        skipped = []
        if arguments.get('recursive', False):
            file_names = iter_source_code(file_names, config, skipped)
        num_skipped = 0
        if config['verbose'] or config.get('show_logo', False):
            print(INTRO)
        jobs = arguments.get('jobs')
        if jobs:
            # Parallel path: one worker process per file batch.
            executor = ProcessPoolExecutor(max_workers=jobs)
            for sort_attempt in executor.map(functools.partial(sort_imports, **arguments), file_names):
                if not sort_attempt:
                    continue
                incorrectly_sorted = sort_attempt.incorrectly_sorted
                if arguments.get('check', False) and incorrectly_sorted:
                    wrong_sorted_files = True
                if sort_attempt.skipped:
                    num_skipped += 1
        else:
            for file_name in file_names:
                try:
                    sort_attempt = SortImports(file_name, **arguments)
                    incorrectly_sorted = sort_attempt.incorrectly_sorted
                    if arguments.get('check', False) and incorrectly_sorted:
                        wrong_sorted_files = True
                    if sort_attempt.skipped:
                        num_skipped += 1
                except IOError as e:
                    print("WARNING: Unable to parse file {0} due to {1}".format(file_name, e))
        # Non-zero exit signals --check failures to CI pipelines.
        if wrong_sorted_files:
            exit(1)
        num_skipped += len(skipped)
        if num_skipped and not arguments.get('quiet', False):
            if config['verbose']:
                for was_skipped in skipped:
                    print("WARNING: {0} was skipped as it's listed in 'skip' setting"
                          " or matches a glob in 'skip_glob' setting".format(was_skipped))
            print("Skipped {0} files".format(num_skipped))
# Allow running the module directly (e.g. "python -m isort.main").
if __name__ == "__main__":
    main()
| {
"content_hash": "ab2eeb5c171bc0eaa4ec5c915a8dd45b",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 124,
"avg_line_length": 55.27548209366391,
"alnum_prop": 0.5893346623473711,
"repo_name": "lucidmotifs/auto-aoc",
"id": "efb3793d0a1b02f4b872e9f003066483bfbd687e",
"size": "20087",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": ".venv/lib/python3.5/site-packages/isort/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "74"
},
{
"name": "C",
"bytes": "41695"
},
{
"name": "C++",
"bytes": "35306"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "48431"
},
{
"name": "JavaScript",
"bytes": "2043"
},
{
"name": "Python",
"bytes": "4850280"
},
{
"name": "Shell",
"bytes": "3778"
},
{
"name": "Visual Basic",
"bytes": "820"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
} |
from Root import application
import unittest
class test1(unittest.TestCase):
    """Fixture test case: three identical tests asserting application.foo() == 1.

    The duplication is deliberate — this file is a test resource used to
    exercise a unittest runner with multiple passing tests.
    """
    def test_one(self):
        self.assertEqual(application.foo(),1)
    def test_two(self):
        self.assertEqual(application.foo(),1)
    def test_three(self):
        self.assertEqual(application.foo(),1)
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "f7b31e3f2b47732303f9f3c1da3b0324",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 39,
"avg_line_length": 27.818181818181817,
"alnum_prop": 0.7222222222222222,
"repo_name": "codeboardio/mantra",
"id": "7a24f8342fc8eae8c40fe24ac64af3db2b8b1ed3",
"size": "306",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_resources/python-unittest/py_one_file/Root/test/test1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3416"
},
{
"name": "C++",
"bytes": "2328"
},
{
"name": "Haskell",
"bytes": "2068"
},
{
"name": "Java",
"bytes": "31758"
},
{
"name": "JavaScript",
"bytes": "259653"
},
{
"name": "Python",
"bytes": "13015"
},
{
"name": "Shell",
"bytes": "4102"
}
],
"symlink_target": ""
} |
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))

# textify is importable only after the parent directory was added above.
import textify
# NOTE(review): presumably set so autodoc can import Django-dependent
# modules from the example project — confirm against the repo layout.
os.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'
# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django-textify'
copyright = u'2013, Ben Margolis'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Both values are taken from the textify package imported at the top of
# this file, so the docs always match the installed code.
version = textify.get_version(short=True)
# The full version, including alpha/beta/rc tags.
release = textify.get_version()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
# NOTE(review): exclude_trees is an old-style Sphinx option; newer Sphinx
# versions use exclude_patterns — confirm the targeted Sphinx version.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'django-textifydoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'app.tex', u'django-textify Documentation',
   u'Ben Margolis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| {
"content_hash": "afb6f118ee94f89e03bbcc2d3064f644",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 80,
"avg_line_length": 33.75,
"alnum_prop": 0.6914653784219001,
"repo_name": "sixpearls/django-textify",
"id": "27ee63e1a135f1a178c91cc09ca825272bf4fe62",
"size": "6635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc_src/conf.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "14524"
},
{
"name": "JavaScript",
"bytes": "5161"
},
{
"name": "Python",
"bytes": "51766"
},
{
"name": "Shell",
"bytes": "3178"
}
],
"symlink_target": ""
} |
import geojson
import sys
def validate_geojson(geojson_path):
""" Validate a GeoJSON file """
## Validate the GeoJSON file
with open(geojson_path, 'r') as geojson_file:
geojson_dump = geojson_file.read()
features = geojson.loads(geojson_dump)
validation = features.is_valid
print "Is the geojson file valid? ", str(validation)
# if validation["message"]:
# print "Info: ", validation["message"]
return validation
if __name__ == '__main__':
    # The GeoJSON path must be supplied as the first CLI argument.
    if len(sys.argv) < 2:
        raise ValueError("FILE_NAME not defined")
    validate_geojson(sys.argv[1])
| {
"content_hash": "d3f4cfd10611b7ded497d35e6e969a91",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 60,
"avg_line_length": 26.74074074074074,
"alnum_prop": 0.6052631578947368,
"repo_name": "Geovation/tiler",
"id": "6a5749f4379d9ff0a99a5379d1f83628ff5e9c97",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tiler/tiler-scripts/validate_geojson.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "33"
},
{
"name": "Python",
"bytes": "74197"
},
{
"name": "Shell",
"bytes": "11409"
}
],
"symlink_target": ""
} |
"""
Handling of RSA, DSA, and EC keys.
"""
from __future__ import absolute_import, division
import binascii
import itertools
import warnings
from hashlib import md5, sha256
import base64
from incremental import Version
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa, rsa, padding, ec
from cryptography.hazmat.primitives.serialization import (
load_pem_private_key, load_ssh_public_key)
from cryptography import utils
try:
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_dss_signature, decode_dss_signature)
except ImportError:
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_rfc6979_signature as encode_dss_signature,
decode_rfc6979_signature as decode_dss_signature)
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from pyasn1.error import PyAsn1Error
from pyasn1.type import univ
from pyasn1.codec.ber import decoder as berDecoder
from pyasn1.codec.ber import encoder as berEncoder
from twisted.conch.ssh import common, sexpy
from twisted.conch.ssh.common import int_from_bytes, int_to_bytes
from twisted.python import randbytes
from twisted.python.compat import (
iterbytes, long, izip, nativeString, unicode, _PY3,
_b64decodebytes as decodebytes, _b64encodebytes as encodebytes)
from twisted.python.constants import NamedConstant, Names
from twisted.python.deprecate import deprecated, getDeprecationWarningString
# Curve lookup table
# Maps SSH ECDSA key-type identifiers to cryptography curve instances.
# Naming convention in the identifiers: "p" curves are prime-field (SECP*R1),
# "k" curves are Koblitz binary curves (SECT*K1), "b"/"t" are other binary
# curves.
_curveTable = {
    b'ecdsa-sha2-nistp256': ec.SECP256R1(),
    b'ecdsa-sha2-nistp384': ec.SECP384R1(),
    b'ecdsa-sha2-nistp521': ec.SECP521R1(),
    b'ecdsa-sha2-nistk163': ec.SECT163K1(),
    b'ecdsa-sha2-nistp192': ec.SECP192R1(),
    b'ecdsa-sha2-nistp224': ec.SECP224R1(),
    b'ecdsa-sha2-nistk233': ec.SECT233K1(),
    b'ecdsa-sha2-nistb233': ec.SECT233R1(),
    b'ecdsa-sha2-nistk283': ec.SECT283K1(),
    b'ecdsa-sha2-nistk409': ec.SECT409K1(),
    b'ecdsa-sha2-nistb409': ec.SECT409R1(),
    b'ecdsa-sha2-nistt571': ec.SECT571K1()
}

# Reverse of the mapping above: translates the SEC curve names reported by
# cryptography (key.curve.name) back to the NIST suffixes used in the SSH
# wire-format identifiers (consumed by Key.sshType()).
_secToNist = {
    b'secp256r1' : b'nistp256',
    b'secp384r1' : b'nistp384',
    b'secp521r1' : b'nistp521',
    b'sect163k1' : b'nistk163',
    b'secp192r1' : b'nistp192',
    b'secp224r1' : b'nistp224',
    b'sect233k1' : b'nistk233',
    b'sect233r1' : b'nistb233',
    b'sect283k1' : b'nistk283',
    b'sect409k1' : b'nistk409',
    b'sect409r1' : b'nistb409',
    b'sect571k1' : b'nistt571'
}
class BadKeyError(Exception):
    """
    Raised when a key isn't what we expected from it.

    XXX: we really need to check for bad keys
    """
class EncryptedKeyError(Exception):
    """
    Raised when an encrypted key is presented to fromString/fromFile without
    a password.
    """
class BadFingerPrintFormat(Exception):
    """
    Raised when unsupported fingerprint formats are presented to fingerprint.
    """
class FingerprintFormats(Names):
    """
    Constants representing the supported formats of key fingerprints.

    @cvar MD5_HEX: Named constant representing fingerprint format generated
        using md5[RFC1321] algorithm in hexadecimal encoding.
    @type MD5_HEX: L{twisted.python.constants.NamedConstant}

    @cvar SHA256_BASE64: Named constant representing fingerprint format
        generated using sha256[RFC4634] algorithm in base64 encoding.
    @type SHA256_BASE64: L{twisted.python.constants.NamedConstant}
    """
    MD5_HEX = NamedConstant()
    SHA256_BASE64 = NamedConstant()
class Key(object):
    """
    An object representing a key.  A key can be either a public or
    private key.  A public key can verify a signature; a private key can
    create or verify a signature.  To generate a string that can be stored
    on disk, use the toString method.  If you have a private key, but want
    the string representation of the public key, use Key.public().toString().

    @ivar keyObject: DEPRECATED. The C{Crypto.PublicKey} object
        that operations are performed with.
    """
@classmethod
def fromFile(cls, filename, type=None, passphrase=None):
"""
Load a key from a file.
@param filename: The path to load key data from.
@type type: L{str} or L{None}
@param type: A string describing the format the key data is in, or
L{None} to attempt detection of the type.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase the key is encrypted with, or L{None}
if there is no encryption.
@rtype: L{Key}
@return: The loaded key.
"""
with open(filename, 'rb') as f:
return cls.fromString(f.read(), type, passphrase)
@classmethod
def fromString(cls, data, type=None, passphrase=None):
"""
Return a Key object corresponding to the string data.
type is optionally the type of string, matching a _fromString_*
method. Otherwise, the _guessStringType() classmethod will be used
to guess a type. If the key is encrypted, passphrase is used as
the decryption key.
@type data: L{bytes}
@param data: The key data.
@type type: L{str} or L{None}
@param type: A string describing the format the key data is in, or
L{None} to attempt detection of the type.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase the key is encrypted with, or L{None}
if there is no encryption.
@rtype: L{Key}
@return: The loaded key.
"""
if isinstance(data, unicode):
data = data.encode("utf-8")
if isinstance(passphrase, unicode):
passphrase = passphrase.encode("utf-8")
if type is None:
type = cls._guessStringType(data)
if type is None:
raise BadKeyError('cannot guess the type of %r' % (data,))
method = getattr(cls, '_fromString_%s' % (type.upper(),), None)
if method is None:
raise BadKeyError('no _fromString method for %s' % (type,))
if method.__code__.co_argcount == 2: # No passphrase
if passphrase:
raise BadKeyError('key not encrypted')
return method(data)
else:
return method(data, passphrase)
    @classmethod
    def _fromString_BLOB(cls, blob):
        """
        Return a public key object corresponding to this public key blob.

        The format of a RSA public key blob is::
            string 'ssh-rsa'
            integer e
            integer n

        The format of a DSA public key blob is::
            string 'ssh-dss'
            integer p
            integer q
            integer g
            integer y

        The format of ECDSA-SHA2-* public key blob is::
            string 'ecdsa-sha2-[identifier]'
            integer x
            integer y

        identifier is the standard NIST curve name.

        @type blob: L{bytes}
        @param blob: The key data.

        @return: A new key.
        @rtype: L{twisted.conch.ssh.keys.Key}

        @raises BadKeyError: if the key type (the first string) is unknown.
        """
        keyType, rest = common.getNS(blob)
        if keyType == b'ssh-rsa':
            e, n, rest = common.getMP(rest, 2)
            return cls(
                rsa.RSAPublicNumbers(e, n).public_key(default_backend()))
        elif keyType == b'ssh-dss':
            p, q, g, y, rest = common.getMP(rest, 4)
            return cls(
                dsa.DSAPublicNumbers(
                    y=y,
                    parameter_numbers=dsa.DSAParameterNumbers(
                        p=p,
                        q=q,
                        g=g
                    )
                ).public_key(default_backend())
            )
        elif keyType in _curveTable:
            # First build an EllipticCurvePublicNumbers from the curve and
            # the encoded point (the second NS-string after the key type),
            # then turn it into a public key object.
            return cls(
                ec.EllipticCurvePublicNumbers.from_encoded_point(
                    _curveTable[keyType],
                    common.getNS(rest, 2)[1]).public_key(default_backend()))
        else:
            raise BadKeyError('unknown blob type: %s' % (keyType,))
@classmethod
def _fromString_PRIVATE_BLOB(cls, blob):
"""
Return a private key object corresponding to this private key blob.
The blob formats are as follows:
RSA keys::
string 'ssh-rsa'
integer n
integer e
integer d
integer u
integer p
integer q
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
EC keys::
string 'ecdsa-sha2-[identifier]'
integer x
integer y
integer privateValue
identifier is the standard NIST curve name.
@type blob: L{bytes}
@param blob: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type (the first string) is unknown.
"""
keyType, rest = common.getNS(blob)
if keyType == b'ssh-rsa':
n, e, d, u, p, q, rest = common.getMP(rest, 6)
return cls._fromRSAComponents(n=n, e=e, d=d, p=p, q=q)
elif keyType == b'ssh-dss':
p, q, g, y, x, rest = common.getMP(rest, 5)
return cls._fromDSAComponents(y=y, g=g, p=p, q=q, x=x)
elif keyType in [curve for curve in list(_curveTable.keys())]:
x, y, privateValue, rest = common.getMP(rest, 3)
return cls._fromECComponents(x=x, y=y, curve=keyType,
privateValue=privateValue)
else:
raise BadKeyError('unknown blob type: %s' % (keyType,))
@classmethod
def _fromString_PUBLIC_OPENSSH(cls, data):
"""
Return a public key object corresponding to this OpenSSH public key
string. The format of an OpenSSH public key string is::
<key type> <base64-encoded public key blob>
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the blob type is unknown.
"""
# ECDSA keys don't need base64 decoding which is required
# for RSA or DSA key.
if data.startswith(b'ecdsa-sha2'):
return cls(load_ssh_public_key(data, default_backend()))
blob = decodebytes(data.split()[1])
return cls._fromString_BLOB(blob)
@classmethod
def _fromString_PRIVATE_OPENSSH(cls, data, passphrase):
"""
Return a private key object corresponding to this OpenSSH private key
string. If the key is encrypted, passphrase MUST be provided.
Providing a passphrase for an unencrypted key is an error.
The format of an OpenSSH private key string is::
-----BEGIN <key type> PRIVATE KEY-----
[Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,<initialization value>]
<base64-encoded ASN.1 structure>
------END <key type> PRIVATE KEY------
The ASN.1 structure of a RSA key is::
(0, n, e, d, p, q)
The ASN.1 structure of a DSA key is::
(0, p, q, g, y, x)
The ASN.1 structure of a ECDSA key is::
(ECParameters, OID, NULL)
@type data: L{bytes}
@param data: The key data.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase the key is encrypted with, or L{None}
if it is not encrypted.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if
* a passphrase is provided for an unencrypted key
* the ASN.1 encoding is incorrect
@raises EncryptedKeyError: if
* a passphrase is not provided for an encrypted key
"""
lines = data.strip().splitlines()
kind = lines[0][11:-17]
if lines[1].startswith(b'Proc-Type: 4,ENCRYPTED'):
if not passphrase:
raise EncryptedKeyError('Passphrase must be provided '
'for an encrypted key')
# Determine cipher and initialization vector
try:
_, cipherIVInfo = lines[2].split(b' ', 1)
cipher, ivdata = cipherIVInfo.rstrip().split(b',', 1)
except ValueError:
raise BadKeyError('invalid DEK-info %r' % (lines[2],))
if cipher in (b'AES-128-CBC', b'AES-256-CBC'):
algorithmClass = algorithms.AES
keySize = int(int(cipher.split(b'-')[1])/8)
if len(ivdata) != 32:
raise BadKeyError('AES encrypted key with a bad IV')
elif cipher == b'DES-EDE3-CBC':
algorithmClass = algorithms.TripleDES
keySize = 24
if len(ivdata) != 16:
raise BadKeyError('DES encrypted key with a bad IV')
else:
raise BadKeyError('unknown encryption type %r' % (cipher,))
# Extract keyData for decoding
iv = bytes(bytearray([int(ivdata[i:i + 2], 16)
for i in range(0, len(ivdata), 2)]))
ba = md5(passphrase + iv[:8]).digest()
bb = md5(ba + passphrase + iv[:8]).digest()
decKey = (ba + bb)[:keySize]
b64Data = decodebytes(b''.join(lines[3:-1]))
decryptor = Cipher(
algorithmClass(decKey),
modes.CBC(iv),
backend=default_backend()
).decryptor()
keyData = decryptor.update(b64Data) + decryptor.finalize()
removeLen = ord(keyData[-1:])
keyData = keyData[:-removeLen]
else:
b64Data = b''.join(lines[1:-1])
keyData = decodebytes(b64Data)
try:
decodedKey = berDecoder.decode(keyData)[0]
except PyAsn1Error as e:
raise BadKeyError(
'Failed to decode key (Bad Passphrase?): %s' % (e,))
if kind == b'EC':
return cls(
load_pem_private_key(data, passphrase, default_backend()))
if kind == b'RSA':
if len(decodedKey) == 2: # Alternate RSA key
decodedKey = decodedKey[0]
if len(decodedKey) < 6:
raise BadKeyError('RSA key failed to decode properly')
n, e, d, p, q, dmp1, dmq1, iqmp = [
long(value) for value in decodedKey[1:9]
]
if p > q: # Make p smaller than q
p, q = q, p
return cls(
rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=dmp1,
dmq1=dmq1,
iqmp=iqmp,
public_numbers=rsa.RSAPublicNumbers(e=e, n=n),
).private_key(default_backend())
)
elif kind == b'DSA':
p, q, g, y, x = [long(value) for value in decodedKey[1: 6]]
if len(decodedKey) < 6:
raise BadKeyError('DSA key failed to decode properly')
return cls(
dsa.DSAPrivateNumbers(
x=x,
public_numbers=dsa.DSAPublicNumbers(
y=y,
parameter_numbers=dsa.DSAParameterNumbers(
p=p,
q=q,
g=g
)
)
).private_key(backend=default_backend())
)
else:
raise BadKeyError("unknown key type %s" % (kind,))
@classmethod
def _fromString_PUBLIC_LSH(cls, data):
"""
Return a public key corresponding to this LSH public key string.
The LSH public key string format is::
<s-expression: ('public-key', (<key type>, (<name, <value>)+))>
The names for a RSA (key type 'rsa-pkcs1-sha1') key are: n, e.
The names for a DSA (key type 'dsa') key are: y, g, p, q.
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type is unknown
"""
sexp = sexpy.parse(decodebytes(data[1:-1]))
assert sexp[0] == b'public-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.getMP(common.NS(data))[0]
if sexp[1][0] == b'dsa':
return cls._fromDSAComponents(
y=kd[b'y'], g=kd[b'g'], p=kd[b'p'], q=kd[b'q'])
elif sexp[1][0] == b'rsa-pkcs1-sha1':
return cls._fromRSAComponents(n=kd[b'n'], e=kd[b'e'])
else:
raise BadKeyError('unknown lsh key type %s' % (sexp[1][0],))
@classmethod
def _fromString_PRIVATE_LSH(cls, data):
"""
Return a private key corresponding to this LSH private key string.
The LSH private key string format is::
<s-expression: ('private-key', (<key type>, (<name>, <value>)+))>
The names for a RSA (key type 'rsa-pkcs1-sha1') key are: n, e, d, p, q.
The names for a DSA (key type 'dsa') key are: y, g, p, q, x.
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type is unknown
"""
sexp = sexpy.parse(data)
assert sexp[0] == b'private-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.getMP(common.NS(data))[0]
if sexp[1][0] == b'dsa':
assert len(kd) == 5, len(kd)
return cls._fromDSAComponents(
y=kd[b'y'], g=kd[b'g'], p=kd[b'p'], q=kd[b'q'], x=kd[b'x'])
elif sexp[1][0] == b'rsa-pkcs1':
assert len(kd) == 8, len(kd)
if kd[b'p'] > kd[b'q']: # Make p smaller than q
kd[b'p'], kd[b'q'] = kd[b'q'], kd[b'p']
return cls._fromRSAComponents(
n=kd[b'n'], e=kd[b'e'], d=kd[b'd'], p=kd[b'p'], q=kd[b'q'])
else:
raise BadKeyError('unknown lsh key type %s' % (sexp[1][0],))
@classmethod
def _fromString_AGENTV3(cls, data):
"""
Return a private key object corresponsing to the Secure Shell Key
Agent v3 format.
The SSH Key Agent v3 format for a RSA key is::
string 'ssh-rsa'
integer e
integer d
integer n
integer u
integer p
integer q
The SSH Key Agent v3 format for a DSA key is::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type (the first string) is unknown
"""
keyType, data = common.getNS(data)
if keyType == b'ssh-dss':
p, data = common.getMP(data)
q, data = common.getMP(data)
g, data = common.getMP(data)
y, data = common.getMP(data)
x, data = common.getMP(data)
return cls._fromDSAComponents(y=y, g=g, p=p, q=q, x=x)
elif keyType == b'ssh-rsa':
e, data = common.getMP(data)
d, data = common.getMP(data)
n, data = common.getMP(data)
u, data = common.getMP(data)
p, data = common.getMP(data)
q, data = common.getMP(data)
return cls._fromRSAComponents(n=n, e=e, d=d, p=p, q=q, u=u)
else:
raise BadKeyError("unknown key type %s" % (keyType,))
@classmethod
def _guessStringType(cls, data):
"""
Guess the type of key in data. The types map to _fromString_*
methods.
@type data: L{bytes}
@param data: The key data.
"""
if data.startswith(b'ssh-') or data.startswith(b'ecdsa-sha2-'):
return 'public_openssh'
elif data.startswith(b'-----BEGIN'):
return 'private_openssh'
elif data.startswith(b'{'):
return 'public_lsh'
elif data.startswith(b'('):
return 'private_lsh'
elif data.startswith(b'\x00\x00\x00\x07ssh-') or data.startswith(b'\x00\x00\x00\x13ecdsa-'):
ignored, rest = common.getNS(data)
count = 0
while rest:
count += 1
ignored, rest = common.getMP(rest)
if count > 4:
return 'agentv3'
else:
return 'blob'
@classmethod
def _fromRSAComponents(cls, n, e, d=None, p=None, q=None, u=None):
"""
Build a key from RSA numerical components.
@type n: L{int}
@param n: The 'n' RSA variable.
@type e: L{int}
@param e: The 'e' RSA variable.
@type d: L{int} or L{None}
@param d: The 'd' RSA variable (optional for a public key).
@type p: L{int} or L{None}
@param p: The 'p' RSA variable (optional for a public key).
@type q: L{int} or L{None}
@param q: The 'q' RSA variable (optional for a public key).
@type u: L{int} or L{None}
@param u: The 'u' RSA variable. Ignored, as its value is determined by
p and q.
@rtype: L{Key}
@return: An RSA key constructed from the values as given.
"""
publicNumbers = rsa.RSAPublicNumbers(e=e, n=n)
if d is None:
# We have public components.
keyObject = publicNumbers.public_key(default_backend())
else:
privateNumbers = rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=rsa.rsa_crt_dmp1(d, p),
dmq1=rsa.rsa_crt_dmq1(d, q),
iqmp=rsa.rsa_crt_iqmp(p, q),
public_numbers=publicNumbers,
)
keyObject = privateNumbers.private_key(default_backend())
return cls(keyObject)
@classmethod
def _fromDSAComponents(cls, y, p, q, g, x=None):
"""
Build a key from DSA numerical components.
@type y: L{int}
@param y: The 'y' DSA variable.
@type p: L{int}
@param p: The 'p' DSA variable.
@type q: L{int}
@param q: The 'q' DSA variable.
@type g: L{int}
@param g: The 'g' DSA variable.
@type x: L{int} or L{None}
@param x: The 'x' DSA variable (optional for a public key)
@rtype: L{Key}
@return: A DSA key constructed from the values as given.
"""
publicNumbers = dsa.DSAPublicNumbers(
y=y, parameter_numbers=dsa.DSAParameterNumbers(p=p, q=q, g=g))
if x is None:
# We have public components.
keyObject = publicNumbers.public_key(default_backend())
else:
privateNumbers = dsa.DSAPrivateNumbers(
x=x, public_numbers=publicNumbers)
keyObject = privateNumbers.private_key(default_backend())
return cls(keyObject)
@classmethod
def _fromECComponents(cls, x, y, curve, privateValue=None):
"""
Build a key from EC components.
@param x: The affine x component of the public point used for verifying.
@type x: L{int}
@param y: The affine y component of the public point used for verifying.
@type y: L{int}
@param curve: NIST name of elliptic curve.
@type curve: L{bytes}
@param privateValue: The private value.
@type privateValue: L{int}
"""
publicNumbers = ec.EllipticCurvePublicNumbers(
x=x, y=y, curve=_curveTable[curve])
if privateValue is None:
# We have public components.
keyObject = publicNumbers.public_key(default_backend())
else:
privateNumbers = ec.EllipticCurvePrivateNumbers(
private_value=privateValue, public_numbers=publicNumbers)
keyObject = privateNumbers.private_key(default_backend())
return cls(keyObject)
def __init__(self, keyObject):
"""
Initialize with a private or public
C{cryptography.hazmat.primitives.asymmetric} key.
@param keyObject: Low level key.
@type keyObject: C{cryptography.hazmat.primitives.asymmetric} key.
"""
# Avoid importing PyCrypto if at all possible
if keyObject.__class__.__module__.startswith('Crypto.PublicKey'):
warningString = getDeprecationWarningString(
Key,
Version("Twisted", 16, 0, 0),
replacement='passing a cryptography key object')
warnings.warn(warningString, DeprecationWarning, stacklevel=2)
self.keyObject = keyObject
else:
self._keyObject = keyObject
def __eq__(self, other):
"""
Return True if other represents an object with the same key.
"""
if type(self) == type(other):
return self.type() == other.type() and self.data() == other.data()
else:
return NotImplemented
def __ne__(self, other):
"""
Return True if other represents anything other than this key.
"""
result = self.__eq__(other)
if result == NotImplemented:
return result
return not result
    def __repr__(self):
        """
        Return a pretty representation of this object.
        """
        if self.type() == 'EC':
            data = self.data()
            # The trailing three characters of the NIST curve name are the
            # bit size, e.g. 'ecdsa-sha2-nistp256' -> '256'.
            name = data['curve'].decode('utf-8')
            if self.isPublic():
                out = '<Elliptic Curve Public Key (%s bits)' % (name[-3:],)
            else:
                out = '<Elliptic Curve Private Key (%s bits)' % (name[-3:],)
            for k, v in sorted(data.items()):
                if _PY3 and k == 'curve':
                    # On Python 3 show the decoded curve name instead of the
                    # bytes repr.
                    out += "\ncurve:\n\t%s" % (name,)
                else:
                    out += "\n%s:\n\t%s" % (k, v)
            return out + ">\n"
        else:
            lines = [
                '<%s %s (%s bits)' % (
                    nativeString(self.type()),
                    self.isPublic() and 'Public Key' or 'Private Key',
                    self._keyObject.key_size)]
            for k, v in sorted(self.data().items()):
                lines.append('attr %s:' % (k,))
                # MP() prepends a 4-byte length; skip it and dump the value
                # as colon-separated hex, 15 bytes per line.
                by = common.MP(v)[4:]
                while by:
                    m = by[:15]
                    by = by[15:]
                    o = ''
                    for c in iterbytes(m):
                        o = o + '%02x:' % (ord(c),)
                    if len(m) < 15:
                        # Last line: drop the trailing colon.
                        o = o[:-1]
                    lines.append('\t' + o)
            lines[-1] = lines[-1] + '>'
            return '\n'.join(lines)
    @property
    @deprecated(Version('Twisted', 16, 0, 0))
    def keyObject(self):
        """
        A C{Crypto.PublicKey} object similar to this key.

        As PyCrypto is no longer used for the underlying operations, this
        property should be avoided.
        """
        # Lazy import to have PyCrypto as a soft dependency.
        from Crypto.PublicKey import DSA, RSA
        keyObject = None
        keyType = self.type()
        keyData = self.data()
        isPublic = self.isPublic()
        if keyType == 'RSA':
            if isPublic:
                keyObject = RSA.construct((
                    keyData['n'],
                    long(keyData['e']),
                ))
            else:
                keyObject = RSA.construct((
                    keyData['n'],
                    long(keyData['e']),
                    keyData['d'],
                    keyData['p'],
                    keyData['q'],
                    keyData['u'],
                ))
        elif keyType == 'DSA':
            if isPublic:
                keyObject = DSA.construct((
                    keyData['y'],
                    keyData['g'],
                    keyData['p'],
                    keyData['q'],
                ))
            else:
                keyObject = DSA.construct((
                    keyData['y'],
                    keyData['g'],
                    keyData['p'],
                    keyData['q'],
                    keyData['x'],
                ))
        else:
            # EC keys (and anything else) have no PyCrypto equivalent here.
            raise BadKeyError('Unsupported key type.')
        return keyObject
    @keyObject.setter
    @deprecated(Version('Twisted', 16, 0, 0))
    def keyObject(self, value):
        """
        Replace the wrapped key with the equivalent of a PyCrypto key object.

        @param value: A PyCrypto RSA or DSA key (public or private).
        @raises BadKeyError: if C{value} is not a supported PyCrypto key type.
        """
        # Lazy import to have PyCrypto as a soft dependency.
        from Crypto.PublicKey import DSA, RSA
        if isinstance(value, RSA._RSAobj):
            rawKey = value.key
            if rawKey.has_private():
                newKey = self._fromRSAComponents(
                    e=rawKey.e,
                    n=rawKey.n,
                    p=rawKey.p,
                    q=rawKey.q,
                    d=rawKey.d,
                    u=rawKey.u,
                )
            else:
                newKey = self._fromRSAComponents(e=rawKey.e, n=rawKey.n)
        elif isinstance(value, DSA._DSAobj):
            rawKey = value.key
            if rawKey.has_private():
                newKey = self._fromDSAComponents(
                    y=rawKey.y,
                    p=rawKey.p,
                    q=rawKey.q,
                    g=rawKey.g,
                    x=rawKey.x,
                )
            else:
                newKey = self._fromDSAComponents(
                    y=rawKey.y,
                    p=rawKey.p,
                    q=rawKey.q,
                    g=rawKey.g,
                )
        else:
            raise BadKeyError('PyCrypto key type not supported.')
        # Adopt the cryptography-backed key built from the components.
        self._keyObject = newKey._keyObject
def isPublic(self):
"""
Check if this instance is a public key.
@return: C{True} if this is a public key.
"""
return isinstance(
self._keyObject,
(rsa.RSAPublicKey, dsa.DSAPublicKey, ec.EllipticCurvePublicKey))
def public(self):
"""
Returns a version of this key containing only the public key data.
If this is a public key, this may or may not be the same object
as self.
@rtype: L{Key}
@return: A public key.
"""
return Key(self._keyObject.public_key())
def fingerprint(self, format=FingerprintFormats.MD5_HEX):
"""
The fingerprint of a public key consists of the output of the
message-digest algorithm in the specified format.
Supported formats include L{FingerprintFormats.MD5_HEX} and
L{FingerprintFormats.SHA256_BASE64}
The input to the algorithm is the public key data as specified by [RFC4253].
The output of sha256[RFC4634] algorithm is presented to the
user in the form of base64 encoded sha256 hashes.
Example: C{US5jTUa0kgX5ZxdqaGF0yGRu8EgKXHNmoT8jHKo1StM=}
The output of the MD5[RFC1321](default) algorithm is presented to the user as
a sequence of 16 octets printed as hexadecimal with lowercase letters
and separated by colons.
Example: C{c1:b1:30:29:d7:b8:de:6c:97:77:10:d7:46:41:63:87}
@param format: Format for fingerprint generation. Consists
hash function and representation format.
Default is L{FingerprintFormats.MD5_HEX}
@since: 8.2
@return: the user presentation of this L{Key}'s fingerprint, as a
string.
@rtype: L{str}
"""
if format is FingerprintFormats.SHA256_BASE64:
return nativeString(base64.b64encode(
sha256(self.blob()).digest()))
elif format is FingerprintFormats.MD5_HEX:
return nativeString(
b':'.join([binascii.hexlify(x)
for x in iterbytes(md5(self.blob()).digest())]))
else:
raise BadFingerPrintFormat(
'Unsupported fingerprint format: %s' % (format,))
def type(self):
"""
Return the type of the object we wrap. Currently this can only be
'RSA', 'DSA', or 'EC'.
@rtype: L{str}
@raises RuntimeError: If the object type is unknown.
"""
if isinstance(
self._keyObject, (rsa.RSAPublicKey, rsa.RSAPrivateKey)):
return 'RSA'
elif isinstance(
self._keyObject, (dsa.DSAPublicKey, dsa.DSAPrivateKey)):
return 'DSA'
elif isinstance(
self._keyObject, (ec.EllipticCurvePublicKey, ec.EllipticCurvePrivateKey)):
return 'EC'
else:
raise RuntimeError(
'unknown type of object: %r' % (self._keyObject,))
def sshType(self):
"""
Get the type of the object we wrap as defined in the SSH protocol,
defined in RFC 4253, Section 6.6. Currently this can only be b'ssh-rsa',
b'ssh-dss' or b'ecdsa-sha2-[identifier]'.
identifier is the standard NIST curve name
@return: The key type format.
@rtype: L{bytes}
"""
if self.type() == 'EC':
return b'ecdsa-sha2-' + _secToNist[self._keyObject.curve.name.encode('ascii')]
else:
return {'RSA': b'ssh-rsa', 'DSA': b'ssh-dss'}[self.type()]
def size(self):
"""
Return the size of the object we wrap.
@return: The size of the key.
@rtype: L{int}
"""
if self._keyObject is None:
return 0
elif self.type() == 'EC':
return self._keyObject.curve.key_size
return self._keyObject.key_size
    def data(self):
        """
        Return the values of the public key as a dictionary.

        The dictionary keys depend on the key type: RSA public keys expose
        C{n}/C{e}; RSA private keys add C{d}/C{p}/C{q}/C{u}; DSA keys expose
        C{y}/C{g}/C{p}/C{q} (plus C{x} for private); EC keys expose
        C{x}/C{y}/C{curve} (plus C{privateValue} for private).

        @rtype: L{dict}
        """
        if isinstance(self._keyObject, rsa.RSAPublicKey):
            numbers = self._keyObject.public_numbers()
            return {
                "n": numbers.n,
                "e": numbers.e,
            }
        elif isinstance(self._keyObject, rsa.RSAPrivateKey):
            numbers = self._keyObject.private_numbers()
            return {
                "n": numbers.public_numbers.n,
                "e": numbers.public_numbers.e,
                "d": numbers.d,
                "p": numbers.p,
                "q": numbers.q,
                # Use a trick: iqmp is q^-1 % p, u is p^-1 % q, so swapping
                # the arguments to rsa_crt_iqmp yields u.
                "u": rsa.rsa_crt_iqmp(numbers.q, numbers.p),
            }
        elif isinstance(self._keyObject, dsa.DSAPublicKey):
            numbers = self._keyObject.public_numbers()
            return {
                "y": numbers.y,
                "g": numbers.parameter_numbers.g,
                "p": numbers.parameter_numbers.p,
                "q": numbers.parameter_numbers.q,
            }
        elif isinstance(self._keyObject, dsa.DSAPrivateKey):
            numbers = self._keyObject.private_numbers()
            return {
                "x": numbers.x,
                "y": numbers.public_numbers.y,
                "g": numbers.public_numbers.parameter_numbers.g,
                "p": numbers.public_numbers.parameter_numbers.p,
                "q": numbers.public_numbers.parameter_numbers.q,
            }
        elif isinstance(self._keyObject, ec.EllipticCurvePublicKey):
            numbers = self._keyObject.public_numbers()
            return {
                "x": numbers.x,
                "y": numbers.y,
                "curve": self.sshType(),
            }
        elif isinstance(self._keyObject, ec.EllipticCurvePrivateKey):
            numbers = self._keyObject.private_numbers()
            return {
                "x": numbers.public_numbers.x,
                "y": numbers.public_numbers.y,
                "privateValue": numbers.private_value,
                "curve": self.sshType(),
            }
        else:
            raise RuntimeError("Unexpected key type: %s" % (self._keyObject,))
    def blob(self):
        """
        Return the public key blob for this key.  The blob is the
        over-the-wire format for public keys.

        SECSH-TRANS RFC 4253 Section 6.6.

        RSA keys::
            string 'ssh-rsa'
            integer e
            integer n

        DSA keys::
            string 'ssh-dss'
            integer p
            integer q
            integer g
            integer y

        EC keys::
            string 'ecdsa-sha2-[identifier]'
            integer x
            integer y

            identifier is the standard NIST curve name

        @rtype: L{bytes}
        """
        type = self.type()
        data = self.data()
        if type == 'RSA':
            return (common.NS(b'ssh-rsa') + common.MP(data['e']) +
                    common.MP(data['n']))
        elif type == 'DSA':
            return (common.NS(b'ssh-dss') + common.MP(data['p']) +
                    common.MP(data['q']) + common.MP(data['g']) +
                    common.MP(data['y']))
        else:  # EC
            byteLength = (self._keyObject.curve.key_size + 7) // 8
            # data['curve'] is e.g. b'ecdsa-sha2-nistp256'; its last eight
            # bytes are the bare NIST curve name (b'nistp256') required as
            # the second string of the blob.  The point itself is encoded
            # uncompressed (0x04 prefix, then fixed-width X and Y).
            return (common.NS(data['curve']) + common.NS(data["curve"][-8:]) +
                    common.NS(b'\x04' + utils.int_to_bytes(data['x'], byteLength) +
                    utils.int_to_bytes(data['y'], byteLength)))
def privateBlob(self):
"""
Return the private key blob for this key. The blob is the
over-the-wire format for private keys:
Specification in OpenSSH PROTOCOL.agent
RSA keys::
string 'ssh-rsa'
integer n
integer e
integer d
integer u
integer p
integer q
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
EC keys::
string 'ecdsa-sha2-[identifier]'
integer x
integer y
integer privateValue
identifier is the NIST standard curve name.
"""
type = self.type()
data = self.data()
if type == 'RSA':
return (common.NS(b'ssh-rsa') + common.MP(data['n']) +
common.MP(data['e']) + common.MP(data['d']) +
common.MP(data['u']) + common.MP(data['p']) +
common.MP(data['q']))
elif type == 'DSA':
return (common.NS(b'ssh-dss') + common.MP(data['p']) +
common.MP(data['q']) + common.MP(data['g']) +
common.MP(data['y']) + common.MP(data['x']))
else: # EC
return (common.NS(data['curve']) + common.MP(data['x']) +
common.MP(data['y']) + common.MP(data['privateValue']))
def toString(self, type, extra=None):
"""
Create a string representation of this key. If the key is a private
key and you want the representation of its public key, use
C{key.public().toString()}. type maps to a _toString_* method.
@param type: The type of string to emit. Currently supported values
are C{'OPENSSH'}, C{'LSH'}, and C{'AGENTV3'}.
@type type: L{str}
@param extra: Any extra data supported by the selected format which
is not part of the key itself. For public OpenSSH keys, this is
a comment. For private OpenSSH keys, this is a passphrase to
encrypt with.
@type extra: L{bytes} or L{unicode} or L{None}
@rtype: L{bytes}
"""
if isinstance(extra, unicode):
extra = extra.encode("utf-8")
method = getattr(self, '_toString_%s' % (type.upper(),), None)
if method is None:
raise BadKeyError('unknown key type: %s' % (type,))
if method.__code__.co_argcount == 2:
return method(extra)
else:
return method()
def _toString_OPENSSH(self, extra):
"""
Return a public or private OpenSSH string. See
_fromString_PUBLIC_OPENSSH and _fromString_PRIVATE_OPENSSH for the
string formats. If extra is present, it represents a comment for a
public key, or a passphrase for a private key.
@param extra: Comment for a public key or passphrase for a
private key
@type extra: L{bytes}
@rtype: L{bytes}
"""
data = self.data()
if self.isPublic():
if self.type() == 'EC':
if not extra:
extra = b''
return (self._keyObject.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
) + b' ' + extra).strip()
b64Data = encodebytes(self.blob()).replace(b'\n', b'')
if not extra:
extra = b''
return (self.sshType() + b' ' + b64Data + b' ' + extra).strip()
else:
if self.type() == 'EC':
# EC keys has complex ASN.1 structure hence we do this this way.
if not extra:
# unencrypted private key
encryptor = serialization.NoEncryption()
else:
encryptor = serialization.BestAvailableEncryption(extra)
return self._keyObject.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
encryptor)
lines = [b''.join((b'-----BEGIN ', self.type().encode('ascii'),
b' PRIVATE KEY-----'))]
if self.type() == 'RSA':
p, q = data['p'], data['q']
objData = (0, data['n'], data['e'], data['d'], q, p,
data['d'] % (q - 1), data['d'] % (p - 1),
data['u'])
else:
objData = (0, data['p'], data['q'], data['g'], data['y'],
data['x'])
asn1Sequence = univ.Sequence()
for index, value in izip(itertools.count(), objData):
asn1Sequence.setComponentByPosition(index, univ.Integer(value))
asn1Data = berEncoder.encode(asn1Sequence)
if extra:
iv = randbytes.secureRandom(8)
hexiv = ''.join(['%02X' % (ord(x),) for x in iterbytes(iv)])
hexiv = hexiv.encode('ascii')
lines.append(b'Proc-Type: 4,ENCRYPTED')
lines.append(b'DEK-Info: DES-EDE3-CBC,' + hexiv + b'\n')
ba = md5(extra + iv).digest()
bb = md5(ba + extra + iv).digest()
encKey = (ba + bb)[:24]
padLen = 8 - (len(asn1Data) % 8)
asn1Data += (chr(padLen) * padLen).encode('ascii')
encryptor = Cipher(
algorithms.TripleDES(encKey),
modes.CBC(iv),
backend=default_backend()
).encryptor()
asn1Data = encryptor.update(asn1Data) + encryptor.finalize()
b64Data = encodebytes(asn1Data).replace(b'\n', b'')
lines += [b64Data[i:i + 64] for i in range(0, len(b64Data), 64)]
lines.append(b''.join((b'-----END ', self.type().encode('ascii'),
b' PRIVATE KEY-----')))
return b'\n'.join(lines)
def _toString_LSH(self):
"""
Return a public or private LSH key. See _fromString_PUBLIC_LSH and
_fromString_PRIVATE_LSH for the key formats.
@rtype: L{bytes}
"""
data = self.data()
type = self.type()
if self.isPublic():
if type == 'RSA':
keyData = sexpy.pack([[b'public-key',
[b'rsa-pkcs1-sha1',
[b'n', common.MP(data['n'])[4:]],
[b'e', common.MP(data['e'])[4:]]]]])
elif type == 'DSA':
keyData = sexpy.pack([[b'public-key',
[b'dsa',
[b'p', common.MP(data['p'])[4:]],
[b'q', common.MP(data['q'])[4:]],
[b'g', common.MP(data['g'])[4:]],
[b'y', common.MP(data['y'])[4:]]]]])
else:
raise BadKeyError("unknown key type %s" % (type,))
return (b'{' + encodebytes(keyData).replace(b'\n', b'') +
b'}')
else:
if type == 'RSA':
p, q = data['p'], data['q']
return sexpy.pack([[b'private-key',
[b'rsa-pkcs1',
[b'n', common.MP(data['n'])[4:]],
[b'e', common.MP(data['e'])[4:]],
[b'd', common.MP(data['d'])[4:]],
[b'p', common.MP(q)[4:]],
[b'q', common.MP(p)[4:]],
[b'a', common.MP(
data['d'] % (q - 1))[4:]],
[b'b', common.MP(
data['d'] % (p - 1))[4:]],
[b'c', common.MP(data['u'])[4:]]]]])
elif type == 'DSA':
return sexpy.pack([[b'private-key',
[b'dsa',
[b'p', common.MP(data['p'])[4:]],
[b'q', common.MP(data['q'])[4:]],
[b'g', common.MP(data['g'])[4:]],
[b'y', common.MP(data['y'])[4:]],
[b'x', common.MP(data['x'])[4:]]]]])
else:
raise BadKeyError("unknown key type %s'" % (type,))
def _toString_AGENTV3(self):
"""
Return a private Secure Shell Agent v3 key. See
_fromString_AGENTV3 for the key format.
@rtype: L{bytes}
"""
data = self.data()
if not self.isPublic():
if self.type() == 'RSA':
values = (data['e'], data['d'], data['n'], data['u'],
data['p'], data['q'])
elif self.type() == 'DSA':
values = (data['p'], data['q'], data['g'], data['y'],
data['x'])
return common.NS(self.sshType()) + b''.join(map(common.MP, values))
def sign(self, data):
"""
Sign some data with this key.
SECSH-TRANS RFC 4253 Section 6.6.
@type data: L{bytes}
@param data: The data to sign.
@rtype: L{bytes}
@return: A signature for the given data.
"""
keyType = self.type()
if keyType == 'RSA':
signer = self._keyObject.signer(
padding.PKCS1v15(), hashes.SHA1())
signer.update(data)
ret = common.NS(signer.finalize())
elif keyType == 'DSA':
signer = self._keyObject.signer(hashes.SHA1())
signer.update(data)
signature = signer.finalize()
(r, s) = decode_dss_signature(signature)
# SSH insists that the DSS signature blob be two 160-bit integers
# concatenated together. The sig[0], [1] numbers from obj.sign
# are just numbers, and could be any length from 0 to 160 bits.
# Make sure they are padded out to 160 bits (20 bytes each)
ret = common.NS(int_to_bytes(r, 20) + int_to_bytes(s, 20))
elif keyType == 'EC': # Pragma: no branch
# Hash size depends on key size
keySize = self.size()
if keySize <= 256:
hashSize = hashes.SHA256()
elif keySize <= 384:
hashSize = hashes.SHA384()
else:
hashSize = hashes.SHA512()
signer = self._keyObject.signer(ec.ECDSA(hashSize))
signer.update(data)
signature = signer.finalize()
(r, s) = decode_dss_signature(signature)
rb = int_to_bytes(r)
sb = int_to_bytes(s)
# Int_to_bytes returns rb[0] as a str in python2
# and an as int in python3
if type(rb[0]) is str:
rcomp = ord(rb[0])
else:
rcomp = rb[0]
# If the MSB is set, prepend a null byte for correct formatting.
if rcomp & 0x80:
rb = b"\x00" + rb
if type(sb[0]) is str:
scomp = ord(sb[0])
else:
scomp = sb[0]
if scomp & 0x80:
sb = b"\x00" + sb
ret = common.NS(common.NS(rb) + common.NS(sb))
return common.NS(self.sshType()) + ret
def verify(self, signature, data):
"""
Verify a signature using this key.
@type signature: L{bytes}
@param signature: The signature to verify.
@type data: L{bytes}
@param data: The signed data.
@rtype: L{bool}
@return: C{True} if the signature is valid.
"""
if len(signature) == 40:
# DSA key with no padding
signatureType, signature = b'ssh-dss', common.NS(signature)
else:
signatureType, signature = common.getNS(signature)
if signatureType != self.sshType():
return False
keyType = self.type()
if keyType == 'RSA':
k = self._keyObject
if not self.isPublic():
k = k.public_key()
verifier = k.verifier(
common.getNS(signature)[0],
padding.PKCS1v15(),
hashes.SHA1(),
)
elif keyType == 'DSA':
concatenatedSignature = common.getNS(signature)[0]
r = int_from_bytes(concatenatedSignature[:20], 'big')
s = int_from_bytes(concatenatedSignature[20:], 'big')
signature = encode_dss_signature(r, s)
k = self._keyObject
if not self.isPublic():
k = k.public_key()
verifier = k.verifier(
signature, hashes.SHA1())
elif keyType == 'EC': # Pragma: no branch
concatenatedSignature = common.getNS(signature)[0]
rstr, sstr, rest = common.getNS(concatenatedSignature, 2)
r = int_from_bytes(rstr, 'big')
s = int_from_bytes(sstr, 'big')
signature = encode_dss_signature(r, s)
k = self._keyObject
if not self.isPublic():
k = k.public_key()
keySize = self.size()
if keySize <= 256: # Hash size depends on key size
hashSize = hashes.SHA256()
elif keySize <= 384:
hashSize = hashes.SHA384()
else:
hashSize = hashes.SHA512()
verifier = k.verifier(signature, ec.ECDSA(hashSize))
verifier.update(data)
try:
verifier.verify()
except InvalidSignature:
return False
else:
return True
@deprecated(Version("Twisted", 15, 5, 0))
def objectType(obj):
"""
DEPRECATED. Return the SSH key type corresponding to a
C{Crypto.PublicKey.pubkey.pubkey} object.
@param obj: Key for which the type is returned.
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@return: Return the SSH key type corresponding to a PyCrypto object.
@rtype: L{str}
"""
keyDataMapping = {
('n', 'e', 'd', 'p', 'q'): b'ssh-rsa',
('n', 'e', 'd', 'p', 'q', 'u'): b'ssh-rsa',
('y', 'g', 'p', 'q', 'x'): b'ssh-dss'
}
try:
return keyDataMapping[tuple(obj.keydata)]
except (KeyError, AttributeError):
raise BadKeyError("invalid key object", obj)
def _getPersistentRSAKey(location, keySize=4096):
"""
This function returns a persistent L{Key}.
The key is loaded from a PEM file in C{location}. If it does not exist, a
key with the key size of C{keySize} is generated and saved.
@param location: Where the key is stored.
@type location: L{twisted.python.filepath.FilePath}
@param keySize: The size of the key, if it needs to be generated.
@type keySize: L{int}
@returns: A persistent key.
@rtype: L{Key}
"""
location.parent().makedirs(ignoreExistingDirectory=True)
# If it doesn't exist, we want to generate a new key and save it
if not location.exists():
privateKey = rsa.generate_private_key(
public_exponent=65537,
key_size=keySize,
backend=default_backend()
)
pem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
location.setContent(pem)
# By this point (save any hilarious race conditions) we should have a
# working PEM file. Load it!
# (Future archaeological readers: I chose not to short circuit above,
# because then there's two exit paths to this code!)
with location.open("rb") as keyFile:
privateKey = serialization.load_pem_private_key(
keyFile.read(),
password=None,
backend=default_backend()
)
return Key(privateKey)
if _PY3:
# The objectType function is deprecated and not being ported to Python 3.
del objectType
| {
"content_hash": "21370ae3b396c763badfed36194e5497",
"timestamp": "",
"source": "github",
"line_count": 1552,
"max_line_length": 100,
"avg_line_length": 35.06829896907217,
"alnum_prop": 0.5179877264542682,
"repo_name": "ntuecon/server",
"id": "65760920b4a073d72c395667e98964da26630828",
"size": "54554",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "pyenv/Lib/site-packages/twisted/conch/ssh/keys.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2209"
},
{
"name": "Batchfile",
"bytes": "1509"
},
{
"name": "C",
"bytes": "504013"
},
{
"name": "C++",
"bytes": "96440"
},
{
"name": "CSS",
"bytes": "133288"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "150026"
},
{
"name": "JavaScript",
"bytes": "243314"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "27048260"
},
{
"name": "Shell",
"bytes": "47820"
},
{
"name": "Tcl",
"bytes": "1237796"
},
{
"name": "Visual Basic",
"bytes": "949"
},
{
"name": "XSLT",
"bytes": "2113"
}
],
"symlink_target": ""
} |
from Foundation import *
from AppKit import *
import objc
import urllib2
import tumblr
from tumblr import Api
from DashboardController import *
class LoginController(NSWindowController):
blog = objc.IBOutlet()
password = objc.IBOutlet()
user = objc.IBOutlet()
def init(self):
self.errors = {'403':'Login o password incorrectos','404':'Tumblrlog incorrecto','urlopen':'no ingreso su tumblrlog'}
return self
@objc.IBAction
def authTumblr_(self, sender):
self.p = self.password.stringValue()
self.u = self.user.stringValue()
self.b = self.blog.stringValue()
self.api = Api(self.b, self.u, self.p)
NSLog("Blog %s, User %s , Password %s" % (self.b, self.u, self.p))
try:
self.auth = self.api.auth_check()
self.destroy()
DashboardController.show()
except tumblr.TumblrAuthError:
print self.errors['403']
except tumblr.TumblrError(self):
print self.errors['404']
#except urllib2.HTTPError():
# print self.errors['404']
#except urllib2.URLError:
# print self.errors['urlopen']
def destroy(self):
app = NSApplication.sharedApplication()
appdelegate = app.delegate()
appdelegate.w.close()
| {
"content_hash": "6792e2f0caf5522714720ae5fde5d28c",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 119,
"avg_line_length": 27.761904761904763,
"alnum_prop": 0.6963979416809606,
"repo_name": "jyr/opentumblr-cocoa",
"id": "8ce550524fb87f9f401dedd8ee00c35d1be6df8a",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LoginController.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "76128"
},
{
"name": "Objective-C",
"bytes": "1823"
},
{
"name": "Python",
"bytes": "79652"
}
],
"symlink_target": ""
} |
"""GitHub interactions."""
import json
import time
import gitlab
CommonError = gitlab.exceptions.GitlabError
def set_ref(repo, ref, sha, *, force=False, auto_create=True, retry=1):
branch_name = ref[len("heads/"):] if ref.startswith('heads') else ref
# Delete branch first
try:
repo.branches.delete(branch_name)
except CommonError:
pass
repo.branches.create({"branch": branch_name, "ref": sha})
class Status:
def __init__(self, info):
self.state = info.status
self.context = info.name
def iter_statuses(repo, sha):
for item in repo.commits.get(sha).statuses.list():
yield Status(item)
def create_status(
repo, sha, state, target_url='', description='', *,
context='',
):
data = {
'state': state, 'target_url': target_url,
'description': description, 'context': context,
}
repo.commits.get(sha).statuses.create(data)
def login(host, access_token):
return gitlab.Gitlab(host, private_token=access_token)
def iter_issue_comments(repo, num):
return repo.mergerequests.get(num).notes.list()
def get_ref_sha(repo, ref):
branch_name = ref[len("heads/"):] if ref.startswith('heads/') else ref
return repo.branches.get(branch_name).commit["id"]
def get_pull(repo, num):
return repo.mergerequests.get(num)
def get_pull_request_sha(repo, num):
return get_pull(repo, num).sha
def get_pull_request_user(repo, num):
return get_pull(repo, num).author.username
def get_parent_shas(repo, sha):
return repo.commit(sha).parent_ids
def get_commit(repo, sha):
return repo.commits.get(sha)
def is_collaborator(repo, username):
return True
def get_repository(gitlab, owner, name):
return gitlab.projects.get("{owner}/{name}".format(owner=owner, name=name))
| {
"content_hash": "1496322d43ffe1fb655eea0233ebfd1a",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 21.96385542168675,
"alnum_prop": 0.6615469007131103,
"repo_name": "coldnight/homu",
"id": "657ff51e324831a9948cc073851042602e86db85",
"size": "1823",
"binary": false,
"copies": "1",
"ref": "refs/heads/gitlab",
"path": "homu/gitlab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10833"
},
{
"name": "Python",
"bytes": "87424"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from rest_assured.testcases import CreateAPITestCaseMixin
from tests import mocks
from tests.models import Stuff
class TestCreateTestCase(TestCase):
def get_case(self, **kwargs):
class MockCreateTestCase(CreateAPITestCaseMixin, mocks.MockTestCase):
base_name = kwargs.pop('base_name', 'stuff')
factory_class = mocks.StuffFactory
create_data = {"name": "moar stuff"}
self.case_class = MockCreateTestCase
return MockCreateTestCase(**kwargs)
def test_get_create_url(self):
instance = self.get_case(methodName='dummy')
assert instance.get_create_url() == '/stuff/'
def test_get_create_data(self):
instance = self.get_case(methodName='dummy')
assert instance.get_create_data() is self.case_class.create_data
def test_get_create_response(self):
instance = self.get_case(methodName='dummy')
assert instance.get_create_response()
def test_get_lookup_from_response(self):
instance = self.get_case(methodName='dummy')
response = instance.get_create_response()
assert instance.get_lookup_from_response(response.data)
def test_test_create(self):
instance = self.get_case(methodName='dummy')
instance.setUp()
response, created = instance.test_create()
assert response
assert created
assert isinstance(created, Stuff)
assert response.data['name'] == created.name
# try again using a different lookup field
instance.response_lookup_field = 'name'
instance.lookup_field = 'name'
response, created = instance.test_create({'name': 'unique stuff'})
assert response
assert created
assert isinstance(created, Stuff)
assert response.data['name'] == created.name
def test_test_create_with_hyperlinkedmodelserializer(self):
instance = self.get_case(methodName='dummy', base_name='stuff-linked')
instance.setUp()
instance.response_lookup_field = 'name'
instance.lookup_field = 'name'
response, created = instance.test_create({'name': 'moar unique stuff'})
assert response
assert created
assert isinstance(created, Stuff)
assert response.data['name'] == created.name
assert response.data['url']
| {
"content_hash": "93e16eee74f0e18e66dc922bfffc1964",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 79,
"avg_line_length": 37.09375,
"alnum_prop": 0.6588037068239259,
"repo_name": "yprez/django-rest-assured",
"id": "36f98d0693a5d8fa2846f3722cf20d3a69f73cf7",
"size": "2374",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_create.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "36588"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Spell.casting_time'
db.alter_column('spell', 'casting_time', self.gf('django.db.models.fields.CharField')(max_length=64))
# Changing field 'Spell.subschool'
db.alter_column('spell', 'subschool', self.gf('django.db.models.fields.CharField')(max_length=128))
def backwards(self, orm):
# Changing field 'Spell.casting_time'
db.alter_column('spell', 'casting_time', self.gf('django.db.models.fields.CharField')(max_length=32))
# Changing field 'Spell.subschool'
db.alter_column('spell', 'subschool', self.gf('django.db.models.fields.CharField')(max_length=32))
models = {
'srd20.characterclass': {
'Meta': {'ordering': "('name',)", 'object_name': 'CharacterClass', 'db_table': "'class'"},
'alignment': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'class_skills': ('django.db.models.fields.TextField', [], {}),
'epic_feat_base_level': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'epic_feat_interval': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'epic_feat_list': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'epic_full_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {}),
'hit_die': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'proficiencies': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'req_base_attack_bonus': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'req_epic_feat': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'req_feat': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'req_languages': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'req_psionics': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'req_race': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'req_skill': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'req_special': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'req_spells': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'req_weapon_proficiency': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'skill_points': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'skill_points_ability': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'spell_list_1': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'spell_list_2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'spell_list_3': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'spell_list_4': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'spell_list_5': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'spell_stat': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'spell_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'srd20.feat': {
'Meta': {'ordering': "('name',)", 'object_name': 'Feat', 'db_table': "'feat'"},
'altname': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'benefit': ('django.db.models.fields.TextField', [], {}),
'choice': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'normal': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'prerequisite': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'srd20.spell': {
'Meta': {'ordering': "('name',)", 'object_name': 'Spell', 'db_table': "'spell'"},
'altname': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'arcane_focus': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'arcane_material_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'cleric_focus': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'components': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'descriptor': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'druid_focus': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'focus': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'material_components': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'range': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'spellcraft_dc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'subschool': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'to_develop': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'verbal_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'xp_cost': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['srd20']
| {
"content_hash": "0850fbf476da183ee0298362662050b7",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 124,
"avg_line_length": 76.03571428571429,
"alnum_prop": 0.5497886331611085,
"repo_name": "machinalis/django-srd20",
"id": "49db94b44c1cdf7cf86de924623655086459c1bf",
"size": "8534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "srd20/migrations/0021_auto__chg_field_spell_casting_time__chg_field_spell_subschool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1458"
},
{
"name": "Python",
"bytes": "225560"
}
],
"symlink_target": ""
} |
from tests import utils
from tornado import testing
class TestRenderHandler(utils.HandlerTestCase):
@testing.gen_test
def test_options_skips_prepare(self):
# Would crash b/c lack of mocks
yield self.http_client.fetch(
self.get_url('/render'),
method='OPTIONS'
)
| {
"content_hash": "ea662f4ac2a9831fa11f87ad034b6339",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 47,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.6417445482866043,
"repo_name": "AddisonSchiller/modular-file-renderer",
"id": "c6b9ae371bf31c11266a2cc696a0cac2130152a4",
"size": "321",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "tests/server/handlers/test_render.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93955"
},
{
"name": "HTML",
"bytes": "28280"
},
{
"name": "Java",
"bytes": "835161"
},
{
"name": "JavaScript",
"bytes": "1238281"
},
{
"name": "Jupyter Notebook",
"bytes": "1202318"
},
{
"name": "Mako",
"bytes": "35815"
},
{
"name": "Python",
"bytes": "233412"
}
],
"symlink_target": ""
} |
import numpy as np
from menpo.base import doc_inherit, name_of_callable
from menpo.math import as_matrix, ipca, pca, pcacov
from .linear import MeanLinearVectorModel
from .vectorizable import VectorizableBackedModel
class PCAVectorModel(MeanLinearVectorModel):
r"""
A :map:`MeanLinearModel` where components are Principal Components.
Principal Component Analysis (PCA) by eigenvalue decomposition of the
data's scatter matrix. For details of the implementation of PCA, see
:map:`pca`.
Parameters
----------
samples : `ndarray` or `list` or `iterable` of `ndarray`
List or iterable of numpy arrays to build the model from, or an
existing data matrix.
centre : `bool`, optional
When ``True`` (default) PCA is performed after mean centering the data.
If ``False`` the data is assumed to be centred, and the mean will be
``0``.
n_samples : `int`, optional
If provided then ``samples`` must be an iterator that yields
``n_samples``. If not provided then samples has to be a `list` (so we
know how large the data matrix needs to be).
max_n_components : `int`, optional
The maximum number of components to keep in the model. Any components
above and beyond this one are discarded.
inplace : `bool`, optional
If ``True`` the data matrix is modified in place. Otherwise, the data
matrix is copied.
"""
def __init__(
self, samples, centre=True, n_samples=None, max_n_components=None, inplace=True
):
# Generate data matrix
data, self.n_samples = self._data_to_matrix(samples, n_samples)
# Compute pca
e_vectors, e_values, mean = pca(data, centre=centre, inplace=inplace)
# The call to __init__ of MeanLinearModel is done in here
self._constructor_helper(
eigenvalues=e_values,
eigenvectors=e_vectors,
mean=mean,
centred=centre,
max_n_components=max_n_components,
)
@classmethod
def init_from_covariance_matrix(
cls, C, mean, n_samples, centred=True, is_inverse=False, max_n_components=None
):
r"""
Build the Principal Component Analysis (PCA) by eigenvalue
decomposition of the provided covariance/scatter matrix. For details
of the implementation of PCA, see :map:`pcacov`.
Parameters
----------
C : ``(n_features, n_features)`` `ndarray` or `scipy.sparse`
The Covariance/Scatter matrix. If it is a precision matrix (inverse
covariance), then set `is_inverse=True`.
mean : ``(n_features, )`` `ndarray`
The mean vector.
n_samples : `int`
The number of samples used to generate the covariance matrix.
centred : `bool`, optional
When ``True`` we assume that the data were centered before
computing the covariance matrix.
is_inverse : `bool`, optional
It ``True``, then it is assumed that `C` is a precision matrix (
inverse covariance). Thus, the eigenvalues will be inverted. If
``False``, then it is assumed that `C` is a covariance matrix.
max_n_components : `int`, optional
The maximum number of components to keep in the model. Any
components above and beyond this one are discarded.
"""
# Compute pca on covariance
e_vectors, e_values = pcacov(C, is_inverse=is_inverse)
# Create new pca instance
model = cls.__new__(cls)
model.n_samples = n_samples
# The call to __init__ of MeanLinearModel is done in here
model._constructor_helper(
eigenvalues=e_values,
eigenvectors=e_vectors,
mean=mean,
centred=centred,
max_n_components=max_n_components,
)
return model
@classmethod
def init_from_components(
cls, components, eigenvalues, mean, n_samples, centred, max_n_components=None
):
r"""
Build the Principal Component Analysis (PCA) using the provided
components (eigenvectors) and eigenvalues.
Parameters
----------
components : ``(n_components, n_features)`` `ndarray`
The eigenvectors to be used.
eigenvalues : ``(n_components, )`` `ndarray`
The corresponding eigenvalues.
mean : ``(n_features, )`` `ndarray`
The mean vector.
n_samples : `int`
The number of samples used to generate the eigenvectors.
centred : `bool`
When ``True`` we assume that the data were centered before
computing the eigenvectors.
max_n_components : `int`, optional
The maximum number of components to keep in the model. Any
components above and beyond this one are discarded.
"""
# This is a bit of a filthy trick that by rights should not be done,
# but we want to have these nice static constructors so we are living
# with the shame (create an empty object instance which we fill in).
model = cls.__new__(cls)
model.n_samples = n_samples
# The call to __init__ of MeanLinearModel is done in here
model._constructor_helper(
eigenvalues=eigenvalues,
eigenvectors=components,
mean=mean,
centred=centred,
max_n_components=max_n_components,
)
return model
def _constructor_helper(
self, eigenvalues, eigenvectors, mean, centred, max_n_components
):
# if covariance is not centred, mean must be zeros.
if centred:
MeanLinearVectorModel.__init__(self, eigenvectors, mean)
else:
MeanLinearVectorModel.__init__(
self, eigenvectors, np.zeros(mean.shape, dtype=mean.dtype)
)
self.centred = centred
self._eigenvalues = eigenvalues
# start the active components as all the components
self._n_active_components = int(self.n_components)
self._trimmed_eigenvalues = np.array([])
if max_n_components is not None:
self.trim_components(max_n_components)
def _data_to_matrix(self, data, n_samples):
# build a data matrix from all the samples
if n_samples is None:
n_samples = len(data)
# Assumed data is ndarray of (n_samples, n_features) or list of samples
if not isinstance(data, np.ndarray):
# Make sure we have an array, slice of the number of requested
# samples
data = np.array(data)[:n_samples]
return data, n_samples
def __setstate__(self, state):
if "mean_vector" in state:
state["_mean"] = state["mean_vector"]
del state["mean_vector"]
self.__dict__ = state
@property
def n_active_components(self):
r"""
The number of components currently in use on this model.
:type: `int`
"""
return self._n_active_components
    @n_active_components.setter
    def n_active_components(self, value):
        r"""
        Sets an updated number of active components on this model. The number
        of active components represents the number of principal components
        that will be used for generative purposes. Note that this therefore
        makes the model stateful. Also note that setting the number of
        components will not affect memory unless :meth:`trim_components`
        is called.

        Parameters
        ----------
        value : `int` or `float`
            The new number of active components. A `float` in
            ``(0.0, _total_variance_ratio()]`` is interpreted as the fraction
            of variance to retain and is converted to a component count; an
            `int` is used directly.

        Raises
        ------
        ValueError
            Tried setting n_active_components to {value} - value needs to be a
            float 0.0 < n_components < self._total_kept_variance_ratio ({}) or
            an integer 1 < n_components < self.n_components ({})
        """
        # Build the error message up front so both failure branches share it.
        err_str = (
            "Tried setting n_active_components to {} - "
            "value needs to be a float "
            "0.0 < n_components < self._total_kept_variance_ratio "
            "({}) or an integer 1 < n_components < "
            "self.n_components ({})".format(
                value, self._total_variance_ratio(), self.n_components
            )
        )
        # check value
        if isinstance(value, float):
            if 0.0 < value <= self._total_variance_ratio():
                # value needed to capture desired variance
                # Count how many cumulative-ratio entries fall below the
                # requested fraction, then keep one more component.
                # NOTE: np.sum returns a NumPy integer here, so the
                # isinstance(value, int) branch below does not fire for a
                # converted float - the final range check handles it.
                value = (
                    np.sum(
                        [r < value for r in self._total_eigenvalues_cumulative_ratio()]
                    )
                    + 1
                )
            else:
                # variance must be bigger than 0.0
                raise ValueError(err_str)
        if isinstance(value, int):
            if value < 1:
                # at least 1 value must be kept
                raise ValueError(err_str)
            elif value >= self.n_components:
                if self.n_active_components < self.n_components:
                    # if the number of available components is smaller than
                    # the total number of components set value to the later
                    value = self.n_components
                else:
                    # if the previous is false and value bigger than the
                    # total number of components, do nothing
                    return
        # Final gate: accept any value (int or NumPy int) in range.
        if 0 < value <= self.n_components:
            self._n_active_components = int(value)
        else:
            raise ValueError(err_str)
@property
def components(self):
r"""
Returns the active components of the model.
:type: ``(n_active_components, n_features)`` `ndarray`
"""
return self._components[: self.n_active_components, :]
@components.setter
def components(self, value):
r"""
Updates the components of this linear model, ensuring that the shape
of the components is not changed.
Parameters
----------
value : ``(n_components, n_features)`` `ndarray`
The new components array.
Raises
------
ValueError
Trying to replace components of shape {} with some of shape {}
"""
if value.shape != self._components.shape:
raise ValueError(
"Trying to replace components of shape {} with some of "
"shape {}".format(self.components.shape, value.shape)
)
else:
np.copyto(self._components, value, casting="safe")
@property
def eigenvalues(self):
r"""
Returns the eigenvalues associated with the active components of the
model, i.e. the amount of variance captured by each active component,
sorted form largest to smallest.
:type: ``(n_active_components,)`` `ndarray`
"""
return self._eigenvalues[: self.n_active_components]
def whitened_components(self):
r"""
Returns the active components of the model, whitened.
Returns
-------
whitened_components : ``(n_active_components, n_features)`` `ndarray`
The whitened components.
"""
return self.components / (
np.sqrt(self.eigenvalues * self.n_samples + self.noise_variance())[:, None]
)
def original_variance(self):
r"""
Returns the total amount of variance captured by the original model,
i.e. the amount of variance present on the original samples.
Returns
-------
optional_variance : `float`
The variance captured by the model.
"""
return self._eigenvalues.sum() + self._trimmed_eigenvalues.sum()
def variance(self):
r"""
Returns the total amount of variance retained by the active
components.
Returns
-------
variance : `float`
Total variance captured by the active components.
"""
return self.eigenvalues.sum()
def _total_variance(self):
r"""
Returns the total amount of variance retained by all components
(active and inactive). Useful when the model has been trimmed.
Returns
-------
total_variance : `float`
Total variance captured by all components.
"""
return self._eigenvalues.sum()
def variance_ratio(self):
r"""
Returns the ratio between the amount of variance retained by the
active components and the total amount of variance present on the
original samples.
Returns
-------
variance_ratio : `float`
Ratio of active components variance and total variance present
in original samples.
"""
return self.variance() / self.original_variance()
def _total_variance_ratio(self):
r"""
Returns the ratio between the total amount of variance retained by
all components (active and inactive) and the total amount of variance
present on the original samples.
Returns
-------
total_variance_ratio : `float`
Ratio of total variance over the original variance.
"""
return self._total_variance() / self.original_variance()
def eigenvalues_ratio(self):
r"""
Returns the ratio between the variance captured by each active
component and the total amount of variance present on the original
samples.
Returns
-------
eigenvalues_ratio : ``(n_active_components,)`` `ndarray`
The active eigenvalues array scaled by the original variance.
"""
return self.eigenvalues / self.original_variance()
def _total_eigenvalues_ratio(self):
r"""
Returns the ratio between the variance captured by each active
component and the total amount of variance present on the original
samples.
Returns
-------
total_eigenvalues_ratio : ``(n_components,)`` `ndarray`
Array of eigenvalues scaled by the original variance.
"""
return self._eigenvalues / self.original_variance()
def eigenvalues_cumulative_ratio(self):
r"""
Returns the cumulative ratio between the variance captured by the
active components and the total amount of variance present on the
original samples.
Returns
-------
eigenvalues_cumulative_ratio : ``(n_active_components,)`` `ndarray`
Array of cumulative eigenvalues.
"""
return np.cumsum(self.eigenvalues_ratio())
def _total_eigenvalues_cumulative_ratio(self):
r"""
Returns the cumulative ratio between the variance captured by the
active components and the total amount of variance present on the
original samples.
Returns
-------
total_eigenvalues_cumulative_ratio : ``(n_active_components,)`` `ndarray`
Array of total cumulative eigenvalues.
"""
return np.cumsum(self._total_eigenvalues_ratio())
def noise_variance(self):
r"""
Returns the average variance captured by the inactive components,
i.e. the sample noise assumed in a Probabilistic PCA formulation.
If all components are active, then ``noise_variance == 0.0``.
Returns
-------
noise_variance : `float`
The mean variance of the inactive components.
"""
if self.n_active_components == self.n_components:
if self._trimmed_eigenvalues.size != 0:
noise_variance = self._trimmed_eigenvalues.mean()
else:
noise_variance = 0.0
else:
noise_variance = np.hstack(
(
self._eigenvalues[self.n_active_components :],
self._trimmed_eigenvalues,
)
).mean()
return noise_variance
def noise_variance_ratio(self):
r"""
Returns the ratio between the noise variance and the total amount of
variance present on the original samples.
Returns
-------
noise_variance_ratio : `float`
The ratio between the noise variance and the variance present
in the original samples.
"""
return self.noise_variance() / self.original_variance()
def inverse_noise_variance(self):
r"""
Returns the inverse of the noise variance.
Returns
-------
inverse_noise_variance : `float`
Inverse of the noise variance.
Raises
------
ValueError
If ``noise_variance() == 0``
"""
noise_variance = self.noise_variance()
if np.allclose(noise_variance, 0):
raise ValueError(
"noise variance is effectively 0 - " "cannot take the inverse"
)
return 1.0 / noise_variance
def component(self, index, with_mean=True, scale=1.0):
r"""
A particular component of the model, in vectorized form.
Parameters
----------
index : `int`
The component that is to be returned
with_mean: `bool`, optional
If ``True``, the component will be blended with the mean vector
before being returned. If not, the component is returned on it's
own.
scale : `float`, optional
A scale factor that should be applied to the component. Only
valid in the case where with_mean is ``True``. The scale is applied
in units of standard deviations (so a scale of ``1.0``
`with_mean` visualizes the mean plus ``1`` std. dev of the component
in question).
Returns
-------
component_vector : ``(n_features,)`` `ndarray`
The component vector of the given index.
"""
if with_mean:
# on PCA, scale is in units of std. deviations...
scaled_eigval = scale * np.sqrt(self.eigenvalues[index])
return (scaled_eigval * self.components[index]) + self._mean
else:
return self.components[index]
def instance_vectors(self, weights, normalized_weights=False):
"""
Creates new vectorized instances of the model using the first
components in a particular weighting.
Parameters
----------
weights : ``(n_vectors, n_weights)`` `ndarray` or `list` of `lists`
The weightings for the first `n_weights` components that
should be used per instance that is to be produced
``weights[i, j]`` is the linear contribution of the j'th
principal component to the i'th instance vector produced. Note
that if ``n_weights < n_components``, only the first ``n_weight``
components are used in the reconstruction (i.e. unspecified
weights are implicitly ``0``).
normalized_weights : `bool`, optional
If ``True``, the weights are assumed to be normalized w.r.t the
eigenvalues. This can be easier to create unique instances by
making the weights more interpretable.
Returns
-------
vectors : ``(n_vectors, n_features)`` `ndarray`
The instance vectors for the weighting provided.
Raises
------
ValueError
If n_weights > n_components
"""
weights = np.asarray(weights) # if eg a list is provided
n_instances, n_weights = weights.shape
if n_weights > self.n_active_components:
raise ValueError(
"Number of weightings cannot be greater than {}".format(
self.n_active_components
)
)
else:
full_weights = np.zeros(
(n_instances, self.n_active_components), dtype=self._components.dtype
)
full_weights[..., :n_weights] = weights
weights = full_weights
if normalized_weights:
# If the weights were normalized, then they are all relative to
# to the scale of the eigenvalues and thus must be multiplied by
# the sqrt of the eigenvalues.
weights *= self.eigenvalues ** 0.5
return self._instance_vectors_for_full_weights(weights)
def instance(self, weights, normalized_weights=False):
r"""
Creates a new vector instance of the model by weighting together the
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
The weightings for the first `n_weights` components that should be
used.
``weights[j]`` is the linear contribution of the j'th principal
component to the instance vector.
normalized_weights : `bool`, optional
If ``True``, the weights are assumed to be normalized w.r.t the
eigenvalues. This can be easier to create unique instances by
making the weights more interpretable.
Returns
-------
vector : ``(n_features,)`` `ndarray`
The instance vector for the weighting provided.
"""
weights = np.asarray(weights)
return self.instance_vectors(
weights[None, :], normalized_weights=normalized_weights
).flatten()
def trim_components(self, n_components=None):
r"""
Permanently trims the components down to a certain amount. The number of
active components will be automatically reset to this particular value.
This will reduce `self.n_components` down to `n_components`
(if ``None``, `self.n_active_components` will be used), freeing up
memory in the process.
Once the model is trimmed, the trimmed components cannot be recovered.
Parameters
----------
n_components: `int` >= ``1`` or `float` > ``0.0`` or ``None``, optional
The number of components that are kept or else the amount (ratio)
of variance that is kept. If ``None``, `self.n_active_components` is
used.
Notes
-----
In case `n_components` is greater than the total number of components or
greater than the amount of variance currently kept, this method does
not perform any action.
"""
if n_components is None:
# by default trim using the current n_active_components
n_components = self.n_active_components
# set self.n_active_components to n_components
self.n_active_components = n_components
if self.n_active_components < self.n_components:
# Just stored so that we can fit < 80 chars
nac = self.n_active_components
# set self.n_components to n_components. We have to copy to ensure
# that the data is actually removed, otherwise a view is returned
self._components = self._components[:nac].copy()
# store the eigenvalues associated to the discarded components
self._trimmed_eigenvalues = np.hstack(
(
self._trimmed_eigenvalues,
self._eigenvalues[self.n_active_components :],
)
)
# make sure that the eigenvalues are trimmed too
self._eigenvalues = self._eigenvalues[:nac].copy()
def project_whitened(self, vector_instance):
"""
Projects the `vector_instance` onto the whitened components,
retrieving the whitened linear weightings.
Parameters
----------
vector_instance : ``(n_features,)`` `ndarray`
A novel vector.
Returns
-------
projected : ``(n_features,)`` `ndarray`
A vector of whitened linear weightings
"""
whitened_components = self.whitened_components()
return np.dot(vector_instance, whitened_components.T)
    def orthonormalize_against_inplace(self, linear_model):
        r"""
        Enforces that the union of this model's components and another are
        both mutually orthonormal.

        Note that the model passed in is guaranteed to not have it's number
        of available components changed. This model, however, may loose some
        dimensionality due to reaching a degenerate state.

        The removed components will always be trimmed from the end of
        components (i.e. the components which capture the least variance).
        If trimming is performed, `n_components` and `n_available_components`
        would be altered - see :meth:`trim_components` for details.

        Parameters
        ----------
        linear_model : :map:`LinearModel`
            A second linear model to orthonormalize this against.
        """
        # take the QR decomposition of the model components
        # (the other model's components are stacked first, so Q's leading
        # rows reproduce them exactly and they are guaranteed to survive)
        Q = (
            np.linalg.qr(np.hstack((linear_model._components.T, self._components.T)))[0]
        ).T
        # the model passed to us went first, so all it's components will
        # survive. Pull them off, and update the other model.
        linear_model.components = Q[: linear_model.n_components, :]
        # it's possible that all of our components didn't survive due to
        # degeneracy. We need to trim our components down before replacing
        # them to ensure the number of components is consistent (otherwise
        # the components setter will complain at us)
        n_available_components = Q.shape[0] - linear_model.n_components
        if n_available_components < self.n_components:
            # oh dear, we've lost some components from the end of our model.
            if self.n_active_components < n_available_components:
                # save the current number of active components
                n_active_components = self.n_active_components
            else:
                # save the current number of available components
                n_active_components = n_available_components
            # call trim_components to update our state.
            # (this also resets n_active_components to n_available_components)
            self.trim_components(n_components=n_available_components)
            if n_active_components < n_available_components:
                # reset the number of active components
                self.n_active_components = n_active_components
        # now we can set our own components with the updated orthogonal ones
        self.components = Q[linear_model.n_components :, :]
    def increment(self, data, n_samples=None, forgetting_factor=1.0, verbose=False):
        r"""
        Update the eigenvectors, eigenvalues and mean vector of this model
        by performing incremental PCA on the given samples.

        Parameters
        ----------
        data : `list` of samples
            List of new samples to update the model from.
        n_samples : `int`, optional
            If provided then ``data`` must be an iterator that yields
            ``n_samples``. If not provided then ``data`` has to be a
            list (so we know how large the data matrix needs to be).
        forgetting_factor : ``[0.0, 1.0]`` `float`, optional
            Forgetting factor that weights the relative contribution of new
            samples vs old samples. If 1.0, all samples are weighted equally
            and, hence, the results is the exact same as performing batch
            PCA on the concatenated list of old and new simples. If <1.0,
            more emphasis is put on the new samples. See [1] for details.
        verbose : `bool`, optional
            NOTE(review): currently unused in this method - confirm whether
            it should be forwarded anywhere or removed.

        References
        ----------
        .. [1] David Ross, Jongwoo Lim, Ruei-Sung Lin, Ming-Hsuan Yang.
           "Incremental Learning for Robust Visual Tracking". IJCV, 2007.
        """
        data, n_new_samples = self._data_to_matrix(data, n_samples)
        # compute incremental pca
        e_vectors, e_values, m_vector = ipca(
            data,
            self._components,
            self._eigenvalues,
            self.n_samples,
            m_a=self._mean,
            f=forgetting_factor,
        )
        # if the number of active components is the same as the total number
        # of components so it will be after this method is executed
        reset = self.n_active_components == self.n_components
        # update mean, components, eigenvalues and number of samples
        self._mean = m_vector
        self._components = e_vectors
        self._eigenvalues = e_values
        self.n_samples += n_new_samples
        # reset the number of active components to the total number of
        # components
        if reset:
            self.n_active_components = self.n_components
def plot_eigenvalues(
self,
figure_id=None,
new_figure=False,
render_lines=True,
line_colour="b",
line_style="-",
line_width=2,
render_markers=True,
marker_style="o",
marker_size=6,
marker_face_colour="b",
marker_edge_colour="k",
marker_edge_width=1.0,
render_axes=True,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
figure_size=(10, 6),
render_grid=True,
grid_line_style="--",
grid_line_width=0.5,
):
r"""
Plot of the eigenvalues.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, the line will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers.
Example options ::
{``.``, ``,``, ``o``, ``v``, ``^``, ``<``, ``>``, ``+``,
``x``, ``D``, ``d``, ``s``, ``p``, ``*``, ``h``, ``H``,
``1``, ``2``, ``3``, ``4``, ``8``}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_width : `float`, optional
The width of the markers' edge.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{``serif``, ``sans-serif``, ``cursive``, ``fantasy``,
``monospace``}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{``ultralight``, ``light``, ``normal``, ``regular``,
``book``, ``medium``, ``roman``, ``semibold``,
``demibold``, ``demi``, ``bold``, ``heavy``,
``extra bold``, ``black``}
figure_size : (`float`, `float`) or ``None``, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Returns
-------
viewer : :map:`MatplotlibRenderer`
The viewer object.
"""
from menpo.visualize import plot_curve
return plot_curve(
range(self.n_active_components),
[self.eigenvalues],
figure_id=figure_id,
new_figure=new_figure,
legend_entries=None,
title="Eigenvalues",
x_label="Component Number",
y_label="Eigenvalue",
axes_x_limits=[0, self.n_active_components - 1],
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
render_lines=render_lines,
line_colour=line_colour,
line_style=line_style,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_legend=False,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
figure_size=figure_size,
render_grid=render_grid,
grid_line_style=grid_line_style,
grid_line_width=grid_line_width,
)
def plot_eigenvalues_ratio(
self,
figure_id=None,
new_figure=False,
render_lines=True,
line_colour="b",
line_style="-",
line_width=2,
render_markers=True,
marker_style="o",
marker_size=6,
marker_face_colour="b",
marker_edge_colour="k",
marker_edge_width=1.0,
render_axes=True,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
figure_size=(10, 6),
render_grid=True,
grid_line_style="--",
grid_line_width=0.5,
):
r"""
Plot of the variance ratio captured by the eigenvalues.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, the line will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers.
Example options ::
{``.``, ``,``, ``o``, ``v``, ``^``, ``<``, ``>``, ``+``,
``x``, ``D``, ``d``, ``s``, ``p``, ``*``, ``h``, ``H``,
``1``, ``2``, ``3``, ``4``, ``8``}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_width : `float`, optional
The width of the markers' edge.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{``serif``, ``sans-serif``, ``cursive``, ``fantasy``,
``monospace``}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{``ultralight``, ``light``, ``normal``, ``regular``,
``book``, ``medium``, ``roman``, ``semibold``,
``demibold``, ``demi``, ``bold``, ``heavy``,
``extra bold``, ``black``}
figure_size : (`float`, `float`) or `None`, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Returns
-------
viewer : :map:`MatplotlibRenderer`
The viewer object.
"""
from menpo.visualize import plot_curve
return plot_curve(
range(self.n_active_components),
[self.eigenvalues_ratio()],
figure_id=figure_id,
new_figure=new_figure,
legend_entries=None,
title="Variance Ratio of Eigenvalues",
x_label="Component Number",
y_label="Variance Ratio",
axes_x_limits=[0, self.n_active_components - 1],
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
render_lines=render_lines,
line_colour=line_colour,
line_style=line_style,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_legend=False,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
figure_size=figure_size,
render_grid=render_grid,
grid_line_style=grid_line_style,
grid_line_width=grid_line_width,
)
def plot_eigenvalues_cumulative_ratio(
self,
figure_id=None,
new_figure=False,
render_lines=True,
line_colour="b",
line_style="-",
line_width=2,
render_markers=True,
marker_style="o",
marker_size=6,
marker_face_colour="b",
marker_edge_colour="k",
marker_edge_width=1.0,
render_axes=True,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
figure_size=(10, 6),
render_grid=True,
grid_line_style="--",
grid_line_width=0.5,
):
r"""
Plot of the cumulative variance ratio captured by the eigenvalues.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, the line will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers.
Example options ::
{``.``, ``,``, ``o``, ``v``, ``^``, ``<``, ``>``, ``+``,
``x``, ``D``, ``d``, ``s``, ``p``, ``*``, ``h``, ``H``,
``1``, ``2``, ``3``, ``4``, ``8``}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_width : `float`, optional
The width of the markers' edge.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{``serif``, ``sans-serif``, ``cursive``, ``fantasy``,
``monospace``}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{``ultralight``, ``light``, ``normal``, ``regular``,
``book``, ``medium``, ``roman``, ``semibold``,
``demibold``, ``demi``, ``bold``, ``heavy``,
``extra bold``, ``black``}
figure_size : (`float`, `float`) or `None`, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Returns
-------
viewer : :map:`MatplotlibRenderer`
The viewer object.
"""
from menpo.visualize import plot_curve
return plot_curve(
range(self.n_active_components),
[self.eigenvalues_cumulative_ratio()],
figure_id=figure_id,
new_figure=new_figure,
legend_entries=None,
title="Cumulative Variance Ratio of Eigenvalues",
x_label="Component Number",
y_label="Cumulative Variance Ratio",
axes_x_limits=[0, self.n_active_components - 1],
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
render_lines=render_lines,
line_colour=line_colour,
line_style=line_style,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_legend=False,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
figure_size=figure_size,
render_grid=render_grid,
grid_line_style=grid_line_style,
grid_line_width=grid_line_width,
)
def __str__(self):
str_out = (
"PCA Vector Model \n"
" - centred: {}\n"
" - # features: {}\n"
" - # active components: {}\n"
" - kept variance: {:.2} {:.1%}\n"
" - noise variance: {:.2} {:.1%}\n"
" - total # components: {}\n"
" - components shape: {}\n".format(
self.centred,
self.n_features,
self.n_active_components,
self.variance(),
self.variance_ratio(),
self.noise_variance(),
self.noise_variance_ratio(),
self.n_components,
self.components.shape,
)
)
return str_out
class PCAModel(VectorizableBackedModel, PCAVectorModel):
    r"""
    A :map:`MeanLinearModel` where components are Principal Components
    and the components are vectorized instances.
    Principal Component Analysis (PCA) by eigenvalue decomposition of the
    data's scatter matrix. For details of the implementation of PCA, see
    :map:`pca`.
    Parameters
    ----------
    samples : `list` or `iterable` of :map:`Vectorizable`
        List or iterable of samples to build the model from.
    centre : `bool`, optional
        When ``True`` (default) PCA is performed after mean centering the data.
        If ``False`` the data is assumed to be centred, and the mean will be
        ``0``.
    n_samples : `int`, optional
        If provided then ``samples`` must be an iterator that yields
        ``n_samples``. If not provided then samples has to be a `list` (so we
        know how large the data matrix needs to be).
    max_n_components : `int`, optional
        The maximum number of components to keep in the model. Any components
        above and beyond this one are discarded.
    inplace : `bool`, optional
        If ``True`` the data matrix is modified in place. Otherwise, the data
        matrix is copied.
    verbose : `bool`, optional
        Whether to print building information or not.
    """
    def __init__(
        self,
        samples,
        centre=True,
        n_samples=None,
        max_n_components=None,
        inplace=True,
        verbose=False,
    ):
        # build a data matrix from all the samples (also returns a template
        # instance, used later to rebuild Vectorizable objects from vectors)
        data, template = as_matrix(
            samples, length=n_samples, return_template=True, verbose=verbose
        )
        # samples may have been an iterator - take the definitive sample
        # count from the materialised data matrix, not the argument
        n_samples = data.shape[0]
        PCAVectorModel.__init__(
            self,
            data,
            centre=centre,
            max_n_components=max_n_components,
            n_samples=n_samples,
            inplace=inplace,
        )
        VectorizableBackedModel.__init__(self, template)
    @classmethod
    def init_from_covariance_matrix(
        cls, C, mean, n_samples, centred=True, is_inverse=False, max_n_components=None
    ):
        r"""
        Build the Principal Component Analysis (PCA) by eigenvalue
        decomposition of the provided covariance/scatter matrix. For details
        of the implementation of PCA, see :map:`pcacov`.
        Parameters
        ----------
        C : ``(n_features, n_features)`` `ndarray` or `scipy.sparse`
            The Covariance/Scatter matrix. If it is a precision matrix (inverse
            covariance), then set `is_inverse=True`.
        mean : :map:`Vectorizable`
            The mean instance. It must be a :map:`Vectorizable` and *not* an
            `ndarray`.
        n_samples : `int`
            The number of samples used to generate the covariance matrix.
        centred : `bool`, optional
            When ``True`` we assume that the data were centered before
            computing the covariance matrix.
        is_inverse : `bool`, optional
            If ``True``, then it is assumed that `C` is a precision matrix (
            inverse covariance). Thus, the eigenvalues will be inverted. If
            ``False``, then it is assumed that `C` is a covariance matrix.
        max_n_components : `int`, optional
            The maximum number of components to keep in the model. Any
            components above and beyond this one are discarded.
        Returns
        -------
        model : :map:`PCAModel`
            The PCA model built from the provided covariance matrix.
        """
        # Create new pca instance - bypass __init__, which would expect raw
        # samples rather than a precomputed covariance matrix
        self_model = PCAVectorModel.__new__(cls)
        self_model.n_samples = n_samples
        # Compute pca on covariance
        e_vectors, e_values = pcacov(C, is_inverse=is_inverse)
        # The call to __init__ of MeanLinearModel is done in here
        self_model._constructor_helper(
            eigenvalues=e_values,
            eigenvectors=e_vectors,
            mean=mean.as_vector(),
            centred=centred,
            max_n_components=max_n_components,
        )
        VectorizableBackedModel.__init__(self_model, mean)
        return self_model
    @classmethod
    def init_from_components(
        cls, components, eigenvalues, mean, n_samples, centred, max_n_components=None
    ):
        r"""
        Build the Principal Component Analysis (PCA) using the provided
        components (eigenvectors) and eigenvalues.
        Parameters
        ----------
        components : ``(n_components, n_features)`` `ndarray`
            The eigenvectors to be used.
        eigenvalues : ``(n_components, )`` `ndarray`
            The corresponding eigenvalues.
        mean : :map:`Vectorizable`
            The mean instance. It must be a :map:`Vectorizable` and *not* an
            `ndarray`.
        n_samples : `int`
            The number of samples used to generate the eigenvectors.
        centred : `bool`, optional
            When ``True`` we assume that the data were centered before
            computing the eigenvectors.
        max_n_components : `int`, optional
            The maximum number of components to keep in the model. Any
            components above and beyond this one are discarded.
        Returns
        -------
        model : :map:`PCAModel`
            The PCA model built from the provided components.
        """
        # Create new pca instance - bypass __init__, the eigendecomposition
        # is already available
        self_model = PCAVectorModel.__new__(cls)
        self_model.n_samples = n_samples
        # The call to __init__ of MeanLinearModel is done in here
        self_model._constructor_helper(
            eigenvalues=eigenvalues,
            eigenvectors=components,
            mean=mean.as_vector(),
            centred=centred,
            max_n_components=max_n_components,
        )
        VectorizableBackedModel.__init__(self_model, mean)
        return self_model
    def mean(self):
        r"""
        Return the mean of the model.
        :type: :map:`Vectorizable`
        """
        # Rebuild a Vectorizable from the stored 1D mean vector.
        return self.template_instance.from_vector(self._mean)
    @property
    def mean_vector(self):
        r"""
        Return the mean of the model as a 1D vector.
        :type: `ndarray`
        """
        return self._mean
    # The *_vector methods below operate on raw 1D vectors and delegate
    # directly to the PCAVectorModel implementations; their docstrings are
    # pulled in by the ``doc_inherit`` decorator.
    @doc_inherit(name="project_out")
    def project_out_vector(self, instance_vector):
        return PCAVectorModel.project_out(self, instance_vector)
    @doc_inherit(name="reconstruct")
    def reconstruct_vector(self, instance_vector):
        return PCAVectorModel.reconstruct(self, instance_vector)
    @doc_inherit(name="project")
    def project_vector(self, instance_vector):
        return PCAVectorModel.project(self, instance_vector)
    @doc_inherit(name="instance")
    def instance_vector(self, weights, normalized_weights=False):
        return PCAVectorModel.instance(
            self, weights, normalized_weights=normalized_weights
        )
    @doc_inherit(name="component")
    def component_vector(self, index, with_mean=True, scale=1.0):
        return PCAVectorModel.component(self, index, with_mean=with_mean, scale=scale)
    @doc_inherit(name="project_whitened")
    def project_whitened_vector(self, vector_instance):
        return PCAVectorModel.project_whitened(self, vector_instance)
    def component(self, index, with_mean=True, scale=1.0):
        r"""
        Return a particular component of the linear model.
        Parameters
        ----------
        index : `int`
            The component that is to be returned
        with_mean: `bool`, optional
            If ``True``, the component will be blended with the mean vector
            before being returned. If not, the component is returned on its
            own.
        scale : `float`, optional
            A scale factor that should be applied to the component. Only
            valid in the case where ``with_mean == True``. See
            :meth:`component_vector` for how this scale factor is interpreted.
        Returns
        -------
        component : `type(self.template_instance)`
            The requested component instance.
        """
        # Compute at the vector level, then rebuild a Vectorizable instance.
        return self.template_instance.from_vector(
            self.component_vector(index, with_mean=with_mean, scale=scale)
        )
    def instance(self, weights, normalized_weights=False):
        """
        Creates a new instance of the model using the first ``len(weights)``
        components.
        Parameters
        ----------
        weights : ``(n_weights,)`` `ndarray` or `list`
            ``weights[i]`` is the linear contribution of the i'th component
            to the instance vector.
        normalized_weights : `bool`, optional
            If ``True``, the weights are assumed to be normalized w.r.t the
            eigenvalues. This can be easier to create unique instances by
            making the weights more interpretable.
        Raises
        ------
        ValueError
            If n_weights > n_components
        Returns
        -------
        instance : `type(self.template_instance)`
            An instance of the model.
        """
        # Compute at the vector level, then rebuild a Vectorizable instance.
        v = self.instance_vector(weights, normalized_weights=normalized_weights)
        return self.template_instance.from_vector(v)
    def project_whitened(self, instance):
        """
        Projects the `instance` onto the whitened components, retrieving the
        whitened linear weightings.
        Parameters
        ----------
        instance : :map:`Vectorizable`
            A novel instance.
        Returns
        -------
        projected : (n_components,)
            A vector of whitened linear weightings
        """
        return self.project_whitened_vector(instance.as_vector())
    def increment(self, samples, n_samples=None, forgetting_factor=1.0, verbose=False):
        r"""
        Update the eigenvectors, eigenvalues and mean vector of this model
        by performing incremental PCA on the given samples.
        Parameters
        ----------
        samples : `list` of :map:`Vectorizable`
            List of new samples to update the model from.
        n_samples : `int`, optional
            If provided then ``samples`` must be an iterator that yields
            ``n_samples``. If not provided then samples has to be a
            list (so we know how large the data matrix needs to be).
        forgetting_factor : ``[0.0, 1.0]`` `float`, optional
            Forgetting factor that weights the relative contribution of new
            samples vs old samples. If 1.0, all samples are weighted equally
            and, hence, the result is the exact same as performing batch
            PCA on the concatenated list of old and new samples. If <1.0,
            more emphasis is put on the new samples. See [1] for details.
        References
        ----------
        .. [1] David Ross, Jongwoo Lim, Ruei-Sung Lin, Ming-Hsuan Yang.
           "Incremental Learning for Robust Visual Tracking". IJCV, 2007.
        """
        # build a data matrix from the new samples
        data = as_matrix(samples, length=n_samples, verbose=verbose)
        # samples may have been an iterator - take the definitive count
        # from the materialised data matrix
        n_new_samples = data.shape[0]
        PCAVectorModel.increment(
            self,
            data,
            n_samples=n_new_samples,
            forgetting_factor=forgetting_factor,
            verbose=verbose,
        )
    def __str__(self):
        """Return a multi-line, human-readable summary of the model."""
        str_out = (
            "PCA Model \n"
            " - instance class: {}\n"
            " - centred: {}\n"
            " - # features: {}\n"
            " - # active components: {}\n"
            " - kept variance: {:.2} {:.1%}\n"
            " - noise variance: {:.2} {:.1%}\n"
            " - total # components: {}\n"
            " - components shape: {}\n".format(
                name_of_callable(self.template_instance),
                self.centred,
                self.n_features,
                self.n_active_components,
                self.variance(),
                self.variance_ratio(),
                self.noise_variance(),
                self.noise_variance_ratio(),
                self.n_components,
                self.components.shape,
            )
        )
        return str_out
| {
"content_hash": "f1beb133442755517ca2a19df4314f44",
"timestamp": "",
"source": "github",
"line_count": 1568,
"max_line_length": 88,
"avg_line_length": 37.00701530612245,
"alnum_prop": 0.5515880538370069,
"repo_name": "menpo/menpo",
"id": "23a96945476d96082648cda0af818b49f297b4a1",
"size": "58027",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "menpo/model/pca.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "63587"
},
{
"name": "C++",
"bytes": "5665"
},
{
"name": "Makefile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "1539441"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the schema migration.

        Drops the ``Teacher`` model's table, removes the ``url`` column from
        both ``Entity`` and ``Person``, and tightens ``Building.number`` /
        ``Building.street`` to non-null columns (empty-string default).
        """
        # Deleting model 'Teacher'
        db.delete_table('contacts_and_people_teacher')
        # Deleting field 'Entity.url'
        db.delete_column('contacts_and_people_entity', 'url')
        # Changing field 'Building.number'
        db.alter_column('contacts_and_people_building', 'number', self.gf('django.db.models.fields.CharField')(default='', max_length=10))
        # Changing field 'Building.street'
        db.alter_column('contacts_and_people_building', 'street', self.gf('django.db.models.fields.CharField')(default='', max_length=100))
        # Deleting field 'Person.url'
        db.delete_column('contacts_and_people_person', 'url')
    def backwards(self, orm):
        """Reverse the schema migration.

        Recreates the ``Teacher`` table, restores the nullable ``url``
        columns on ``Entity`` and ``Person``, and relaxes
        ``Building.number`` / ``Building.street`` back to nullable columns.
        """
        # Adding model 'Teacher'
        db.create_table('contacts_and_people_teacher', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('person', self.gf('django.db.models.fields.related.ForeignKey')(related_name='teacher', unique=True, null=True, to=orm['contacts_and_people.Person'], blank=True)),
            ('dummy_field_two', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('dummy_field_one', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ))
        db.send_create_signal('contacts_and_people', ['Teacher'])
        # Adding field 'Entity.url'
        db.add_column('contacts_and_people_entity', 'url',
                      self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True),
                      keep_default=False)
        # Changing field 'Building.number'
        db.alter_column('contacts_and_people_building', 'number', self.gf('django.db.models.fields.CharField')(max_length=10, null=True))
        # Changing field 'Building.street'
        db.alter_column('contacts_and_people_building', 'street', self.gf('django.db.models.fields.CharField')(max_length=100, null=True))
        # Adding field 'Person.url'
        db.add_column('contacts_and_people_person', 'url',
                      self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True),
                      keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 3, 15, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contacts_and_people.building': {
'Meta': {'ordering': "('site', 'street', 'number', 'name')", 'object_name': 'Building'},
'access_and_parking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_access_and_parking'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'additional_street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'building_description'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'getting_here': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'getting_here'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'map': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'on_delete': 'models.PROTECT', 'to': "orm['contacts_and_people.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '256'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'default': '17', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entity': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'Entity', '_ormbases': ['contacts_and_people.EntityLite']},
'abstract_entity': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auto_contacts_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_news_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_vacancies_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'building_recapitulates_entity_name': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contacts_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'contacts_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Contacts & people'", 'max_length': '50'}),
'display_parent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entitylite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.EntityLite']", 'unique': 'True', 'primary_key': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'news_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'news_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'news_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'News & events'", 'max_length': '50'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'vacancies_page_intro': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vacancies_page_intro'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'vacancies_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Vacancies & studentships'", 'max_length': '50'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entity'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['cms.Page']", 'blank': 'True', 'unique': 'True'})
},
'contacts_and_people.entityautopagelinkplugineditor': {
'Meta': {'object_name': 'EntityAutoPageLinkPluginEditor', 'db_table': "'cmsplugin_entityautopagelinkplugineditor'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'auto_page_plugin'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Entity']"}),
'link_to': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'text_override': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entitydirectoryplugineditor': {
'Meta': {'object_name': 'EntityDirectoryPluginEditor', 'db_table': "'cmsplugin_entitydirectoryplugineditor'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'display_descriptions_to_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'directory_plugin'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Entity']"}),
'levels': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'link_icons': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'use_short_names': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'contacts_and_people.entitylite': {
'Meta': {'object_name': 'EntityLite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'contacts_and_people.entitymembersplugineditor': {
'Meta': {'object_name': 'EntityMembersPluginEditor', 'db_table': "'cmsplugin_entitymembersplugineditor'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity_members_plugin'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Entity']"})
},
'contacts_and_people.membership': {
'Meta': {'ordering': "('-importance_to_entity', 'person__surname')", 'object_name': 'Membership'},
'display_role': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'display_roles'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Membership']"}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['contacts_and_people.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance_to_entity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'importance_to_person': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'key_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_of'", 'to': "orm['contacts_and_people.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.person': {
'Meta': {'ordering': "['surname', 'given_name', 'user']", 'object_name': 'Person', '_ormbases': ['contacts_and_people.PersonLite']},
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'data_feed_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'people'", 'to': "orm['contacts_and_people.Entity']", 'through': "orm['contacts_and_people.Membership']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'external_url': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_item'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalLink']"}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'institutional_username': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'override_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'people_override'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Entity']"}),
'personlite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.PersonLite']", 'unique': 'True', 'primary_key': 'True'}),
'please_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_for'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['contacts_and_people.Person']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'blank': 'True'}),
'staff_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'person_user'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']", 'blank': 'True', 'unique': 'True'})
},
'contacts_and_people.personlite': {
'Meta': {'object_name': 'PersonLite'},
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'middle_names': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Title']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'contacts_and_people.phonecontact': {
'Meta': {'ordering': "('label',)", 'object_name': 'PhoneContact'},
'area_code': ('django.db.models.fields.CharField', [], {'default': "'029'", 'max_length': '5'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'44'", 'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_extension': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'contacts_and_people.site': {
'Meta': {'ordering': "('country', 'site_name', 'post_town')", 'object_name': 'Site'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_town': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'site_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contacts_and_people.title': {
'Meta': {'ordering': "['title']", 'object_name': 'Title'},
'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'links.externallink': {
'Meta': {'ordering': "['title']", 'object_name': 'ExternalLink'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'external_site': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['links.ExternalSite']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['links.LinkType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'links.externalsite': {
'Meta': {'ordering': "['domain']", 'object_name': 'ExternalSite'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['links.ExternalSite']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'links.linktype': {
'Meta': {'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'scheme': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['contacts_and_people'] | {
"content_hash": "8bc0a3c1810b6d54940d85d34f7856c1",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 262,
"avg_line_length": 94.31284916201118,
"alnum_prop": 0.56572088615093,
"repo_name": "bubenkoff/Arkestra",
"id": "0f82bcdf4787abe801c85f08df28c990ac0f182e",
"size": "33788",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "contacts_and_people/migrations/0007_auto__del_teacher__del_field_entity_url__chg_field_building_number__ch.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "161649"
},
{
"name": "HTML",
"bytes": "724894"
},
{
"name": "JavaScript",
"bytes": "656447"
},
{
"name": "Python",
"bytes": "1461948"
}
],
"symlink_target": ""
} |
from django.views.generic import ListView, RedirectView
from django.views.generic.edit import FormView, UpdateView, DeletionMixin, CreateView
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse_lazy
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.core.mail import send_mail
from account.models import User, Expert
from account.forms import LoginForm, ExpertForm, ModeratorForm, PasswordChangeForm, ForgotPasswordForm
class SendEmailMixin:
    """Mixin that renders a Django template into an email body and sends it.

    Subclasses override the class attributes below (or the corresponding
    ``get_*`` hooks) to control the subject, template, context and recipients.
    """
    email_template_name = ''  # path to the template rendered into the message body
    email_subject = ''
    email_context_data = {}
    from_email = 'admin@lucas.com'
    receivers = tuple()

    def send(self):
        """Render the configured template and dispatch the message."""
        body = self.render_email()
        send_mail(self.email_subject, body, self.from_email, self.get_receivers(), fail_silently=False)

    def render_email(self):
        """Return the message body produced from the email template."""
        context = self.get_email_context_data()
        return render_to_string(self.email_template_name, context)

    def get_email_context_data(self):
        """Hook: context dict handed to the email template."""
        return self.email_context_data

    def get_receivers(self):
        """Hook: iterable of recipient addresses."""
        return self.receivers
class IndexView(RedirectView):
    """Send authenticated users to their cabinet and anonymous ones to login."""

    def get(self, request, *args, **kwargs):
        if request.user.is_authenticated():
            target = reverse_lazy('account:cabinet')
        else:
            target = reverse_lazy('account:login')
        return HttpResponseRedirect(target)
class LoginView(FormView):
    """Authentication form; any existing session is dropped on entry."""
    form_class = LoginForm
    template_name = 'account/login.html'
    success_url = reverse_lazy("account:cabinet")

    def dispatch(self, request, *args, **kwargs):
        # Force a clean session before showing or processing the login form.
        logout(request)
        return super(LoginView, self).dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        user = form.get_user()
        login(self.request, user)
        return HttpResponseRedirect(self.success_url)
class ForgotPasswordView(FormView, SendEmailMixin):
    """Reset a user's password to a short random one and email it to them."""
    form_class = ForgotPasswordForm
    template_name = 'account/forgot_password.html'
    success_url = reverse_lazy('account:login')
    email_template_name = 'account/email/new_password.html'
    email_subject = 'Пароль обновлен'
    object = None    # the affected user, set in form_valid
    password = None  # the freshly generated password, set in form_valid

    def form_valid(self, form):
        user = form.get_user()
        new_password = User.objects.make_random_password(length=4)
        self.object = user
        self.password = new_password
        user.set_password(new_password)
        user.save()
        messages.success(self.request, 'пароль обновлен')
        self.send()
        return HttpResponseRedirect(self.success_url)

    def get_receivers(self):
        return (self.object.email,)

    def get_email_context_data(self):
        return {'user': self.object, 'password': self.password}
class ShowProfileView(LoginRequiredMixin, FormView):
    """Display and update the currently logged-in user's profile."""
    model = User
    success_url = reverse_lazy("account:cabinet")
    template_name = 'account/profile/show_profile.html'

    def get_object(self):
        # The profile shown is always the logged-in user's own.
        return self.request.user

    def get_form(self, form_class=None):
        # Admins and experts edit different field sets.
        form_cls = ModeratorForm if self.get_object().is_admin else ExpertForm
        return form_cls(**self.get_form_kwargs())

    def get_initial(self):
        return self.model.objects.filter(email=self.get_object().email).values()[0]

    def get_context_data(self, **kwargs):
        ctx = super(ShowProfileView, self).get_context_data(**kwargs)
        ctx['password_change_form'] = PasswordChangeForm()
        return ctx

    def form_valid(self, form=None):
        User.objects.filter(email=self.get_object().email).update(**form.cleaned_data)
        messages.success(self.request, 'Информация изменена')
        return HttpResponseRedirect(self.success_url)

    def form_invalid(self, form=None):
        messages.error(self.request, 'Форма невалидна')
        return render(self.request, self.template_name, {"form": form})
class ChangePasswordView(LoginRequiredMixin, FormView):
    """Change the logged-in user's password and re-authenticate the session.

    POST-only; the form itself is rendered inside the profile page
    (see ShowProfileView's context).
    """
    http_method_names = ['post']
    form_class = PasswordChangeForm
    success_url = reverse_lazy('account:cabinet')

    def form_valid(self, form):
        user = self.get_object()
        # Changing the password invalidates the current session, so log out
        # first and log back in with the fresh credentials.
        logout(self.request)
        user.set_password(form.cleaned_data['new_password'])
        user.save()
        user = authenticate(email=user.email, password=form.cleaned_data['new_password'])
        login(self.request, user)
        messages.success(self.request, 'Пароль успешно изменён')
        return HttpResponseRedirect(self.success_url)

    def form_invalid(self, form):
        # FIX: removed a leftover debug print(form); errors are surfaced to
        # the user via the messages framework instead.
        messages.error(self.request, 'Некорректно заполненая форма')
        return HttpResponseRedirect(self.success_url)

    def get_object(self):
        return self.request.user
class ExpertList(PermissionRequiredMixin, ListView):
    """Listing of all users flagged as experts; restricted by permission."""
    permission_required = ('manipulate_expert',)
    template_name = "account/expert/index.html"
    queryset = Expert.objects.filter(is_expert=True)
class CreateExpertView(PermissionRequiredMixin, SendEmailMixin, CreateView):
    """Create a new expert with a generated password and email an invitation.

    BUG FIX: ``permission_required`` was declared but never enforced because
    the class did not inherit ``PermissionRequiredMixin`` (compare ExpertList,
    which does); the mixin is now first in the MRO so unauthorized users are
    rejected before the form is processed.
    """
    model = Expert
    form_class = ExpertForm
    object = None    # the created expert, set in form_valid
    password = None  # the generated password, set in form_valid
    template_name = 'account/expert/new.html'
    success_url = reverse_lazy('account:experts')
    permission_required = ('manipulate_expert',)
    email_subject = 'Добро пожаловать'
    email_template_name = 'account/email/invite_expert.html'

    def form_valid(self, form):
        self.object = form.save(commit=False)
        # New experts receive a generated password in the invitation email.
        self.password = User.objects.make_random_password(length=4)
        self.object.set_password(self.password)
        self.object.save()
        messages.success(self.request, "Пользователь добавлен")
        self.send()
        return HttpResponseRedirect(self.success_url)

    def get_email_context_data(self):
        return {'user': self.object, 'password': self.password}

    def get_receivers(self):
        return self.object.email,
class ExpertView(PermissionRequiredMixin, DeletionMixin, UpdateView):
    """Edit or delete a single expert.

    BUG FIX: added ``PermissionRequiredMixin`` — ``permission_required`` was
    previously declared but never enforced because no mixin consumed it.
    """
    model = Expert
    form_class = ExpertForm
    template_name = 'account/expert/edit.html'
    success_url = reverse_lazy('account:experts')
    permission_required = ('manipulate_expert',)
class ToggleActivityExpertView(PermissionRequiredMixin, UpdateView):
    """Freeze or unfreeze an expert account via a GET request.

    BUG FIXES:
    * ``self.object`` was read before ever being assigned — the overridden
      ``get()`` skipped ``UpdateView``'s lookup, so the view raised
      AttributeError; the object is now fetched explicitly.
    * the redirect response was constructed but never returned, so the view
      returned ``None``; it is now returned.
    * added ``PermissionRequiredMixin`` so ``permission_required`` is
      actually enforced.
    """
    http_method_names = ['get']
    model = Expert
    success_url = reverse_lazy('account:experts')
    permission_required = ('manipulate_expert',)

    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        if self.object.is_active:
            self.object.is_active = False
            messages.success(self.request, "Пользователь заморожен")
        else:
            self.object.is_active = True
            messages.success(self.request, "Пользователь разморожен")
        self.object.save()
        return HttpResponseRedirect(self.success_url)
class ResetPasswordView(PermissionRequiredMixin, SendEmailMixin, UpdateView):
    """Reset an expert's password to a random value and email it to them.

    BUG FIX: added ``PermissionRequiredMixin`` — ``permission_required`` was
    previously declared but never enforced because no mixin consumed it.
    """
    http_method_names = ['get']
    model = Expert
    success_url = reverse_lazy('account:experts')
    permission_required = ('manipulate_expert',)
    email_template_name = 'account/email/new_password.html'
    email_subject = 'Пароль обновлен'
    from_email = 'admin@lucas.com'
    password = None  # the generated password, set in get()

    def get(self, request, *args, **kwargs):
        self.password = User.objects.make_random_password(length=4)
        expert = self.get_object()
        expert.set_password(self.password)
        expert.save()
        self.send()
        return HttpResponseRedirect(self.success_url)

    def get_email_context_data(self):
        return {'user': self.get_object(), 'password': self.password}

    def get_receivers(self):
        return self.get_object().email,
| {
"content_hash": "9e867aced0192dbf4cf7bb761089b001",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 118,
"avg_line_length": 31.96652719665272,
"alnum_prop": 0.6827225130890052,
"repo_name": "AlexeyBerezhnoy/lucas",
"id": "b893974c5890e9b97efc406ea115cf5122986383",
"size": "7838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "account/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4268"
}
],
"symlink_target": ""
} |
from eth_utils import (
is_checksum_address,
is_list_like,
is_same_address,
)
# Throwaway key/password pair used by the importRawKey and newAccount tests.
PRIVATE_KEY_HEX = '0x56ebb41875ceedd42e395f730e03b5c44989393c9f0484ee6bc05f933673458f'
PASSWORD = 'web3-testing'
# Checksummed address the tests expect importRawKey to return for PRIVATE_KEY_HEX.
ADDRESS = '0x844B417c0C58B02c2224306047B9fb0D3264fE8c'
# Separate key/account used by the lock/unlock fixtures.
PRIVATE_KEY_FOR_UNLOCK = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01'
ACCOUNT_FOR_UNLOCK = '0x12efDc31B1a8FA1A1e756DFD8A1601055C971E13'
class PersonalModuleTest(object):
    """Exercises the ``personal`` RPC namespace against a live web3 provider."""

    def test_personal_importRawKey(self, web3):
        imported = web3.personal.importRawKey(PRIVATE_KEY_HEX, PASSWORD)
        assert imported == ADDRESS

    def test_personal_listAccounts(self, web3):
        listed = web3.personal.listAccounts
        assert is_list_like(listed)
        assert len(listed) > 0
        assert all((is_checksum_address(account) for account in listed))

    def test_personal_lockAccount(self, web3, unlocked_account):
        # TODO: how do we test this better?
        web3.personal.lockAccount(unlocked_account)

    def test_personal_unlockAccount_success(self,
                                            web3,
                                            unlockable_account,
                                            unlockable_account_pw):
        assert web3.personal.unlockAccount(unlockable_account, unlockable_account_pw) is True

    def test_personal_unlockAccount_failure(self,
                                            web3,
                                            unlockable_account):
        assert web3.personal.unlockAccount(unlockable_account, 'bad-password') is False

    def test_personal_newAccount(self, web3):
        assert is_checksum_address(web3.personal.newAccount(PASSWORD))

    def test_personal_sendTransaction(self,
                                      web3,
                                      unlockable_account,
                                      unlockable_account_pw):
        # The funded fixture account sends 1 wei to itself.
        assert web3.eth.getBalance(unlockable_account) > web3.toWei(1, 'ether')
        txn_params = {
            'from': unlockable_account,
            'to': unlockable_account,
            'gas': 21000,
            'value': 1,
            'gasPrice': web3.toWei(1, 'gwei'),
        }
        txn_hash = web3.personal.sendTransaction(txn_params, unlockable_account_pw)
        assert txn_hash
        transaction = web3.eth.getTransaction(txn_hash)
        for field in ('from', 'to', 'gas', 'value', 'gasPrice'):
            assert transaction[field] == txn_params[field]

    def test_personal_sign_and_ecrecover(self,
                                         web3,
                                         unlockable_account,
                                         unlockable_account_pw):
        message = 'test-web3-personal-sign'
        signature = web3.personal.sign(message, unlockable_account, unlockable_account_pw)
        recovered = web3.personal.ecRecover(message, signature)
        assert is_same_address(recovered, unlockable_account)
| {
"content_hash": "555244dc72c27abc14f44cbd0e307240",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 93,
"avg_line_length": 40.24691358024691,
"alnum_prop": 0.5874233128834356,
"repo_name": "pipermerriam/web3.py",
"id": "848686d1b2266e65f58b9f8552c4928a5abb3bde",
"size": "3260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web3/utils/module_testing/personal_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "999"
},
{
"name": "Python",
"bytes": "619517"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
version = '0.1'

LONG_DESCRIPTION = """
This is a basic forum component that can plug into any existing Django installation
and use its' existing templates, users, and admin interface. Perfect for adding
forum functionality to an existing website.
"""

# Package metadata, gathered in one place and splatted into setup() below.
SETUP_KWARGS = dict(
    name='django-forum',
    version=version,
    description="django-forum",
    long_description=LONG_DESCRIPTION,
    classifiers=[
        "Programming Language :: Python",
        "Topic :: Other/Nonlisted Topic",
        "Framework :: Django",
        "Environment :: Web Environment",
    ],
    keywords='forum,django',
    author='Ross Poulton',
    author_email='ross@rossp.org',
    url='http://django-forum.googlecode.com/',
    license='BSD',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=['setuptools'],
)

setup(**SETUP_KWARGS)
| {
"content_hash": "455022bc06f84ab3ab36d055a8159893",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 83,
"avg_line_length": 28.516129032258064,
"alnum_prop": 0.6753393665158371,
"repo_name": "RockHoward/django-forum",
"id": "f2733469f2e5b69b3461a650e649816da04c6583",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25731"
}
],
"symlink_target": ""
} |
'''
Created on Apr 24, 2016
@author: marina
'''
from __future__ import unicode_literals, division
import hashlib
import time
import requests
import json
import logging
from datetime import datetime
from datetime import timedelta
from requests.exceptions import ConnectionError
from enums import SortBy, SortOrder
LOGGER = logging.getLogger(__name__)
# Expedia EAN API credentials — 'X' placeholders, presumably substituted at
# deploy time; requests will fail until real values are configured.
CID = 'XXXXXXX'
API_KEY = 'XXXXXXXXXXXXXXXXXX'
SIGNATURE_SECRET = 'XXXXXXXXXXXX'
# Base URL prefix for hotel thumbnail images (note the doubled slash).
THUMBNAIL_PREFIX = 'http://images.travelnow.com//'
def run_expedia_request(ean_tags, ip_addr='127.0.0.1', results_num=10):
    """Query the Expedia EAN hotel-list API and return the decoded JSON.

    :param ean_tags: dict of extra EAN query parameters appended to the URL.
    :param ip_addr: customer IP address forwarded to the API.
    :param results_num: maximum number of hotels requested.
    """
    # The EAN API authenticates with an MD5 hex digest of key+secret+timestamp.
    signature_input = API_KEY + SIGNATURE_SECRET + str(int(time.time()))
    md_res = hashlib.md5()
    md_res.update(signature_input)
    sig = md_res.hexdigest()
    req_url = "http://api.ean.com/ean-services/rs/hotel/v3/list?"
    # BUG FIX: the preset query string previously began with a second '?'
    # (producing "list??minorRev=...") and its '&currencyCode' had been
    # mangled into the U+00A4 currency sign ('¤cyCode'), which corrupted the
    # query string.
    preset_tags = ("minorRev=30&cid={}&apiKey={}&sig={}&customerUserAgent=Mozilla/4.0"
                   "&customerIpAddress={}&locale=en_US&currencyCode=USD"
                   "&numberOfResults={}&room1=2").format(CID, API_KEY, sig,
                                                         ip_addr, results_num)
    req_url += preset_tags
    for tag_code, tag_value in ean_tags.iteritems():
        req_url += '&{}={}'.format(tag_code, tag_value)
    response = requests.get(req_url)
    LOGGER.debug("run_expedia_request: url %s", req_url)
    return json.loads(response.content)
def sort_by_ean_value(sortby, order):
    """Translate our (SortBy, SortOrder) pair into the EAN API 'sort' value.

    Returns None when the requested ordering has no EAN equivalent.
    """
    reverse = order in (SortOrder.descending, SortOrder.reverse)
    if sortby in (SortBy.price, SortBy.price_per_person):
        return "PRICE_REVERSE" if reverse else "PRICE"
    if sortby in (SortBy.stars, SortBy.rating):
        return "QUALITY_REVERSE" if reverse else "QUALITY"
    if sortby in (SortBy.popularity, SortBy.guest_rating, SortBy.recommendations):
        # TripAdvisor ordering has no reverse variant.
        return "TRIP_ADVISOR"
    if sortby == SortBy.name:
        return "ALPHA"
    if sortby == SortBy.distance:
        return "PROXIMITY"
    return None
def get_ean_tags_from_webhook_input(body):
    """Convert a webhook request body into EAN API query parameters.

    input: {location, arriveDate: '2016-05-04T00:00:00', duration: '1',
    travelers, attributes, sortBy, sortOrder, messagingProvider}

    BUG FIX: 'duration' used to be processed even when 'arriveDate' was
    absent, raising a NameError on the undefined arrival datetime; the
    departure date is now only derived when an arrival date exists.
    """
    ean_tags = {}
    if body.get('arriveDate'):
        arrival = datetime.strptime(body['arriveDate'], "%Y-%m-%dT%H:%M:%S")
        ean_tags['arrivalDate'] = arrival.strftime("%m/%d/%Y")
        if body.get('duration'):
            departure = arrival + timedelta(days=int(body['duration']))
            ean_tags['departureDate'] = departure.strftime("%m/%d/%Y")
    location = body.get('location', {})
    if location:
        # NOTE(review): falsy coordinates (0.0) are skipped — matches the
        # original behavior.
        if location.get('longitude'):
            ean_tags['longitude'] = location['longitude']
        if location.get('latitude'):
            ean_tags['latitude'] = location['latitude']
    sortby = body.get('sortBy')
    order = body.get('sortOrder')
    if sortby:
        value = sort_by_ean_value(sortby, order)
        if value:
            ean_tags['sort'] = value
    return ean_tags
def expedia_search_request_to_facebook(ean_tags):
    """Run an Expedia hotel search and format the results as a Facebook
    generic-template message.

    Returns None when the API call fails (connection error or a non-JSON
    response body).

    BUG FIX: the old ``except ValueError, ConnectionError:`` clause used the
    Python 2 "except E, name" form, which catches only ValueError and binds
    the exception to the name ConnectionError — real connection errors
    escaped, and the failure path fell through with ``message`` undefined.
    Both exceptions are now caught as a tuple and the failure path returns
    None explicitly.
    """
    try:
        ean_response = run_expedia_request(ean_tags, results_num=10)
    except (ValueError, ConnectionError):
        # Expedia returned something that is not JSON, or the request failed.
        return None
    expedia_hotels_list = ean_response.get('HotelListResponse', {}).get('HotelList', {}).get('HotelSummary', [])
    elements = []
    for hotel_item in expedia_hotels_list:
        element = dict(title=hotel_item['name'],
                       # '_t' thumbnails are swapped for the bigger '_b' variant.
                       image_url='http://images.travelnow.com/{}'.format(hotel_item['thumbNailUrl'].replace('_t.', '_b.')),
                       item_url=hotel_item['deepLink'],
                       buttons=[{"type": "web_url",
                                 "url": hotel_item['deepLink'],
                                 "title": "View Hotel"}])
        subtitle = None
        room_rate = hotel_item.get('lowRate') or hotel_item.get('highRate')
        # Show a range when both rates exist and differ; otherwise a single rate.
        if hotel_item.get('lowRate') and hotel_item.get('highRate'):
            if hotel_item.get('lowRate') != hotel_item.get('highRate'):
                subtitle = "Room Rate is between {} and {} {}".format(hotel_item['lowRate'], hotel_item['highRate'], hotel_item['rateCurrencyCode'])
        if subtitle is None and room_rate:
            subtitle = "Room Rate is {}{}".format(room_rate, hotel_item['rateCurrencyCode'])
        if subtitle is not None:
            element['subtitle'] = subtitle
        elements.append(element)
    message = dict(attachment=dict(type="template",
                                   payload=dict(template_type="generic",
                                                elements=elements)))
    return message
| {
"content_hash": "91a6e45392a11f0848ef2d3b6c7c3032",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 191,
"avg_line_length": 44.389830508474574,
"alnum_prop": 0.5631920580374189,
"repo_name": "sudhakargmail/botkitwebhooks",
"id": "8a0a6aed28fa10d8cb6cc1b70b639aae15f2148a",
"size": "5238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expedia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "896"
},
{
"name": "Python",
"bytes": "24764"
}
],
"symlink_target": ""
} |
import optparse
import StringIO
import time
import unittest
import sys
from webkitpy.common.system import executive_mock
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.port import chromium_android
from webkitpy.layout_tests.port import chromium_port_testcase
from webkitpy.layout_tests.port import driver
from webkitpy.layout_tests.port import driver_unittest
from webkitpy.tool.mocktool import MockOptions
class MockRunCommand(object):
    """Stand-in for Executive.run_command that fakes 'adb' invocations.

    The ``mock_*`` setters configure the canned device list, tombstone
    listing and logcat output that mock_run_command_fn will return.
    """

    def __init__(self):
        self._mock_logcat = ''
        self._mock_devices_output = ''
        self._mock_devices = []
        self._mock_ls_tombstones = ''

    def mock_run_command_fn(self, args):
        # Anything that is not an adb invocation produces no output.
        if args[0] != 'adb':
            return ''
        if args[1] == 'devices':
            return self._mock_devices_output
        # Every remaining command must target a known device via 'adb -s'.
        assert len(args) > 3
        assert args[1] == '-s'
        assert args[2] in self._mock_devices
        command = args[3]
        if command == 'shell':
            shell_args = args[4:]
            if shell_args == ['ls', '-n', '/data/tombstones']:
                return self._mock_ls_tombstones
            elif shell_args[0] == 'cat':
                return shell_args[1] + '\nmock_contents\n'
        elif command == 'logcat':
            return self._mock_logcat
        return ''

    def mock_no_device(self):
        self._mock_devices = []
        self._mock_devices_output = 'List of devices attached'

    def mock_one_device(self):
        self._mock_devices = ['123456789ABCDEF0']
        self._mock_devices_output = ('List of devices attached\n'
                                     '{}\tdevice\n'.format(self._mock_devices[0]))

    def mock_two_devices(self):
        self._mock_devices = ['123456789ABCDEF0', '23456789ABCDEF01']
        header = ('* daemon not running. starting it now on port 5037 *'
                  '* daemon started successfully *'
                  'List of devices attached\n')
        device_lines = ''.join('%s\tdevice\n' % serial for serial in self._mock_devices)
        self._mock_devices_output = header + device_lines

    def mock_no_tombstone_dir(self):
        self._mock_ls_tombstones = '/data/tombstones: No such file or directory'

    def mock_no_tombstone_file(self):
        self._mock_ls_tombstones = ''

    def mock_ten_tombstones(self):
        # tombstone_03 carries the newest timestamp on purpose.
        self._mock_ls_tombstones = ('-rw------- 1000 1000 218643 2012-04-26 18:15 tombstone_00\n'
                                    '-rw------- 1000 1000 241695 2012-04-26 18:15 tombstone_01\n'
                                    '-rw------- 1000 1000 219472 2012-04-26 18:16 tombstone_02\n'
                                    '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
                                    '-rw------- 1000 1000 82022 2012-04-23 16:57 tombstone_04\n'
                                    '-rw------- 1000 1000 82015 2012-04-23 16:57 tombstone_05\n'
                                    '-rw------- 1000 1000 81974 2012-04-23 16:58 tombstone_06\n'
                                    '-rw------- 1000 1000 237409 2012-04-26 17:41 tombstone_07\n'
                                    '-rw------- 1000 1000 276089 2012-04-26 18:15 tombstone_08\n'
                                    '-rw------- 1000 1000 219618 2012-04-26 18:15 tombstone_09\n')

    def mock_logcat(self, content):
        self._mock_logcat = content
class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
    port_name = 'chromium-android'
    port_maker = chromium_android.ChromiumAndroidPort

    def make_port(self, **kwargs):
        """Build the port with its executive replaced by a MockRunCommand."""
        port = super(ChromiumAndroidPortTest, self).make_port(**kwargs)
        self.mock_run_command = MockRunCommand()
        self.mock_run_command.mock_one_device()
        port._executive = MockExecutive2(run_command_fn=self.mock_run_command.mock_run_command_fn)
        return port

    def test_attributes(self):
        port = self.make_port()
        self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-android'))

    def test_default_timeout_ms(self):
        # Both build configurations share the same 10s default timeout.
        for configuration in ('Release', 'Debug'):
            port = self.make_port(options=optparse.Values({'configuration': configuration}))
            self.assertEqual(port.default_timeout_ms(), 10000)

    def test_expectations_files(self):
        # FIXME: override this test temporarily while we're still upstreaming
        # the android port and using a custom expectations file.
        pass

    def test_get_devices_no_device(self):
        port = self.make_port()
        self.mock_run_command.mock_no_device()
        self.assertRaises(AssertionError, port._get_devices)

    def test_get_devices_one_device(self):
        port = self.make_port()
        self.mock_run_command.mock_one_device()
        self.assertEqual(port._get_devices(), self.mock_run_command._mock_devices)
        self.assertEqual(port.default_child_processes(), 1)

    def test_get_devices_two_devices(self):
        port = self.make_port()
        self.mock_run_command.mock_two_devices()
        self.assertEqual(port._get_devices(), self.mock_run_command._mock_devices)
        self.assertEqual(port.default_child_processes(), 2)

    def test_get_device_serial_no_device(self):
        port = self.make_port()
        self.mock_run_command.mock_no_device()
        self.assertRaises(AssertionError, port._get_device_serial, 0)

    def test_get_device_serial_one_device(self):
        port = self.make_port()
        self.mock_run_command.mock_one_device()
        serials = self.mock_run_command._mock_devices
        self.assertEqual(port._get_device_serial(0), serials[0])
        self.assertRaises(AssertionError, port._get_device_serial, 1)

    def test_get_device_serial_two_devices(self):
        port = self.make_port()
        self.mock_run_command.mock_two_devices()
        serials = self.mock_run_command._mock_devices
        self.assertEqual(port._get_device_serial(0), serials[0])
        self.assertEqual(port._get_device_serial(1), serials[1])
        self.assertRaises(AssertionError, port._get_device_serial, 2)

    def test_must_require_http_server(self):
        self.assertEqual(self.make_port().requires_http_server(), True)
class ChromiumAndroidDriverTest(unittest.TestCase):
def setUp(self):
self.mock_run_command = MockRunCommand()
self.mock_run_command.mock_one_device()
self.port = chromium_android.ChromiumAndroidPort(
MockSystemHost(executive=MockExecutive2(run_command_fn=self.mock_run_command.mock_run_command_fn)),
'chromium-android')
self.driver = chromium_android.ChromiumAndroidDriver(self.port, worker_number=0, pixel_tests=True)
def test_get_last_stacktrace(self):
self.mock_run_command.mock_no_tombstone_dir()
self.assertEqual(self.driver._get_last_stacktrace(), '')
self.mock_run_command.mock_no_tombstone_file()
self.assertEqual(self.driver._get_last_stacktrace(), '')
self.mock_run_command.mock_ten_tombstones()
self.assertEqual(self.driver._get_last_stacktrace(),
'-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
'/data/tombstones/tombstone_03\nmock_contents\n')
def test_get_crash_log(self):
self.mock_run_command.mock_logcat('logcat contents\n')
self.mock_run_command.mock_ten_tombstones()
self.driver._crashed_process_name = 'foo'
self.driver._crashed_pid = 1234
self.assertEqual(self.driver._get_crash_log('out bar\nout baz\n', 'err bar\nerr baz\n', newer_than=None),
('err bar\n'
'err baz\n'
'********* [123456789ABCDEF0] Tombstone file:\n'
'-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
'/data/tombstones/tombstone_03\n'
'mock_contents\n',
u'crash log for foo (pid 1234):\n'
u'STDOUT: out bar\n'
u'STDOUT: out baz\n'
u'STDOUT: ********* [123456789ABCDEF0] Logcat:\n'
u'STDOUT: logcat contents\n'
u'STDERR: err bar\n'
u'STDERR: err baz\n'
u'STDERR: ********* [123456789ABCDEF0] Tombstone file:\n'
u'STDERR: -rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
u'STDERR: /data/tombstones/tombstone_03\n'
u'STDERR: mock_contents\n'))
self.driver._crashed_process_name = None
self.driver._crashed_pid = None
self.assertEqual(self.driver._get_crash_log(None, None, newer_than=None),
('********* [123456789ABCDEF0] Tombstone file:\n'
'-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
'/data/tombstones/tombstone_03\n'
'mock_contents\n',
u'crash log for <unknown process name> (pid <unknown>):\n'
u'STDOUT: ********* [123456789ABCDEF0] Logcat:\n'
u'STDOUT: logcat contents\n'
u'STDERR: ********* [123456789ABCDEF0] Tombstone file:\n'
u'STDERR: -rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
u'STDERR: /data/tombstones/tombstone_03\n'
u'STDERR: mock_contents\n'))
    def test_cmd_line(self):
        """The driver command line is an 'adb -s <device> shell' invocation for device 0."""
        cmd_line = self.driver.cmd_line(True, ['anything'])
        self.assertEqual(['adb', '-s', self.mock_run_command._mock_devices[0], 'shell'], cmd_line)
    def test_drt_cmd_line(self):
        """_drt_cmd_line() should pass through extra flags and add the fifo options."""
        cmd_line = self.driver._drt_cmd_line(True, ['--a'])
        self.assertTrue('--a' in cmd_line)
        self.assertTrue('--create-stdin-fifo' in cmd_line)
        self.assertTrue('--separate-stderr-fifo' in cmd_line)
    def test_read_prompt(self):
        """_read_prompt() returns None for both the root ('#') and user ('$') prompts."""
        self.driver._server_process = driver_unittest.MockServerProcess(lines=['root@android:/ # '])
        self.assertEqual(self.driver._read_prompt(time.time() + 1), None)
        self.driver._server_process = driver_unittest.MockServerProcess(lines=['$ '])
        self.assertEqual(self.driver._read_prompt(time.time() + 1), None)
    def test_command_from_driver_input(self):
        """DriverInput is turned into an on-device path (or http URL) command string."""
        driver_input = driver.DriverInput('foo/bar/test.html', 10, 'checksum', True)
        expected_command = "/data/local/tmp/third_party/WebKit/LayoutTests/foo/bar/test.html'--pixel-test'checksum\n"
        # The device-path form is only asserted off cygwin.
        if (sys.platform != "cygwin"):
            self.assertEqual(self.driver._command_from_driver_input(driver_input), expected_command)
        # http tests are rewritten to the local test server URL.
        driver_input = driver.DriverInput('http/tests/foo/bar/test.html', 10, 'checksum', True)
        expected_command = "http://127.0.0.1:8000/foo/bar/test.html'--pixel-test'checksum\n"
        self.assertEqual(self.driver._command_from_driver_input(driver_input), expected_command)
class ChromiumAndroidDriverTwoDriversTest(unittest.TestCase):
    def test_two_drivers(self):
        """Each worker number must be bound to its own adb device serial."""
        run_command_mock = MockRunCommand()
        run_command_mock.mock_two_devices()
        host = MockSystemHost(executive=MockExecutive2(run_command_fn=run_command_mock.mock_run_command_fn))
        port = chromium_android.ChromiumAndroidPort(host, 'chromium-android')
        first_driver = chromium_android.ChromiumAndroidDriver(port, worker_number=0, pixel_tests=True)
        second_driver = chromium_android.ChromiumAndroidDriver(port, worker_number=1, pixel_tests=True)
        self.assertEqual(['adb', '-s', run_command_mock._mock_devices[0], 'shell'],
                         first_driver.cmd_line(True, ['anything']))
        self.assertEqual(['adb', '-s', run_command_mock._mock_devices[1], 'shell'],
                         second_driver.cmd_line(True, ['anything']))
class ChromiumAndroidTwoPortsTest(unittest.TestCase):
    def test_options_with_two_ports(self):
        """Creating a second port with the same options must not duplicate driver flags."""
        options = MockOptions(additional_drt_flag=['--foo=bar', '--foo=baz'])
        run_command_mock = MockRunCommand()
        run_command_mock.mock_two_devices()

        def make_port():
            host = MockSystemHost(executive=MockExecutive2(run_command_fn=run_command_mock.mock_run_command_fn))
            return chromium_android.ChromiumAndroidPort(host, 'chromium-android', options=options)

        make_port()  # first port, mirrors the original port0
        cmd_line = make_port().driver_cmd_line()
        for flag in ('--encode-binary', '--enable-hardware-gpu'):
            self.assertEqual(cmd_line.count(flag), 1)
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "ca131f22263d2ac42102e262cc750a39",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 123,
"avg_line_length": 47.8421052631579,
"alnum_prop": 0.6088323118026089,
"repo_name": "leighpauls/k2cro4",
"id": "856ac31517c50f89a472d65454dd35365a3262b8",
"size": "14253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "3062"
},
{
"name": "AppleScript",
"bytes": "25392"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "68131038"
},
{
"name": "C",
"bytes": "242794338"
},
{
"name": "C#",
"bytes": "11024"
},
{
"name": "C++",
"bytes": "353525184"
},
{
"name": "Common Lisp",
"bytes": "3721"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "F#",
"bytes": "4992"
},
{
"name": "FORTRAN",
"bytes": "10404"
},
{
"name": "Java",
"bytes": "3845159"
},
{
"name": "JavaScript",
"bytes": "39146656"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "Matlab",
"bytes": "22373"
},
{
"name": "Objective-C",
"bytes": "21887598"
},
{
"name": "PHP",
"bytes": "2344144"
},
{
"name": "Perl",
"bytes": "49033099"
},
{
"name": "Prolog",
"bytes": "2926122"
},
{
"name": "Python",
"bytes": "39863959"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "304063"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "9195117"
},
{
"name": "Tcl",
"bytes": "1919771"
},
{
"name": "Verilog",
"bytes": "3092"
},
{
"name": "Visual Basic",
"bytes": "1430"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
} |
import dask
from .scheduler import ray_dask_get, ray_dask_get_sync
from .callbacks import (
RayDaskCallback,
local_ray_callbacks,
unpack_ray_callbacks,
)
from .optimizations import dataframe_optimize
dask_persist = dask.persist  # keep a reference to the original implementation
def ray_dask_persist(*args, **kwargs):
    # Force the Ray-specific persist path; installed over dask.persist by
    # patch_dask() below. Docstring is copied from the original after the def.
    kwargs["ray_persist"] = True
    return dask_persist(*args, **kwargs)
ray_dask_persist.__doc__ = dask_persist.__doc__
dask_persist_mixin = dask.base.DaskMethodsMixin.persist
def ray_dask_persist_mixin(self, **kwargs):
    # Same kwarg injection for the collection.persist() method form.
    kwargs["ray_persist"] = True
    return dask_persist_mixin(self, **kwargs)
ray_dask_persist_mixin.__doc__ = dask_persist_mixin.__doc__
# We patch dask in order to inject a kwarg into its `dask.persist()` calls,
# which the Dask-on-Ray scheduler needs.
# FIXME(Clark): Monkey patching is bad and we should try to avoid this.
def patch_dask(ray_dask_persist, ray_dask_persist_mixin):
    dask.persist = ray_dask_persist
    dask.base.DaskMethodsMixin.persist = ray_dask_persist_mixin
# Apply the patch at import time.
patch_dask(ray_dask_persist, ray_dask_persist_mixin)
__all__ = [
    # Schedulers
    "ray_dask_get",
    "ray_dask_get_sync",
    # Helpers
    "ray_dask_persist",
    # Callbacks
    "RayDaskCallback",
    "local_ray_callbacks",
    "unpack_ray_callbacks",
    # Optimizations
    "dataframe_optimize",
]
| {
"content_hash": "7ef9ca7ca1a106220164d6d2591ce974",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 75,
"avg_line_length": 24.69811320754717,
"alnum_prop": 0.6951871657754011,
"repo_name": "pcmoritz/ray-1",
"id": "ac07c68345aaeb3841a019802236b8d9dd6955fb",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/util/dask/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
from django import http
from django.utils.translation import ugettext
from olympia.access import acl
from olympia.addons.models import Addon
from olympia.amo.feeds import NonAtomicFeed
from olympia.amo.templatetags.jinja_helpers import absolutify, page_name
from olympia.amo.urlresolvers import reverse
from olympia.browse.feeds import AddonFeedMixin
from . import views
class CollectionFeedMixin(NonAtomicFeed):
    """Common pieces for collections in a feed."""

    def item_link(self, item):
        return absolutify(item.get_url_path())

    def item_title(self, item):
        return unicode(item.name or '')

    def item_description(self, item):
        return unicode(item.description or '')

    def item_author_name(self, item):
        return item.author_username

    def item_pubdate(self, item):
        # When sorting by creation date, surface the created timestamp;
        # otherwise the last-modified one.
        if self.request.GET.get('sort') == 'created':
            return item.created
        return item.modified
class CollectionFeed(CollectionFeedMixin, NonAtomicFeed):
    """Feed listing collections for the current app."""

    request = None

    def get_object(self, request):
        self.request = request

    def title(self, c):
        # L10n: {0} is 'Add-ons for <app>'.
        return ugettext(u'Collections :: %s') % page_name(self.request.APP)

    def link(self):
        return absolutify(reverse('collections.list'))

    def description(self):
        return ugettext(
            'Collections are groups of related add-ons that anyone can '
            'create and share.')

    def items(self):
        return views.get_filter(self.request).qs[:20]
class CollectionDetailFeed(AddonFeedMixin, NonAtomicFeed):
    """Feed of the add-ons inside a single collection."""

    def get_object(self, request, username, slug):
        self.request = request
        collection = views.get_collection(request, username, slug)
        # Unlisted collections are visible only to their owners.
        visible = collection.listed or acl.check_collection_ownership(request, collection)
        if not visible:
            # 403 can't be raised as an exception.
            raise http.Http404()
        return collection

    def title(self, c):
        app = page_name(self.request.APP)
        # L10n: {0} is a collection name, {1} is 'Add-ons for <app>'.
        return ugettext(u'{0} :: Collections :: {1}').format(c.name, app)

    def link(self, c):
        return absolutify(c.feed_url())

    def description(self, c):
        return c.description

    def items(self, c):
        addons = Addon.objects.valid() & c.addons.all()
        return addons.order_by('-collectionaddon__created')[:20]
| {
"content_hash": "3888327355221c7bc4b94dbe5f84e5d7",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 73,
"avg_line_length": 29.296296296296298,
"alnum_prop": 0.6510745891276865,
"repo_name": "harry-7/addons-server",
"id": "0b47827badc55b9b1d0dd5b728738a1e31a4589f",
"size": "2373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/bandwagon/feeds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "806148"
},
{
"name": "HTML",
"bytes": "673309"
},
{
"name": "JavaScript",
"bytes": "1066531"
},
{
"name": "Makefile",
"bytes": "821"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "4647485"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "9339"
},
{
"name": "Smarty",
"bytes": "1881"
}
],
"symlink_target": ""
} |
import logging
import sets
from openerp import models, fields, api, _
_logger = logging.getLogger(__name__)
class barcode_rule(models.Model):
    _inherit = 'barcode.rule'

    def _get_type_selection(self):
        """Extend the selectable barcode rule types with a 'Credit Card' entry."""
        types = sets.Set(super(barcode_rule, self)._get_type_selection())
        types.add(('credit', _('Credit Card')))
        return list(types)
class pos_mercury_payment_data(models.Model):
    # Credentials for one Mercury Pay merchant account.
    _name = 'pos_mercury.configuration'
    # FIELDS #
    name = fields.Char(required=True, help='Name of this Mercury configuration')
    merchant_id = fields.Char(string='Merchant ID', required=True, help='ID of the merchant to authenticate him on the payment provider server')
    merchant_pwd = fields.Char(string='Merchant Password', required=True, help='Password of the merchant to authenticate him on the payment provider server')
class account_bank_statement_line(models.Model):
    _inherit = "account.bank.statement.line"

    # Card payment details captured from Mercury Pay.
    mercury_card_number = fields.Char(string='Card Number', help='The last 4 numbers of the card used to pay')
    mercury_prefixed_card_number = fields.Char(string='Card Number', compute='_compute_prefixed_card_number', help='The card number used for the payment.')
    mercury_card_brand = fields.Char(string='Card Brand', help='The brand of the payment card (e.g. Visa, AMEX, ...)')
    mercury_card_owner_name = fields.Char(string='Card Owner Name', help='The name of the card owner')
    mercury_ref_no = fields.Char(string='Mercury reference number', help='Payment reference number from Mercury Pay')
    mercury_record_no = fields.Char(string='Mercury record number', help='Payment record number from Mercury Pay')
    mercury_invoice_no = fields.Float(string='Mercury invoice number', help='Invoice number from Mercury Pay')

    @api.one
    def _compute_prefixed_card_number(self):
        """Mask the stored digits for display: '********' followed by the last four."""
        last_digits = self.mercury_card_number
        self.mercury_prefixed_card_number = ("********" + last_digits) if last_digits else ""
class account_journal(models.Model):
    _inherit = 'account.journal'
    # Links a journal to the Mercury credentials used for its card payments.
    pos_mercury_config_id = fields.Many2one('pos_mercury.configuration', string='Mercury configuration', help='The configuration of Mercury used for this journal')
class pos_order_card(models.Model):
    _inherit = "pos.order"
    @api.model
    def _payment_fields(self, ui_paymentline):
        """Extend the payment fields with the Mercury card data sent by the POS UI."""
        fields = super(pos_order_card, self)._payment_fields(ui_paymentline)
        fields.update({
            'card_number': ui_paymentline.get('mercury_card_number'),
            'card_brand': ui_paymentline.get('mercury_card_brand'),
            'card_owner_name': ui_paymentline.get('mercury_card_owner_name'),
            'ref_no': ui_paymentline.get('mercury_ref_no'),
            'record_no': ui_paymentline.get('mercury_record_no'),
            'invoice_no': ui_paymentline.get('mercury_invoice_no')
        })
        return fields
    @api.model
    def add_payment(self, order_id, data):
        """Record a payment, then copy the Mercury card data onto one matching,
        not-yet-tagged bank statement line."""
        statement_id = super(pos_order_card, self).add_payment(order_id, data)
        statement_lines = self.env['account.bank.statement.line'].search([('statement_id', '=', statement_id),
                                                                          ('pos_statement_id', '=', order_id),
                                                                          ('journal_id', '=', data['journal']),
                                                                          ('amount', '=', data['amount'])])
        # we can get multiple statement_lines when there are >1 credit
        # card payments with the same amount. In that case it doesn't
        # matter which statement line we pick, just pick one that
        # isn't already used.
        for line in statement_lines:
            if not line.mercury_card_brand:
                line.mercury_card_brand = data.get('card_brand')
                line.mercury_card_number = data.get('card_number')
                line.mercury_card_owner_name = data.get('card_owner_name')
                line.mercury_ref_no = data.get('ref_no')
                line.mercury_record_no = data.get('record_no')
                line.mercury_invoice_no = data.get('invoice_no')
                break
        return statement_id
| {
"content_hash": "5ee1cb49c1fdd9e908e810908d250bdf",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 163,
"avg_line_length": 46.73913043478261,
"alnum_prop": 0.622093023255814,
"repo_name": "vileopratama/vitech",
"id": "fb5b52d01d039de0bff518cea7876a94061e1f44",
"size": "4400",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "src/addons/pos_mercury/models/pos_mercury.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
from sys import argv, stdout
from os import SEEK_END, SEEK_CUR, SEEK_SET
from struct import pack, unpack
from array import array
from cStringIO import StringIO
def bits(byte):
    """Return the 8 bits of *byte* as a tuple, most-significant bit first."""
    return tuple((byte >> shift) & 1 for shift in range(7, -1, -1))
def decompress(f):
    """Decompress an LZSS-compressed file. Returns an array.array.

    Stream layout: a 0x10 marker byte, the 24-bit little-endian decompressed
    size, then groups of one flag byte (8 flags, MSB first) followed by the
    corresponding literals (flag 0) or 2-byte backreferences (flag 1).
    """
    data = array('c')
    def write(s):
        data.extend(s)
    def readbyte():
        return unpack(">B", f.read(1))[0]
    def readshort():
        return unpack(">H", f.read(2))[0]

    b = readbyte()
    assert b == 0x10
    decompressed_size, = unpack("<L", f.read(3) + "\x00")
    while len(data) < decompressed_size:
        b = readbyte()
        # BUG FIX: readbyte() returns an int, so the old comparison
        # `b == '\x00'` never matched and this fast path was dead code.
        # A zero flag byte means eight literal bytes in a row.
        if b == 0:
            write(f.read(8))
            continue
        flags = bits(b)
        for flag in flags:
            if flag == 0:
                # literal byte, copied through unchanged
                write(f.read(1))
            elif flag == 1:
                # backreference: high nibble is count-3, low 12 bits are
                # displacement-3 (this is the reversed-stream LZSS variant)
                sh = readshort()
                count = (sh >> 0xc) + 3
                disp = (sh & 0xfff) + 3
                for _ in range(count):
                    write(data[-disp])
            else:
                raise ValueError(flag)
            if decompressed_size <= len(data):
                break
    assert len(data) == decompressed_size
    return data
def main():
    """Read the footer offsets of the input file, reverse its compressed tail,
    decompress it, and write the reconstructed file to stdout."""
    f = open(argv[1], "rb")
    f.seek(-8, SEEK_END)
    header = f.read(8)
    # end_offset == here - end of decompression
    # start_offset == start of decompression - here
    # end < here < start
    end_offset, start_offset = unpack("<LL", header)
    armlen = f.tell()
    # top byte of end_offset is the padding length; the rest is the offset
    padding = end_offset >> 0x18
    end_offset &= 0xFFFFFF
    uncompressed_size = start_offset + end_offset
    start_offset -= padding
    f.seek(0, SEEK_SET)
    # synthesize the 0x10 + 24-bit-size header decompress() expects
    header = '\x10' + pack("<L", uncompressed_size)[:3]
    data = array('c')
    data.fromfile(f, armlen - padding)
    data.extend(header[::-1])
    # the stream is stored reversed: flip it so decompress() reads it forward
    data.reverse()
    #stdout.write(data.tostring())
    infile = StringIO(data.tostring())
    uncompressed_data = decompress(infile)
    uncompressed_data.reverse()
    f.seek(0, SEEK_SET)
    # emit the untouched prefix, then the re-reversed decompressed tail
    stdout.write(f.read(armlen - end_offset))
    uncompressed_data.tofile(stdout)
# Run only when executed as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "5dee6d61f9dc5289e1c7e06620629531",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 69,
"avg_line_length": 24.84,
"alnum_prop": 0.5209339774557166,
"repo_name": "magical/nlzss",
"id": "6c12dd14ef665e4b6e1ed1c790fc186ce30453ae",
"size": "2508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "armdecomp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34346"
}
],
"symlink_target": ""
} |
import socket
from pysnmp.carrier.base import AbstractTransportAddress
from pysnmp.carrier.asyncio.dgram.base import DgramAsyncioProtocol
from pysnmp.carrier import error
try:
    import asyncio
except ImportError:
    # Pre-3.4 Pythons: the trollius backport provides the asyncio API.
    import trollius as asyncio
loop = asyncio.get_event_loop()
# snmpUDPDomain transport-domain OID.
domainName = snmpUDPDomain = (1, 3, 6, 1, 6, 1, 1)
# Tuple subclass carrying the transport address (presumably (host, port) — verify against callers).
class UdpTransportAddress(tuple, AbstractTransportAddress): pass
# UDP/IPv4 datagram transport built on the asyncio protocol base.
class UdpAsyncioTransport(DgramAsyncioProtocol):
    sockFamily = socket.AF_INET
    addressType = UdpTransportAddress
# Backwards-compatible alias.
UdpTransport = UdpAsyncioTransport
| {
"content_hash": "6c3415c43bd27109247b5b7496aa465f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 27.65,
"alnum_prop": 0.8010849909584087,
"repo_name": "imron/scalyr-agent-2",
"id": "653c83c40e1c5a781ccd1fb0f41e4a56ed6be538",
"size": "1979",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scalyr_agent/third_party/pysnmp/carrier/asyncio/dgram/udp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1297"
},
{
"name": "Dockerfile",
"bytes": "1461"
},
{
"name": "Python",
"bytes": "2093708"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pytest
import simple_graph
@pytest.fixture(scope="function")
def create_graph():
    """Provide a fresh, empty graph for each test."""
    return simple_graph.G()
@pytest.fixture(scope="function")
def build_graph(create_graph):
    """Provide a graph with three nodes chained by two edges: Jerry-Allen-6."""
    jerry = simple_graph.Node('Jerry', 5)
    allen = simple_graph.Node('Allen', 8)
    six = simple_graph.Node('6', 6)
    for node in (jerry, allen, six):
        create_graph.add_node(node)
    create_graph.add_edge(jerry, allen)
    create_graph.add_edge(allen, six)
    return create_graph
# g.nodes(): return a list of all nodes in the graph
def test_nodes(build_graph):
    names = {node.name for node in build_graph.nodes()}
    assert names == {'Jerry', 'Allen', '6'}
# g.edges(): return a list of all edges in the graph
def test_edges(build_graph):
    edge_names = {(start.name, end.name) for start, end in build_graph.edges()}
    assert edge_names == {('Jerry', 'Allen'), ('Allen', '6')}
# g.add_node(n): adds a new node 'n' to the graph
def test_add_node(build_graph):
    newcomer = simple_graph.Node('Jimmy', 0)
    build_graph.add_node(newcomer)
    assert newcomer in build_graph.nodes()
# g.add_edge(n1, n2): adds a new edge to the graph connecting 'n1' and 'n2', if
# either n1 or n2 are not already present in the graph, they should be added.
def test_add_edge(build_graph):
    """add_edge() should record the (n1, n2) pair in g.edges()."""
    new_node1 = simple_graph.Node('new1', 1)
    new_node2 = simple_graph.Node('new2', 2)
    build_graph.add_node(new_node1)
    build_graph.add_node(new_node2)
    build_graph.add_edge(new_node1, new_node2)
    # BUG FIX: `assert new_node1, new_node2 in ...` parsed as `assert new_node1`
    # with an assertion message, so the edge membership was never checked.
    assert (new_node1, new_node2) in build_graph.edges()
def test_add_edge_from_new_nodes(build_graph):
    """add_edge() should implicitly add endpoints not yet in the graph."""
    # BUG FIX: the test omitted the `build_graph` fixture argument and so
    # called add_edge() on the module-level fixture *function*; it also used
    # the `assert x, y` form that never checks edge membership.
    new_node1 = simple_graph.Node('new1', 1)
    new_node2 = simple_graph.Node('new2', 2)
    build_graph.add_edge(new_node1, new_node2)
    assert (new_node1, new_node2) in build_graph.edges()
# g.del_node(n): deletes the node 'n' from the graph, raises an error if no
# such node exists
def test_del_node(build_graph):
    """del_node() removes the node and every edge touching it."""
    current_node = build_graph.nodes()[0]
    build_graph.del_node(current_node)
    assert current_node not in build_graph.nodes()
    # we expect edges to be consistent and updated with nodes
    # BUG FIX: the old check called the non-existent g.edge() and compared the
    # node against whole edge tuples; check both endpoints of each edge instead.
    assert all(current_node not in edge for edge in build_graph.edges())
def test_del_nonexistent_node(build_graph):
    """del_node() must raise for a node that was never added."""
    stranger = simple_graph.Node('new', 1)  # not in build_graph
    with pytest.raises(ValueError):
        assert build_graph.del_node(stranger)
# g.del_edge(n1, n2): deletes the edge connecting 'n1' and 'n2' from the graph,
# raises an error if no such edge exists
def test_del_edge(build_graph):
    """del_edge(n1, n2) removes the matching edge."""
    # BUG FIX: per the spec above, del_edge takes the two endpoint nodes, not
    # the (n1, n2) tuple that g.edges() yields — unpack the edge first.
    node_a, node_b = build_graph.edges()[0]
    build_graph.del_edge(node_a, node_b)
    assert (node_a, node_b) not in build_graph.edges()
def test_del_nonexistent_edge(build_graph):
    """del_edge() must raise when the edge was never added."""
    new_node1 = simple_graph.Node('new1', 1)
    new_node2 = simple_graph.Node('new2', 2)
    # BUG FIX: this test is about del_edge, but the original called del_node()
    # with an ad-hoc edge tuple, so del_edge was never exercised.
    with pytest.raises(ValueError):
        build_graph.del_edge(new_node1, new_node2)
# g.has_node(n): True if node 'n' is contained in the graph, False if not.
def test_has_node(build_graph):
    """has_node() is True for a node that is in the graph."""
    contained_node = build_graph.nodes()[0]
    # BUG FIX: the graph API is has_node() (see spec above); test_has_node was
    # the name of this test function, not a graph method.
    assert build_graph.has_node(contained_node)
def test_node_not_contained(build_graph):
    """has_node() is False for a node that was never added."""
    outsider = simple_graph.Node('new', 1)
    # BUG FIX: call the graph's has_node(), not the sibling test's name.
    assert not build_graph.has_node(outsider)
# g.neighbors(n): returns the list of all nodes connected to 'n' by edges,
# raises an error if n is not in g
def test_neighbors(build_graph):
    # TODO: not implemented yet; should assert Allen's neighbors are Jerry and '6'.
    pass
# g.adjacent(n1, n2): returns True if there is an edge connecting n1 and n2,
# False if not, raises an error if either of the supplied nodes are not in g
def test_adjacent(build_graph):
    # TODO: not implemented yet; should assert adjacency along the built edges.
    pass
| {
"content_hash": "0a6af4b1d5a39894043b9fd384bc69ad",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 83,
"avg_line_length": 32.56666666666667,
"alnum_prop": 0.6811668372569089,
"repo_name": "jesseklein406/data-structures",
"id": "012daef37098227d2a998458dfcf369d90116361",
"size": "3955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_simple_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "49"
},
{
"name": "Python",
"bytes": "94797"
}
],
"symlink_target": ""
} |
import os
import argparse
import sys
# argparse for information
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", help="input directory with files (will iterate over all files)")
parser.add_argument("-l", "--list", help="list of files to purge")
args = parser.parse_args()
# sanity check
if not len(sys.argv) > 1:
print "this script takes a folder and purge all files in the given list"
parser.print_help()
sys.exit(0)
def get_purge_list(dir):
    """Read the file at *dir* and return its lines, right-stripped, as a list."""
    with open(dir, 'r') as file_handle:
        return [line.rstrip() for line in file_handle]
def purge(dir, purge_list):
purge_counter = 0
for f in os.listdir(dir):
if f in purge_list:
print "removing: ", f
os.remove(os.path.join(dir, f))
purge_counter += 1
return purge_counter
# ------------------------------------------------- main script ------------------------------------------------------ #
# read the list of filenames, delete the matching files, and report the count
purge_list = get_purge_list(args.list)
purge_counter = purge(args.dir, purge_list)
print "purged ", purge_counter, " files"
| {
"content_hash": "db691f73ffe59ffe5a8c931cf950b55e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 120,
"avg_line_length": 25.818181818181817,
"alnum_prop": 0.5845070422535211,
"repo_name": "Twinstar2/Python_Master_scripts",
"id": "e4ec734cda17c67852e2307e4285c444be4a14e5",
"size": "1183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "other_scripts/purge_folder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112035"
},
{
"name": "Shell",
"bytes": "604"
}
],
"symlink_target": ""
} |
from .settings import * # noqa
# Settings meant to run the test suite with Django’s development server, for integration tests.
DEBUG = True
# On-disk SQLite database for the UI test run.
DATABASES["default"]["NAME"] = "ui_tests.db"  # noqa
INSTALLED_APPS += [  # noqa
    "pattern_library",
]
# Make the pattern library's template tags available as template builtins.
TEMPLATES[0]["OPTIONS"]["builtins"] = ["pattern_library.loader_tags"]  # noqa
PATTERN_LIBRARY = {
    # Groups of templates for the pattern library navigation. The keys
    # are the group titles and the values are lists of template name prefixes that will
    # be searched to populate the groups.
    "SECTIONS": (("components", ["wagtailadmin/shared", "wagtailadmin/panels"]),),
    # Configure which files to detect as templates.
    "TEMPLATE_SUFFIX": ".html",
    "PATTERN_BASE_TEMPLATE_NAME": "",
    "BASE_TEMPLATE_NAMES": [],
}
| {
"content_hash": "43058712482c18b2be8ac5dbbe2369d8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 95,
"avg_line_length": 34.30434782608695,
"alnum_prop": 0.6831432192648923,
"repo_name": "rsalmaso/wagtail",
"id": "12bf4849c3402fa9381a2378a960aac008d4a0b5",
"size": "791",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/test/settings_ui.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593672"
},
{
"name": "JavaScript",
"bytes": "624463"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6598232"
},
{
"name": "SCSS",
"bytes": "221911"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "296087"
}
],
"symlink_target": ""
} |
import json
import responses
from conftest import Mock
from upcloud_api import Tag
def tag_post_callback(request):
    """`responses` callback that validates the body of a POST /tag request.

    Requires the ``name`` field, checks the shapes of the optional
    ``servers`` and ``description`` fields, then echoes the body back with a
    201 status, like the real API.
    """
    # BUG FIX: removed a leftover debug `print(request.body)` that spammed
    # test output on every mocked request.
    request_body = json.loads(request.body)
    if 'name' not in request_body['tag']:
        raise Exception('required field missing')
    if 'servers' in request_body['tag']:
        assert isinstance(request_body['tag']['servers'], dict)
        assert isinstance(request_body['tag']['servers']['server'], list)
        if len(request_body['tag']['servers']['server']) > 0:
            assert isinstance(request_body['tag']['servers']['server'][0], str)
    if 'description' in request_body['tag']:
        assert isinstance(request_body['tag']['description'], str)
    return (201, {}, json.dumps(request_body))
class TestTags:
    """Tag CRUD and server (un)tagging against mocked UpCloud API responses."""
    @responses.activate
    def test_get_tag(self, manager):
        """A single tag is fetched with its description and server list populated."""
        Mock.mock_get('tag/TheTestTag')
        tag = manager.get_tag('TheTestTag')
        assert tag.name == 'TheTestTag'
        assert tag.description == 'Description of TheTestTag'
        assert len(tag.servers) == 2
        assert tag.servers[0].uuid == '0057e20a-6878-43a7-b2b3-530c4a4bdc55'
    @responses.activate
    def test_get_tags(self, manager):
        """Listing tags returns all of them with servers attached."""
        Mock.mock_get('tag')
        tags = manager.get_tags()
        assert len(tags) == 2
        assert tags[0].name == 'TheTestTag1'
        assert tags[1].name == 'TheTestTag2'
        assert tags[0].servers[0].uuid == '0057e20a-6878-43a7-b2b3-530c4a4bdc55'
    @responses.activate
    def test_create_new_tag(self, manager):
        """Tags can be created with name only, name+description, or with servers."""
        # one callback registration per create_tag call below
        for _ in range(1, 4):
            responses.add_callback(
                responses.POST,
                Mock.base_url + '/tag',
                content_type='application/json',
                callback=tag_post_callback,
            )
        tag1 = manager.create_tag('Tag1')
        tag2 = manager.create_tag('Tag2', 'a nice tag')
        tag3 = manager.create_tag('Tag3', 'a nicer tag', ['00798b85-efdc-41ca-8021-f6ef457b8531'])
        assert tag1.name == 'Tag1'
        assert tag2.name == 'Tag2'
        assert tag3.name == 'Tag3'
        assert isinstance(tag3.servers, list)
        assert tag3.servers[0].uuid == '00798b85-efdc-41ca-8021-f6ef457b8531'
    @responses.activate
    def test_edit_tag(self, manager):
        """Renaming a tag updates both its name and the API-side name after save()."""
        Mock.mock_get('tag/TheTestTag')
        tag = manager.get_tag('TheTestTag')
        responses.add_callback(
            responses.PUT,
            Mock.base_url + '/tag/TheTestTag',
            content_type='application/json',
            callback=tag_post_callback,
        )
        tag.name = 'AnotherTestTag'
        # _api_name keeps the server-side name until the change is persisted
        assert tag._api_name == 'TheTestTag'
        tag.save()
        assert tag.name == 'AnotherTestTag'
        assert tag._api_name == 'AnotherTestTag'
    @responses.activate
    def test_assign_tags_to_server(self, manager):
        """add_tags() accepts both strings and Tag objects and extends server.tags."""
        data = Mock.mock_get('server/00798b85-efdc-41ca-8021-f6ef457b8531')
        server = manager.get_server('00798b85-efdc-41ca-8021-f6ef457b8531')
        responses.add(
            responses.POST,
            Mock.base_url + '/server/00798b85-efdc-41ca-8021-f6ef457b8531/tag/tag1,tag2',
            body=json.dumps({'foo': 'bar'}),
            content_type='application/json',
            status=200,
        )
        server.add_tags(['tag1', Tag('tag2')])
        for tag in ['web1', 'tag1', 'tag2']:
            assert tag in server.tags
    @responses.activate
    def test_remove_tags_from_server(self, manager):
        """remove_tags() drops the named tags but keeps unrelated ones."""
        data = Mock.mock_get('server/00798b85-efdc-41ca-8021-f6ef457b8531')
        server = manager.get_server('00798b85-efdc-41ca-8021-f6ef457b8531')
        responses.add(
            responses.POST,
            Mock.base_url + '/server/00798b85-efdc-41ca-8021-f6ef457b8531/untag/tag1,tag2',
            body=json.dumps({'foo': 'bar'}),
            content_type='application/json',
            status=200,
        )
        server.remove_tags(['tag1', Tag('tag2')])
        for tag in ['tag1', 'tag2']:
            assert tag not in server.tags
        assert 'web1' in server.tags
| {
"content_hash": "e9062c3772906aa2b321f4f4fc3f2cb1",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 98,
"avg_line_length": 32.66129032258065,
"alnum_prop": 0.5958024691358025,
"repo_name": "UpCloudLtd/upcloud-python-api",
"id": "538876f966cbbf4dbb411e13b396fd08d66b077e",
"size": "4050",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "140582"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Grill, GrillComment, GrillCommentVote
# Register each grill model with the default admin site.
for grill_model in (Grill, GrillComment, GrillCommentVote):
    admin.site.register(grill_model)
| {
"content_hash": "ddd1def92f538d7501c428a8987eeab2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 57,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.8318181818181818,
"repo_name": "sparcs-kaist/araplus",
"id": "974b5e728b58cdb174648359b6145c97b5aff80c",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/grill/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "967"
},
{
"name": "HTML",
"bytes": "49759"
},
{
"name": "JavaScript",
"bytes": "7989"
},
{
"name": "Python",
"bytes": "64716"
}
],
"symlink_target": ""
} |
import os
import time
import memory
from generic import obj
from project_base import Pobj
from simulation import Simulation
def trivial(sim,*args,**kwargs):
    """Default cascade-traversal callback: accept any arguments and do nothing."""
    pass
#end def trivial
class ProjectManager(Pobj):
    def __init__(self):
        """Set up empty bookkeeping for simulations, cascades and queued operations."""
        #variables determined by self
        modes = self.modes
        # modes in which the manager keeps polling rather than doing a single
        # pass (self.modes is presumably provided by the Pobj base — verify)
        self.persistent_modes = set([modes.submit,modes.all])
        self.simulations = obj()
        self.cascades = obj()
        self.progressing_cascades = obj()
        self.operations = []
    #end def __init__
    def add_simulations(self,*simulations):
        """Register simulations by id; dependency-free ones start new cascades."""
        # accept both add_simulations(s1, s2, ...) and add_simulations([s1, ...])
        if len(simulations)>0 and not isinstance(simulations[0],Simulation):
            simulations = simulations[0]
        #end if
        for sim in simulations:
            # a simulation with no dependencies is the head of a new cascade
            if len(sim.dependencies)==0:
                self.add_cascade(sim)
            #end if
            self.simulations[sim.simid]=sim
        #end for
    #end def add_simulations
def add_cascade(self,cascade):
cid = cascade.simid
self.cascades[cid]=cascade
self.progressing_cascades[cid]=cascade
#end def add_cascade
    def init_cascades(self):
        """Prepare cascades for a run: resolve file collisions, propagate blocks,
        optionally reload saved cascade state, then check dependencies."""
        self.resolve_file_collisions()
        self.propagate_blockages()
        self.log('loading cascade images',n=1)
        if self.load_images:
            self.load_cascades()
        else:
            self.log('cascades',n=1)
        #end if
        for c in self.progressing_cascades:
            self.log('cascade',c.simid,'checking in',n=2)
        #end for
        self.perform_operations()
        #self.write_cascade_status()
        self.check_dependencies()
    #end def init_cascades
    def run_project(self,status=False,status_only=False):
        """Drive the whole project: init cascades, then progress them to completion.

        status: write simulation status before running.
        status_only: write status and return without running anything.
        """
        self.log('\nProject starting',n=0)
        self.init_cascades()
        status_only = status_only or self.status_only
        status = status or status_only
        if status:
            self.write_simulation_status()
            if status_only:
                return
            #end if
        #end if
        self.log('\nstarting runs:\n'+30*'~',n=1)
        if self.dependent_modes <= self.stages_set:
            if self.monitor:
                # monitored mode: poll queue/submit/progress until no cascade remains
                ipoll = 0
                while len(self.progressing_cascades)>0:
                    self.dlog('\n\n\n'+70*'=',n=1)
                    self.log('poll',ipoll,' memory %3.2f MB'%(memory.resident()/1e6),n=1)
                    Pobj.wrote_something = False
                    self.dlog('cascades',self.progressing_cascades.keys(),n=2)
                    ipoll+=1
                    self.machine.query_queue()
                    self.progress_cascades()
                    self.machine.submit_jobs()
                    self.update_process_ids()
                    self.dlog('sleeping',self.sleep,n=2)
                    time.sleep(self.sleep)
                    self.dlog('awake',n=2)
                    if Pobj.wrote_something:
                        self.log()
                    #end if
                #end while
            elif len(self.progressing_cascades)>0:
                # unmonitored: a single query/progress/submit pass
                self.machine.query_queue()
                self.progress_cascades()
                self.machine.submit_jobs()
                self.update_process_ids()
            #end if
        else:
            self.progress_cascades()
        #end if
        self.log('Project finished\n')
    #end def run_project
def load_cascades(self):
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~load cascades',n=1)
cascades = obj()
progressing_cascades = obj()
for cid,cascade in self.cascades.iteritems():
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~reconstruct cascade',n=1)
rc = cascade.reconstruct_cascade()
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~end reconstruct cascade',n=1)
cascades[rc.simid] = rc
progressing_cascades[rc.simid] = rc
#end for
self.cascades = cascades
self.progressing_cascades = progressing_cascades
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~end load cascades',n=1)
#end def load_cascades
def perform_operations(self):
for op in self.operations:
operation = op.operation
sims = op.sims
for sim in sims:
operation(sim)
#end for
#end for
self.operations = []
#end def perform_operations
def reset_indicators(self,sims):
op = obj(
operation = Simulation.reset_indicators,
sims = sims
)
self.operations.append(op)
#end def reset_indicators
def traverse_cascades(self,operation=trivial,*args,**kwargs):
for cascade in self.cascades:
cascade.reset_wait_ids()
#end for
for cascade in self.cascades:
cascade.traverse_cascade(operation,*args,**kwargs)
#end for
return
#end def traverse_cascades
def save_cascades(self):
def save(sim):
sim.save_image()
#end def save
self.traverse_cascades(save)
#end def save_cascades
def propagate_blockages(self):
def collect_blocked(sim,blocked):
if sim.block or sim.block_subcascade:
blocked.append(sim)
#end if
#end def collect_blocks
blocked=[]
self.traverse_cascades(collect_blocked,blocked)
for sim in blocked:
sim.block_dependents(block_self=False)
#end for
#end def propagate_blockages
def propagate_values(self,**values):
def set_values(sim,**values):
for name,value in values.iteritems():
sim[name] = value
#end for
#end def set_values
self.traverse_cascades(set_values,**values)
#end def propagate_values
def write_simulation_status(self):
self.log('cascade status',n=1)
self.log('setup, sent_files, submitted, finished, got_output, analyzed',n=2)
indicators = ('setup','sent_files','submitted','finished','got_output','analyzed')
for isim in self.simulations.keys():
sim = self.simulations[isim]
stats = sim.tuple(*indicators)
status = ''
for stat in stats:
status+=str(int(stat))
#end for
self.log('{0} {1} {2}'.format(status,sim.identifier,sim.locdir),n=2)
#end for
self.log('setup, sent_files, submitted, finished, got_output, analyzed',n=2)
#end def write_simulation_status
def write_cascade_status(self):
self.log('cascade status',n=1)
self.log('setup, sent_files, submitted, finished, got_output, analyzed',n=2)
def write_status(sim):
indicators = ('setup','sent_files','submitted','finished','got_output','analyzed')
stats = sim.tuple(*indicators)
status = ''
for stat in stats:
status+=str(int(stat))
#end for
self.log('{0} {1} {2}'.format(status,sim.identifier,sim.locdir),n=2)
#self.log(str(sim.simid)+' '+str(sim.identifier),n=2)
#self.log('setup = '+str(sim.setup ),n=4)
#self.log('sent_files = '+str(sim.sent_files),n=4)
#self.log('submitted = '+str(sim.submitted ),n=4)
#self.log('finished = '+str(sim.finished ),n=4)
#self.log('got_output = '+str(sim.got_output),n=4)
#self.log('analyzed = '+str(sim.analyzed ),n=4)
#end def write_status
self.traverse_cascades(write_status)
self.log('setup, sent_files, submitted, finished, got_output, analyzed',n=2)
#end def write_cascade_status
def write_cascade_dependents(self):
self.log('cascade dependents',n=1)
for cascade in self.cascades:
cascade.reset_wait_ids()
#end for
for cascade in self.cascades:
self.log(cascade.__class__.__name__+' '+str(cascade.simid),n=2)
cascade.write_dependents(n=2)
#end for
return
#end def write_cascade_dependents
def resolve_file_collisions(self):
self.log('checking for file collisions',n=1)
entry_order = obj()
def set_entry_order(sim,entry_order):
locdir = sim.locdir
if not locdir in entry_order:
entry_order[locdir] = [sim]
else:
entry_order[locdir].append(sim)
#end if
#end def set_entry_order
self.traverse_cascades(set_entry_order,entry_order)
any_collisions = False
collpath = ''
for path,simlist in entry_order.iteritems():
if len(simlist)>1:
#raise an error if any in/out/err files will collide
filespace = dict()
for sim in simlist:
if not sim.allow_overlapping_files:
files = sim.list('infile','outfile','errfile')
for f in files:
if f not in filespace:
filespace[f] = [sim]
else:
filespace[f].append(sim)
#end if
#end for
#end if
#end for
for f,sims in filespace.iteritems():
if len(sims)>1 and f!=None:
any_collisions = True
msg = 'collision: file '+f+' is overwritten by '
for sim in sims:
msg +=str(sim.identifier)+' '+str(sim.simid)+','
#end for
self.log(msg[:-1],n=2)
collpath = path
#end if
#end for
#end if
#end for
if any_collisions:
self.error('file collisions found in directory\n '+path+'\n set a unique identifier for each simulation')
#end if
#end def resolve_file_collisions
def check_dependencies(self):
self.log('checking cascade dependencies',n=1)
result = obj()
result.dependencies_satisfied = True
self.traverse_cascades(Simulation.check_dependencies,result)
if result.dependencies_satisfied:
self.log('all simulation dependencies satisfied',n=2)
else:
self.error('some simulation dependecies are not satisfied')
#end if
#end def check_dependencies
def progress_cascades(self):
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~progress cascades',n=1)
self.gc.collect()
finished = []
progressing_cascades = self.progressing_cascades
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~reset wait ids',n=1)
for cid,cascade in progressing_cascades.iteritems():
cascade.reset_wait_ids()
#end for
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~end reset wait ids',n=1)
for cid,cascade in progressing_cascades.iteritems():
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~progress',n=1)
cascade.progress()
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~end progress',n=1)
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~check subcascade',n=1)
cascade.check_subcascade()
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~end check subcascade',n=1)
if cascade.subcascade_finished:
finished.append(cid)
#end if
#end for
for cid in finished:
self.dlog('removing cascade:',cid,n=1)
del progressing_cascades[cid]
#end for
self.dlog('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~end progress cascades',n=1)
#end def progress_cascades
def update_process_ids(self):
for sim in self.simulations:
sim.update_process_id()
#end for
#end def update_process_ids
#end class ProjectManager
| {
"content_hash": "2a38b29b4432b17e8849a2c0a79e9abd",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 119,
"avg_line_length": 35.607669616519175,
"alnum_prop": 0.5198409410985005,
"repo_name": "habanero-rice/hcpp",
"id": "ca3a7e34b32119d77b05d686829fc3c60640c283",
"size": "12072",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/performance-regression/full-apps/qmcpack/nexus/library/project_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "26226"
},
{
"name": "C",
"bytes": "330261"
},
{
"name": "C++",
"bytes": "255831"
},
{
"name": "Cuda",
"bytes": "10347"
},
{
"name": "Makefile",
"bytes": "7838"
},
{
"name": "Perl",
"bytes": "1748"
},
{
"name": "Shell",
"bytes": "16630"
}
],
"symlink_target": ""
} |
import logging
from typing import Tuple, Union
import anndata
import numba
import numpy as np
import pandas as pd
import scipy.sparse as sp_sparse
logger = logging.getLogger(__name__)
def _compute_library_size(
data: Union[sp_sparse.spmatrix, np.ndarray]
) -> Tuple[np.ndarray, np.ndarray]:
sum_counts = data.sum(axis=1)
masked_log_sum = np.ma.log(sum_counts)
if np.ma.is_masked(masked_log_sum):
logger.warning(
"This dataset has some empty cells, this might fail inference."
"Data should be filtered with `scanpy.pp.filter_cells()`"
)
log_counts = masked_log_sum.filled(0)
local_mean = (np.mean(log_counts).reshape(-1, 1)).astype(np.float32)
local_var = (np.var(log_counts).reshape(-1, 1)).astype(np.float32)
return local_mean, local_var
def _compute_library_size_batch(
    adata,
    batch_key: str,
    local_l_mean_key: str = None,
    local_l_var_key: str = None,
    layer=None,
    copy: bool = False,
):
    """
    Computes the per-batch library size statistics and stores them in obs.

    Parameters
    ----------
    adata
        anndata object containing counts
    batch_key
        key in obs for batch information
    local_l_mean_key
        key in obs to save the local log mean
        (defaults to ``"_scvi_local_l_mean"``)
    local_l_var_key
        key in obs to save the local log variance
        (defaults to ``"_scvi_local_l_var"``)
    layer
        if not None, will use this in adata.layers[] for X
    copy
        if True, returns a copy of the adata

    Returns
    -------
    type
        anndata.AnnData if copy was True, else None

    Raises
    ------
    ValueError
        If ``batch_key`` is not in obs, or ``layer`` is not in layers.
    """
    if batch_key not in adata.obs_keys():
        raise ValueError("batch_key not valid key in obs dataframe")
    local_means = np.zeros((adata.shape[0], 1))
    local_vars = np.zeros((adata.shape[0], 1))
    batch_indices = adata.obs[batch_key]
    for i_batch in np.unique(batch_indices):
        idx_batch = np.squeeze(batch_indices == i_batch)
        if layer is not None:
            if layer not in adata.layers.keys():
                raise ValueError("layer not a valid key for adata.layers")
            data = adata[idx_batch].layers[layer]
        else:
            data = adata[idx_batch].X
        # per-batch scalar mean/variance broadcast to all cells of the batch
        (local_means[idx_batch], local_vars[idx_batch]) = _compute_library_size(data)
    if local_l_mean_key is None:
        local_l_mean_key = "_scvi_local_l_mean"
    if local_l_var_key is None:
        local_l_var_key = "_scvi_local_l_var"
    if copy:
        # Bind the copy to its own name; the original rebound the boolean
        # `copy` parameter to the copied AnnData, shadowing the flag.
        adata_copy = adata.copy()
        adata_copy.obs[local_l_mean_key] = local_means
        adata_copy.obs[local_l_var_key] = local_vars
        return adata_copy
    else:
        adata.obs[local_l_mean_key] = local_means
        adata.obs[local_l_var_key] = local_vars
def _check_nonnegative_integers(
    data: Union[pd.DataFrame, np.ndarray, sp_sparse.spmatrix]
):
    """Approximately checks values of data to ensure it is count data.

    Only the first ten rows (dense/DataFrame) or stored values (sparse) are
    inspected -- a cheap spot check, not an exhaustive scan.
    """
    # Normalize the input to a plain ndarray.  The original had a no-op
    # `data = data` branch for ndarrays, dropped here.
    if issubclass(type(data), sp_sparse.spmatrix):
        data = data.data  # only the explicitly stored values
    elif isinstance(data, pd.DataFrame):
        data = data.to_numpy()
    elif not isinstance(data, np.ndarray):
        raise TypeError("data type not understood")
    check = data[:10]
    return _check_is_counts(check)
@numba.njit(cache=True)
def _check_is_counts(data):
    """Return True iff every element of ``data`` is a nonnegative whole number."""
    # Explicit loop kept deliberately: the body is numba-compiled, and the
    # early return short-circuits on the first offending value.
    for d in data.flat:
        if d < 0 or d % 1 != 0:
            return False
    return True
def _get_batch_mask_protein_data(
    adata: anndata.AnnData, protein_expression_obsm_key: str, batch_key: str
):
    """
    Returns a dict keyed by batch where each entry is a boolean mask.

    The mask is over cell measurement columns that are present (observed)
    in each batch. Absence is defined by all 0 for that protein in that batch.
    """
    expression = adata.obsm[protein_expression_obsm_key]
    if isinstance(expression, pd.DataFrame):
        expression = expression.to_numpy()
    batch_labels = adata.obs[batch_key].values
    batch_mask = {}
    for label in np.unique(batch_labels):
        rows = np.where(batch_labels.ravel() == label)[0]
        # a protein is "observed" in this batch iff its column sum is nonzero
        observed = expression[rows, :].sum(axis=0) != 0
        batch_mask[label] = observed
    return batch_mask
def _check_anndata_setup_equivalence(adata_source, adata_target):
    """Checks if target setup is equivalent to source.

    Raises ValueError on the first mismatch: number of vars, batch/label
    category mappings, extra categorical mappings, or extra continuous keys.
    """
    # `adata_source` may be an AnnData or an already-extracted `_scvi` dict.
    if isinstance(adata_source, anndata.AnnData):
        _scvi_dict = adata_source.uns["_scvi"]
    else:
        _scvi_dict = adata_source
    adata = adata_target
    stats = _scvi_dict["summary_stats"]
    target_n_vars = adata.shape[1]
    error_msg = (
        "Number of {} in anndata different from initial anndata used for training."
    )
    if target_n_vars != stats["n_vars"]:
        raise ValueError(error_msg.format("vars"))
    # NOTE(review): this message is dead -- `error_msg` is reassigned below
    # before it can be raised; kept for fidelity.
    error_msg = (
        "There are more {} categories in the data than were originally registered. "
        + "Please check your {} categories as well as adata.uns['_scvi']['categorical_mappings']."
    )
    self_categoricals = _scvi_dict["categorical_mappings"]
    self_batch_mapping = self_categoricals["_scvi_batch"]["mapping"]
    adata_categoricals = adata.uns["_scvi"]["categorical_mappings"]
    adata_batch_mapping = adata_categoricals["_scvi_batch"]["mapping"]
    # check if the categories are the same
    error_msg = (
        "Categorial encoding for {} is not the same between "
        + "the anndata used to train the model and the anndata just passed in. "
        + "Categorical encoding needs to be same elements, same order, and same datatype.\n"
        + "Expected categories: {}. Received categories: {}.\n"
        + "Try running `dataset.transfer_anndata_setup()` or deleting `adata.uns['_scvi']."
    )
    if not _assert_equal_mapping(self_batch_mapping, adata_batch_mapping):
        raise ValueError(
            error_msg.format("batch", self_batch_mapping, adata_batch_mapping)
        )
    self_labels_mapping = self_categoricals["_scvi_labels"]["mapping"]
    adata_labels_mapping = adata_categoricals["_scvi_labels"]["mapping"]
    if not _assert_equal_mapping(self_labels_mapping, adata_labels_mapping):
        raise ValueError(
            error_msg.format("label", self_labels_mapping, adata_labels_mapping)
        )
    # validate any extra categoricals
    if "extra_categorical_mappings" in _scvi_dict.keys():
        target_extra_cat_maps = adata.uns["_scvi"]["extra_categorical_mappings"]
        for key, val in _scvi_dict["extra_categorical_mappings"].items():
            target_map = target_extra_cat_maps[key]
            if not _assert_equal_mapping(val, target_map):
                raise ValueError(error_msg.format(key, val, target_map))
    # validate any extra continuous covs
    if "extra_continuous_keys" in _scvi_dict.keys():
        if "extra_continuous_keys" not in adata.uns["_scvi"].keys():
            raise ValueError('extra_continuous_keys not in adata.uns["_scvi"]')
        target_cont_keys = adata.uns["_scvi"]["extra_continuous_keys"]
        # presumably a pandas Index -- compared via .equals; TODO confirm
        if not _scvi_dict["extra_continuous_keys"].equals(target_cont_keys):
            raise ValueError(
                "extra_continous_keys are not the same between source and target"
            )
def _assert_equal_mapping(mapping1, mapping2):
return pd.Index(mapping1).equals(pd.Index(mapping2))
| {
"content_hash": "d275ac46534460588baec5719223794a",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 98,
"avg_line_length": 35.5,
"alnum_prop": 0.6356156742434806,
"repo_name": "YosefLab/scVI",
"id": "0a38a763ed715ec14e8291d02b00e92ef6debf2d",
"size": "7171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scvi/data/_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "582001"
}
],
"symlink_target": ""
} |
"""
Main function registry for Sharrock.
"""
from django.conf import settings
from django.template.defaultfilters import slugify
import inspect
import os.path
descriptor_registry = {}
resource_registry = {}
def get_module(module_name):
    """
    Imports and returns the named module.

    ``__import__`` returns the top-level package, so walk the dotted path
    down to the requested submodule before returning.
    """
    target = __import__(module_name)
    for part in module_name.split('.')[1:]:
        target = getattr(target, part)
    return target
def build_registry():
    """
    Builds the function descriptor registry.

    Scans every installed app (except sharrock itself) for a ``descriptors``
    module or package and loads whatever it finds; apps without one are
    silently skipped.
    """
    for app_path in settings.INSTALLED_APPS:
        if app_path == 'sharrock':
            continue  # don't load yourself
        try:
            module = get_module('%s.descriptors' % app_path)
            if is_package(module):
                load_multiple_versions(app_path, module)
            else:
                load_descriptors(app_path, module)
        except ImportError:
            # app has no descriptors module
            continue
def load_multiple_versions(app_path,package):
    """
    Loads multiple versions of an app's API. Multiple versions are stored in submodules of
    a 'descriptors' package within the app. (When there is only one version of an API,
    'descriptors' is a simple module).
    """
    # each name listed in __all__ is one API version submodule
    for sublabel in package.__all__:
        submodule_name = '%s.%s' % (package.__name__, sublabel)
        load_descriptors(app_path, get_module(submodule_name))
def load_descriptors(app_path,descriptor_module):
    """
    Loads descriptors in the module into the directory.

    Registers an instance of every concrete Descriptor/Resource subclass
    found in the module under the key (app_path, version, slugified name).
    """
    # imported locally to avoid circular imports at module load time
    from sharrock.descriptors import Descriptor, Resource
    from sharrock.modelresource import ModelResource
    version = '0.1dev' # default version
    if hasattr(descriptor_module,'version'):
        version = getattr(descriptor_module,'version')
    module_deprecated = None
    if hasattr(descriptor_module,'deprecated'):
        # entire module is deprecated
        module_deprecated = descriptor_module.deprecated
    for name,attribute in inspect.getmembers(descriptor_module):
        # concrete Descriptor subclasses (not the base class itself)
        if inspect.isclass(attribute) and issubclass(attribute,Descriptor) and not attribute is Descriptor:
            if not hasattr(attribute,'visible') or attribute.visible: # skip over descriptors with visible=False set
                descriptor_registry[(app_path,version,slugify(name))] = attribute(is_deprecated=module_deprecated) # put instance of the descriptor into the registry
        # concrete Resource subclasses (excluding the two base classes)
        elif inspect.isclass(attribute) and issubclass(attribute,Resource) and not attribute is Resource and not attribute is ModelResource:
            descriptor_registry[(app_path,version,slugify(name))] = attribute(is_deprecated=module_deprecated) # put instance of resource into registry
def get_descriptor(app_label,version,descriptor_slug):
    """
    Gets the matching descriptor.

    Raises KeyError if no descriptor is registered under the given
    (app_label, version, descriptor_slug) combination.
    """
    registry_key = (app_label, version, descriptor_slug)
    return descriptor_registry[registry_key]
def is_package(module):
    """
    Checks if the specified module is a package.

    A package's module object points at its ``__init__`` file.  Compare the
    basename without its extension so all compiled variants (.py/.pyc/.pyo)
    are covered, and so module names merely ending in '__init__.py' do not
    false-positive.  Modules without a ``__file__`` (builtins, namespace
    packages) are reported as not-a-package instead of raising.
    """
    filename = os.path.basename(getattr(module, '__file__', '') or '')
    return os.path.splitext(filename)[0] == '__init__'
def directory(app_label=None,specified_version=None):
    """
    Creates a directory of service descriptors.

    Returns {app: {version: {'resources': [...], 'functions': [...]}}},
    optionally filtered to one app and/or one version.
    """
    ensure_registry()
    from sharrock.descriptors import Resource
    d = {}
    for key, value in descriptor_registry.items():
        app,version,name = key
        if not app_label or app_label == app:
            app_dict = d.get(app,{})
            if not specified_version or specified_version == version:
                descriptors = app_dict.get(version,{'resources':[],'functions':[]})
                # Resources and plain function descriptors are listed separately
                if issubclass(value.__class__,Resource):
                    descriptors['resources'].append(value)
                else:
                    descriptors['functions'].append(value)
                app_dict[version] = descriptors
                d[app] = app_dict
    return d
def resource_directory(app_label=None,specified_version=None):
    """
    Creates a directory for resources.

    Returns {app: {version: [resource, ...]}} built from the (separate)
    resource_registry, optionally filtered to one app and/or one version.
    """
    d = {}
    for key, value in resource_registry.items():
        app,version,name = key
        if not app_label or app_label == app:
            app_dict = d.get(app,{})
            if not specified_version or specified_version == version:
                resources = app_dict.get(version,[])
                resources.append(value)
                app_dict[version] = resources
                d[app] = app_dict
    return d
def ensure_registry():
    """
    Loads descriptors if the repository is unpopulated.
    """
    # an empty registry means build_registry has not run yet
    if descriptor_registry:
        return
    build_registry()
| {
"content_hash": "4112656851c298f2dd5ed796c01f5187",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 165,
"avg_line_length": 35.72868217054263,
"alnum_prop": 0.6378824039921892,
"repo_name": "Axilent/sharrock",
"id": "a031a8a1c5e339a270310c9a5bfdf8cb18ace881",
"size": "4609",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sharrock/registry.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "5629"
},
{
"name": "Python",
"bytes": "59099"
}
],
"symlink_target": ""
} |
"""Pytest configuration."""
from __future__ import absolute_import, print_function
import os
import shutil
import tempfile
from os.path import dirname, join
import pytest
from elasticsearch.exceptions import RequestError
from flask import Flask
from flask.cli import ScriptInfo
from flask_login import LoginManager
from invenio_celery import InvenioCelery
from invenio_db import InvenioDB
from invenio_db import db as db_
from invenio_indexer import InvenioIndexer
from invenio_indexer.api import RecordIndexer
from invenio_jsonschemas import InvenioJSONSchemas
from invenio_pidstore import InvenioPIDStore
from invenio_records import InvenioRecords
from invenio_records.models import RecordMetadata
from invenio_records_rest.utils import PIDConverter, PIDPathConverter
from invenio_search import InvenioSearch, current_search
from invenio_search.errors import IndexAlreadyExistsError
from sqlalchemy_utils.functions import create_database, database_exists
from invenio_openaire import InvenioOpenAIRE
from invenio_openaire.tasks import harvest_fundref, harvest_openaire_projects
class MockSickle(object):
    """Mock of the OAI-PMH harvester.

    Load the grant XML data from file and mock the Sickle datatype.
    """

    def __init__(self, source):
        """Initialize the harvester."""
        self.source = source
        # fixture holds one raw OAI-PMH grant record (XML) per line
        fname = join(dirname(__file__), 'testdata/mock_oai_pmh.txt')
        with open(fname, 'r') as f:
            self.data = f.readlines()

    class MockRecordType(object):
        """Mock the OAI-PMH data type."""

        def __init__(self, raw_data):
            """Init the data type."""
            # mirrors sickle's Record.raw attribute
            self.raw = raw_data

    def ListRecords(self, metadataPrefix=None, set=None):
        """Record list generator."""
        # `set` shadows the builtin deliberately, matching Sickle's API;
        # both parameters are accepted but ignored by the mock.
        for grant_xml in self.data:
            yield self.MockRecordType(grant_xml)
@pytest.yield_fixture()
def app(request):
    """Flask application fixture.

    Builds a throwaway Flask app with the Invenio extensions under test
    initialized, yields it inside an app context, and removes the temporary
    instance directory on teardown.
    """
    # Set temporary instance path for sqlite
    instance_path = tempfile.mkdtemp()
    app = Flask('testapp', instance_path=instance_path)
    app.config.update(
        SQLALCHEMY_DATABASE_URI=os.environ.get(
            'SQLALCHEMY_DATABASE_URI', 'sqlite:///test.db'),
        INDEXER_REPLACE_REFS=True,
        # run Celery tasks synchronously inside the test process
        CELERY_ALWAYS_EAGER=True,
        CELERY_RESULT_BACKEND="cache",
        CELERY_CACHE_BACKEND="memory",
        CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
        JSONSCHEMAS_HOST='inveniosoftware.org',
        OPENAIRE_OAI_LOCAL_SOURCE='invenio_openaire/data/oaire_local.sqlite',
        TESTING=True,
    )
    # URL converters required by invenio-records-rest routes
    app.url_map.converters['pid'] = PIDConverter
    app.url_map.converters['pidpath'] = PIDPathConverter
    LoginManager(app)
    InvenioDB(app)
    InvenioIndexer(app)
    InvenioRecords(app)
    InvenioCelery(app)
    InvenioPIDStore(app)
    InvenioOpenAIRE(app)
    InvenioSearch(app)
    InvenioJSONSchemas(app)
    with app.app_context():
        yield app
    # teardown: drop the temporary instance directory
    shutil.rmtree(instance_path)
@pytest.yield_fixture()
def db(app):
    """Setup database.

    Creates the database (if missing) and all tables, yields the db object,
    then drops everything on teardown.
    """
    if not database_exists(str(db_.engine.url)):
        create_database(str(db_.engine.url))
    db_.create_all()
    yield db_
    # teardown: detach the session and drop all tables
    db_.session.remove()
    db_.drop_all()
@pytest.yield_fixture()
def script_info(app, db):
    """CLI object.

    Yields a Flask ScriptInfo bound to the test app so click-based CLI
    commands can be invoked against it.
    """
    with app.app_context():
        yield ScriptInfo(create_app=lambda info: app)
@pytest.yield_fixture()
def es(app):
    """Provide elasticsearch access."""
    try:
        list(current_search.create())
    except (IndexAlreadyExistsError, RequestError):
        # stale indexes from a previous run: recreate from scratch
        list(current_search.delete(ignore=[404]))
        list(current_search.create(ignore=[400]))
    yield current_search
    # teardown: drop all indexes, ignoring ones already gone
    list(current_search.delete(ignore=[404]))
@pytest.yield_fixture()
def funders(app, es, db):
    """Funder records fixture.

    Loads funder records from the bundled FundRef RDF test file.
    NOTE(review): declared with yield_fixture but contains no ``yield`` --
    confirm this behaves as intended under the pytest version in use.
    """
    harvest_fundref(source='tests/testdata/fundref_test.rdf')
@pytest.yield_fixture()
def grants(app, es, db, funders):
    """Grant records fixture.

    Harvests grants from the bundled OpenAIRE sqlite dump, indexes every
    resulting record, and yields the list of record UUIDs.
    """
    harvest_openaire_projects(source='tests/testdata/openaire_test.sqlite')
    records = []
    for record in RecordMetadata.query.all():
        records.append(record.id)
        RecordIndexer().index_by_id(record.id)
    # make the freshly indexed records searchable before tests run
    es.flush_and_refresh('_all')
    yield records
@pytest.yield_fixture()
def sqlite_tmpdb():
    """Create a temporary sqlite database file.

    Yields the path to the file; the file is removed on teardown.
    """
    fd, path = tempfile.mkstemp("_db.sqlite")
    # mkstemp returns an open OS-level file descriptor; close it immediately
    # so it does not leak (consumers reopen the database by path).
    os.close(fd)
    yield path
    os.remove(path)
| {
"content_hash": "a1ab72225222a9fbf99510424b2e9d0a",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 77,
"avg_line_length": 28.80921052631579,
"alnum_prop": 0.6921671614523864,
"repo_name": "inveniosoftware/invenio-openaire",
"id": "ddeab0e8383198efbe4e4eb6275157c1e1c67c43",
"size": "4615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77471"
},
{
"name": "Shell",
"bytes": "466"
}
],
"symlink_target": ""
} |
"""The tests for the MQTT statestream component."""
import homeassistant.components.mqtt_statestream as statestream
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from tests.async_mock import ANY, call
from tests.common import mock_state_change_event
async def add_statestream(
    hass,
    base_topic=None,
    publish_attributes=None,
    publish_timestamps=None,
    publish_include=None,
    publish_exclude=None,
):
    """Add a mqtt_statestream component."""
    # Only truthy options make it into the component configuration.
    option_pairs = (
        ("base_topic", base_topic),
        ("publish_attributes", publish_attributes),
        ("publish_timestamps", publish_timestamps),
        ("include", publish_include),
        ("exclude", publish_exclude),
    )
    config = {}
    for option, value in option_pairs:
        if value:
            config[option] = value
    return await async_setup_component(
        hass, statestream.DOMAIN, {statestream.DOMAIN: config}
    )
async def test_fails_with_no_base(hass, mqtt_mock):
    """Setup should fail if no base_topic is set."""
    setup_ok = await add_statestream(hass)
    assert setup_ok is False
async def test_setup_succeeds_without_attributes(hass, mqtt_mock):
    """Test the success of the setup with a valid base_topic."""
    setup_ok = await add_statestream(hass, base_topic="pub")
    assert setup_ok
async def test_setup_succeeds_with_attributes(hass, mqtt_mock):
    """Test setup with a valid base_topic and publish_attributes."""
    setup_ok = await add_statestream(hass, base_topic="pub", publish_attributes=True)
    assert setup_ok
async def test_state_changed_event_sends_message(hass, mqtt_mock):
    """Test the sending of a new message if event changed."""
    # Publish plain state updates under the "pub" base topic.
    assert await add_statestream(hass, base_topic="pub")
    await hass.async_block_till_done()

    # Drop any publishes recorded during component setup.
    mqtt_mock.async_publish.reset_mock()

    mock_state_change_event(hass, State("fake.entity", "on"))
    await hass.async_block_till_done()
    await hass.async_block_till_done()

    # The new state lands on <base_topic>/<domain>/<object_id>/state.
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called
async def test_state_changed_event_sends_message_and_timestamp(hass, mqtt_mock):
    """Test the sending of a message and timestamps if event changed."""
    # Enable timestamp publishing; attribute publishing stays off.
    assert await add_statestream(
        hass, base_topic="pub", publish_attributes=None, publish_timestamps=True
    )
    await hass.async_block_till_done()

    # Drop any publishes recorded during component setup.
    mqtt_mock.async_publish.reset_mock()

    mock_state_change_event(hass, State("another.entity", "on"))
    await hass.async_block_till_done()
    await hass.async_block_till_done()

    # The state plus both timestamp topics must all have been published.
    expected = [
        call.async_publish("pub/another/entity/state", "on", 1, True),
        call.async_publish("pub/another/entity/last_changed", ANY, 1, True),
        call.async_publish("pub/another/entity/last_updated", ANY, 1, True),
    ]
    mqtt_mock.async_publish.assert_has_calls(expected, any_order=True)
    assert mqtt_mock.async_publish.called
async def test_state_changed_attr_sends_message(hass, mqtt_mock):
    """Test the sending of a new message if attribute changed."""
    # Attribute publishing enabled: each attribute gets its own topic.
    assert await add_statestream(hass, base_topic="pub", publish_attributes=True)
    await hass.async_block_till_done()

    # Drop any publishes recorded during component setup.
    mqtt_mock.async_publish.reset_mock()

    attributes = {"testing": "YES", "list": ["a", "b", "c"], "bool": False}
    mock_state_change_event(hass, State("fake.entity", "off", attributes=attributes))
    await hass.async_block_till_done()
    await hass.async_block_till_done()

    # The state goes to .../state; each attribute is JSON-encoded on its own topic.
    expected = [
        call.async_publish("pub/fake/entity/state", "off", 1, True),
        call.async_publish("pub/fake/entity/testing", '"YES"', 1, True),
        call.async_publish("pub/fake/entity/list", '["a", "b", "c"]', 1, True),
        call.async_publish("pub/fake/entity/bool", "false", 1, True),
    ]
    mqtt_mock.async_publish.assert_has_calls(expected, any_order=True)
    assert mqtt_mock.async_publish.called
async def test_state_changed_event_include_domain(hass, mqtt_mock):
    """Test that filtering on included domain works as expected."""
    # Only the "fake" domain is allowed through the statestream filter.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={"domains": ["fake"]},
        publish_exclude={},
    )
    await hass.async_block_till_done()

    # Drop any publishes recorded during component setup.
    mqtt_mock.async_publish.reset_mock()

    # An entity in the included domain must be forwarded to MQTT.
    mock_state_change_event(hass, State("fake.entity", "on"))
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called

    mqtt_mock.async_publish.reset_mock()

    # An entity outside the included domain must be filtered out.
    mock_state_change_event(hass, State("fake2.entity", "on"))
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    assert not mqtt_mock.async_publish.called
async def test_state_changed_event_include_entity(hass, mqtt_mock):
    """Test that filtering on included entity works as expected."""
    # Only the single entity "fake.entity" is allowed through the filter.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={"entities": ["fake.entity"]},
        publish_exclude={},
    )
    await hass.async_block_till_done()

    # Drop any publishes recorded during component setup.
    mqtt_mock.async_publish.reset_mock()

    # The included entity must be forwarded to MQTT.
    mock_state_change_event(hass, State("fake.entity", "on"))
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called

    mqtt_mock.async_publish.reset_mock()

    # Any other entity must be filtered out.
    mock_state_change_event(hass, State("fake.entity2", "on"))
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_domain(hass, mqtt_mock):
    """Test that filtering on excluded domain works as expected."""
    # The "fake2" domain is blocked by the statestream filter.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={},
        publish_exclude={"domains": ["fake2"]},
    )
    await hass.async_block_till_done()

    # Drop any publishes recorded during component setup.
    mqtt_mock.async_publish.reset_mock()

    # An entity outside the excluded domain must be forwarded to MQTT.
    mock_state_change_event(hass, State("fake.entity", "on"))
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called

    mqtt_mock.async_publish.reset_mock()

    # An entity in the excluded domain must be filtered out.
    mock_state_change_event(hass, State("fake2.entity", "on"))
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_entity(hass, mqtt_mock):
    """Test that filtering on excluded entity works as expected."""

    async def _set_state(entity_id, new_state):
        """Fire a state change and let the statestream settle."""
        mock_state_change_event(hass, State(entity_id, new_state))
        await hass.async_block_till_done()
        await hass.async_block_till_done()

    # Configure statestream with a single excluded entity.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={},
        publish_exclude={"entities": ["fake.entity2"]},
    )
    await hass.async_block_till_done()

    # Discard the publishes generated while the component initialized.
    mqtt_mock.async_publish.reset_mock()

    # A non-excluded entity is forwarded to MQTT.
    await _set_state("fake.entity", "on")
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # The excluded entity is filtered out.
    await _set_state("fake.entity2", "on")
    assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_domain_include_entity(hass, mqtt_mock):
    """Test filtering with excluded domain and included entity."""

    async def _set_state(entity_id, new_state):
        """Fire a state change and let the statestream settle."""
        mock_state_change_event(hass, State(entity_id, new_state))
        await hass.async_block_till_done()
        await hass.async_block_till_done()

    # Exclude the whole fake domain but explicitly include fake.entity.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={"entities": ["fake.entity"]},
        publish_exclude={"domains": ["fake"]},
    )
    await hass.async_block_till_done()

    # Discard the publishes generated while the component initialized.
    mqtt_mock.async_publish.reset_mock()

    # The explicitly included entity wins over the domain exclusion.
    await _set_state("fake.entity", "on")
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # Other entities of the excluded domain stay filtered out.
    await _set_state("fake.entity2", "on")
    assert not mqtt_mock.async_publish.called
async def test_state_changed_event_include_domain_exclude_entity(hass, mqtt_mock):
    """Test filtering with included domain and excluded entity."""

    async def _set_state(entity_id, new_state):
        """Fire a state change and let the statestream settle."""
        mock_state_change_event(hass, State(entity_id, new_state))
        await hass.async_block_till_done()
        await hass.async_block_till_done()

    # Include the fake domain but explicitly exclude fake.entity2.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={"domains": ["fake"]},
        publish_exclude={"entities": ["fake.entity2"]},
    )
    await hass.async_block_till_done()

    # Discard the publishes generated while the component initialized.
    mqtt_mock.async_publish.reset_mock()

    # An included-domain entity is forwarded to MQTT.
    await _set_state("fake.entity", "on")
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # The explicitly excluded entity is filtered out despite its domain.
    await _set_state("fake.entity2", "on")
    assert not mqtt_mock.async_publish.called
async def test_state_changed_event_include_globs(hass, mqtt_mock):
    """Test that filtering on included glob works as expected."""

    async def _set_state(entity_id, new_state):
        """Fire a state change and let the statestream settle."""
        mock_state_change_event(hass, State(entity_id, new_state))
        await hass.async_block_till_done()
        await hass.async_block_till_done()

    # Only entities matching *.included_* should be published.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={"entity_globs": ["*.included_*"]},
        publish_exclude={},
    )
    await hass.async_block_till_done()

    # Discard the publishes generated while the component initialized.
    mqtt_mock.async_publish.reset_mock()

    # An entity matching the glob is forwarded to MQTT.
    await _set_state("fake2.included_entity", "on")
    mqtt_mock.async_publish.assert_called_with(
        "pub/fake2/included_entity/state", "on", 1, True
    )
    assert mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # An entity not matching the glob is filtered out.
    await _set_state("fake2.entity", "on")
    assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_globs(hass, mqtt_mock):
    """Test that filtering on excluded globs works as expected."""

    async def _set_state(entity_id, new_state):
        """Fire a state change and let the statestream settle."""
        mock_state_change_event(hass, State(entity_id, new_state))
        await hass.async_block_till_done()
        await hass.async_block_till_done()

    # Entities matching *.excluded_* should be dropped.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={},
        publish_exclude={"entity_globs": ["*.excluded_*"]},
    )
    await hass.async_block_till_done()

    # Discard the publishes generated while the component initialized.
    mqtt_mock.async_publish.reset_mock()

    # An entity not matching the exclusion glob is forwarded to MQTT.
    await _set_state("fake.entity", "on")
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # An entity matching the exclusion glob is filtered out.
    await _set_state("fake.excluded_entity", "on")
    assert not mqtt_mock.async_publish.called
async def test_state_changed_event_exclude_domain_globs_include_entity(hass, mqtt_mock):
    """Test filtering with excluded domain and glob and included entity."""

    async def _set_state(entity_id, new_state):
        """Fire a state change and let the statestream settle."""
        mock_state_change_event(hass, State(entity_id, new_state))
        await hass.async_block_till_done()
        await hass.async_block_till_done()

    # Exclude the fake domain and *.excluded_*, but include fake.entity.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={"entities": ["fake.entity"]},
        publish_exclude={"domains": ["fake"], "entity_globs": ["*.excluded_*"]},
    )
    await hass.async_block_till_done()

    # Discard the publishes generated while the component initialized.
    mqtt_mock.async_publish.reset_mock()

    # The explicitly included entity is forwarded to MQTT.
    await _set_state("fake.entity", "on")
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # An entity matching none of the filters is published by default.
    await _set_state("fake2.included_entity", "on")
    mqtt_mock.async_publish.assert_called_with(
        "pub/fake2/included_entity/state", "on", 1, True
    )
    assert mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # An entity in the excluded domain is filtered out.
    await _set_state("fake.entity2", "on")
    assert not mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # An entity matching the exclusion glob is filtered out.
    await _set_state("fake.excluded_entity", "on")
    assert not mqtt_mock.async_publish.called
async def test_state_changed_event_include_domain_globs_exclude_entity(hass, mqtt_mock):
    """Test filtering with included domain and glob and excluded entity."""

    async def _set_state(entity_id, new_state):
        """Fire a state change and let the statestream settle."""
        mock_state_change_event(hass, State(entity_id, new_state))
        await hass.async_block_till_done()
        await hass.async_block_till_done()

    # Include the fake domain and *.included_*, but exclude fake.entity2.
    assert await add_statestream(
        hass,
        base_topic="pub",
        publish_include={"domains": ["fake"], "entity_globs": ["*.included_*"]},
        publish_exclude={"entities": ["fake.entity2"]},
    )
    await hass.async_block_till_done()

    # Discard the publishes generated while the component initialized.
    mqtt_mock.async_publish.reset_mock()

    # An entity covered by the included domain is forwarded to MQTT.
    await _set_state("fake.entity", "on")
    mqtt_mock.async_publish.assert_called_with("pub/fake/entity/state", "on", 1, True)
    assert mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # An entity covered by the included glob is forwarded to MQTT.
    await _set_state("fake.included_entity", "on")
    mqtt_mock.async_publish.assert_called_with(
        "pub/fake/included_entity/state", "on", 1, True
    )
    assert mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # The explicitly excluded entity is filtered out.
    await _set_state("fake.entity2", "on")
    assert not mqtt_mock.async_publish.called
    mqtt_mock.async_publish.reset_mock()

    # An entity matching no filter at all is not published.
    await _set_state("fake2.entity", "on")
    assert not mqtt_mock.async_publish.called
| {
"content_hash": "345de17f19f53a06a6b06b5fd398104c",
"timestamp": "",
"source": "github",
"line_count": 535,
"max_line_length": 88,
"avg_line_length": 37.183177570093456,
"alnum_prop": 0.6926054390991806,
"repo_name": "tboyce1/home-assistant",
"id": "cea4b492f3ed1bacc3a0f22366f7a833445a5b83",
"size": "19893",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt_statestream/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1510140"
},
{
"name": "Python",
"bytes": "5144365"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
} |
import argparse as ap
import numpy as np
import pandas as pd
import os
import pkg_resources
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import train_test_split
from itertools import chain
from sklearn.metrics import f1_score
from sklearn import preprocessing
from sklearn.externals import joblib
from sklearn.utils import shuffle
from sklearn.pipeline import Pipeline
class Trainer(object):
    """Trains, persists, and serves wincast's win-probability model.

    The model is a ``StandardScaler`` feeding a ``LogisticRegressionCV``
    fitted on play-by-play rows read from ``--playdata``.
    """

    def __init__(self, **kwargs):
        """Build a Trainer from keyword options or from ``sys.argv``.

        Keyword arguments are converted to CLI-style flags so programmatic
        and command-line use share a single argparse definition.
        """
        if kwargs:
            self.args = self.get_args(self.to_args_list(kwargs))
        else:
            self.args = self.get_args()

    def get_args(self, args=None):
        """Parse trainer options.

        Args:
            args: Optional iterable of argv-style tokens; ``None`` reads
                ``sys.argv``.

        Returns:
            argparse.Namespace with the parsed options.
        """
        playdata = 'data/Xy.csv'
        parser = ap.ArgumentParser(description='Train wincast')
        parser.add_argument('--search-iter', type=int, default=32)
        parser.add_argument('--playdata', '-p', type=str, default=playdata)
        parser.add_argument('--outdir', '-o', default=False)
        parser.add_argument('--indir', '-i', default=False)
        parser.add_argument('--validation-split', type=float, default=0.15)
        return parser.parse_args(args=args)

    def to_args_list(self, args):
        """Flatten ``{'key': value}`` into ``['--key', value, ...]``."""
        pairs = [['--%s' % key, value] for key, value in args.items()]
        return chain.from_iterable(pairs)

    def get_features(self, X):
        """Select the model's feature columns from a play-by-play frame."""
        return X.loc[:, [
            'qtr',
            'min',
            'sec',
            'ptso',
            'ptsd',
            'timo',
            'timd',
            'dwn',
            'ytg',
            'yfog',
            'ou',
            'pts_s',
            'off_h',
        ]]

    def train(self):
        """Fit the scaler and classifier, or load them from ``--indir``.

        Prints the held-out accuracy and, when ``--outdir`` is set,
        persists the fitted estimators.
        """
        if self.args.indir:
            return self.read()
        self.scaler = preprocessing.StandardScaler()
        # Reuse get_data() instead of duplicating the CSV/shuffle logic.
        X, y = self.get_data()
        self.clf = LogisticRegressionCV()
        # FIX: honor --validation-split instead of a hard-coded 0.15
        # (the default is unchanged, so behavior is backward compatible).
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=self.args.validation_split)
        X_train = self.scaler.fit_transform(X_train, y_train)
        # FIX: transform() takes only X; the old call passed y_test as a
        # spurious second positional argument.
        X_test = self.scaler.transform(X_test)
        self.clf.fit(X_train, y_train)
        test_acc = self.clf.score(X_test, y_test)
        print('Test accuracy: %s' % test_acc)
        if self.args.outdir:
            self.write()

    def predict_proba(self, X):
        """Return class probabilities for (unscaled) feature rows X."""
        return self.clf.predict_proba(self.scaler.transform(X))

    def predict(self, X):
        """Return class predictions for (unscaled) feature rows X."""
        # FIX: was self.clf.transform(...), which is feature selection in
        # old scikit-learn, not prediction.
        return self.clf.predict(self.scaler.transform(X))

    def get_data(self):
        """Load the play data CSV and return shuffled (features, labels)."""
        X = pd.read_csv(self.args.playdata)
        y = X.loc[:, 'y']
        X = self.get_features(X)
        X, y = shuffle(X, y)
        return X, y

    def get_pipeline(self):
        """Return the fitted scaler+classifier as a sklearn Pipeline."""
        pipeline = Pipeline(steps=[
            ('scaler', self.scaler),
            ('clf', self.clf)
        ])
        return pipeline

    def read(self):
        """Load previously persisted estimators from ``--indir``."""
        indir = pkg_resources.resource_filename('wincast', self.args.indir)
        self.clf = joblib.load(os.path.normpath(
            os.path.join(indir, 'wincast.clf.pkl')))
        self.scaler = joblib.load(os.path.normpath(
            os.path.join(indir, 'wincast.scaler.pkl')))

    def write(self):
        """Persist the fitted estimators to ``--outdir``."""
        joblib.dump(self.clf, os.path.normpath(
            os.path.join(self.args.outdir, 'wincast.clf.pkl')))
        joblib.dump(self.scaler, os.path.normpath(
            os.path.join(self.args.outdir, 'wincast.scaler.pkl')))
# Script entry point: train (or load, with --indir) the wincast model
# using options taken from the command line.
if __name__ == '__main__':
    Trainer().train()
| {
"content_hash": "d4734a29967b5fad190664e1f78f750d",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 75,
"avg_line_length": 28.504065040650406,
"alnum_prop": 0.5633200228180263,
"repo_name": "kahnjw/wincast",
"id": "7cd0dd966d945464b12f6cd71cb4c16f5b9b59b1",
"size": "3506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wincast/train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "8839"
}
],
"symlink_target": ""
} |
"""
Entry point
"""
from repl import Repl
def run():
    """Start matlabette's interactive read-eval-print loop."""
    repl = Repl()
    repl.loop()
| {
"content_hash": "afb58a5310fd428ed260591e4765e30c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 21,
"avg_line_length": 9.125,
"alnum_prop": 0.5753424657534246,
"repo_name": "thuo/bc-6-matlabette",
"id": "dc56ec7e00c52f968527081298a92d9c09ed9bfc",
"size": "73",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matlabette/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30441"
}
],
"symlink_target": ""
} |
"""The test service unit tests."""
from __future__ import print_function
import contextlib
import os
import shutil
import mock
from chromite.api.gen.chromiumos import common_pb2
from chromite.cbuildbot import commands
from chromite.cbuildbot import goma_util
from chromite.lib import build_target_lib
from chromite.lib import chroot_lib
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import failures_lib
from chromite.lib import image_lib
from chromite.lib import moblab_vm
from chromite.lib import osutils
from chromite.lib import portage_util
from chromite.lib import sysroot_lib
from chromite.service import test
class BuildTargetUnitTestResultTest(cros_test_lib.TestCase):
  """Tests for BuildTargetUnitTestResult's success logic."""

  def testSuccess(self):
    """A zero return code with no failed packages is a success."""
    self.assertTrue(test.BuildTargetUnitTestResult(0, None).success)

  def testPackageFailure(self):
    """Any failed packages mean failure, whatever the return code."""
    # Stand-ins for CPVs; their contents are not inspected here.
    failed = ['a', 'b']
    self.assertFalse(test.BuildTargetUnitTestResult(1, failed).success)
    self.assertFalse(test.BuildTargetUnitTestResult(0, failed).success)

  def testScriptFailure(self):
    """A non-zero return code alone is enough to fail."""
    self.assertFalse(test.BuildTargetUnitTestResult(1, None).success)
class BuildTargetUnitTestTest(cros_test_lib.RunCommandTempDirTestCase):
  """BuildTargetUnitTest tests."""
  def setUp(self):
    # Minimal build target/chroot fixtures; commands are mocked by the
    # RunCommand test base class, so nothing real is executed.
    self.board = 'board'
    self.build_target = build_target_lib.BuildTarget(self.board)
    self.chroot = chroot_lib.Chroot(path=self.tempdir)
    # Make the chroot's tmp directory, used for the parallel emerge status file.
    tempdir = os.path.join(self.tempdir, 'tmp')
    osutils.SafeMakedirs(tempdir)
  def testSuccess(self):
    """Test simple success case."""
    result = test.BuildTargetUnitTest(self.build_target, self.chroot)
    self.assertCommandContains(['cros_run_unit_tests', '--board', self.board])
    self.assertTrue(result.success)
  def testBlacklist(self):
    """Test the blacklist argument."""
    # The blacklist is passed as one space-joined CLI argument.
    blacklist = ['foo/bar', 'cat/pkg']
    test.BuildTargetUnitTest(self.build_target, self.chroot,
                             blacklist=blacklist)
    self.assertCommandContains(['--blacklist_packages', 'foo/bar cat/pkg'])
  def testFailure(self):
    """Test non-zero return code and failed package handling."""
    packages = ['foo/bar', 'cat/pkg']
    cpvs = [portage_util.SplitCPV(p, strict=False) for p in packages]
    # Simulate the die-hook status file reporting these packages as failed.
    self.PatchObject(portage_util, 'ParseDieHookStatusFile',
                     return_value=cpvs)
    expected_rc = 1
    self.rc.SetDefaultCmdResult(returncode=expected_rc)
    result = test.BuildTargetUnitTest(self.build_target, self.chroot)
    self.assertFalse(result.success)
    self.assertEqual(expected_rc, result.return_code)
    self.assertCountEqual(cpvs, result.failed_cpvs)
class BuildTargetUnitTestTarballTest(cros_test_lib.MockTestCase):
  """Tests for BuildTargetUnitTestTarball."""

  def setUp(self):
    self.chroot = chroot_lib.Chroot(path='/chroot/path')
    self.sysroot = sysroot_lib.Sysroot('/chroot/path/sysroot/path')
    self.result_path = '/result/path'

  def _MockCreateTarball(self, returncode):
    """Patch CreateTarball to report the given exit status."""
    result = cros_build_lib.CommandResult(returncode=returncode)
    self.PatchObject(cros_build_lib, 'CreateTarball', return_value=result)

  def testSuccess(self):
    """A successful tarball lands under the requested result path."""
    self._MockCreateTarball(0)
    path = test.BuildTargetUnitTestTarball(
        self.chroot, self.sysroot, self.result_path)
    self.assertStartsWith(path, self.result_path)

  def testFailure(self):
    """A failed tarball creation yields no path."""
    self._MockCreateTarball(1)
    path = test.BuildTargetUnitTestTarball(
        self.chroot, self.sysroot, self.result_path)
    self.assertIsNone(path)
class DebugInfoTestTest(cros_test_lib.RunCommandTestCase):
  """Tests for the DebugInfoTest helper."""

  def testSuccess(self):
    """The helper returns True when debug_info_test exits cleanly."""
    sysroot = '/sysroot/path'
    self.assertTrue(test.DebugInfoTest(sysroot))
    self.assertCommandContains(
        ['debug_info_test', sysroot + '/usr/lib/debug'])

  def testFailure(self):
    """The helper returns False when debug_info_test fails."""
    self.rc.SetDefaultCmdResult(returncode=1)
    self.assertFalse(test.DebugInfoTest('/sysroot/path'))
class MoblabVmTestCase(cros_test_lib.RunCommandTempDirTestCase):
  """Shared fixture for the Moblab VM test cases below.

  NOTE(review): the previous docstring ("Tests for the SetupBoardRunConfig
  class") appears to be a copy-paste error; this class only provides common
  Moblab VM test data (image/payload/results directories and VM objects).
  """
  def MockDirectory(self, path):
    """Create an empty directory.
    Args:
      path (str): Relative path for the directory.
    Returns:
      str: Path to the directory.
    """
    path = os.path.join(self.tempdir, path)
    osutils.SafeMakedirs(path)
    return path
  def setUp(self):
    # Builder string in the <board>/<version> form consumed by the service.
    self.builder = 'moblab-generic-vm/R12-3.4.5-67-890'
    self.image_dir = self.MockDirectory('files/image')
    self.payload_dir = self.MockDirectory('files/payload')
    self.results_dir = self.MockDirectory('results')
    self.vms = moblab_vm.MoblabVm(self.tempdir)
    self.chroot = chroot_lib.Chroot(path=self.tempdir)
class CreateMoblabVmTest(MoblabVmTestCase):
  """Unit tests for CreateMoblabVm."""

  def setUp(self):
    # Capture MoblabVm.Create calls so no real VM work happens.
    self.mock_vm_create = self.PatchObject(moblab_vm.MoblabVm, 'Create')

  def testBasic(self):
    """CreateMoblabVm wires workspace/chroot and delegates to Create."""
    vms = test.CreateMoblabVm(self.tempdir, self.chroot.path, self.image_dir)
    self.assertEqual(vms.workspace, self.tempdir)
    self.assertEqual(vms.chroot, self.chroot.path)
    self.mock_vm_create.assert_called_once_with(
        self.image_dir, dut_image_dir=self.image_dir, create_vm_images=False)
class PrepareMoblabVmImageCacheTest(MoblabVmTestCase):
  """Unit tests for PrepareMoblabVmImageCache."""
  def setUp(self):
    # Pretend the moblab disk is mounted at tempdir so the service writes
    # the cache into a directory we can inspect.
    @contextlib.contextmanager
    def MountedMoblabDiskContextMock(*_args, **_kwargs):
      yield self.tempdir
    self.PatchObject(moblab_vm.MoblabVm, 'MountedMoblabDiskContext',
                     MountedMoblabDiskContextMock)
    # A single fake payload file for the service to copy into the cache.
    self.payload_file_name = 'payload.bin'
    self.payload_file = os.path.join(self.payload_dir, self.payload_file_name)
    self.payload_file_content = 'A Lannister always pays his debts.'
    # FIX: write through the already-computed self.payload_file instead of
    # re-joining the same path components a second time.
    osutils.WriteFile(self.payload_file, self.payload_file_content)
  def testBasic(self):
    """PrepareMoblabVmImageCache loads all payloads into the vm."""
    image_cache_dir = test.PrepareMoblabVmImageCache(self.vms, self.builder,
                                                     [self.payload_dir])
    expected_cache_dir = 'static/prefetched/moblab-generic-vm/R12-3.4.5-67-890'
    # The returned path is the in-VM mount location of the cache.
    self.assertEqual(image_cache_dir,
                     os.path.join('/mnt/moblab/', expected_cache_dir))
    # The payload must have been copied, content intact, into the cache.
    copied_payload_file = os.path.join(self.tempdir, expected_cache_dir,
                                       self.payload_file_name)
    self.assertExists(copied_payload_file)
    self.assertEqual(osutils.ReadFile(copied_payload_file),
                     self.payload_file_content)
class RunMoblabVmTestTest(MoblabVmTestCase):
  """Unit tests for RunMoblabVmTest."""
  def setUp(self):
    # Fake in-VM location of the prefetched image cache.
    self.image_cache_dir = '/mnt/moblab/whatever'
    # Keep the VM lifecycle calls from doing real work.
    self.PatchObject(moblab_vm.MoblabVm, 'Start')
    self.PatchObject(moblab_vm.MoblabVm, 'Stop')
  def testBasic(self):
    """RunMoblabVmTest calls test_that with correct args."""
    test.RunMoblabVmTest(self.chroot, self.vms, self.builder,
                         self.image_cache_dir, self.results_dir)
    # Verify the exact test_that invocation, including the autotest --args
    # string (note the trailing '/' appended to the image cache URL).
    self.assertCommandContains([
        'test_that', '--no-quickmerge',
        '--results_dir', self.results_dir,
        '-b', 'moblab-generic-vm',
        'moblab_DummyServerNoSspSuite',
        '--args',
        'services_init_timeout_m=10 '
        'target_build="%s" '
        'test_timeout_hint_m=90 '
        'clear_devserver_cache=False '
        'image_storage_server="%s"' % (self.builder,
                                       self.image_cache_dir + '/'),
    ], enter_chroot=True, chroot_args=self.chroot.get_enter_args())
class SimpleChromeWorkflowTestTest(cros_test_lib.MockTempDirTestCase):
  """Unit tests for SimpleChromeWorkflowTest."""
  def setUp(self):
    self.chrome_root = '/path/to/chrome/root'
    self.sysroot_path = '/chroot/path/sysroot/path'
    self.build_target = 'board'
    # Mock out goma and the Chrome SDK runner so no real builds happen.
    self.goma_mock = self.PatchObject(goma_util, 'Goma')
    self.chrome_sdk_run_mock = self.PatchObject(commands.ChromeSDK, 'Run')
    # SimpleChromeTest workflow creates directories based on objects that are
    # mocked for this test, so patch osutils.WriteFile
    self.write_mock = self.PatchObject(osutils, 'WriteFile')
    self.PatchObject(cros_build_lib, 'CmdToStr', return_value='CmdToStr value')
    self.PatchObject(shutil, 'copy2')
  def testSimpleChromeWorkflowTest(self):
    # Build a real (non-mocked) Goma instance backed by temp dirs so the
    # workflow's goma handling has actual paths to work with.
    goma_test_dir = os.path.join(self.tempdir, 'goma_test_dir')
    goma_test_json_string = os.path.join(self.tempdir, 'goma_json_string.txt')
    chromeos_goma_dir = os.path.join(self.tempdir, 'chromeos_goma_dir')
    goma_config = common_pb2.GomaConfig(goma_dir=goma_test_dir,
                                        goma_client_json=goma_test_json_string)
    osutils.SafeMakedirs(goma_test_dir)
    osutils.SafeMakedirs(chromeos_goma_dir)
    osutils.Touch(goma_test_json_string)
    goma = goma_util.Goma(
        goma_config.goma_dir,
        goma_config.goma_client_json,
        stage_name='BuildApiTestSimpleChrome',
        chromeos_goma_dir=chromeos_goma_dir)
    mock_goma_log_dir = os.path.join(self.tempdir, 'goma_log_dir')
    osutils.SafeMakedirs(mock_goma_log_dir)
    goma.goma_log_dir = mock_goma_log_dir
    # For this test, we avoid running test._VerifySDKEnvironment because use of
    # other mocks prevent creating the SDK dir that _VerifySDKEnvironment checks
    # for
    self.PatchObject(test, '_VerifySDKEnvironment')
    self.PatchObject(os.path, 'exists', return_value=True)
    ninja_cmd = self.PatchObject(commands.ChromeSDK, 'GetNinjaCommand',
                                 return_value='ninja command')
    test.SimpleChromeWorkflowTest(self.sysroot_path, self.build_target,
                                  self.chrome_root, goma)
    # Verify ninja_cmd calls.
    ninja_calls = [mock.call(), mock.call(debug=False)]
    ninja_cmd.assert_has_calls(ninja_calls)
    # Verify calls with args to chrome_sdk_run made by service/test.py.
    gn_dir = os.path.join(self.chrome_root, 'buildtools/linux64/gn')
    board_out_dir = os.path.join(self.chrome_root, 'out_board/Release')
    self.chrome_sdk_run_mock.assert_any_call(['gclient', 'runhooks'])
    self.chrome_sdk_run_mock.assert_any_call(['true'])
    self.chrome_sdk_run_mock.assert_any_call(
        ['bash', '-c', ('%s gen "%s" --args="$GN_ARGS"'
                        % (gn_dir, board_out_dir))])
    self.chrome_sdk_run_mock.assert_any_call(
        ['env', '--null'], run_args=mock.ANY)
    self.chrome_sdk_run_mock.assert_any_call('ninja command', run_args=mock.ANY)
    # Create expected paths from constants so that the tests work inside or
    # outside the SDK.
    deploy_chrome_path = os.path.join(constants.SOURCE_ROOT,
                                      constants.CHROMITE_BIN_SUBDIR,
                                      'deploy_chrome')
    image_dir_symlink = image_lib.GetLatestImageLink(self.build_target)
    image_path = os.path.join(image_dir_symlink, constants.VM_IMAGE_BIN)
    self.chrome_sdk_run_mock.assert_any_call(
        [deploy_chrome_path, '--build-dir', board_out_dir, '--staging-only',
         '--staging-dir', mock.ANY])
    self.chrome_sdk_run_mock.assert_any_call(
        ['cros_run_test', '--copy-on-write', '--deploy', '--board=board',
         ('--image-path=%s' % (image_path)),
         '--build-dir=out_board/Release'])
    # Verify goma mock was started and stopped.
    # TODO(crbug/1065172): Invalid assertions that had previously been mocked.
    # self.goma_mock.Start.assert_called_once()
    # self.goma_mock.Stop.assert_called_once()
class ValidateMoblabVmTestTest(MoblabVmTestCase):
  """Unit tests for ValidateMoblabVmTest."""

  def setUp(self):
    # ValidateMoblabVmTest reads test_that's INFO log under results/debug.
    self.logs_dir = os.path.join(self.results_dir, 'debug')
    osutils.SafeMakedirs(self.logs_dir)
    self.logs_file = os.path.join(self.logs_dir, 'test_that.INFO')

  def _WriteLog(self, content):
    """Populate the test_that log with the given content."""
    osutils.WriteFile(self.logs_file, content)

  def testValidateMoblabVmTestSuccess(self):
    """Validation passes when the log shows a passing test."""
    self._WriteLog('dummy_PassServer [PASSED]')
    test.ValidateMoblabVmTest(self.results_dir)

  def testValidateMoblabVmTestNoLogs(self):
    """Validation fails when the test_that log is missing."""
    with self.assertRaises(failures_lib.TestFailure):
      test.ValidateMoblabVmTest(self.results_dir)

  def testValidateMoblabVmTestFailure(self):
    """Validation fails when the log shows a failed test."""
    self._WriteLog('dummy_PassServer [FAILED]')
    with self.assertRaises(failures_lib.TestFailure):
      test.ValidateMoblabVmTest(self.results_dir)
| {
"content_hash": "38480257542d39157109352c264e7aba",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 80,
"avg_line_length": 38.162393162393165,
"alnum_prop": 0.6809257185516984,
"repo_name": "endlessm/chromium-browser",
"id": "187c21ce9b75c78832de98dc590e374a64321a08",
"size": "13585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/service/test_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import datetime
import logging
from django.conf import settings
from common import api
from common import clock
from common import exception
from common import throttle
from common.test import base
from common.test import util as test_util
class ThrottleTest(base.FixturesTestCase):
  """Exercises the per-actor rate limiter."""

  def setUp(self):
    super(ThrottleTest, self).setUp()
    self.popular = api.actor_get(api.ROOT, 'popular@example.com')

  def test_basic(self):
    def _throttled_call():
      throttle.throttle(self.popular, 'test', minute=2)

    # Within one window the first two calls succeed and the third is
    # rejected.
    _throttled_call()
    _throttled_call()
    self.assertRaises(exception.ApiThrottled, _throttled_call)

    # Jump the clock forward two minutes to open a fresh window.
    override = test_util.override_clock(clock, seconds=120)

    # The limit applies anew: two successes, then a rejection.
    _throttled_call()
    _throttled_call()
    self.assertRaises(exception.ApiThrottled, _throttled_call)
    override.reset()
| {
"content_hash": "695a4a7faaac07a89dd38b37e708d515",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 65,
"avg_line_length": 27.146341463414632,
"alnum_prop": 0.7178796046720575,
"repo_name": "CollabQ/CollabQ",
"id": "e34728bdb4ba9de409c68a849168c7dcf875e1ba",
"size": "1729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/test/throttle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "JavaScript",
"bytes": "327809"
},
{
"name": "Python",
"bytes": "6590397"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "5624"
}
],
"symlink_target": ""
} |
# Django settings for the feedzilla demo project.
import os.path
import sys
# Absolute directory containing this settings file; all filesystem paths
# below are derived from it.
ROOT = os.path.dirname(os.path.realpath(__file__))
# ******
# Admins
# ******
ADMINS = (
    ('Foo Bar', 'foobar@sogetthis.com'),
)
MANAGERS = ADMINS
# ****
# Time
# ****
TIME_ZONE = 'America/Chicago'
USE_TZ = False  # naive (non-timezone-aware) datetimes
DATETIME_FORMAT = 'd F, Y H:i'
DATE_FORMAT = 'd F, Y'
# ****
# I18N
# ****
LANGUAGE_CODE = 'en'
USE_I18N = True
USE_L10N = True
# ************
# Static Files
# ************
# NOTE(review): MEDIA_ROOT points at 'static' and STATIC_ROOT nests inside
# it at 'static/pub' — unusual, but preserved as-is.
MEDIA_ROOT = os.path.join(ROOT, 'static')
MEDIA_URL = '/static/'
STATIC_ROOT = os.path.join(ROOT, 'static/pub')
STATIC_URL = '/static/pub/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# *********
# Templates
# *********
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
TEMPLATE_DIRS = (
    os.path.join(ROOT, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
)
# ******************
# Apps & Middlewares
# ******************
MIDDLEWARE_CLASSES = (
    #'django.middleware.cache.UpdateCacheMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    #'debug_toolbar.middleware.DebugToolbarMiddleware',
    #'django.middleware.cache.FetchFromCacheMiddleware',
    #'django.middleware.transaction.TransactionMiddleware',
)
INSTALLED_APPS = (
    # django
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    # 3rd party libraries
    'common',
    'django_extensions',
    'south',
    'feedzilla',
    'taggit',
    #'pytils',
    # local project modules
)
# *******
# Logging
# *******
# Console logging everywhere; unhandled request errors are also mailed to
# ADMINS when DEBUG is off (require_debug_false filter).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(name)s %(levelname)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'filters': ['require_debug_false'],
        },
        'null': {
            'level':'DEBUG',
            'class':'django.utils.log.NullHandler',
        },
        'console':{
            'level':'DEBUG',
            'class':'logging.StreamHandler',
            'formatter': 'simple'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'django': {
            'handlers': ['console'],
            'level': 'INFO',
        },
        'south': {
            'handlers': ['console'],
            'level': 'INFO',
        },
        '': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    }
}
# *****
# Debug
# *****
# Development defaults; settings_local.py is expected to override these
# for any real deployment.
ALLOWED_HOSTS = []
DEBUG = True
TEMPLATE_DEBUG = DEBUG
TEST_DATABASE_CHARSET = 'utf8'
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
}
# *****
# Cache
# *****
#CACHES = {
#    'default': {
#        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#        'LOCATION': '127.0.0.1:11211',
#    }
#}
#CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# **********************
# Miscellanious Settings
# **********************
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'app.application'
SITE_ID = 1
HOSTNAME = 'localhost:8000'
INTERNAL_IPS = ('127.0.0.1',)
# Pull in feedzilla's own default settings; anything defined below can
# still override them.
from feedzilla.settings import *
# **************
# Local settings
# **************
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(ROOT, 'data/db.sqlite'),
    }
}
# NOTE(review): development-only placeholder; settings_local.py should
# provide the real secret key.
SECRET_KEY = '123'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# **************
# Local settings
# **************
# Optional machine-specific overrides; silently skipped when absent.
try:
    from settings_local import *
except ImportError:
    pass
# **********
# Secret Key
# **********
# Only fires if settings_local.py (or an edit above) blanked SECRET_KEY;
# with the '123' default above this check never triggers.
if not SECRET_KEY:
    raise Exception('You must provide SECRET_KEY value in settings_local.py')
"content_hash": "a80fb800e84c11fd6cc7f2b01798c55e",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 81,
"avg_line_length": 21.375,
"alnum_prop": 0.5680580762250453,
"repo_name": "feedzilla/feedzilla",
"id": "06a34eeddfbd909798b56bd63b2b3020cadcb735",
"size": "5035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18501"
},
{
"name": "JavaScript",
"bytes": "3472"
},
{
"name": "Python",
"bytes": "153119"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
from abc import ABCMeta, abstractmethod
import typing
class Core(metaclass=ABCMeta):
    """Abstract interface that concrete cores must implement."""

    @abstractmethod
    def get_keys(self) -> typing.List[str]:
        """Return the list of key name strings known to this core."""

    @abstractmethod
    def get_modifiers(self) -> typing.List[str]:
        """Return the list of modifier name strings known to this core."""
| {
"content_hash": "0f8cbfbac234e44109518dd931031464",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 48,
"avg_line_length": 20.583333333333332,
"alnum_prop": 0.6720647773279352,
"repo_name": "flacjacket/qtile",
"id": "71d262f35c243d34bf28b4d9faec2f9d4b3dc6f3",
"size": "247",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "libqtile/core/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "994"
},
{
"name": "Python",
"bytes": "1173072"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "6235"
}
],
"symlink_target": ""
} |
import os.path
from xmloutput import XMLOutput
def handleFile(o, fullname):
    """Embed one source file into the XML report.

    Writes a bold paragraph containing the file name, then the file's
    contents (right-stripped, one line per endl) inside a <pre> scope.

    o        -- XMLOutput-like writer exposing scope(), endl() and <<
    fullname -- path of the source file to embed
    """
    with o.scope('p'):
        with o.scope('b'):
            o << fullname
        o.endl()
    with o.scope('pre'):
        o << '# Copyright (c) 2010 Nicholas Bray'
        o.endl()
        o.endl()
        # Fix: close the file deterministically instead of leaking the
        # handle until garbage collection.
        with open(fullname) as src:
            for line in src:
                o << line.rstrip()
                o.endl()
# Build crunch.html: an HTML dump of every Python source file under bin/.
o = open("crunch.html", 'w')
o = XMLOutput(o)
with o.scope('html'):
    with o.scope('body'):
        for path, dirs, files in os.walk('bin'):
            for f in files:
                # Only embed Python sources.
                if f.endswith('.py'):
                    fullname = os.path.join(path, f)
                    # Fix: parenthesized print is valid on both Python 2
                    # (single argument) and Python 3; the bare print
                    # statement was Python-2-only.
                    print(fullname)
                    handleFile(o, fullname)
| {
"content_hash": "1ee37f8411502cf4752a4dcbfaa9411d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 43,
"avg_line_length": 18.29032258064516,
"alnum_prop": 0.5978835978835979,
"repo_name": "ncbray/pystream",
"id": "0229661dd28a7210b133271d86107505def5b485",
"size": "1145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/crunch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2982"
},
{
"name": "C++",
"bytes": "23809"
},
{
"name": "Python",
"bytes": "2232087"
},
{
"name": "Shell",
"bytes": "245"
}
],
"symlink_target": ""
} |
import os
# Example connection dictionaries kept for reference only; the surrounding
# triple-quoted string means they are never evaluated.
'''
db_stewart_connect = {
    "host": "127.0.0.1",
    "user": "pg8000-test",
    "database": "pg8000-test",
    "password": "pg8000-test",
    "socket_timeout": 5,
    "ssl": False}
db_local_connect = {
    "unix_sock": "/tmp/.s.PGSQL.5432",
    "user": "mfenniak"}
db_local_win_connect = {
    "host": "localhost",
    "user": "mfenniak",
    "password": "password",
    "database": "mfenniak"}
db_oracledev2_connect = {
    "host": "oracledev2",
    "user": "mfenniak",
    "password": "password",
    "database": "mfenniak"}
'''
# Indirection: PG8000_TEST_NAME names *another* environment variable whose
# value holds the kwargs for connect().
NAME_VAR = "PG8000_TEST_NAME"
try:
    TEST_NAME = os.environ[NAME_VAR]
except KeyError:
    raise Exception(
        "The environment variable " + NAME_VAR + " needs to be set. It should "
        "contain the name of the environment variable that contains the "
        "kwargs for the connect() function.")
# NOTE(review): eval() of an environment variable executes arbitrary code.
# Tolerable only because this is developer-controlled test configuration;
# never use this pattern on untrusted input.
db_connect = eval(os.environ[TEST_NAME])
| {
"content_hash": "7577c766d0737268b0a354be16918e04",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 23.710526315789473,
"alnum_prop": 0.5982241953385128,
"repo_name": "kaniini/pg8000",
"id": "92d27b92e8ad08fa41b9cde3b44c4179c0f2e584",
"size": "901",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pg8000/tests/connection_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "298178"
},
{
"name": "Shell",
"bytes": "15010"
}
],
"symlink_target": ""
} |
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.transactional_message_copier import TransactionalMessageCopier
from kafkatest.utils import is_int
from ducktape.tests.test import Test
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from ducktape.utils.util import wait_until
class TransactionsTest(Test):
    """Tests transactions by transactionally copying data from a source topic to
    a destination topic and killing the copy process as well as the broker
    randomly through the process. In the end we verify that the final output
    topic contains exactly one committed copy of each message in the input
    topic.
    """
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(TransactionsTest, self).__init__(test_context=test_context)
        self.input_topic = "input-topic"
        self.output_topic = "output-topic"
        self.num_brokers = 3
        # Test parameters
        self.num_input_partitions = 2
        self.num_output_partitions = 3
        self.num_seed_messages = 100000
        self.transaction_size = 750
        self.transaction_timeout = 10000
        self.consumer_group = "transactions-test-consumer-group"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=self.num_brokers,
                                  zk=self.zk)

    def setUp(self):
        self.zk.start()

    def seed_messages(self, topic, num_seed_messages):
        """Idempotently produce ``num_seed_messages`` integers into ``topic``
        and return the list of acknowledged messages."""
        seed_timeout_sec = 10000
        seed_producer = VerifiableProducer(context=self.test_context,
                                           num_nodes=1,
                                           kafka=self.kafka,
                                           topic=topic,
                                           message_validator=is_int,
                                           max_messages=num_seed_messages,
                                           enable_idempotence=True)
        seed_producer.start()
        # Fix: report the parameter actually used for this seeding run rather
        # than the instance attribute (same value today, but consistent).
        wait_until(lambda: seed_producer.num_acked >= num_seed_messages,
                   timeout_sec=seed_timeout_sec,
                   err_msg="Producer failed to produce messages %d in %ds." %\
                   (num_seed_messages, seed_timeout_sec))
        return seed_producer.acked

    def get_messages_from_topic(self, topic, num_messages):
        """Drain ``num_messages`` committed messages from ``topic``."""
        consumer = self.start_consumer(topic, group_id="verifying_consumer")
        return self.drain_consumer(consumer, num_messages)

    def bounce_brokers(self, clean_shutdown):
        """Restart every broker; on a hard kill, wait for ZK deregistration
        before starting the node again."""
        for node in self.kafka.nodes:
            if clean_shutdown:
                self.kafka.restart_node(node, clean_shutdown = True)
            else:
                self.kafka.stop_node(node, clean_shutdown = False)
                wait_until(lambda: len(self.kafka.pids(node)) == 0 and not self.kafka.is_registered(node),
                           timeout_sec=self.kafka.zk_session_timeout + 5,
                           err_msg="Failed to see timely deregistration of \
                           hard-killed broker %s" % str(node.account))
                self.kafka.start_node(node)

    def create_and_start_message_copier(self, input_topic, input_partition, output_topic, transactional_id, use_group_metadata):
        """Start one transactional copier for a single input partition."""
        message_copier = TransactionalMessageCopier(
            context=self.test_context,
            num_nodes=1,
            kafka=self.kafka,
            transactional_id=transactional_id,
            consumer_group=self.consumer_group,
            input_topic=input_topic,
            input_partition=input_partition,
            output_topic=output_topic,
            max_messages=-1,
            transaction_size=self.transaction_size,
            transaction_timeout=self.transaction_timeout,
            use_group_metadata=use_group_metadata
        )
        message_copier.start()
        wait_until(lambda: message_copier.alive(message_copier.nodes[0]),
                   timeout_sec=10,
                   err_msg="Message copier failed to start after 10 s")
        return message_copier

    def bounce_copiers(self, copiers, clean_shutdown):
        """Bounce every copier three times, letting each make >= 20% progress
        between restarts so the test exercises mid-transaction failures."""
        for _ in range(3):
            for copier in copiers:
                wait_until(lambda: copier.progress_percent() >= 20.0,
                           timeout_sec=30,
                           err_msg="%s : Message copier didn't make enough progress in 30s. Current progress: %s" \
                           % (copier.transactional_id, str(copier.progress_percent())))
                self.logger.info("%s - progress: %s" % (copier.transactional_id,
                                                        str(copier.progress_percent())))
                copier.restart(clean_shutdown)

    def create_and_start_copiers(self, input_topic, output_topic, num_copiers, use_group_metadata):
        """Start one copier per input partition and return them."""
        copiers = []
        for i in range(0, num_copiers):
            copiers.append(self.create_and_start_message_copier(
                input_topic=input_topic,
                output_topic=output_topic,
                input_partition=i,
                transactional_id="copier-" + str(i),
                use_group_metadata=use_group_metadata
            ))
        return copiers

    def start_consumer(self, topic_to_read, group_id):
        """Start a read_committed console consumer and wait until it has
        consumed at least one message."""
        consumer = ConsoleConsumer(context=self.test_context,
                                   num_nodes=1,
                                   kafka=self.kafka,
                                   topic=topic_to_read,
                                   group_id=group_id,
                                   message_validator=is_int,
                                   from_beginning=True,
                                   isolation_level="read_committed")
        consumer.start()
        # ensure that the consumer is up.
        # (Fix: drop the redundant `== True` comparison.)
        wait_until(lambda: len(consumer.messages_consumed[1]) > 0,
                   timeout_sec=60,
                   err_msg="Consumer failed to consume any messages for %ds" %\
                   60)
        return consumer

    def drain_consumer(self, consumer, num_messages):
        # wait until we read at least the expected number of messages.
        # This is a safe check because both failure modes will be caught:
        #  1. If we have 'num_seed_messages' but there are duplicates, then
        #     this is checked for later.
        #
        #  2. If we never reach 'num_seed_messages', then this will cause the
        #     test to fail.
        wait_until(lambda: len(consumer.messages_consumed[1]) >= num_messages,
                   timeout_sec=90,
                   err_msg="Consumer consumed only %d out of %d messages in %ds" %\
                   (len(consumer.messages_consumed[1]), num_messages, 90))
        consumer.stop()
        return consumer.messages_consumed[1]

    def copy_messages_transactionally(self, failure_mode, bounce_target,
                                      input_topic, output_topic,
                                      num_copiers, num_messages_to_copy,
                                      use_group_metadata):
        """Copies messages transactionally from the seeded input topic to the
        output topic, either bouncing brokers or clients in a hard and soft
        way as it goes.
        This method also consumes messages in read_committed mode from the
        output topic while the bounces and copy is going on.
        It returns the concurrently consumed messages.
        """
        copiers = self.create_and_start_copiers(input_topic=input_topic,
                                                output_topic=output_topic,
                                                num_copiers=num_copiers,
                                                use_group_metadata=use_group_metadata)
        concurrent_consumer = self.start_consumer(output_topic,
                                                  group_id="concurrent_consumer")
        clean_shutdown = False
        if failure_mode == "clean_bounce":
            clean_shutdown = True
        if bounce_target == "brokers":
            self.bounce_brokers(clean_shutdown)
        elif bounce_target == "clients":
            self.bounce_copiers(copiers, clean_shutdown)
        copier_timeout_sec = 120
        for copier in copiers:
            wait_until(lambda: copier.is_done,
                       timeout_sec=copier_timeout_sec,
                       err_msg="%s - Failed to copy all messages in %ds." %\
                       (copier.transactional_id, copier_timeout_sec))
        self.logger.info("finished copying messages")
        return self.drain_consumer(concurrent_consumer, num_messages_to_copy)

    def setup_topics(self):
        """Declare the input/output topics with RF=3 and min ISR of 2."""
        self.kafka.topics = {
            self.input_topic: {
                "partitions": self.num_input_partitions,
                "replication-factor": 3,
                "configs": {
                    "min.insync.replicas": 2
                }
            },
            self.output_topic: {
                "partitions": self.num_output_partitions,
                "replication-factor": 3,
                "configs": {
                    "min.insync.replicas": 2
                }
            }
        }

    @cluster(num_nodes=9)
    @matrix(failure_mode=["hard_bounce", "clean_bounce"],
            bounce_target=["brokers", "clients"],
            check_order=[True, False],
            use_group_metadata=[True, False])
    def test_transactions(self, failure_mode, bounce_target, check_order, use_group_metadata):
        security_protocol = 'PLAINTEXT'
        self.kafka.security_protocol = security_protocol
        self.kafka.interbroker_security_protocol = security_protocol
        self.kafka.logs["kafka_data_1"]["collect_default"] = True
        self.kafka.logs["kafka_data_2"]["collect_default"] = True
        self.kafka.logs["kafka_operational_logs_debug"]["collect_default"] = True
        if check_order:
            # To check ordering, we simply create input and output topics
            # with a single partition.
            # We reduce the number of seed messages to copy to account for the fewer output
            # partitions, and thus lower parallelism. This helps keep the test
            # time shorter.
            # Fix: use floor division -- under Python 3 `/` yields a float,
            # which is then passed as VerifiableProducer's max_messages count.
            self.num_seed_messages = self.num_seed_messages // 3
            self.num_input_partitions = 1
            self.num_output_partitions = 1
        self.setup_topics()
        self.kafka.start()
        input_messages = self.seed_messages(self.input_topic, self.num_seed_messages)
        concurrently_consumed_messages = self.copy_messages_transactionally(
            failure_mode, bounce_target, input_topic=self.input_topic,
            output_topic=self.output_topic, num_copiers=self.num_input_partitions,
            num_messages_to_copy=self.num_seed_messages, use_group_metadata=use_group_metadata)
        output_messages = self.get_messages_from_topic(self.output_topic, self.num_seed_messages)
        concurrently_consumed_message_set = set(concurrently_consumed_messages)
        output_message_set = set(output_messages)
        input_message_set = set(input_messages)
        num_dups = abs(len(output_messages) - len(output_message_set))
        num_dups_in_concurrent_consumer = abs(len(concurrently_consumed_messages)
                                              - len(concurrently_consumed_message_set))
        assert num_dups == 0, "Detected %d duplicates in the output stream" % num_dups
        assert input_message_set == output_message_set, "Input and output message sets are not equal. Num input messages %d. Num output messages %d" %\
            (len(input_message_set), len(output_message_set))
        assert num_dups_in_concurrent_consumer == 0, "Detected %d dups in concurrently consumed messages" % num_dups_in_concurrent_consumer
        assert input_message_set == concurrently_consumed_message_set, \
            "Input and concurrently consumed output message sets are not equal. Num input messages: %d. Num concurrently_consumed_messages: %d" %\
            (len(input_message_set), len(concurrently_consumed_message_set))
        if check_order:
            assert input_messages == sorted(input_messages), "The seed messages themselves were not in order"
            assert output_messages == input_messages, "Output messages are not in order"
            assert concurrently_consumed_messages == output_messages, "Concurrently consumed messages are not in order"
| {
"content_hash": "5443f250165b7425dc73a3647bdaeb0f",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 151,
"avg_line_length": 49.05384615384615,
"alnum_prop": 0.5860122314567978,
"repo_name": "sslavic/kafka",
"id": "2889f84687481776b5e52f6bd77be000b00ef743",
"size": "13535",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/tests/core/transactions_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26633"
},
{
"name": "Dockerfile",
"bytes": "5117"
},
{
"name": "HTML",
"bytes": "3739"
},
{
"name": "Java",
"bytes": "14966996"
},
{
"name": "Python",
"bytes": "802091"
},
{
"name": "Scala",
"bytes": "5802403"
},
{
"name": "Shell",
"bytes": "94955"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
} |
"""django-taggable

Package initializer: importing this package calls ``signals.register()``
as a side effect, then drops the module reference so only ``__version__``
remains in the package namespace.
"""
from taggable import signals
__version__ = '0.01'
# Wire up the signal handlers at import time, then remove the name.
signals.register()
del signals
| {
"content_hash": "75efd99dcc7bd16e8ef2535c5ac8710a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 28,
"avg_line_length": 12.75,
"alnum_prop": 0.7254901960784313,
"repo_name": "tabo/django-taggable",
"id": "d2449c38d9d00f5d7a00858b70a63a594284843d",
"size": "102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taggable/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "39946"
}
],
"symlink_target": ""
} |
import pytest
import sklearn.datasets as datasets
import sklearn.covariance as covariance
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestCovariance(tm.TestCase):
    """Exercise the ModelFrame ``covariance`` accessor against sklearn.covariance."""

    def test_objectmapper(self):
        """The accessor must expose the sklearn.covariance members unchanged."""
        df = pdml.ModelFrame([])
        exposed = [
            'EmpiricalCovariance', 'EllipticEnvelope', 'GraphLasso',
            'GraphLassoCV', 'LedoitWolf', 'MinCovDet', 'OAS',
            'ShrunkCovariance', 'shrunk_covariance', 'graph_lasso',
        ]
        for attr in exposed:
            self.assertIs(getattr(df.covariance, attr), getattr(covariance, attr))

    def test_empirical_covariance(self):
        iris = datasets.load_iris()
        df = pdml.ModelFrame(iris)
        result = df.covariance.empirical_covariance()
        expected = covariance.empirical_covariance(iris.data)
        self.assertIsInstance(result, pdml.ModelFrame)
        tm.assert_index_equal(result.index, df.data.columns)
        tm.assert_index_equal(result.columns, df.data.columns)
        self.assert_numpy_array_almost_equal(result.values, expected)

    def _check_matrix_and_shrinkage(self, df, result, expected):
        """Shared assertions for estimators returning (covariance, shrinkage)."""
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], pdml.ModelFrame)
        tm.assert_index_equal(result[0].index, df.data.columns)
        tm.assert_index_equal(result[0].columns, df.data.columns)
        self.assert_numpy_array_almost_equal(result[0].values, expected[0])
        self.assert_numpy_array_almost_equal(result[1], expected[1])

    def test_ledoit_wolf(self):
        iris = datasets.load_iris()
        df = pdml.ModelFrame(iris)
        self._check_matrix_and_shrinkage(
            df, df.covariance.ledoit_wolf(), covariance.ledoit_wolf(iris.data))

    def test_oas(self):
        iris = datasets.load_iris()
        df = pdml.ModelFrame(iris)
        self._check_matrix_and_shrinkage(
            df, df.covariance.oas(), covariance.oas(iris.data))

    @pytest.mark.parametrize("algo", ['EmpiricalCovariance', 'LedoitWolf'])
    def test_Covariance(self, algo):
        iris = datasets.load_iris()
        df = pdml.ModelFrame(iris)
        frame_estimator = getattr(df.covariance, algo)()
        raw_estimator = getattr(covariance, algo)()
        df.fit(frame_estimator)
        raw_estimator.fit(iris.data)
        self.assert_numpy_array_almost_equal(frame_estimator.covariance_,
                                             raw_estimator.covariance_)
| {
"content_hash": "2666ed37fcea470fd5f4c8970b00c904",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 88,
"avg_line_length": 38.95,
"alnum_prop": 0.6729781771501926,
"repo_name": "sinhrks/expandas",
"id": "9dd99212292e018d09131c8846673e1992739bfa",
"size": "3139",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas_ml/skaccessors/test/test_covariance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "387987"
},
{
"name": "Shell",
"bytes": "816"
}
],
"symlink_target": ""
} |
"""
Query message
In the simple query protocol, the frontend sends a Query message, which contains
an SQL command (or commands) expressed as a text string. The backend then sends
one or more response messages depending on the contents of the query command
string, and finally a ReadyForQuery message.
"""
from __future__ import print_function, division, absolute_import
from struct import pack
from ..message import BulkFrontendMessage
class Query(BulkFrontendMessage):
    """Simple-protocol Query message carrying one SQL command string."""

    message_id = b'Q'

    def __init__(self, query_string):
        BulkFrontendMessage.__init__(self)
        self._query_string = query_string

    def read_bytes(self):
        """Serialize the query as UTF-8 text followed by a NUL terminator."""
        payload = self._query_string.encode('utf-8')
        return pack('{0}sx'.format(len(payload)), payload)
| {
"content_hash": "fa89a85f539a046311a18e7539174734",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 29.074074074074073,
"alnum_prop": 0.710828025477707,
"repo_name": "uber/vertica-python",
"id": "29b9aa71267eb8aaa93e53d5587ea3dae1055863",
"size": "2548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vertica_python/vertica/messages/frontend_messages/query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "209204"
}
],
"symlink_target": ""
} |
'''
ComputationalArt.py
John Bozzella
'''
from random import choice
import numpy.random as npr
from math import *
from PIL import Image
def blockrecurse(depth, blocks, parameters):
    """
    Build a randomly nested function tuple of the given depth: each level is
    a randomly chosen block, and the innermost level pairs a block with a
    randomly chosen terminal parameter.
    """
    node = choice(blocks)
    if depth == 1:
        return (node, choice(parameters))
    return (node, blockrecurse(depth - 1, blocks, parameters))
def functioneval(function, a, b):
    """
    Recursively evaluate a nested function tuple built by blockrecurse.

    function -- a tuple whose first element names the operation and whose
                second element is a terminal parameter name or a nested tuple
    a, b     -- the current x and y values

    Returns the numeric value of the expression at (a, b).

    NOTE(review): for "prod" and "avg" only the first operand recurses; the
    second operand is always the raw y value (b).  That asymmetry looks
    intentional for this art generator -- confirm before changing.
    """
    op = function[0]
    if op == "x":
        return a
    if op == "y":
        return b
    if op == "prod(x, y)":
        return functioneval(function[1], a, b) * b
    if op == "avg(x, y)":
        return (functioneval(function[1], a, b) + b) / 2.0
    if op == "cos_pi(x)":
        return cos(pi * functioneval(function[1], a, b))
    if op == "sin_pi(x)":
        # Bug fix: this branch previously applied cos(), which made sin_pi
        # an exact duplicate of cos_pi.
        return sin(pi * functioneval(function[1], a, b))
def remap_interval(val, inp_int_start, inp_int_end, out_int_start, out_int_end):
    """
    Affinely map ``val`` from [inp_int_start, inp_int_end] onto
    [out_int_start, out_int_end]: compute val's fractional position within
    the input interval and place it at the same fraction of the output
    interval (output = input*c + b).
    """
    span_in = float(inp_int_end) - float(inp_int_start)
    span_out = float(out_int_end) - float(out_int_start)
    fraction = (val - float(inp_int_start)) / span_in
    return fraction * span_out + out_int_start
def colormap(val):
    """Map ``val`` in [-1, 1] to an integer color channel in [0, 255]."""
    # Inlined affine remap of [-1, 1] -> [0, 255] (same arithmetic as
    # remap_interval(val, -1, 1, 0, 255)), truncated to an int.
    fraction = (val + 1.0) / 2.0
    return int(fraction * 255.0)
def create(filename, width, height):
    """Render one random computational-art image and save it to ``filename``.

    filename      -- output image path (should be .png)
    width, height -- image dimensions in pixels
    """
    # Pick a random expression depth, then one random expression tree per
    # RGB channel.
    min_depth = 6
    max_depth = 10
    depth = npr.randint(min_depth, max_depth + 1)
    blocks = ["prod(x, y)" , "avg(x, y)", "cos_pi(x)", "sin_pi(x)", "x" , "y"]
    parameters = ["x", "y"]
    channel_functions = [blockrecurse(depth, blocks, parameters)
                         for _ in range(3)]

    # Evaluate every channel function at each pixel, mapping pixel
    # coordinates into [-1, 1] and results back into [0, 255].
    picture = Image.new("RGB", (width, height))
    pixels = picture.load()
    for col in range(width):
        for row in range(height):
            x = remap_interval(col, 0, width, -1, 1)
            y = remap_interval(row, 0, height, -1, 1)
            pixels[col, row] = tuple(colormap(functioneval(fn, x, y))
                                     for fn in channel_functions)
    picture.save(filename)
# Demo: render one 500x500 image when this module is executed.
create('test6.png',500,500)
"content_hash": "b26a7373fefc81c8dbd8f099f2b42505",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 111,
"avg_line_length": 33.37234042553192,
"alnum_prop": 0.6742110296461588,
"repo_name": "bozzellaj/SoftwareDesignFall15",
"id": "3bd1c15cbe0f87429e60e5dfa6d9436ba9a647ab",
"size": "3137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MP2/CompArt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69851"
}
],
"symlink_target": ""
} |
"""
alfred.helpers
~~~~~~~~~~~~~~
Model validation helpers mostly.
"""
import ipaddress
from bson.errors import InvalidId
from bson.objectid import ObjectId
def is_object_id(val):
    """Tries to determine if the given value is a Mongo ObjectId"""
    try:
        ObjectId(val)
    except InvalidId:
        return False
    return True
def is_valid_ipv4_address(ip_address):
    """Tries to determine if the given IP address is valid.

    NOTE(review): despite the name, ``ipaddress.ip_address`` also accepts
    IPv6 addresses -- confirm whether callers rely on that.
    """
    try:
        ipaddress.ip_address(ip_address)
    except ValueError:
        return False
    return True
| {
"content_hash": "2752f01757c93812f1b24136ad1e51a8",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 22.88,
"alnum_prop": 0.6416083916083916,
"repo_name": "vktr/alfred",
"id": "128ecd34e3950427f899d4ca1a2667d926bf82af",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/alfred/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "39693"
},
{
"name": "Python",
"bytes": "17778"
}
],
"symlink_target": ""
} |
"""BlameThrower modules for reading the output of version control system
"blame" (or "annotate") commands."""
__all__ = ['git', 'hg']
if __debug__:
def _check_reporeader_output(files_authors):
"""Validate the output from a reporeader `analyze` function and pass it through."""
for filename, authorlist in files_authors:
assert filename
assert authorlist and authorlist[0] is None
assert not any(author is None for author in authorlist[1:])
yield filename, authorlist
else:
# Yes, I timed it and this really is faster, even with -O.
def _check_reporeader_output(files_authors):
return files_authors
| {
"content_hash": "183e962f10d6effa19237c4376631804",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 91,
"avg_line_length": 40.23529411764706,
"alnum_prop": 0.6578947368421053,
"repo_name": "jkleint/blamethrower",
"id": "2cafca1f7bed24409be6f1547a50c278a9bac06a",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blamethrower/reporeaders/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42702"
},
{
"name": "Shell",
"bytes": "5976"
}
],
"symlink_target": ""
} |
"""
Interface to the accounts table. Data format is dicts, not objects.
"""
from anchore_engine.configuration.localconfig import ADMIN_ACCOUNT_NAME
from anchore_engine.db import Account, AccountStates, AccountTypes
from anchore_engine.db.entities.common import anchore_now
class AccountNotFoundError(Exception):
    """Raised when a lookup by account name finds no matching account."""

    def __init__(self, account_name):
        message = "User account not found. Name={}".format(account_name)
        super(AccountNotFoundError, self).__init__(message)
        # Keep the name around so callers can report which account failed.
        self.account_name = account_name
class AccountAlreadyExistsError(Exception):
    """Raised when creating an account whose name is already taken."""

    def __init__(self, account_name):
        message = "User account already exists. name={}".format(account_name)
        super(AccountAlreadyExistsError, self).__init__(message)
        self.account_name = account_name
class InvalidStateError(Exception):
    """Raised for a disallowed account state transition."""

    def __init__(self, current_state, desired_state):
        message = "Invalid account state change requested. Cannot go from state {} to state {}".format(
            current_state.value, desired_state.value
        )
        super(InvalidStateError, self).__init__(message)
        self.current_state = current_state
        self.desired_state = desired_state
class DisableAdminAccountError(Exception):
    """Raised when a state change would disable the admin account."""

    def __init__(self):
        super(DisableAdminAccountError, self).__init__(
            "Cannot disable the admin account"
        )
def add(
    account_name,
    state=AccountStates.enabled,
    account_type=AccountTypes.user,
    email=None,
    session=None,
):
    """Create a new account row and return it as a dict.

    Raises AccountAlreadyExistsError if an account with this name exists.
    """
    if session.query(Account).filter_by(name=account_name).one_or_none():
        raise AccountAlreadyExistsError(account_name)

    account = Account()
    account.name = account_name
    account.state = state
    account.type = account_type
    account.email = email
    account.created_at = anchore_now()
    account.last_updated = anchore_now()
    session.add(account)
    return account.to_dict()
def update_state(name, new_state, session=None):
    """Change an account's state and return the updated account dict.

    Allowed transitions: enabled -> disabled (non-admin accounts only),
    disabled -> enabled, disabled -> deleting.  ``deleting`` is terminal and
    is only reachable from ``disabled``.

    Raises AccountNotFoundError, InvalidStateError, or
    DisableAdminAccountError accordingly.
    """
    account = session.query(Account).filter_by(name=name).one_or_none()
    if not account:
        raise AccountNotFoundError(name)

    # Deleting is terminal, and may only be entered from the disabled state.
    already_deleting = account.state == AccountStates.deleting
    deleting_while_enabled = (
        account.state == AccountStates.enabled and new_state == AccountStates.deleting
    )
    if already_deleting or deleting_while_enabled:
        raise InvalidStateError(account.state, new_state)

    # Both Account Name and Type should be equal to "admin" for the Admin
    # Account, but check either to be safe: the admin account must stay enabled.
    looks_like_admin = (
        account.name == ADMIN_ACCOUNT_NAME or account.type == AccountTypes.admin
    )
    if looks_like_admin and new_state != AccountStates.enabled:
        raise DisableAdminAccountError()

    account.state = new_state
    return account.to_dict()
def get_all(with_state=None, session=None):
    """Return all accounts as dicts, optionally filtered by state."""
    query = session.query(Account)
    if with_state is not None:
        query = query.filter(Account.state == with_state)
    return [account.to_dict() for account in query]
def get(name, session=None):
    """Return the named account as a dict, or None if it does not exist."""
    account = session.query(Account).filter_by(name=name).one_or_none()
    return account.to_dict() if account else None
def delete(name, session=None):
    """Delete the named account; return True if found, False otherwise."""
    account = session.query(Account).filter_by(name=name).one_or_none()
    if not account:
        return False
    session.delete(account)
    return True
| {
"content_hash": "67eff1f1f639e727feb166f577dcc6a5",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 105,
"avg_line_length": 29.38888888888889,
"alnum_prop": 0.6559546313799622,
"repo_name": "anchore/anchore-engine",
"id": "a94c86ad346b1f082503360b6dc24c0a253479c7",
"size": "3703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anchore_engine/db/db_accounts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
"""
sentry.utils.http
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
import urllib
from collections import namedtuple
from urlparse import urlparse, urljoin
from ipaddr import IPNetwork
from django.conf import settings
# Parsed form of an origin matcher: (scheme, domain, path), each possibly '*'.
ParsedUriMatch = namedtuple('ParsedUriMatch', ['scheme', 'domain', 'path'])
def absolute_uri(url=None):
    """Return ``url`` anchored to the configured SENTRY_URL_PREFIX; with no
    url, return the prefix itself."""
    base = settings.SENTRY_URL_PREFIX
    if not url:
        return base
    return urljoin(base.rstrip('/') + '/', url.lstrip('/'))
def safe_urlencode(params, doseq=0):
    """
    UTF-8-safe version of urlencode.

    The stdlib urlencode prior to Python 3.x chokes on UTF-8 values
    which can't fail down to ascii.
    """
    # Snippet originally from pysolr: https://github.com/toastdriven/pysolr
    if hasattr(params, "items"):
        params = params.items()

    encoded = []
    for key, value in params:
        key = key.encode("utf-8")
        if isinstance(value, six.string_types):
            encoded.append((key, value.encode("utf-8")))
        elif isinstance(value, (list, tuple)):
            encoded.append((key, [item.encode("utf-8") for item in value]))
        else:
            encoded.append((key, six.text_type(value)))
    return urllib.urlencode(encoded, doseq)
def is_same_domain(url1, url2):
    """
    Returns true if the two urls should be treated as if they're from the same
    domain (trusted).
    """
    return urlparse(url1).netloc == urlparse(url2).netloc
def get_origins(project=None):
    """Return the frozenset of allowed origins, combining the global
    SENTRY_ALLOW_ORIGIN setting with the project's 'sentry:origins' option.
    Values are lowercased, trailing slashes stripped, empties dropped."""
    if settings.SENTRY_ALLOW_ORIGIN == '*':
        return frozenset(['*'])

    values = []
    if settings.SENTRY_ALLOW_ORIGIN:
        values = settings.SENTRY_ALLOW_ORIGIN.split(' ')

    if project:
        optval = project.get_option('sentry:origins', ['*'])
        if optval:
            values.extend(optval)

    # normalize first, then drop anything that normalized to empty
    normalized = (value.lower().rstrip('/') for value in values)
    return frozenset(value for value in normalized if value)
def parse_uri_match(value):
    """Split an origin matcher into (scheme, domain, path); missing scheme
    or path default to the '*' wildcard."""
    scheme = '*'
    if '://' in value:
        scheme, value = value.split('://', 1)

    if '/' in value:
        domain, path = value.split('/', 1)
    else:
        domain, path = value, '*'
    return ParsedUriMatch(scheme, domain, path)
def is_valid_origin(origin, project=None, allowed=None):
    """
    Given an ``origin`` which matches a base URI (e.g. http://example.com)
    determine if a valid origin is present in the project settings.

    Origins may be defined in several ways:

    - http://domain.com[:port]: exact match for base URI (must include port)
    - *: allow any domain
    - *.domain.com: matches domain.com and all subdomains, on any port
    - domain.com: matches domain.com on any port
    """
    if allowed is None:
        allowed = get_origins(project)
    # A bare wildcard in the allowed set short-circuits all other checks.
    if '*' in allowed:
        return True
    if not origin:
        return False
    # we always run a case insensitive check
    origin = origin.lower()
    # Fast check
    if origin in allowed:
        return True
    # XXX: In some cases origin might be localhost (or something similar) which causes a string value
    # of 'null' to be sent as the origin
    if origin == 'null':
        return False
    parsed = urlparse(origin)
    # There is no hostname, so the header is probably invalid
    if parsed.hostname is None:
        return False
    for value in allowed:
        bits = parse_uri_match(value)
        # scheme supports exact and any match
        if bits.scheme not in ('*', parsed.scheme):
            continue
        # domain supports exact, any, and prefix match
        if bits.domain[:2] == '*.':
            # '*.domain.com' matches any subdomain as well as the bare
            # domain itself (endswith '.domain.com' or == 'domain.com').
            if parsed.hostname.endswith(bits.domain[1:]) or parsed.hostname == bits.domain[2:]:
                return True
            continue
        elif bits.domain not in ('*', parsed.hostname, parsed.netloc):
            continue
        # path supports exact, any, and suffix match (with or without *)
        path = bits.path
        if path == '*':
            return True
        if path.endswith('*'):
            path = path[:-1]
        # After stripping a trailing '*', a prefix match suffices; an
        # exact path also passes this startswith test.
        if parsed.path.startswith(path):
            return True
    return False
def is_valid_ip(ip_address, project):
    """
    Return True unless ``ip_address`` appears in the project's
    'sentry:blacklisted_ips' option, either as an exact address or as a
    member of a blacklisted CIDR range.
    """
    blocked = project.get_option('sentry:blacklisted_ips')
    if not blocked:
        return True
    candidate = IPNetwork(ip_address)
    for entry in blocked:
        # Exact string comparison is cheapest, so run it first.
        if entry == ip_address:
            return False
        # Only entries containing '/' describe a network range; test
        # containment against those.
        if '/' in entry and candidate in IPNetwork(entry):
            return False
    return True
| {
"content_hash": "fb0c58c8bf6113dc873c54d85d7aea6d",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 101,
"avg_line_length": 27.256830601092897,
"alnum_prop": 0.6138732959101845,
"repo_name": "imankulov/sentry",
"id": "8c47c2729d4ae62a65c5f95a8d17be8c46a1f90c",
"size": "4988",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/utils/http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "156760"
},
{
"name": "HTML",
"bytes": "192342"
},
{
"name": "JavaScript",
"bytes": "443498"
},
{
"name": "Makefile",
"bytes": "4647"
},
{
"name": "Python",
"bytes": "7194349"
}
],
"symlink_target": ""
} |
#
# Pyrex - Parse tree nodes
#
import string, sys
import Code
from Errors import error, one_time_warning, InternalError
import Naming
import PyrexTypes
from PyrexTypes import py_object_type, c_int_type, error_type, \
CTypedefType, CFuncType
from Symtab import ModuleScope, LocalScope, \
StructOrUnionScope, PyClassScope, CClassScope
from Pyrex.Utils import open_new_file, replace_suffix
import Options
from DebugFlags import debug_disposal_code
class Node:
    """Base class for all parse tree nodes.

    Attributes:
        pos         (string, int, int) source file position
        is_name     true for NameNode instances
        is_literal  true for ConstNode instances
    """
    is_name = 0
    is_literal = 0

    gil_message = "Operation"

    def __init__(self, pos, **kw):
        self.pos = pos
        # Subclasses supply their attributes as keyword arguments.
        self.__dict__.update(kw)

    def gil_check(self, env):
        # Report an error if this construct appears in a nogil context.
        if env.nogil:
            self.gil_error()

    def gil_error(self, message=None):
        text = message or self.gil_message
        error(self.pos, "%s not allowed without gil" % text)

    # Parse tree processing happens in three phases, applied in order to
    # all the statements of a given scope-block:
    #
    #   (1) analyse_declarations -- make symbol table entries for all
    #       declarations at the current level, both explicit (def, cdef,
    #       etc.) and implicit (assignment to an undeclared name).
    #   (2) analyse_expressions -- determine the result types of
    #       expressions, insert coercion nodes where conversion to/from
    #       Python objects is needed, allocate temporaries.
    #   (3) generate_code -- emit C code for all declarations, statements
    #       and expressions, recursing into function bodies.

    def analyse_declarations(self, env):
        pass

    def analyse_expressions(self, env):
        raise InternalError(
            "analyse_expressions not implemented for %s" % self.__class__.__name__)

    def generate_code(self, code):
        raise InternalError(
            "generate_code not implemented for %s" % self.__class__.__name__)
class BlockNode:
    """Mixin class for nodes representing a declaration block.

    Historical helpers for emitting const definitions, interned-name
    declarations and Python string declarations used to live here; they
    are no longer part of this mixin's behavior.
    """
    pass
class StatListNode(Node):
    """A sequence of statements.

    stats  [StatNode]
    """

    def analyse_declarations(self, env):
        # Phase (1): delegate to every child statement.
        for child in self.stats:
            child.analyse_declarations(env)

    def analyse_expressions(self, env):
        # Phase (2): delegate to every child statement.
        for child in self.stats:
            child.analyse_expressions(env)

    def generate_function_definitions(self, env, code):
        # Emit definitions for any functions declared by child statements.
        for child in self.stats:
            child.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        # Record each child's source position before emitting its code.
        for child in self.stats:
            code.mark_pos(child.pos)
            child.generate_execution_code(code)
class StatNode(Node):
    """Base class for statement nodes.

    Code generation for statements runs in two subphases:

      (1) generate_function_definitions -- emit C code for the structs,
          unions, enums and functions defined in the current scope-block.
      (2) generate_execution_code -- emit C code for executable
          statements.
    """

    def generate_function_definitions(self, env, code):
        pass

    def generate_execution_code(self, code):
        raise InternalError(
            "generate_execution_code not implemented for %s" % self.__class__.__name__)
class CDefExternNode(StatNode):
    """A 'cdef extern from ...' block.

    include_file  string or None
    body          StatNode
    """

    def analyse_declarations(self, env):
        if self.include_file:
            env.add_include_file(self.include_file)
        # Analyse the body with the in_cinclude flag raised, restoring
        # the previous value afterwards.
        saved_flag = env.in_cinclude
        env.in_cinclude = 1
        self.body.analyse_declarations(env)
        env.in_cinclude = saved_flag

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        pass
class CDeclaratorNode(Node):
    """Part of a C declaration.

    During the analyse_declarations phase, analyse() returns a
    (name, type) pair, where name is the CNameDeclaratorNode of the name
    being declared and type is the type it is being declared as.

    calling_convention  string  calling convention of the
                                CFuncDeclaratorNode for which this is a
                                base
    """
    calling_convention = ""
class CNameDeclaratorNode(CDeclaratorNode):
    """Declarator that is a plain name.

    name   string          the Pyrex name being declared
    cname  string or None  C name, if explicitly specified
    """

    def analyse(self, base_type, env):
        # A bare name adds nothing to the declared type.
        return self, base_type
class CPtrDeclaratorNode(CDeclaratorNode):
    """Declarator adding one level of pointer to its base declarator.

    base  CDeclaratorNode
    """

    def analyse(self, base_type, env):
        if base_type.is_pyobject:
            error(self.pos, "Pointer base type cannot be a Python object")
        pointer_type = PyrexTypes.c_ptr_type(base_type)
        return self.base.analyse(pointer_type, env)
class CArrayDeclaratorNode(CDeclaratorNode):
    """Declarator for a C array.

    base       CDeclaratorNode
    dimension  ExprNode or None
    """

    def analyse(self, base_type, env):
        size = None
        if self.dimension:
            # The dimension must be a compile-time integer constant.
            self.dimension.analyse_const_expression(env)
            if not self.dimension.type.is_int:
                error(self.dimension.pos, "Array dimension not integer")
            size = self.dimension.result()
        # The element type must be a complete, plain C type.
        if not base_type.is_complete():
            error(self.pos,
                "Array element type '%s' is incomplete" % base_type)
        if base_type.is_pyobject:
            error(self.pos,
                "Array element cannot be a Python object")
        if base_type.is_cfunction:
            error(self.pos,
                "Array element cannot be a function")
        array_type = PyrexTypes.c_array_type(base_type, size)
        return self.base.analyse(array_type, env)
class CFuncDeclaratorNode(CDeclaratorNode):
    # base CDeclaratorNode
    # args [CArgDeclNode]
    # has_varargs boolean
    # exception_value ConstNode
    # exception_check boolean True if PyErr_Occurred check needed
    # nogil boolean Can be called without gil
    # with_gil boolean Acquire gil around function body

    def analyse(self, return_type, env):
        # Build a CFuncType from the declared argument list plus the
        # return/exception information, then hand it to the base declarator.
        func_type_args = []
        for arg_node in self.args:
            name_declarator, type = arg_node.analyse(env)
            name = name_declarator.name
            if name_declarator.cname:
                error(self.pos,
                    "Function argument cannot have C name specification")
            # Turn *[] argument into **
            if type.is_array:
                type = PyrexTypes.c_ptr_type(type.base_type)
            # Catch attempted C-style func(void) decl
            if type.is_void:
                error(arg_node.pos, "Function argument cannot be void")
            func_type_args.append(
                PyrexTypes.CFuncTypeArg(name, type, arg_node.pos))
            if arg_node.default:
                error(arg_node.pos, "C function argument cannot have default value")
        exc_val = None
        exc_check = 0
        if return_type.is_pyobject \
            and (self.exception_value or self.exception_check):
            # Python-object returns already signal errors; an explicit
            # exception clause would be redundant/contradictory.
            error(self.pos,
                "Exception clause not allowed for function returning Python object")
        else:
            if self.exception_value:
                self.exception_value.analyse_const_expression(env)
                exc_val = self.exception_value.result()
                # The sentinel value must be representable in the return type.
                if not return_type.assignable_from(self.exception_value.type):
                    error(self.exception_value.pos,
                        "Exception value incompatible with function return type")
            exc_check = self.exception_check
        if return_type.is_array:
            error(self.pos,
                "Function cannot return an array")
        if return_type.is_cfunction:
            error(self.pos,
                "Function cannot return a function")
        func_type = PyrexTypes.CFuncType(
            return_type, func_type_args, self.has_varargs,
            exception_value = exc_val, exception_check = exc_check,
            calling_convention = self.base.calling_convention,
            nogil = self.nogil, with_gil = self.with_gil)
        return self.base.analyse(func_type, env)
class CArgDeclNode(Node):
    """Item in a function declaration argument list.

    base_type      CBaseTypeNode
    declarator     CDeclaratorNode
    allow_none     tristate: True == 'or None', False == 'not None',
                   None == unspecified
    default        ExprNode or None
    default_entry  Symtab.Entry for the variable holding the default value
    is_self_arg    boolean: the "self" arg of an extension type method
    is_kw_only     boolean: a keyword-only argument
    """
    is_self_arg = 0

    def analyse(self, env):
        declared_type = self.base_type.analyse(env)
        return self.declarator.analyse(declared_type, env)
class CBaseTypeNode(Node):
    """Abstract base class for C base type nodes.

    During the analyse_declarations phase, analyse() returns the type.
    """
    pass
class CSimpleBaseTypeNode(CBaseTypeNode):
    # name string
    # module_path [string] Qualifying name components
    # is_basic_c_type boolean
    # signed boolean
    # longness integer
    # is_self_arg boolean Is self argument of C method

    def analyse(self, env):
        # Return type descriptor.
        type = None
        if self.is_basic_c_type:
            # One of the built-in C types (int, char, float, ...).
            type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
            if not type:
                error(self.pos, "Unrecognised type modifier combination")
        elif self.name == "object" and not self.module_path:
            type = py_object_type
        elif self.name is None:
            # No type written at all: the "self" of a C method defaults to
            # the enclosing extension type, anything else to object.
            if self.is_self_arg and env.is_c_class_scope:
                type = env.parent_type
            else:
                type = py_object_type
        else:
            # Reference to a user-declared type, possibly qualified by a
            # module path. NOTE(review): find_imported_module presumably
            # falls back to env itself for an empty path -- confirm.
            scope = env.find_imported_module(self.module_path, self.pos)
            if scope:
                entry = scope.find(self.name, self.pos)
                if entry and entry.is_type:
                    type = entry.type
                else:
                    error(self.pos, "'%s' is not a type identifier" % self.name)
        if type:
            return type
        else:
            # Any error above leaves type unset; degrade to error_type.
            return PyrexTypes.error_type
class CComplexBaseTypeNode(CBaseTypeNode):
    """Base type expressed as a base type plus a declarator.

    base_type   CBaseTypeNode
    declarator  CDeclaratorNode
    """

    def analyse(self, env):
        core_type = self.base_type.analyse(env)
        _, full_type = self.declarator.analyse(core_type, env)
        return full_type
class CVarDefNode(StatNode):
    # C variable definition or forward/extern function declaration.
    #
    # visibility 'private' or 'public' or 'extern'
    # base_type CBaseTypeNode
    # declarators [CDeclaratorNode]
    # in_pxd boolean
    # api boolean

    def analyse_declarations(self, env, dest_scope = None):
        # Declare every name in the declarator list with the shared base type.
        if not dest_scope:
            dest_scope = env
        base_type = self.base_type.analyse(env)
        for declarator in self.declarators:
            name_declarator, type = declarator.analyse(base_type, env)
            if not type.is_complete():
                # Incomplete types are only allowed for extern arrays.
                if not (self.visibility == 'extern' and type.is_array):
                    error(declarator.pos,
                        "Variable type '%s' is incomplete" % type)
            if self.visibility == 'extern' and type.is_pyobject:
                error(declarator.pos,
                    "Python object cannot be declared extern")
            name = name_declarator.name
            cname = name_declarator.cname
            if type.is_cfunction:
                entry = dest_scope.declare_cfunction(name, type, declarator.pos,
                    cname = cname, visibility = self.visibility, in_pxd = self.in_pxd,
                    api = self.api)
            else:
                # NOTE: '<>' is Python 2-only syntax, equivalent to '!='.
                if self.in_pxd and self.visibility <> 'extern':
                    error(self.pos,
                        "Only 'extern' C variable declaration allowed in .pxd file")
                dest_scope.declare_var(name, type, declarator.pos,
                    cname = cname, visibility = self.visibility, is_cdef = 1)

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        pass
class CStructOrUnionDefNode(StatNode):
    # name string
    # cname string or None
    # module_path [string]
    # kind "struct" or "union"
    # typedef_flag boolean
    # cplus_flag boolean
    # visibility "public" or "private"
    # in_pxd boolean
    # attributes [CVarDefNode] or None
    # entry Entry
    # bases [([name, ...], name), ...]

    def analyse_declarations(self, env):
        scope = None
        base_scopes = []
        for base in self.bases:
            # Only C++ struct scopes may serve as base types.
            base_entry = env.find_qualified_name(base, self.pos)
            if base_entry:
                if base_entry.is_type and base_entry.type.is_struct_or_union \
                    and base_entry.type.scope.is_cplus:
                    base_scopes.append(base_entry.type.scope)
                else:
                    error(self.pos, "Base type '%s' is not a C++ struct" %
                        ".".join(base[0] + [base[1]]))
        # A body (attributes is not None) gets its own member scope;
        # a forward declaration does not.
        if self.attributes is not None:
            scope = StructOrUnionScope(base_scopes = base_scopes, is_cplus = self.cplus_flag)
        if self.module_path:
            home_scope = env.find_imported_module(self.module_path, self.pos)
            if not home_scope:
                return
        else:
            home_scope = env
        def declare():
            self.entry = home_scope.declare_struct_or_union(
                self.name, self.kind, scope, self.typedef_flag, self.pos,
                self.cname, visibility = self.visibility)
            if self.attributes is not None:
                if self.in_pxd and not env.in_cinclude:
                    self.entry.defined_in_pxd = 1
        # NOTE(review): a non-typedef is declared before its attributes are
        # analysed, a typedef afterwards -- presumably so self-referencing
        # members resolve correctly; confirm against Symtab semantics.
        if not self.typedef_flag:
            declare()
        if self.attributes is not None:
            for attr in self.attributes:
                attr.analyse_declarations(env, scope)
        if self.typedef_flag:
            declare()

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        pass
class CEnumDefNode(StatNode):
    """A C enum definition.

    name          string or None
    cname         string or None
    items         [CEnumDefItemNode]
    typedef_flag  boolean
    visibility    "public" or "private"
    in_pxd        boolean
    entry         Entry
    """

    def analyse_declarations(self, env):
        self.entry = env.declare_enum(
            self.name, self.pos, cname=self.cname,
            typedef_flag=self.typedef_flag, visibility=self.visibility)
        if self.items is None:
            return
        if self.in_pxd and not env.in_cinclude:
            self.entry.defined_in_pxd = 1
        for member in self.items:
            member.analyse_declarations(env, self.entry)

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        pass
class CEnumDefItemNode(StatNode):
    """A single member of a C enum definition.

    name   string
    cname  string or None
    value  ExprNode or None
    """

    def analyse_declarations(self, env, enum_entry):
        if self.value:
            # An explicit value must be an integer (or enum) constant.
            self.value.analyse_const_expression(env)
            value_type = self.value.type
            if value_type.is_int or value_type.is_enum:
                value = self.value.result()
            else:
                error(self.pos,
                    "Type '%s' is not a valid enum value" % value_type)
                value = "<error>"
        else:
            # Without an explicit value, the member's own name is used.
            value = self.name
        entry = env.declare_const(self.name, enum_entry.type,
            value, self.pos, cname=self.cname)
        enum_entry.enum_values.append(entry)
class CTypeDefNode(StatNode):
    """A ctypedef statement.

    base_type   CBaseTypeNode
    declarator  CDeclaratorNode
    visibility  "public" or "private"
    in_pxd      boolean
    """

    def analyse_declarations(self, env):
        base = self.base_type.analyse(env)
        name_declarator, type = self.declarator.analyse(base, env)
        entry = env.declare_typedef(
            name_declarator.name, type, self.pos,
            cname=name_declarator.cname, visibility=self.visibility)
        if self.in_pxd and not env.in_cinclude:
            entry.defined_in_pxd = 1

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        pass
class FuncDefNode(StatNode, BlockNode):
    # Base class for function definition nodes.
    #
    # return_type PyrexType
    # #filename string C name of filename string const
    # entry Symtab.Entry

    def analyse_expressions(self, env):
        pass

    def need_gil_acquisition(self, lenv):
        # Overridden by subclasses whose functions may run without the GIL.
        return 0

    def generate_function_definitions(self, env, code):
        # Generate C code for header and body of function
        genv = env.global_scope()
        # Each function body is analysed/generated in its own LocalScope.
        lenv = LocalScope(name = self.entry.name, outer_scope = genv)
        lenv.return_type = self.return_type
        type = self.entry.type
        if type.is_cfunction:
            lenv.nogil = type.nogil and not type.with_gil
        code.init_labels()
        self.declare_arguments(lenv)
        self.body.analyse_declarations(lenv)
        self.body.analyse_expressions(lenv)
        # Code for nested function definitions would go here
        # if we supported them, which we probably won't.
        # ----- Function header
        code.putln("")
        self.generate_function_header(code,
            with_pymethdef = env.is_py_class_scope)
        # ----- Local variable declarations
        self.generate_argument_declarations(lenv, code)
        code.put_var_declarations(lenv.var_entries)
        init = ""
        if not self.return_type.is_void:
            # Declare the C return value variable.
            code.putln(
                "%s%s;" %
                    (self.return_type.declaration_code(
                        Naming.retval_cname),
                    init))
        code.put_var_declarations(lenv.temp_entries)
        self.generate_keyword_list(code)
        # ----- Extern library function declarations
        lenv.generate_library_function_declarations(code)
        # ----- GIL acquisition
        acquire_gil = self.need_gil_acquisition(lenv)
        if acquire_gil:
            lenv.global_scope().gil_used = 1
            code.putln("PyGILState_STATE _save = PyGILState_Ensure();")
        # ----- Fetch arguments
        self.generate_argument_parsing_code(code)
        self.generate_argument_increfs(lenv, code)
        # ----- Initialise local variables
        for entry in lenv.var_entries:
            if entry.type.is_pyobject and entry.init_to_none and entry.used:
                code.put_init_var_to_py_none(entry)
        # ----- Check and convert arguments
        self.generate_argument_conversion_code(code)
        self.generate_argument_type_tests(code)
        # ----- Function body
        self.body.generate_execution_code(code)
        # ----- Default return value
        code.putln("")
        if self.return_type.is_pyobject:
            #if self.return_type.is_extension_type:
            #    lhs = "(PyObject *)%s" % Naming.retval_cname
            #else:
            lhs = Naming.retval_cname
            code.put_init_to_py_none(lhs, self.return_type)
        else:
            val = self.return_type.default_value
            if val:
                code.putln("%s = %s;" % (Naming.retval_cname, val))
        #code.putln("goto %s;" % code.return_label)
        # ----- Error cleanup
        # Only emitted when some statement actually used the error label.
        if code.error_label in code.labels_used:
            code.put_goto(code.return_label)
            code.put_label(code.error_label)
            code.put_var_xdecrefs(lenv.temp_entries)
            default_retval = self.return_type.default_value
            err_val = self.error_value()
            exc_check = self.caller_will_check_exceptions()
            if err_val or exc_check:
                # The caller can detect the error: record a traceback and
                # return the designated error value.
                code.putln(
                    '__Pyx_AddTraceback("%s");' %
                        self.entry.qualified_name)
                val = err_val or default_retval
                if val:
                    code.putln(
                        "%s = %s;" % (
                            Naming.retval_cname,
                            val))
            else:
                # No way to report the error to the caller: emit it as an
                # unraisable exception instead.
                code.use_utility_code(unraisable_exception_utility_code)
                code.putln(
                    '__Pyx_WriteUnraisable("%s");' %
                        self.entry.qualified_name)
                #if not self.return_type.is_void:
                if default_retval:
                    code.putln(
                        "%s = %s;" % (
                            Naming.retval_cname,
                            default_retval))
                    #self.return_type.default_value))
        # ----- Return cleanup
        code.put_label(code.return_label)
        code.put_var_decrefs(lenv.var_entries, used_only = 1)
        #code.put_var_decrefs(lenv.arg_entries)
        self.generate_argument_decrefs(lenv, code)
        self.put_stararg_decrefs(code)
        if acquire_gil:
            code.putln("PyGILState_Release(_save);")
        if not self.return_type.is_void:
            code.putln("return %s;" % Naming.retval_cname)
        code.putln("}")

    def put_stararg_decrefs(self, code):
        pass

    def declare_argument(self, env, arg, readonly = 0):
        # Validate an argument's type and enter it in the local scope.
        if arg.type.is_void:
            error(arg.pos, "Invalid use of 'void'")
        elif not arg.type.is_complete() and not arg.type.is_array:
            error(arg.pos,
                "Argument type '%s' is incomplete" % arg.type)
        return env.declare_arg(arg.name, arg.type, arg.pos,
            readonly = readonly)

    def generate_argument_increfs(self, env, code):
        # Turn writable borrowed argument refs into owned refs.
        # This is necessary, because if the argument is assigned to,
        # it will be decrefed.
        for entry in env.arg_entries:
            if not entry.is_readonly:
                code.put_var_incref(entry)

    def generate_argument_decrefs(self, env, code):
        for entry in env.arg_entries:
            if not entry.is_readonly:
                code.put_var_decref(entry)

    def generate_execution_code(self, code):
        pass
class CFuncDefNode(FuncDefNode):
    # C function definition.
    #
    # visibility 'private' or 'public' or 'extern'
    # base_type CBaseTypeNode
    # declarator CDeclaratorNode
    # body StatListNode
    # api boolean
    #
    # with_gil boolean Acquire GIL around body
    # type CFuncType

    def unqualified_name(self):
        return self.entry.name

    def analyse_declarations(self, env):
        base_type = self.base_type.analyse(env)
        name_declarator, type = self.declarator.analyse(base_type, env)
        if not type.is_cfunction:
            error(self.pos,
                "Suite attached to non-function declaration")
        # Remember the actual type according to the function header
        # written here, because the type in the symbol table entry
        # may be different if we're overriding a C method inherited
        # from the base type of an extension type.
        self.type = type
        name = name_declarator.name
        cname = name_declarator.cname
        self.entry = env.declare_cfunction(
            name, type, self.pos,
            cname = cname, visibility = self.visibility,
            defining = self.body is not None,
            api = self.api)
        self.return_type = type.return_type

    def declare_arguments(self, env):
        type = self.type
        without_gil = type.nogil and not type.with_gil
        for arg in type.args:
            if not arg.name:
                error(arg.pos, "Missing argument name")
            # NOTE(review): Python-object args of a nogil function are
            # declared read-only -- presumably because assignment would
            # require refcounting without the GIL; confirm.
            self.declare_argument(env, arg,
                readonly = without_gil and arg.type.is_pyobject)

    def need_gil_acquisition(self, lenv):
        type = self.type
        with_gil = type.with_gil
        if type.nogil and not with_gil:
#            for arg in type.args:
#                if arg.type.is_pyobject:
#                    error(self.pos,
#                        "Function with Python argument cannot be declared nogil")
            if type.return_type.is_pyobject:
                error(self.pos,
                    "Function with Python return type cannot be declared nogil")
            # Writable Python locals/temps would need refcounting, which
            # is not allowed without the GIL.
            for entry in lenv.var_entries + lenv.temp_entries:
                if entry.type.is_pyobject and not entry.is_readonly:
                    error(self.pos, "Function declared nogil has Python locals or temporaries")
        return with_gil

    def generate_function_header(self, code, with_pymethdef):
        arg_decls = []
        type = self.type
        visibility = self.entry.visibility
        for arg in type.args:
            arg_decls.append(arg.declaration_code())
        if type.has_varargs:
            arg_decls.append("...")
        if not arg_decls:
            # C requires an explicit 'void' for an empty parameter list.
            arg_decls = ["void"]
        entity = type.function_header_code(self.entry.func_cname,
            string.join(arg_decls, ","))
        if visibility == 'public':
            dll_linkage = "DL_EXPORT"
        else:
            dll_linkage = None
        header = self.return_type.declaration_code(entity,
            dll_linkage = dll_linkage)
        # NOTE: '<>' is Python 2-only syntax, equivalent to '!='.
        if visibility <> 'private':
            storage_class = "%s " % Naming.extern_c_macro
        else:
            storage_class = "static "
        code.putln("%s%s {" % (
            storage_class,
            header))

    def generate_argument_declarations(self, env, code):
        # Arguments already declared in function header
        pass

    def generate_keyword_list(self, code):
        pass

    def generate_argument_parsing_code(self, code):
        pass

    def generate_argument_conversion_code(self, code):
        pass

    def generate_argument_type_tests(self, code):
        pass

    def error_value(self):
        # NULL signals an error for Python-object returns; otherwise use
        # the declared exception value (may be None).
        if self.return_type.is_pyobject:
            return "0"
        else:
            #return None
            return self.entry.type.exception_value

    def caller_will_check_exceptions(self):
        return self.entry.type.exception_check
class PyArgDeclNode(Node):
    """Argument which must be a Python object (used for the * and **
    arguments of a Python function).

    name   string
    entry  Symtab.Entry
    """
    pass
class DefNode(FuncDefNode):
# A Python function definition.
#
# name string the Python name of the function
# args [CArgDeclNode] formal arguments
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
# doc string or None
# body StatListNode
#
# The following subnode is constructed internally
# when the def statement is inside a Python class definition.
#
# assmt AssignmentNode Function construction/assignment
assmt = None
num_kwonly_args = 0
reqd_kw_flags_cname = "0"
has_star_or_kwonly_args = 0
def __init__(self, pos, **kwds):
FuncDefNode.__init__(self, pos, **kwds)
n = 0
for arg in self.args:
if arg.kw_only:
n += 1
self.num_kwonly_args = n
if self.star_arg or self.starstar_arg or n > 0:
self.has_star_or_kwonly_args = 1
def analyse_declarations(self, env):
for arg in self.args:
base_type = arg.base_type.analyse(env)
name_declarator, type = \
arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name
if name_declarator.cname:
error(self.pos,
"Python function argument cannot have C name specification")
arg.type = type.as_argument_type()
arg.hdr_type = None
arg.needs_conversion = 0
arg.needs_type_test = 0
arg.is_generic = 1
if arg.allow_none is not None and not arg.type.is_extension_type:
error(self.pos,
"Only extension type arguments can have 'or None' or 'not None'")
self.declare_pyfunction(env)
self.analyse_signature(env)
self.return_type = self.entry.signature.return_type()
# if self.has_star_or_kwonly_args:
# env.use_utility_code(get_starargs_utility_code)
def analyse_signature(self, env):
any_type_tests_needed = 0
sig = self.entry.signature
nfixed = sig.num_fixed_args()
for i in range(nfixed):
if i < len(self.args):
arg = self.args[i]
arg.is_generic = 0
if sig.is_self_arg(i):
arg.is_self_arg = 1
arg.hdr_type = arg.type = env.parent_type
arg.needs_conversion = 0
else:
arg.hdr_type = sig.fixed_arg_type(i)
if not arg.type.same_as(arg.hdr_type):
if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
arg.needs_type_test = 1
any_type_tests_needed = 1
else:
arg.needs_conversion = 1
if arg.needs_conversion:
arg.hdr_cname = Naming.arg_prefix + arg.name
else:
arg.hdr_cname = Naming.var_prefix + arg.name
else:
self.bad_signature()
return
if nfixed < len(self.args):
if not sig.has_generic_args:
self.bad_signature()
for arg in self.args:
if arg.is_generic and arg.type.is_extension_type:
arg.needs_type_test = 1
any_type_tests_needed = 1
# if any_type_tests_needed:
# env.use_utility_code(arg_type_test_utility_code)
def bad_signature(self):
sig = self.entry.signature
expected_str = "%d" % sig.num_fixed_args()
if sig.has_generic_args:
expected_str = expected_str + " or more"
name = self.name
if name.startswith("__") and name.endswith("__"):
desc = "Special method"
else:
desc = "Method"
error(self.pos,
"%s %s has wrong number of arguments "
"(%d declared, %s expected)" % (
desc, self.name, len(self.args), expected_str))
def declare_pyfunction(self, env):
#print "DefNode.declare_pyfunction:", self.name, "in", env ###
name = self.name
entry = env.declare_pyfunction(self.name, self.pos)
self.entry = entry
prefix = env.scope_prefix
entry.func_cname = \
Naming.func_prefix + prefix + name
entry.pymethdef_cname = \
Naming.pymethdef_prefix + prefix + name
if not entry.is_special:
entry.doc = self.doc
entry.doc_cname = \
Naming.funcdoc_prefix + prefix + name
def declare_arguments(self, env):
for arg in self.args:
if not arg.name:
error(arg.pos, "Missing argument name")
if arg.needs_conversion:
arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
if arg.type.is_pyobject:
arg.entry.init = "0"
arg.entry.init_to_none = 0
else:
arg.entry = self.declare_argument(env, arg)
arg.entry.used = 1
arg.entry.is_self_arg = arg.is_self_arg
if arg.hdr_type:
if arg.is_self_arg or \
(arg.type.is_extension_type and not arg.hdr_type.is_extension_type):
arg.entry.is_declared_generic = 1
self.declare_python_arg(env, self.star_arg)
self.declare_python_arg(env, self.starstar_arg)
def declare_python_arg(self, env, arg):
if arg:
entry = env.declare_var(arg.name,
PyrexTypes.py_object_type, arg.pos)
entry.used = 1
entry.init = "0"
entry.init_to_none = 0
entry.xdecref_cleanup = 1
arg.entry = entry
def analyse_expressions(self, env):
self.analyse_default_values(env)
if env.is_py_class_scope:
self.synthesize_assignment_node(env)
def analyse_default_values(self, env):
for arg in self.args:
if arg.default:
if arg.is_generic:
arg.default.analyse_types(env)
arg.default = arg.default.coerce_to(arg.type, env)
arg.default.allocate_temps(env)
arg.default_entry = env.add_default_value(arg.type)
arg.default_entry.used = 1
else:
error(arg.pos,
"This argument cannot have a default value")
arg.default = None
def synthesize_assignment_node(self, env):
import ExprNodes
self.assmt = SingleAssignmentNode(self.pos,
lhs = ExprNodes.NameNode(self.pos, name = self.name),
rhs = ExprNodes.UnboundMethodNode(self.pos,
class_cname = env.class_obj_cname,
function = ExprNodes.PyCFunctionNode(self.pos,
pymethdef_cname = self.entry.pymethdef_cname)))
self.assmt.analyse_declarations(env)
self.assmt.analyse_expressions(env)
def generate_function_header(self, code, with_pymethdef):
arg_code_list = []
sig = self.entry.signature
if sig.has_dummy_arg:
arg_code_list.append(
"PyObject *%s" % Naming.self_cname)
for arg in self.args:
if not arg.is_generic:
if arg.is_self_arg:
arg_code_list.append("PyObject *%s" % arg.hdr_cname)
else:
arg_code_list.append(
arg.hdr_type.declaration_code(arg.hdr_cname))
if sig.has_generic_args:
arg_code_list.append(
"PyObject *%s, PyObject *%s"
% (Naming.args_cname, Naming.kwds_cname))
arg_code = ", ".join(arg_code_list)
dc = self.return_type.declaration_code(self.entry.func_cname)
header = "static %s(%s)" % (dc, arg_code)
code.putln("%s; /*proto*/" % header)
if self.entry.doc:
code.putln(
'static char %s[] = "%s";' % (
self.entry.doc_cname,
self.entry.doc))
if with_pymethdef:
code.put(
"static PyMethodDef %s = " %
self.entry.pymethdef_cname)
code.put_pymethoddef(self.entry, ";")
code.putln("%s {" % header)
def generate_argument_declarations(self, env, code):
for arg in self.args:
if arg.is_generic: # or arg.needs_conversion:
code.put_var_declaration(arg.entry)
def generate_keyword_list(self, code):
if self.entry.signature.has_generic_args:
reqd_kw_flags = []
has_reqd_kwds = False
code.put(
"static char *%s[] = {" %
Naming.kwdlist_cname)
for arg in self.args:
if arg.is_generic:
code.put(
'"%s",' %
arg.name)
if arg.kw_only and not arg.default:
has_reqd_kwds = 1
flag = "1"
else:
flag = "0"
reqd_kw_flags.append(flag)
code.putln(
"0};")
if has_reqd_kwds:
flags_name = Naming.reqd_kwds_cname
self.reqd_kw_flags_cname = flags_name
code.putln(
"static char %s[] = {%s};" % (
flags_name,
",".join(reqd_kw_flags)))
    def generate_argument_parsing_code(self, code):
        # Generate the PyArg_ParseTupleAndKeywords call for the generic
        # (Python-level) arguments, if any, including default-value
        # initialisation, star-arg extraction and error cleanup.
        has_kwonly_args = self.num_kwonly_args > 0
        has_star_or_kw_args = self.star_arg is not None \
            or self.starstar_arg is not None or has_kwonly_args
        if not self.entry.signature.has_generic_args:
            # Fixed slot signature: * / ** / keyword-only args have
            # nowhere to go.
            if has_star_or_kw_args:
                error(self.pos, "This method cannot have * or keyword arguments")
        else:
            arg_addrs = []
            arg_formats = []
            default_seen = 0
            for arg in self.args:
                arg_entry = arg.entry
                if arg.is_generic:
                    if arg.default:
                        # Preload the C variable with the precomputed
                        # default; ParseTuple overwrites it if supplied.
                        code.putln(
                            "%s = %s;" % (
                                arg_entry.cname,
                                arg.default_entry.cname))
                        if not default_seen:
                            # "|" marks the start of optional args in
                            # the format string.
                            arg_formats.append("|")
                        default_seen = 1
                    elif default_seen and not arg.kw_only:
                        error(arg.pos, "Non-default argument following default argument")
                    arg_addrs.append("&" + arg_entry.cname)
                    format = arg_entry.type.parsetuple_format
                    if format:
                        arg_formats.append(format)
                    else:
                        error(arg.pos,
                            "Cannot convert Python object argument to type '%s'"
                                % arg.type)
            error_return_code = "return %s;" % self.error_value()
            argformat = '"%s"' % string.join(arg_formats, "")
            if has_star_or_kw_args:
                # Strip extra positional/keyword args into the * / **
                # targets before the ParseTuple call sees them.
                self.generate_stararg_getting_code(code)
            pt_arglist = [Naming.args_cname, Naming.kwds_cname, argformat,
                    Naming.kwdlist_cname] + arg_addrs
            pt_argstring = string.join(pt_arglist, ", ")
            code.put(
                'if (!PyArg_ParseTupleAndKeywords(%s)) ' %
                    pt_argstring)
            if has_star_or_kw_args:
                # On failure, drop the references created by the
                # star-arg extraction before returning the error value.
                code.putln("{")
                code.put_xdecref(Naming.args_cname, py_object_type)
                code.put_xdecref(Naming.kwds_cname, py_object_type)
                self.generate_arg_xdecref(self.star_arg, code)
                self.generate_arg_xdecref(self.starstar_arg, code)
                code.putln(error_return_code)
                code.putln("}")
            else:
                code.putln(error_return_code)
def put_stararg_decrefs(self, code):
if self.has_star_or_kwonly_args:
code.put_xdecref(Naming.args_cname, py_object_type)
code.put_xdecref(Naming.kwds_cname, py_object_type)
def generate_arg_xdecref(self, arg, code):
if arg:
code.put_var_xdecref(arg.entry)
def arg_address(self, arg):
if arg:
return "&%s" % arg.entry.cname
else:
return 0
    def generate_stararg_getting_code(self, code):
        # Emit the __Pyx_GetStarArgs call that moves excess positional
        # args into the * target, extra keyword args into the ** target,
        # and verifies that all required keyword-only args are present.
        num_kwonly = self.num_kwonly_args
        # Number of generic positional args expected by ParseTuple.
        nargs = len(self.args) - num_kwonly - self.entry.signature.num_fixed_args()
        star_arg_addr = self.arg_address(self.star_arg)
        starstar_arg_addr = self.arg_address(self.starstar_arg)
        code.use_utility_code(get_starargs_utility_code)
        code.putln(
            "if (__Pyx_GetStarArgs(&%s, &%s, %s, %s, %s, %s, %s) < 0) return %s;" % (
                Naming.args_cname,
                Naming.kwds_cname,
                Naming.kwdlist_cname,
                nargs,
                star_arg_addr,
                starstar_arg_addr,
                self.reqd_kw_flags_cname,
                self.error_value()))
def generate_argument_conversion_code(self, code):
# Generate code to convert arguments from
# signature type to declared type, if needed.
for arg in self.args:
if arg.needs_conversion:
self.generate_arg_conversion(arg, code)
def generate_arg_conversion(self, arg, code):
# Generate conversion code for one argument.
old_type = arg.hdr_type
new_type = arg.type
if old_type.is_pyobject:
self.generate_arg_conversion_from_pyobject(arg, code)
elif new_type.is_pyobject:
self.generate_arg_conversion_to_pyobject(arg, code)
else:
if new_type.assignable_from(old_type):
code.putln(
"%s = %s;" % (arg.entry.cname, arg.hdr_cname))
else:
error(arg.pos,
"Cannot convert argument from '%s' to '%s'" %
(old_type, new_type))
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
func = new_type.from_py_function
if func:
code.putln("%s = %s(%s); if (PyErr_Occurred()) %s" % (
arg.entry.cname,
func,
arg.hdr_cname,
code.error_goto(arg.pos)))
else:
error(arg.pos,
"Cannot convert Python object argument to type '%s'"
% new_type)
def generate_arg_conversion_to_pyobject(self, arg, code):
old_type = arg.hdr_type
func = old_type.to_py_function
if func:
code.putln("%s = %s(%s); if (!%s) %s" % (
arg.entry.cname,
func,
arg.hdr_cname,
arg.entry.cname,
code.error_goto(arg.pos)))
else:
error(arg.pos,
"Cannot convert argument of type '%s' to Python object"
% old_type)
def generate_argument_type_tests(self, code):
# Generate type tests for args whose signature
# type is PyObject * and whose declared type is
# a subtype thereof.
for arg in self.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
    def generate_arg_type_test(self, arg, code):
        # Generate the runtime type test for one argument via
        # __Pyx_ArgTypeTest(obj, type, allow_none, name).
        if arg.type.typeobj_is_available():
            typeptr_cname = arg.type.typeptr_cname
            arg_code = "((PyObject *)%s)" % arg.entry.cname
            code.use_utility_code(arg_type_test_utility_code)
            code.putln(
                'if (!__Pyx_ArgTypeTest(%s, %s, %d, "%s")) %s' % (
                    arg_code,
                    typeptr_cname,
                    # allow_none is tri-state: None means "unspecified"
                    # and, like True, permits None here ("<>" is the
                    # Python 2 spelling of "!=").
                    arg.allow_none <> False,
                    arg.name,
                    code.error_goto(arg.pos)))
            if arg.allow_none is None:
                # Unspecified None-ness: warn once that the default
                # will flip in a future release.
                one_time_warning(arg.pos, 'or_none',
                    "'not None' will become the default in a future version of Pyrex. "
                    "Use 'or None' to allow passing None.")
        else:
            error(arg.pos, "Cannot test type of extern C class "
                "without type object name specification")
    def generate_execution_code(self, code):
        # Evaluate and store argument default values.  This runs at
        # module/class body execution time, not at call time: each
        # default is computed once and kept in its default_entry slot.
        for arg in self.args:
            default = arg.default
            if default:
                default.generate_evaluation_code(code)
                # The stored default owns its reference for the
                # lifetime of the function.
                default.make_owned_reference(code)
                code.putln(
                    "%s = %s;" % (
                        arg.default_entry.cname,
                        default.result_as(arg.default_entry.type)))
                default.generate_post_assignment_code(code)
        # For Python class methods, create and store function object
        # via the assignment synthesized earlier.
        if self.assmt:
            self.assmt.generate_execution_code(code)
def error_value(self):
return self.entry.signature.error_value
def caller_will_check_exceptions(self):
return 1
class PyClassDefNode(StatNode, BlockNode):
    # A Python class definition.
    #
    #  name     string          Name of the class
    #  doc      string or None
    #  body     StatNode        Attribute definition code
    #  entry    Symtab.Entry
    #  scope    PyClassScope
    #
    #  The following subnodes are constructed internally:
    #
    #  dict     DictNode   Class dictionary
    #  classobj ClassNode  Class object
    #  target   NameNode   Variable to assign class object to

    def __init__(self, pos, name, bases, doc, body):
        StatNode.__init__(self, pos)
        self.name = name
        self.doc = doc
        self.body = body
        # Imported here (not at module level) to break the circular
        # dependency between Nodes and ExprNodes.
        import ExprNodes
        self.dict = ExprNodes.DictNode(pos, key_value_pairs = [])
        if self.doc:
            doc_node = ExprNodes.StringNode(pos, value = self.doc)
        else:
            doc_node = None
        self.classobj = ExprNodes.ClassNode(pos,
            name = ExprNodes.StringNode(pos, value = name),
            bases = bases, dict = self.dict, doc = doc_node)
        self.target = ExprNodes.NameNode(pos, name = name)

    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        # The class body is analysed in its own PyClassScope whose
        # C-level names point at the dict/classobj results computed in
        # the enclosing scope.
        self.dict.analyse_expressions(env)
        self.classobj.analyse_expressions(env)
        genv = env.global_scope()
        cenv = PyClassScope(name = self.name, outer_scope = genv)
        cenv.class_dict_cname = self.dict.result()
        cenv.class_obj_cname = self.classobj.result()
        self.scope = cenv
        self.body.analyse_declarations(cenv)
        self.body.analyse_expressions(cenv)
        self.target.analyse_target_expression(env, self.classobj)
        self.dict.release_temp(env)
        #self.classobj.release_temp(env)
        #self.target.release_target_temp(env)

    def generate_function_definitions(self, env, code):
        # Methods defined in the class body become module-level C
        # functions generated within the class scope.
        self.body.generate_function_definitions(
            self.scope, code)

    def generate_execution_code(self, code):
        # Build the dict, run the body to populate it, create the
        # class object, then bind it to the target name.
        self.dict.generate_evaluation_code(code)
        self.classobj.generate_evaluation_code(code)
        self.body.generate_execution_code(code)
        self.target.generate_assignment_code(self.classobj, code)
        self.dict.generate_disposal_code(code)
class CClassDefNode(StatNode):
    # An extension type definition.
    #
    #  visibility         'private' or 'public' or 'extern'
    #  typedef_flag       boolean
    #  api                boolean
    #  module_name        string or None    For import of extern type objects
    #  class_name         string            Unqualified name of class
    #  as_name            string or None    Name to declare as in this scope
    #  base_class_module  string or None    Module containing the base class
    #  base_class_name    string or None    Name of the base class
    #  options            CClassOptions:
    #  objstruct_name     string or None    Specified C name of object struct
    #  typeobj_name       string or None    Specified C name of type object
    #  no_gc              boolean           Suppress GC support
    #  in_pxd             boolean           Is in a .pxd file
    #  doc                string or None
    #  body               StatNode or None
    #  entry              Symtab.Entry
    #  base_type          PyExtensionType or None

    entry = None

    def analyse_declarations(self, env):
        # Resolve the base class (if any), declare the extension type
        # in its home scope, and analyse the class body declarations.
        if env.in_cinclude and not self.options.objstruct_cname:
            error(self.pos, "Object struct name specification required for "
                "C class defined in 'extern from' block")
        self.base_type = None
        has_body = self.body is not None
        if self.base_class_name:
            if self.base_class_module:
                base_class_scope = env.find_module(self.base_class_module, self.pos)
            else:
                base_class_scope = env
            if base_class_scope:
                base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
                if base_class_entry:
                    if not base_class_entry.is_type:
                        error(self.pos, "'%s' is not a type name" % self.base_class_name)
                    elif not base_class_entry.type.is_extension_type:
                        error(self.pos, "'%s' is not an extension type" % self.base_class_name)
                    elif has_body and base_class_entry.visibility != 'extern' and not base_class_entry.type.is_defined():
                        error(self.pos, "Base class '%s' is incomplete" % self.base_class_name)
                    else:
                        self.base_type = base_class_entry.type
        if self.module_name and self.visibility != 'extern':
            # The type object lives in another (already compiled)
            # module; look up its scope instead of declaring locally.
            module_path = self.module_name.split(".")
            home_scope = env.find_imported_module(module_path, self.pos)
            if not home_scope:
                return
        else:
            home_scope = env
        self.entry = home_scope.declare_c_class(
            name = self.class_name,
            pos = self.pos,
            defining = has_body and self.in_pxd,
            implementing = has_body and not self.in_pxd,
            module_name = self.module_name,
            base_type = self.base_type,
            visibility = self.visibility,
            typedef_flag = self.typedef_flag,
            api = self.api,
            options = self.options)
        if home_scope is not env and self.visibility == 'extern':
            # Fixed: was a bare `pos` (undefined name -> NameError).
            # NOTE(review): home_scope can only differ from env when
            # visibility != 'extern' (see above), so this branch looks
            # unreachable as written -- confirm the intended condition.
            env.add_imported_entry(self.class_name, self.entry, self.pos)
        scope = self.entry.type.scope
        if self.doc:
            scope.doc = self.doc
        if has_body:
            self.body.analyse_declarations(scope)
            if self.in_pxd:
                scope.defined = 1
            else:
                scope.implemented = 1
        env.allocate_vtable_names(self.entry)

    def analyse_expressions(self, env):
        if self.body:
            self.body.analyse_expressions(env)

    def generate_function_definitions(self, env, code):
        if self.entry and self.body:
            self.body.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        # This is needed to generate evaluation code for
        # default values of method arguments.
        if self.body:
            self.body.generate_execution_code(code)
class PropertyNode(StatNode):
    # Definition of a property in an extension type.
    #
    #  name  string
    #  doc   string or None    Doc string
    #  body  StatListNode

    def analyse_declarations(self, env):
        # Register the property with the enclosing class scope; the
        # getter/setter bodies are declared inside the property's own
        # sub-scope.
        entry = env.declare_property(self.name, self.doc, self.pos)
        if not entry:
            return
        self.body.analyse_declarations(entry.scope)

    def analyse_expressions(self, env):
        self.body.analyse_expressions(env)

    def generate_function_definitions(self, env, code):
        self.body.generate_function_definitions(env, code)

    def generate_execution_code(self, code):
        # Properties produce no module-level execution code.
        pass
class GlobalNode(StatNode):
    # "global" statement.
    #
    #  names  [string]

    def analyse_declarations(self, env):
        # Mark each listed name as belonging to the module scope.
        for global_name in self.names:
            env.declare_global(global_name, self.pos)

    def analyse_expressions(self, env):
        pass  # purely declarative

    def generate_execution_code(self, code):
        pass  # no runtime effect
class ExprStatNode(StatNode):
    # An expression evaluated for its side effects only.
    #
    #  expr  ExprNode

    def analyse_expressions(self, env):
        self.expr.analyse_expressions(env)
        self.expr.release_temp(env)

    def generate_execution_code(self, code):
        self.expr.generate_evaluation_code(code)
        # A non-temporary result is consumed by nobody, so emit it as a
        # bare C expression statement to preserve any side effects.
        if not self.expr.is_temp and self.expr.result():
            code.putln("%s;" % self.expr.result())
        self.expr.generate_disposal_code(code)
class AssignmentNode(StatNode):
    # Abstract base class for assignment nodes.
    #
    # Both the analysis and code-generation phases are split into two
    # sub-phases each, so that in a parallel assignment every right
    # hand side can be evaluated before any left hand side is
    # assigned to.

    def analyse_expressions(self, env):
        self.analyse_types(env)
        self.allocate_rhs_temps(env)
        self.allocate_lhs_temps(env)

    def generate_execution_code(self, code):
        self.generate_rhs_evaluation_code(code)
        self.generate_assignment_code(code)
class SingleAssignmentNode(AssignmentNode):
    # The simplest assignment:
    #
    #    a = b
    #
    #  lhs  ExprNode  Left hand side
    #  rhs  ExprNode  Right hand side

    def analyse_declarations(self, env):
        self.lhs.analyse_target_declaration(env)

    def analyse_types(self, env, use_temp = 0):
        # Analyse the RHS first, then coerce it to the target's type;
        # optionally force it into a temporary (parallel assignment).
        self.rhs.analyse_types(env)
        self.lhs.analyse_target_types(env)
        self.lhs.gil_assignment_check(env)
        coerced_rhs = self.rhs.coerce_to(self.lhs.type, env)
        if use_temp:
            coerced_rhs = coerced_rhs.coerce_to_temp(env)
        self.rhs = coerced_rhs

    def allocate_rhs_temps(self, env):
        self.rhs.allocate_temps(env)

    def allocate_lhs_temps(self, env):
        self.lhs.allocate_target_temps(env, self.rhs)

    def generate_rhs_evaluation_code(self, code):
        self.rhs.generate_evaluation_code(code)

    def generate_assignment_code(self, code):
        self.lhs.generate_assignment_code(self.rhs, code)
class AugmentedAssignmentNode(SingleAssignmentNode):
    # An in-place operation:
    #
    #    a op= b
    #
    #  lhs       ExprNode      Left hand side
    #  operator  string
    #  rhs       ExprNode      Right hand side

    def analyse_types(self, env):
        op = self.operator
        self.rhs.analyse_types(env)
        self.lhs.analyse_inplace_types(env)
        type = self.lhs.type
        if type.is_pyobject:
            # Python-level in-place op: coerce RHS to a Python object.
            type = py_object_type
        else:
            if type.is_ptr and (op == '+=' or op == '-='):
                # Pointer arithmetic: the RHS becomes a C int offset.
                type = c_int_type
            elif op == "**=":
                error(self.pos, "**= operator not supported for non-Python types")
                return
        self.rhs = self.rhs.coerce_to(type, env)

    def allocate_lhs_temps(self, env):
        self.lhs.allocate_inplace_target_temps(env, self.rhs)

    def generate_assignment_code(self, code):
        self.lhs.generate_inplace_assignment_code(self.operator, self.rhs, code)
class CascadedAssignmentNode(AssignmentNode):
    # An assignment with multiple left hand sides:
    #
    #    a = b = c
    #
    #  lhs_list  [ExprNode]  Left hand sides
    #  rhs       ExprNode    Right hand sides
    #
    #  Used internally:
    #
    #  coerced_rhs_list  [ExprNode]  RHS coerced to type of each LHS

    def analyse_declarations(self, env):
        for lhs in self.lhs_list:
            lhs.analyse_target_declaration(env)

    def analyse_types(self, env, use_temp = 0):
        self.rhs.analyse_types(env)
        if use_temp:
            self.rhs = self.rhs.coerce_to_temp(env)
        else:
            # A simple (re-evaluable) RHS lets each LHS share it via
            # CloneNode without recomputation.
            self.rhs = self.rhs.coerce_to_simple(env)
        from ExprNodes import CloneNode
        self.coerced_rhs_list = []
        for lhs in self.lhs_list:
            lhs.analyse_target_types(env)
            lhs.gil_assignment_check(env)
            rhs = CloneNode(self.rhs)
            rhs = rhs.coerce_to(lhs.type, env)
            self.coerced_rhs_list.append(rhs)

    def allocate_rhs_temps(self, env):
        self.rhs.allocate_temps(env)

    def allocate_lhs_temps(self, env):
        # Each clone's temps live only for its own assignment; the
        # shared RHS temp is released after all targets are handled.
        for lhs, rhs in zip(self.lhs_list, self.coerced_rhs_list):
            rhs.allocate_temps(env)
            lhs.allocate_target_temps(env, rhs)
        self.rhs.release_temp(env)

    def generate_rhs_evaluation_code(self, code):
        self.rhs.generate_evaluation_code(code)

    def generate_assignment_code(self, code):
        for i in range(len(self.lhs_list)):
            lhs = self.lhs_list[i]
            rhs = self.coerced_rhs_list[i]
            rhs.generate_evaluation_code(code)
            lhs.generate_assignment_code(rhs, code)
            # Assignment has disposed of the cloned RHS
        self.rhs.generate_disposal_code(code)
class ParallelAssignmentNode(AssignmentNode):
    # A combined packing/unpacking assignment:
    #
    #    a, b, c = d, e, f
    #
    # The parser has already rewritten this into the constituent
    # single assignments
    #
    #    a = d ; b = e ; c = f
    #
    # but every right hand side must be evaluated before any left
    # hand side is assigned to.
    #
    #  stats  [AssignmentNode]  The constituent assignments

    def analyse_declarations(self, env):
        for assignment in self.stats:
            assignment.analyse_declarations(env)

    def analyse_expressions(self, env):
        # Pass 1: type-analyse and force each RHS into its own temp.
        for assignment in self.stats:
            assignment.analyse_types(env, use_temp = 1)
            assignment.allocate_rhs_temps(env)
        # Pass 2: allocate the LHS targets.
        for assignment in self.stats:
            assignment.allocate_lhs_temps(env)

    def generate_execution_code(self, code):
        # Evaluate all RHSs first, then perform all assignments.
        for assignment in self.stats:
            assignment.generate_rhs_evaluation_code(code)
        for assignment in self.stats:
            assignment.generate_assignment_code(code)
class PrintStatNode(StatNode):
    # print statement
    #
    #  args             [ExprNode]
    #  ends_with_comma  boolean

    gil_message = "Python print statement"

    def analyse_expressions(self, env):
        # Each argument is coerced to a Python object; the coerced node
        # replaces the original in self.args.
        for i, arg in enumerate(self.args):
            arg.analyse_types(env)
            arg = arg.coerce_to_pyobject(env)
            arg.allocate_temps(env)
            arg.release_temp(env)
            self.args[i] = arg
        self.gil_check(env)

    def generate_execution_code(self, code):
        for arg in self.args:
            arg.generate_evaluation_code(code)
            code.use_utility_code(printing_utility_code)
            code.putln(
                "if (__Pyx_PrintItem(%s) < 0) %s" % (
                    arg.py_result(),
                    code.error_goto(self.pos)))
            arg.generate_disposal_code(code)
        if not self.ends_with_comma:
            # A trailing comma suppresses the newline.
            code.use_utility_code(printing_utility_code)
            code.putln(
                "if (__Pyx_PrintNewline() < 0) %s" %
                    code.error_goto(self.pos))
class DelStatNode(StatNode):
    # del statement
    #
    #  args  [ExprNode]

    def analyse_declarations(self, env):
        for arg in self.args:
            arg.analyse_target_declaration(env)

    def analyse_expressions(self, env):
        for arg in self.args:
            arg.analyse_target_expression(env, None)
            type = arg.type
            # del works on Python objects, or on pointers to C++
            # structs/unions (where it maps to C++ delete).
            if not (type.is_pyobject
                    or (type.is_ptr and type.base_type.is_struct_or_union
                        and type.base_type.scope.is_cplus)):
                error(arg.pos, "'del' can only be applied to Python object or pointer to C++ type")
            if type.is_pyobject:
                self.gil_check(env)
    gil_message = "Deleting Python object"

    def generate_execution_code(self, code):
        for arg in self.args:
            if arg.type.is_pyobject:
                arg.generate_deletion_code(code)
            else:
                # C++ pointer: evaluate it and emit a delete statement.
                arg.generate_evaluation_code(code)
                code.putln("delete %s;" % arg.result())
                arg.generate_disposal_code(code)
class PassStatNode(StatNode):
    # pass statement: a deliberate no-op in every compiler phase.

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        pass
class BreakStatNode(StatNode):
    # break statement

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        # A break label only exists while generating a loop body.
        if code.break_label:
            code.put_goto(code.break_label)
        else:
            error(self.pos, "break statement not inside loop")
class ContinueStatNode(StatNode):
    # continue statement

    def analyse_expressions(self, env):
        pass

    def generate_execution_code(self, code):
        # "continue" may not jump out of the try part of try...finally,
        # and needs an enclosing loop to jump back to.
        if code.in_try_finally:
            error(self.pos, "continue statement inside try of try...finally")
            return
        if not code.continue_label:
            error(self.pos, "continue statement not inside loop")
            return
        code.put_goto(code.continue_label)
class ReturnStatNode(StatNode):
    # return statement
    #
    #  value         ExprNode or None
    #  return_type   PyrexType
    #  temps_in_use  [Entry]            Temps in use at time of return

    def analyse_expressions(self, env):
        return_type = env.return_type
        self.return_type = return_type
        # Snapshot live temps so they can be decref'd on the way out.
        self.temps_in_use = env.temps_in_use()
        if not return_type:
            error(self.pos, "Return not inside a function body")
            return
        if self.value:
            self.value.analyse_types(env)
            if return_type.is_void or return_type.is_returncode:
                error(self.value.pos,
                    "Return with value in void function")
            else:
                self.value = self.value.coerce_to(env.return_type, env)
                self.value.allocate_temps(env)
                self.value.release_temp(env)
        else:
            # A bare return is only legal when the function can supply
            # an implicit value (None / error code / nothing).
            if (not return_type.is_void
                and not return_type.is_pyobject
                and not return_type.is_returncode):
                    error(self.pos, "Return value required")
        if return_type.is_pyobject:
            self.gil_check(env)
    gil_message = "Returning Python object"

    def generate_execution_code(self, code):
        if not self.return_type:
            # error reported earlier
            return
        if self.value:
            self.value.generate_evaluation_code(code)
            # The retval slot owns the reference it returns.
            self.value.make_owned_reference(code)
            code.putln(
                "%s = %s;" % (
                    Naming.retval_cname,
                    self.value.result_as(self.return_type)))
            self.value.generate_post_assignment_code(code)
        else:
            if self.return_type.is_pyobject:
                code.put_init_to_py_none(Naming.retval_cname, self.return_type)
            elif self.return_type.is_returncode:
                code.putln(
                    "%s = %s;" % (
                        Naming.retval_cname,
                        self.return_type.default_value))
        # Release all temps still live at this point before leaving.
        for entry in self.temps_in_use:
            code.put_var_decref_clear(entry)
        code.put_goto(code.return_label)
class RaiseStatNode(StatNode):
    # raise statement
    #
    #  exc_type   ExprNode or None
    #  exc_value  ExprNode or None
    #  exc_tb     ExprNode or None

    def analyse_expressions(self, env):
        # Coerce each supplied component to a Python object; all temps
        # are allocated before any is released so the three results
        # stay live simultaneously for the __Pyx_Raise call.
        if self.exc_type:
            self.exc_type.analyse_types(env)
            self.exc_type = self.exc_type.coerce_to_pyobject(env)
            self.exc_type.allocate_temps(env)
        if self.exc_value:
            self.exc_value.analyse_types(env)
            self.exc_value = self.exc_value.coerce_to_pyobject(env)
            self.exc_value.allocate_temps(env)
        if self.exc_tb:
            self.exc_tb.analyse_types(env)
            self.exc_tb = self.exc_tb.coerce_to_pyobject(env)
            self.exc_tb.allocate_temps(env)
        if self.exc_type:
            self.exc_type.release_temp(env)
        if self.exc_value:
            self.exc_value.release_temp(env)
        if self.exc_tb:
            self.exc_tb.release_temp(env)
        self.gil_check(env)
    gil_message = "Raising exception"

    def generate_execution_code(self, code):
        # Evaluate the supplied components ("0", i.e. NULL, for the
        # missing ones), call the __Pyx_Raise helper, dispose of the
        # components and jump to the error label.
        if self.exc_type:
            self.exc_type.generate_evaluation_code(code)
            type_code = self.exc_type.py_result()
        else:
            # Fixed for consistency: was the int 0; the string "0"
            # matches value_code/tb_code below (same C output via %s).
            type_code = "0"
        if self.exc_value:
            self.exc_value.generate_evaluation_code(code)
            value_code = self.exc_value.py_result()
        else:
            value_code = "0"
        if self.exc_tb:
            self.exc_tb.generate_evaluation_code(code)
            tb_code = self.exc_tb.py_result()
        else:
            tb_code = "0"
        code.use_utility_code(raise_utility_code)
        code.putln(
            "__Pyx_Raise(%s, %s, %s);" % (
                type_code,
                value_code,
                tb_code))
        if self.exc_type:
            self.exc_type.generate_disposal_code(code)
        if self.exc_value:
            self.exc_value.generate_disposal_code(code)
        if self.exc_tb:
            self.exc_tb.generate_disposal_code(code)
        code.putln(
            code.error_goto(self.pos))
class ReraiseStatNode(StatNode):
    # A bare "raise" inside an except clause.

    gil_message = "Raising exception"

    def analyse_expressions(self, env):
        env.reraise_used = 1
        self.gil_check(env)

    def generate_execution_code(self, code):
        # The enclosing except clause stashes the live exception in
        # code.exc_vars; restore it and jump to the error label.
        exc_vars = code.exc_vars
        if not exc_vars:
            error(self.pos, "Reraise not inside except clause")
            return
        var_tuple = tuple(exc_vars)
        code.putln("PyErr_Restore(%s, %s, %s);" % var_tuple)
        # PyErr_Restore steals the references; NULL the locals.
        code.putln("%s = %s = %s = 0;" % var_tuple)
        code.putln(code.error_goto(self.pos))
class AssertStatNode(StatNode):
    # assert statement
    #
    #  cond   ExprNode
    #  value  ExprNode or None

    gil_message = "Raising exception"

    def analyse_expressions(self, env):
        self.cond = self.cond.analyse_boolean_expression(env)
        if self.value:
            self.value.analyse_types(env)
            self.value = self.value.coerce_to_pyobject(env)
            self.value.allocate_temps(env)
        self.cond.release_temp(env)
        if self.value:
            self.value.release_temp(env)
        self.gil_check(env)

    def generate_execution_code(self, code):
        # Assertions are compiled out entirely when the C macro
        # PYREX_WITHOUT_ASSERTIONS is defined.
        code.putln("#ifndef PYREX_WITHOUT_ASSERTIONS")
        self.cond.generate_evaluation_code(code)
        code.putln(
            "if (!%s) {" %
                self.cond.result())
        if self.value:
            self.value.generate_evaluation_code(code)
            code.putln(
                "PyErr_SetObject(PyExc_AssertionError, %s);" %
                    self.value.py_result())
        else:
            code.putln(
                "PyErr_SetNone(PyExc_AssertionError);")
        code.putln(
            code.error_goto(self.pos))
        code.putln(
            "}")
        self.cond.generate_disposal_code(code)
        # No disposal for self.value: the exception path never returns.
        code.putln("#endif")
class IfStatNode(StatNode):
    # if statement
    #
    #  if_clauses   [IfClauseNode]
    #  else_clause  StatNode or None

    def analyse_declarations(self, env):
        for clause in self.if_clauses:
            clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        for clause in self.if_clauses:
            clause.analyse_expressions(env)
        if self.else_clause:
            self.else_clause.analyse_expressions(env)

    def generate_execution_code(self, code):
        # Every taken clause jumps to end_label after its body, which
        # yields the usual if / else-if chain semantics.
        end_label = code.new_label()
        for clause in self.if_clauses:
            clause.generate_execution_code(code, end_label)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(end_label)
class IfClauseNode(Node):
    # An if or elif clause in an if statement.
    #
    #  condition  ExprNode
    #  body       StatNode

    def analyse_declarations(self, env):
        self.condition.analyse_declarations(env)
        self.body.analyse_declarations(env)

    def analyse_expressions(self, env):
        analysed = self.condition.analyse_temp_boolean_expression(env)
        analysed.release_temp(env)
        self.condition = analysed
        self.body.analyse_expressions(env)

    def generate_execution_code(self, code, end_label):
        self.condition.generate_evaluation_code(code)
        code.putln(
            "if (%s) {" %
                self.condition.result())
        self.body.generate_execution_code(code)
        # Skip the remaining clauses once this one has run.
        code.put_goto(end_label)
        code.putln("}")
class WhileStatNode(StatNode):
    # while statement
    #
    #  condition    ExprNode
    #  body         StatNode
    #  else_clause  StatNode

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        self.condition = \
            self.condition.analyse_temp_boolean_expression(env)
        self.condition.release_temp(env)
        self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause.analyse_expressions(env)

    def generate_execution_code(self, code):
        # Emitted as "while (1) { if (!cond) break; body; continue: }"
        # so the condition can be an arbitrary statement sequence.
        old_loop_labels = code.new_loop_labels()
        code.putln(
            "while (1) {")
        self.condition.generate_evaluation_code(code)
        code.putln(
            "if (!%s) break;" %
                self.condition.result())
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        code.putln("}")
        # The else clause runs on normal termination only; "break"
        # jumps past it to break_label.
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)
class ForInStatNode(StatNode):
    # for statement
    #
    #  target       ExprNode
    #  iterator     IteratorNode
    #  body         StatNode
    #  else_clause  StatNode
    #  item         NextNode  used internally

    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        import ExprNodes
        self.iterator.analyse_expressions(env)
        self.target.analyse_target_types(env)
        # self.item fetches the next value from the iterator and is
        # coerced to the loop target's type.
        self.item = ExprNodes.NextNode(self.iterator, env)
        self.item = self.item.coerce_to(self.target.type, env)
        self.item.allocate_temps(env)
        self.target.allocate_target_temps(env, self.item)
        #self.item.release_temp(env)
        #self.target.release_target_temp(env)
        self.body.analyse_expressions(env)
        if self.else_clause:
            self.else_clause.analyse_expressions(env)
        # The iterator temp stays live across the whole loop.
        self.iterator.release_temp(env)

    def generate_execution_code(self, code):
        old_loop_labels = code.new_loop_labels()
        self.iterator.generate_evaluation_code(code)
        # The NextNode breaks out of the for(;;) on StopIteration.
        code.putln(
            "for (;;) {")
        self.item.generate_evaluation_code(code)
        self.target.generate_assignment_code(self.item, code)
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        code.putln(
            "}")
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        # The else clause runs only on normal exhaustion; "break"
        # jumps past it.
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)
        self.iterator.generate_disposal_code(code)
class IntegerForStatNode(StatNode):
    # for expr rel name rel expr
    #
    #  bound1       ExprNode
    #  relation1    string
    #  target       NameNode
    #  relation2    string
    #  bound2       ExprNode
    #  body         StatNode
    #  else_clause  StatNode or None
    #
    #  Used internally:
    #
    #  is_py_target       bool
    #  loopvar_name       string
    #  py_loopvar_node    PyTempNode or None

    def analyse_declarations(self, env):
        self.target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)

    def analyse_expressions(self, env):
        import ExprNodes
        self.target.analyse_target_types(env)
        self.bound1.analyse_types(env)
        self.bound2.analyse_types(env)
        self.bound1 = self.bound1.coerce_to_integer(env)
        self.bound2 = self.bound2.coerce_to_integer(env)
        if not (self.bound2.is_name or self.bound2.is_literal):
            # The upper bound is re-evaluated every iteration in the
            # generated for(;;), so force complex bounds into a temp.
            self.bound2 = self.bound2.coerce_to_temp(env)
        target_type = self.target.type
        if not (target_type.is_pyobject or target_type.is_int):
            error(self.target.pos,
                "Integer for-loop variable must be of type int or Python object")
        #if not (target_type.is_pyobject
        #        or target_type.assignable_from(PyrexTypes.c_int_type)):
        #    error(self.target.pos,
        #        "Cannot assign integer to variable of type '%s'" % target_type)
        if target_type.is_int:
            # Loop directly on the C variable.
            self.is_py_target = 0
            self.loopvar_name = self.target.entry.cname
            self.py_loopvar_node = None
        else:
            # Python-object target: loop on a hidden C long and assign
            # a fresh Python int to the target each iteration.
            self.is_py_target = 1
            c_loopvar_node = ExprNodes.TempNode(self.pos,
                PyrexTypes.c_long_type, env)
            c_loopvar_node.allocate_temps(env)
            self.loopvar_name = c_loopvar_node.result()
            self.py_loopvar_node = \
                ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
        self.bound1.allocate_temps(env)
        self.bound2.allocate_temps(env)
        if self.is_py_target:
            self.py_loopvar_node.allocate_temps(env)
            self.target.allocate_target_temps(env, self.py_loopvar_node)
            #self.target.release_target_temp(env)
            #self.py_loopvar_node.release_temp(env)
        self.body.analyse_expressions(env)
        if self.is_py_target:
            c_loopvar_node.release_temp(env)
        if self.else_clause:
            self.else_clause.analyse_expressions(env)
        self.bound1.release_temp(env)
        self.bound2.release_temp(env)

    def generate_execution_code(self, code):
        old_loop_labels = code.new_loop_labels()
        self.bound1.generate_evaluation_code(code)
        self.bound2.generate_evaluation_code(code)
        # relation1 determines whether the loop counts up or down and
        # whether the start bound is inclusive (see relation_table).
        offset, incop = self.relation_table[self.relation1]
        code.putln(
            "for (%s = %s%s; %s %s %s; %s%s) {" % (
                self.loopvar_name,
                self.bound1.result(), offset,
                self.loopvar_name, self.relation2, self.bound2.result(),
                incop, self.loopvar_name))
        if self.py_loopvar_node:
            self.py_loopvar_node.generate_evaluation_code(code)
            self.target.generate_assignment_code(self.py_loopvar_node, code)
        self.body.generate_execution_code(code)
        code.put_label(code.continue_label)
        code.putln("}")
        break_label = code.break_label
        code.set_loop_labels(old_loop_labels)
        if self.else_clause:
            code.putln("/*else*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln("}")
        code.put_label(break_label)
        self.bound1.generate_disposal_code(code)
        self.bound2.generate_disposal_code(code)

    relation_table = {
        # {relop : (initial offset, increment op)}
        '<=': ("",   "++"),
        '<' : ("+1", "++"),
        '>=': ("",   "--"),
        '>' : ("-1", "--")
    }
class TryExceptStatNode(StatNode):
    # try .. except statement
    #
    #  body            StatNode
    #  except_clauses  [ExceptClauseNode]
    #  else_clause     StatNode or None
    #  cleanup_list    [Entry]            temps to clean up on error

    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        for except_clause in self.except_clauses:
            except_clause.analyse_declarations(env)
        if self.else_clause:
            self.else_clause.analyse_declarations(env)
        self.gil_check(env)

    def analyse_expressions(self, env):
        self.body.analyse_expressions(env)
        # Snapshot the free temps: on error these may hold owned
        # references that must be cleared before handling.
        self.cleanup_list = env.free_temp_entries[:]
        for except_clause in self.except_clauses:
            except_clause.analyse_expressions(env)
        if self.else_clause:
            self.else_clause.analyse_expressions(env)
        self.gil_check(env)
    gil_message = "Try-except statement"

    def generate_execution_code(self, code):
        # Errors in the try body jump to our_error_label; everything
        # after the body restores the surrounding error label so
        # errors in except/else propagate outward.
        old_error_label = code.new_error_label()
        our_error_label = code.error_label
        end_label = code.new_label()
        code.putln(
            "/*try:*/ {")
        self.body.generate_execution_code(code)
        code.putln(
            "}")
        code.error_label = old_error_label
        if self.else_clause:
            code.putln(
                "/*else:*/ {")
            self.else_clause.generate_execution_code(code)
            code.putln(
                "}")
        code.put_goto(end_label)
        code.put_label(our_error_label)
        code.put_var_xdecrefs_clear(self.cleanup_list)
        default_clause_seen = 0
        for except_clause in self.except_clauses:
            if not except_clause.pattern:
                default_clause_seen = 1
            else:
                if default_clause_seen:
                    error(except_clause.pos, "Default except clause not last")
            except_clause.generate_handling_code(code, end_label)
        if not default_clause_seen:
            # No bare except: an unmatched exception propagates to the
            # enclosing error label.
            code.put_goto(code.error_label)
        code.put_label(end_label)
class ExceptClauseNode(Node):
    #  Part of try ... except statement.
    #
    #  pattern        ExprNode            exception type to match, or None for bare except
    #  exc_target     ExprNode or None    target of "except E, v"
    #  tb_target      ExprNode or None    target for the traceback, if any
    #  body           StatNode
    #  match_flag     string              result of exception match
    #  exc_value      ExcValueNode        used internally
    #  tb_value       ExcValueNode        used internally
    #  function_name  string              qualified name of enclosing function
    #  exc_vars       (string * 3)        local exception variables
    #  reraise_used   boolean             body contains reraise statement
    def analyse_declarations(self, env):
        if self.exc_target:
            self.exc_target.analyse_target_declaration(env)
        if self.tb_target:
            self.tb_target.analyse_target_declaration(env)
        self.body.analyse_declarations(env)
    def analyse_expressions(self, env):
        genv = env.global_scope()
        self.function_name = env.qualified_name
        if self.pattern:
            self.pattern.analyse_expressions(env)
            # The pattern is compared with PyErr_ExceptionMatches, so it
            # must be a Python object; the result is a C int flag.
            self.pattern = self.pattern.coerce_to_pyobject(env)
            self.match_flag = env.allocate_temp(PyrexTypes.c_int_type)
            self.pattern.release_temp(env)
            env.release_temp(self.match_flag)
        # Three temps for the fetched (type, value, traceback) triple.
        self.exc_vars = [env.allocate_temp(py_object_type) for i in xrange(3)]
        self.exc_value = self.analyse_target(env, self.exc_target, 1)
        self.tb_value = self.analyse_target(env, self.tb_target, 2)
        # Track whether the body contains a bare "raise" so we know the
        # exception variables are needed even without explicit targets.
        old_reraise_used = env.reraise_used
        env.reraise_used = False
        self.body.analyse_expressions(env)
        self.reraise_used = env.reraise_used
        env.reraise_used = old_reraise_used
        for var in self.exc_vars:
            env.release_temp(var)
    def analyse_target(self, env, target, var_no):
        # Wrap exc_vars[var_no] in an ExcValueNode and bind it to `target`;
        # returns the value node, or None if there is no target.
        if target:
            import ExprNodes
            value = ExprNodes.ExcValueNode(self.pos, env, self.exc_vars[var_no])
            value.allocate_temps(env)
            target.analyse_target_expression(env, value)
            return value
    def generate_handling_code(self, code, end_label):
        # Emit "if (<match>) { fetch/normalize/bind; body; cleanup }".
        code.mark_pos(self.pos)
        if self.pattern:
            self.pattern.generate_evaluation_code(code)
            code.putln(
                "%s = PyErr_ExceptionMatches(%s);" % (
                    self.match_flag,
                    self.pattern.py_result()))
            self.pattern.generate_disposal_code(code)
            code.putln(
                "if (%s) {" %
                    self.match_flag)
        else:
            code.putln(
                "/*except:*/ {")
        any_bindings = self.exc_target or self.tb_target
        exc_vars_used = any_bindings or self.reraise_used
        if exc_vars_used:
            if any_bindings:
                code.putln(
                    '%s; __Pyx_AddTraceback("%s");' % (
                        code.error_setup(self.pos),
                        self.function_name))
            exc_args = "&%s, &%s, &%s" % tuple(self.exc_vars)
            code.putln("PyErr_Fetch(%s);" % exc_args)
            if any_bindings:
                # Fetched exception may be unnormalized; normalize before
                # exposing it to user code via the targets.
                code.use_utility_code(normalize_exception_utility_code)
                code.putln("if (__Pyx_NormalizeException(%s) < 0) %s" % (exc_args,
                    code.error_goto(self.pos)))
                if self.exc_target:
                    self.exc_value.generate_evaluation_code(code)
                    self.exc_target.generate_assignment_code(self.exc_value, code)
                if self.tb_target:
                    self.tb_value.generate_evaluation_code(code)
                    self.tb_target.generate_assignment_code(self.tb_value, code)
        # Make the current exception triple visible to nested re-raises.
        old_exc_vars = code.exc_vars
        code.exc_vars = self.exc_vars
        self.body.generate_execution_code(code)
        code.exc_vars = old_exc_vars
        if exc_vars_used:
            for var in self.exc_vars:
                code.putln("Py_XDECREF(%s); %s = 0;" % (var, var))
        code.put_goto(end_label)
        code.putln(
            "}")
class TryFinallyStatNode(StatNode):
    #  try ... finally statement
    #
    #  body             StatNode
    #  finally_clause   StatNode
    #
    #  cleanup_list     [Entry]     temps to clean up on error
    #
    #  The plan is that we funnel all continue, break
    #  return and error gotos into the beginning of the
    #  finally block, setting a variable (__pyx_why) to remember which
    #  one we're doing. At the end of the finally block, we
    #  switch on the variable to figure out where to go.
    #  In addition, if we're doing an error, we save the
    #  exception on entry to the finally block and restore
    #  it on exit.
    preserve_exception = 1
    disallow_continue_in_try_finally = 0
    # There doesn't seem to be any point in disallowing
    # continue in the try block, since we have no problem
    # handling it.
    def analyse_declarations(self, env):
        self.body.analyse_declarations(env)
        self.finally_clause.analyse_declarations(env)
    def analyse_expressions(self, env):
        self.body.analyse_expressions(env)
        # Temps free at this point may hold garbage on the error path and
        # must be cleared before running the finally clause.
        self.cleanup_list = env.free_temp_entries[:]
        self.finally_clause.analyse_expressions(env)
        self.gil_check(env)
    gil_message = "Try-finally statement"
    def generate_execution_code(self, code):
        # Swap in a fresh set of break/continue/return/error labels for the
        # body so every abnormal exit funnels through the finally clause.
        old_error_label = code.error_label
        old_labels = code.all_new_labels()
        new_labels = code.get_all_labels()
        new_error_label = code.error_label
        catch_label = code.new_label()
        code.putln(
            "/*try:*/ {")
        if self.disallow_continue_in_try_finally:
            was_in_try_finally = code.in_try_finally
            code.in_try_finally = 1
        self.body.generate_execution_code(code)
        if self.disallow_continue_in_try_finally:
            code.in_try_finally = was_in_try_finally
        code.putln(
            "}")
        code.putln(
            "/*finally:*/ {")
        # Emit a "__pyx_why" dispatcher only for exit kinds actually used.
        cases_used = []
        error_label_used = 0
        for i, new_label in enumerate(new_labels):
            if new_label in code.labels_used:
                cases_used.append(i)
                if new_label == new_error_label:
                    error_label_used = 1
                    error_label_case = i
        if cases_used:
            code.putln(
                    "int __pyx_why;")
            if error_label_used and self.preserve_exception:
                # Locals to stash the in-flight exception and line number
                # across the finally clause.
                code.putln(
                        "PyObject *%s, *%s, *%s;" % Naming.exc_vars)
                code.putln(
                        "int %s;" % Naming.exc_lineno_name)
            code.use_label(catch_label)
            # Normal fall-through: why == 0.
            code.putln(
                    "__pyx_why = 0; goto %s;" % catch_label)
            for i in cases_used:
                new_label = new_labels[i]
                #if new_label and new_label <> "<try>":
                if new_label == new_error_label and self.preserve_exception:
                    self.put_error_catcher(code,
                        new_error_label, i+1, catch_label)
                else:
                    code.putln(
                        "%s: __pyx_why = %s; goto %s;" % (
                            new_label,
                            i+1,
                            catch_label))
            code.put_label(catch_label)
        code.set_all_labels(old_labels)
        if error_label_used:
            # Errors raised inside the finally clause itself need their own
            # label so the saved exception can be discarded first.
            code.new_error_label()
            finally_error_label = code.error_label
        self.finally_clause.generate_execution_code(code)
        if error_label_used:
            if finally_error_label in code.labels_used and self.preserve_exception:
                over_label = code.new_label()
                code.put_goto(over_label);
                code.put_label(finally_error_label)
                # Drop the previously saved exception: the new one wins.
                code.putln("if (__pyx_why == %d) {" % (error_label_case + 1))
                for var in Naming.exc_vars:
                    code.putln("Py_XDECREF(%s);" % var)
                code.putln("}")
                code.put_goto(old_error_label)
                code.put_label(over_label)
            code.error_label = old_error_label
        if cases_used:
            # Dispatch to whatever exit the body was originally taking.
            code.putln(
                "switch (__pyx_why) {")
            for i in cases_used:
                old_label = old_labels[i]
                if old_label == old_error_label and self.preserve_exception:
                    self.put_error_uncatcher(code, i+1, old_error_label)
                else:
                    code.use_label(old_label)
                    code.putln(
                        "case %s: goto %s;" % (
                            i+1,
                            old_label))
            code.putln(
                "}")
        code.putln(
            "}")
    def put_error_catcher(self, code, error_label, i, catch_label):
        # Emit the error entry into the finally block: record why, clear
        # dirty temps, save the exception triple and the current line.
        code.putln(
            "%s: {" %
                error_label)
        code.putln(
            "__pyx_why = %s;" %
                i)
        code.put_var_xdecrefs_clear(self.cleanup_list)
        code.putln(
            "PyErr_Fetch(&%s, &%s, &%s);" %
                Naming.exc_vars)
        code.putln(
            "%s = %s;" % (
                Naming.exc_lineno_name, Naming.lineno_cname))
        #code.putln(
        #        "goto %s;" %
        #                catch_label)
        code.put_goto(catch_label)
        code.putln(
            "}")
    def put_error_uncatcher(self, code, i, error_label):
        # Emit the switch case that restores the saved exception and line
        # number, then resumes propagating to the enclosing error label.
        code.putln(
            "case %s: {" %
                i)
        code.putln(
            "PyErr_Restore(%s, %s, %s);" %
                Naming.exc_vars)
        code.putln(
            "%s = %s;" % (
                Naming.lineno_cname, Naming.exc_lineno_name))
        for var in Naming.exc_vars:
            code.putln(
                "%s = 0;" %
                    var)
        code.put_goto(error_label)
        code.putln(
            "}")
class GILStatNode(TryFinallyStatNode):
    #  Implements a 'with gil' / 'with nogil' block as a specialised
    #  try/finally whose finally part restores the previous GIL state
    #  (see GILExitNode).
    #
    #  state   string   'gil' or 'nogil'
    preserve_exception = 0
    def __init__(self, pos, state, body):
        self.state = state
        exit_node = GILExitNode(pos, state = state)
        TryFinallyStatNode.__init__(self, pos,
            body = body,
            finally_clause = exit_node)
    def analyse_expressions(self, env):
        # Mark the module as needing GIL machinery, then analyse the body
        # in a temporarily GIL-free environment.
        env.global_scope().gil_used = 1
        saved_nogil = env.nogil
        env.nogil = 1
        TryFinallyStatNode.analyse_expressions(self, env)
        env.nogil = saved_nogil
    def gil_check(self, env):
        # Never complains: manipulating the GIL is exactly what this
        # node exists for.
        pass
    def generate_execution_code(self, code):
        code.putln("/*with %s:*/ {" % self.state)
        acquiring = self.state == 'gil'
        if acquiring:
            code.putln("PyGILState_STATE _save = PyGILState_Ensure();")
        else:
            code.putln("PyThreadState *_save;")
            code.putln("Py_UNBLOCK_THREADS")
        TryFinallyStatNode.generate_execution_code(self, code)
        code.putln("}")
class GILExitNode(StatNode):
    #  Used as the 'finally' block in a GILStatNode: undoes the GIL
    #  transition performed at the top of the 'with gil'/'with nogil'
    #  block.
    #
    #  state   string   'gil' or 'nogil'
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        if self.state == 'gil':
            # Bug fix: PyGILState_Release() takes the PyGILState_STATE
            # returned by PyGILState_Ensure(); the matching GILStatNode
            # declares it as `_save` in the enclosing generated scope.
            # Previously emitted "PyGILState_Release();", which does not
            # compile.
            code.putln("PyGILState_Release(_save);")
        else:
            # Py_BLOCK_THREADS reacquires via the `_save` thread state
            # saved by Py_UNBLOCK_THREADS.
            code.putln("Py_BLOCK_THREADS")
class CImportStatNode(StatNode):
    #  cimport statement
    #
    #  module_name   string           Qualified name of module being imported
    #  as_name       string or None   Name specified in "as" clause, if any
    def analyse_declarations(self, env):
        # Loading the module scope first also triggers any side effects of
        # locating/creating the module, for both plain and dotted names.
        scope = env.find_module(self.module_name, self.pos)
        if "." not in self.module_name:
            # Plain "cimport a [as b]": bind the module under its local name.
            local_name = self.as_name or self.module_name
            env.declare_module(local_name, scope, self.pos)
            env.add_imported_module(scope)
            return
        # Dotted "cimport a.b.c [as d]": walk the package path, declaring
        # each submodule inside its parent as we go.
        path = self.module_name.split(".")
        top_name = path[0]
        top_scope = env.context.find_submodule(top_name)
        scope = top_scope
        for component in path[1:]:
            child = scope.find_submodule(component)
            scope.declare_module(component, child, self.pos)
            if not self.as_name:
                env.add_imported_module(child)
            scope = child
        if self.as_name:
            # "as" binds the innermost module directly.
            env.declare_module(self.as_name, scope, self.pos)
            env.add_imported_module(scope)
        else:
            # Without "as", only the top-level package name is bound.
            env.declare_module(top_name, top_scope, self.pos)
            env.add_imported_module(top_scope)
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        # cimport is resolved entirely at compile time.
        pass
class FromCImportStatNode(StatNode):
    #  from ... cimport statement
    #
    #  module_name     string                Qualified name of module
    #  imported_names  Parsing.ImportedName  Names to be imported
    def analyse_declarations(self, env):
        # Locate the target module's scope and bring each requested name
        # into the current scope, forward-declaring structs/unions/classes
        # that the module does not define yet.
        module_scope = env.find_module(self.module_name, self.pos)
        env.add_imported_module(module_scope)
        for imp in self.imported_names:
            kind = imp.kind
            entry = module_scope.lookup(imp.name)
            if entry:
                if kind and not self.declaration_matches(entry, kind):
                    # Bug fix: was entry.redeclared(pos) with `pos`
                    # undefined (NameError); report at the imported
                    # name's own position.
                    entry.redeclared(imp.pos)
            else:
                if kind == 'struct' or kind == 'union':
                    entry = module_scope.declare_struct_or_union(imp.name,
                        kind = kind, scope = None, typedef_flag = 0, pos = imp.pos)
                elif kind == 'class':
                    entry = module_scope.declare_c_class(imp.name, pos = imp.pos,
                        module_name = self.module_name)
                else:
                    error(imp.pos, "Name '%s' not declared in module '%s'"
                        % (imp.name, self.module_name))
            if entry:
                local_name = imp.as_name or imp.name
                env.add_imported_entry(local_name, entry, imp.pos)
    def declaration_matches(self, entry, kind):
        # Return 1 if `entry` is a type declaration compatible with the
        # requested kind ('struct', 'union' or 'class'), else 0.
        if not entry.is_type:
            return 0
        type = entry.type
        if kind == 'class':
            if not type.is_extension_type:
                return 0
        else:
            if not type.is_struct_or_union:
                return 0
            if kind != type.kind:  # was Py2-only `<>`; `!=` is equivalent
                return 0
        return 1
    def analyse_expressions(self, env):
        pass
    def generate_execution_code(self, code):
        # cimport is resolved entirely at compile time.
        pass
class FromImportStatNode(StatNode):
    #  from ... import statement (Python-level, runs at module exec time)
    #
    #  module           ImportNode            evaluates the imported module object
    #  items            [(string, NameNode)]  (attribute name, assignment target)
    #  #interned_items  [(string, NameNode)]
    #  item             PyTempNode            used internally
    def analyse_declarations(self, env):
        for _, target in self.items:
            target.analyse_target_declaration(env)
    def analyse_expressions(self, env):
        import ExprNodes
        self.module.analyse_expressions(env)
        # One shared temp holds each fetched attribute in turn.
        self.item = ExprNodes.PyTempNode(self.pos, env)
        self.item.allocate_temp(env)
        #self.interned_items = []
        for name, target in self.items:
            #self.interned_items.append((env.intern(name), target))
            target.analyse_target_expression(env, None)
        self.module.release_temp(env)
        self.item.release_temp(env)
    def generate_execution_code(self, code):
        # Evaluate the module once, then PyObject_GetAttr each name and
        # assign it to its target.
        self.module.generate_evaluation_code(code)
        #for cname, target in self.interned_items:
        for name, target in self.items:
            cname = code.intern(name)
            code.putln(
                '%s = PyObject_GetAttr(%s, %s); if (!%s) %s' % (
                    self.item.result(),
                    self.module.py_result(),
                    cname,
                    self.item.result(),
                    code.error_goto(self.pos)))
            target.generate_assignment_code(self.item, code)
        self.module.generate_disposal_code(code)
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
#utility_function_predeclarations = \
#"""
#typedef struct {PyObject **p; char *s;} __Pyx_InternTabEntry; /*proto*/
#typedef struct {PyObject **p; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
#"""
# C typedef emitted near the top of every generated file: one entry of the
# module's string-literal table consumed by __Pyx_InitStrings (p = slot to
# fill, i = intern flag, s = bytes, n = length incl. NUL).
utility_function_predeclarations = \
"""
typedef struct {PyObject **p; int i; char *s; long n;} __Pyx_StringTabEntry; /*proto*/
"""
#get_name_predeclaration = \
#"static PyObject *__Pyx_GetName(PyObject *dict, char *name); /*proto*/"
#get_name_interned_predeclaration = \
#"static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/"
#------------------------------------------------------------------------------------
printing_utility_code = [
"""
static int __Pyx_PrintItem(PyObject *); /*proto*/
static int __Pyx_PrintNewline(void); /*proto*/
""",r"""
static PyObject *__Pyx_GetStdout(void) {
PyObject *f = PySys_GetObject("stdout");
if (!f) {
PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout");
}
return f;
}
static int __Pyx_PrintItem(PyObject *v) {
PyObject *f;
if (!(f = __Pyx_GetStdout()))
return -1;
if (PyFile_SoftSpace(f, 1)) {
if (PyFile_WriteString(" ", f) < 0)
return -1;
}
if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0)
return -1;
if (PyString_Check(v)) {
char *s = PyString_AsString(v);
Py_ssize_t len = PyString_Size(v);
if (len > 0 &&
isspace(Py_CHARMASK(s[len-1])) &&
s[len-1] != ' ')
PyFile_SoftSpace(f, 0);
}
return 0;
}
static int __Pyx_PrintNewline(void) {
PyObject *f;
if (!(f = __Pyx_GetStdout()))
return -1;
if (PyFile_WriteString("\n", f) < 0)
return -1;
PyFile_SoftSpace(f, 0);
return 0;
}
"""]
#------------------------------------------------------------------------------------
# The following function is based on do_raise() from ceval.c.
raise_utility_code = [
"""
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
""","""
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
if (value == Py_None)
value = NULL;
if (tb == Py_None)
tb = NULL;
Py_XINCREF(type);
Py_XINCREF(value);
Py_XINCREF(tb);
if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
#if PY_VERSION_HEX < 0x02050000
if (!PyClass_Check(type))
#else
if (!PyType_Check(type))
#endif
{
/* Raising an instance. The value should be a dummy. */
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
/* Normalize to raise <class>, <instance> */
value = type;
#if PY_VERSION_HEX < 0x02050000
if (PyInstance_Check(type)) {
type = (PyObject*) ((PyInstanceObject*)type)->in_class;
Py_INCREF(type);
}
else {
PyErr_SetString(PyExc_TypeError,
"raise: exception must be an old-style class or instance");
goto raise_error;
}
#else
type = (PyObject*) type->ob_type;
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
#endif
}
PyErr_Restore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
"""]
#------------------------------------------------------------------------------------
#reraise_utility_code = [
#"""
#static void __Pyx_ReRaise(void); /*proto*/
#""","""
#static void __Pyx_ReRaise(void) {
# PyThreadState *tstate = PyThreadState_Get();
# PyObject *type = tstate->exc_type;
# PyObject *value = tstate->exc_value;
# PyObject *tb = tstate->exc_traceback;
# Py_XINCREF(type);
# Py_XINCREF(value);
# Py_XINCREF(tb);
# PyErr_Restore(type, value, tb);
#}
#"""]
#------------------------------------------------------------------------------------
arg_type_test_utility_code = [
"""
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, char *name); /*proto*/
""","""
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, char *name) {
if (!type) {
PyErr_Format(PyExc_SystemError, "Missing type object");
return 0;
}
if ((none_allowed && obj == Py_None) || PyObject_TypeCheck(obj, type))
return 1;
PyErr_Format(PyExc_TypeError,
"Argument '%s' has incorrect type (expected %s, got %s)",
name, type->tp_name, obj->ob_type->tp_name);
return 0;
}
"""]
#------------------------------------------------------------------------------------
#
# __Pyx_GetStarArgs splits the args tuple and kwds dict into two parts
# each, one part suitable for passing to PyArg_ParseTupleAndKeywords,
# and the other containing any extra arguments. On success, replaces
# the borrowed references *args and *kwds with references to a new
# tuple and dict, and passes back new references in *args2 and *kwds2.
# Does not touch any of its arguments on failure.
#
# Any of *kwds, args2 and kwds2 may be 0 (but not args or kwds). If
# *kwds == 0, it is not changed. If kwds2 == 0 and *kwds != 0, a new
# reference to the same dictionary is passed back in *kwds.
#
# If rqd_kwds is not 0, it is an array of booleans corresponding to the
# names in kwd_list, indicating required keyword arguments. If any of
# these are not present in kwds, an exception is raised.
#
get_starargs_utility_code = [
"""
static int __Pyx_GetStarArgs(PyObject **args, PyObject **kwds, char *kwd_list[], \
Py_ssize_t nargs, PyObject **args2, PyObject **kwds2, char rqd_kwds[]); /*proto*/
""","""
static int __Pyx_GetStarArgs(
PyObject **args,
PyObject **kwds,
char *kwd_list[],
Py_ssize_t nargs,
PyObject **args2,
PyObject **kwds2,
char rqd_kwds[])
{
PyObject *x = 0, *args1 = 0, *kwds1 = 0;
int i;
char **p;
if (args2)
*args2 = 0;
if (kwds2)
*kwds2 = 0;
if (args2) {
args1 = PyTuple_GetSlice(*args, 0, nargs);
if (!args1)
goto bad;
*args2 = PyTuple_GetSlice(*args, nargs, PyTuple_GET_SIZE(*args));
if (!*args2)
goto bad;
}
else if (PyTuple_GET_SIZE(*args) > nargs) {
int m = nargs;
int n = PyTuple_GET_SIZE(*args);
PyErr_Format(PyExc_TypeError,
"function takes at most %d positional arguments (%d given)",
m, n);
goto bad;
}
else {
args1 = *args;
Py_INCREF(args1);
}
if (rqd_kwds && !*kwds)
for (i = 0, p = kwd_list; *p; i++, p++)
if (rqd_kwds[i])
goto missing_kwarg;
if (kwds2) {
if (*kwds) {
kwds1 = PyDict_New();
if (!kwds1)
goto bad;
*kwds2 = PyDict_Copy(*kwds);
if (!*kwds2)
goto bad;
for (i = 0, p = kwd_list; *p; i++, p++) {
x = PyDict_GetItemString(*kwds, *p);
if (x) {
if (PyDict_SetItemString(kwds1, *p, x) < 0)
goto bad;
if (PyDict_DelItemString(*kwds2, *p) < 0)
goto bad;
}
else if (rqd_kwds && rqd_kwds[i])
goto missing_kwarg;
}
}
else {
*kwds2 = PyDict_New();
if (!*kwds2)
goto bad;
}
}
else {
kwds1 = *kwds;
Py_XINCREF(kwds1);
if (rqd_kwds && *kwds)
for (i = 0, p = kwd_list; *p; i++, p++)
if (rqd_kwds[i] && !PyDict_GetItemString(*kwds, *p))
goto missing_kwarg;
}
*args = args1;
*kwds = kwds1;
return 0;
missing_kwarg:
PyErr_Format(PyExc_TypeError,
"required keyword argument '%s' is missing", *p);
bad:
Py_XDECREF(args1);
Py_XDECREF(kwds1);
if (args2) {
Py_XDECREF(*args2);
}
if (kwds2) {
Py_XDECREF(*kwds2);
}
return -1;
}
"""]
#------------------------------------------------------------------------------------
unraisable_exception_utility_code = [
"""
static void __Pyx_WriteUnraisable(char *name); /*proto*/
""","""
static void __Pyx_WriteUnraisable(char *name) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
PyGILState_STATE state = PyGILState_Ensure();
PyErr_Fetch(&old_exc, &old_val, &old_tb);
ctx = PyString_FromString(name);
PyErr_Restore(old_exc, old_val, old_tb);
if (!ctx)
ctx = Py_None;
PyErr_WriteUnraisable(ctx);
PyGILState_Release(state);
}
"""]
#------------------------------------------------------------------------------------
traceback_utility_code = [
"""
static void __Pyx_AddTraceback(char *funcname); /*proto*/
""","""
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static void __Pyx_AddTraceback(char *funcname) {
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
PyObject *py_globals = 0;
PyObject *empty_tuple = 0;
PyObject *empty_string = 0;
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
py_srcfile = PyString_FromString(%(FILENAME)s);
if (!py_srcfile) goto bad;
py_funcname = PyString_FromString(funcname);
if (!py_funcname) goto bad;
py_globals = PyModule_GetDict(%(GLOBALS)s);
if (!py_globals) goto bad;
empty_tuple = PyTuple_New(0);
if (!empty_tuple) goto bad;
empty_string = PyString_FromString("");
if (!empty_string) goto bad;
py_code = PyCode_New(
0, /*int argcount,*/
0, /*int nlocals,*/
0, /*int stacksize,*/
0, /*int flags,*/
empty_string, /*PyObject *code,*/
empty_tuple, /*PyObject *consts,*/
empty_tuple, /*PyObject *names,*/
empty_tuple, /*PyObject *varnames,*/
empty_tuple, /*PyObject *freevars,*/
empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
%(LINENO)s, /*int firstlineno,*/
empty_string /*PyObject *lnotab*/
);
if (!py_code) goto bad;
py_frame = PyFrame_New(
PyThreadState_Get(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
py_globals, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
py_frame->f_lineno = %(LINENO)s;
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
Py_XDECREF(empty_tuple);
Py_XDECREF(empty_string);
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
""" % {
'FILENAME': Naming.filename_cname,
'LINENO': Naming.lineno_cname,
'GLOBALS': Naming.module_cname
}]
#------------------------------------------------------------------------------------
set_vtable_utility_code = [
"""
static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
""","""
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
PyObject *pycobj = 0;
int result;
pycobj = PyCObject_FromVoidPtr(vtable, 0);
if (!pycobj)
goto bad;
if (PyDict_SetItemString(dict, "__pyx_vtable__", pycobj) < 0)
goto bad;
result = 0;
goto done;
bad:
result = -1;
done:
Py_XDECREF(pycobj);
return result;
}
"""]
#------------------------------------------------------------------------------------
get_vtable_utility_code = [
"""
static int __Pyx_GetVtable(PyObject *dict, void *vtabptr); /*proto*/
""",r"""
static int __Pyx_GetVtable(PyObject *dict, void *vtabptr) {
int result;
PyObject *pycobj;
pycobj = PyMapping_GetItemString(dict, "__pyx_vtable__");
if (!pycobj)
goto bad;
*(void **)vtabptr = PyCObject_AsVoidPtr(pycobj);
if (!*(void **)vtabptr)
goto bad;
result = 0;
goto done;
bad:
result = -1;
done:
Py_XDECREF(pycobj);
return result;
}
"""]
#------------------------------------------------------------------------------------
#init_intern_tab_utility_code = [
#"""
#static int __Pyx_InternStrings(__Pyx_InternTabEntry *t); /*proto*/
#""","""
#static int __Pyx_InternStrings(__Pyx_InternTabEntry *t) {
# while (t->p) {
# *t->p = PyString_InternFromString(t->s);
# if (!*t->p)
# return -1;
# ++t;
# }
# return 0;
#}
#"""]
#init_intern_tab_utility_code = [
#"""
#static int __Pyx_InternStrings(PyObject **t[]); /*proto*/
#""","""
#static int __Pyx_InternStrings(PyObject **t[]) {
# while (*t) {
# PyString_InternInPlace(*t);
# if (!**t)
# return -1;
# ++t;
# }
# return 0;
#}
#"""]
#------------------------------------------------------------------------------------
init_string_tab_utility_code = [
"""
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
""","""
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
if (!*t->p)
return -1;
if (t->i)
PyString_InternInPlace(t->p);
++t;
}
return 0;
}
"""]
#------------------------------------------------------------------------------------
#get_exception_utility_code = [
#"""
#static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#""","""
#static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
# PyThreadState *tstate = PyThreadState_Get();
# PyObject *old_type, *old_value, *old_tb;
# PyErr_Fetch(type, value, tb);
# PyErr_NormalizeException(type, value, tb);
# if (PyErr_Occurred())
# goto bad;
# if (!*tb) {
# printf("no traceback\n");
# *tb = Py_None;
# Py_INCREF(*tb);
# }
##if 1
# Py_INCREF(*type);
# Py_INCREF(*value);
# Py_INCREF(*tb);
# old_type = tstate->exc_type;
# old_value = tstate->exc_value;
# old_tb = tstate->exc_traceback;
# tstate->exc_type = *type;
# tstate->exc_value = *value;
# tstate->exc_traceback = *tb;
# Py_XDECREF(old_type);
# Py_XDECREF(old_value);
# Py_XDECREF(old_tb);
##endif
# return 0;
#bad:
# Py_XDECREF(*type);
# Py_XDECREF(*value);
# Py_XDECREF(*tb);
# return -1;
#}
#"""]
#------------------------------------------------------------------------------------
#get_exception_utility_code = [
#"""
#static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#""","""
#static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
# PyErr_Fetch(type, value, tb);
# PyErr_NormalizeException(type, value, tb);
# if (PyErr_Occurred())
# goto bad;
# if (!*tb) {
# *tb = Py_None;
# Py_INCREF(*tb);
# }
# return 0;
#bad:
# Py_XDECREF(*type);
# Py_XDECREF(*value);
# Py_XDECREF(*tb);
# return -1;
#}
#"""]
#------------------------------------------------------------------------------------
normalize_exception_utility_code = [
"""
static int __Pyx_NormalizeException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
""","""
static int __Pyx_NormalizeException(PyObject **type, PyObject **value, PyObject **tb) {
PyErr_NormalizeException(type, value, tb);
if (PyErr_Occurred())
goto bad;
if (!*tb) {
*tb = Py_None;
Py_INCREF(*tb);
}
return 0;
bad:
Py_XDECREF(*type);
Py_XDECREF(*value);
Py_XDECREF(*tb);
return -1;
}
"""]
#------------------------------------------------------------------------------------
| {
"content_hash": "054afb5a9c228028c77e46f48943dab5",
"timestamp": "",
"source": "github",
"line_count": 3249,
"max_line_length": 121,
"avg_line_length": 35.02431517389966,
"alnum_prop": 0.548543859957467,
"repo_name": "Distrotech/Pyrex",
"id": "fb974df0b300740a1493d9578a786194560a435b",
"size": "113794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pyrex/Compiler/Nodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "107042"
},
{
"name": "Emacs Lisp",
"bytes": "430"
},
{
"name": "Python",
"bytes": "667404"
},
{
"name": "Shell",
"bytes": "395"
},
{
"name": "Smalltalk",
"bytes": "618"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.sessions.backends.db import SessionStore
#App imports
from .. import user_successfully_created_msg, referrer_url_session_key, referring_user_id_session_key
from ..models import SocialLaunchProfile
#Test imports
from .util import BaseTestCase
class IndexTestCase(BaseTestCase):
    """Tests for the social_launch_index view: GET rendering, referrer
    capture, and the signup POST (success and validation failures).

    The assertion and session-setup boilerplate that was copy-pasted
    across the original tests is factored into private helpers; test
    method names and observable assertions are unchanged. Deprecated
    ``assertEquals`` calls are replaced by ``assertEqual``.
    """
    def _set_session(self, **data):
        # Build a server-side session containing `data` and attach its
        # cookie to the test client, simulating state from a prior visit.
        session = SessionStore()
        for key, value in data.items():
            session[key] = value
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
    def _assert_preconditions(self):
        # Only the fixture user exists and no profile has been created yet.
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
    def _assert_user_created(self, response, email, referrer_url='', referring_user=None):
        # Exactly one new user (beyond the fixture user) plus one profile,
        # carrying the expected referral metadata.
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEqual(len(users), 2)
        self.assertEqual(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEqual(user.email, email)
        self.assertEqual(user.username, email)
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEqual(slp.user, user)
        self.assertEqual(slp.referrer_url, referrer_url)
        self.assertEqual(slp.referring_user, referring_user)
    def _assert_nothing_created(self, response):
        # No user/profile added; the form re-renders without the success msg.
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, user_successfully_created_msg)
    def test_get(self):
        response = self.client.get(reverse('social_launch_index'))
        self.assertEqual(response.status_code, 200)
    def test_get_with_referrer(self):
        referrer_url = 'http://facebook.com'
        response = self.client.get(reverse('social_launch_index'), HTTP_REFERER=referrer_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.client.session[referrer_url_session_key], referrer_url)
    def test_post_success_creates_new_user(self):
        post_data = {'email' : 'foo@example.com'}
        self._assert_preconditions()
        response = self.client.post(reverse('social_launch_index'), post_data, follow=True)
        self._assert_user_created(response, post_data['email'])
    def test_post_success_creates_new_user_with_referrer(self):
        referrer_url = 'http://facebook.com'
        post_data = {'email' : 'foo@example.com'}
        self._set_session(**{referrer_url_session_key: referrer_url, referring_user_id_session_key: ''})
        self._assert_preconditions()
        response = self.client.post(reverse('social_launch_index'), post_data, follow=True)
        self._assert_user_created(response, post_data['email'], referrer_url=referrer_url)
    def test_post_fails_invalid_email(self):
        post_data = {'email' : 'fooexample.com'}
        self._assert_preconditions()
        response = self.client.post(reverse('social_launch_index'), post_data)
        self._assert_nothing_created(response)
    def test_post_fails_invalid_email_with_referrer(self):
        referrer_url = 'http://facebook.com'
        post_data = {'email' : 'fooexample.com'}
        self._set_session(**{referrer_url_session_key: referrer_url, referring_user_id_session_key: ''})
        self._assert_preconditions()
        response = self.client.post(reverse('social_launch_index'), post_data)
        self._assert_nothing_created(response)
        # The captured referrer survives a failed signup attempt.
        self.assertEqual(self.client.session[referrer_url_session_key], referrer_url)
    def test_post_fails_no_email(self):
        post_data = {}
        self._assert_preconditions()
        response = self.client.post(reverse('social_launch_index'), post_data)
        self._assert_nothing_created(response)
class ReferralTestCase(BaseTestCase):
    """Tests for the social_launch_referral view: GET by referring-user id
    and signup POSTs that record (or gracefully drop) the referring user.

    Duplicated setup/assertion code is factored into private helpers; test
    method names and observable assertions are unchanged. Deprecated
    ``assertEquals`` calls are replaced by ``assertEqual``.
    """
    def _set_session(self, **data):
        # Build a server-side session containing `data` and attach its
        # cookie to the test client.
        session = SessionStore()
        for key, value in data.items():
            session[key] = value
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
    def _assert_preconditions(self):
        # Only the fixture user exists and no profile has been created yet.
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
    def _assert_user_created(self, response, email, referring_user=None):
        # Exactly one new user plus one profile; referrer_url is always
        # empty for signups through the referral page.
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEqual(len(users), 2)
        self.assertEqual(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEqual(user.email, email)
        self.assertEqual(user.username, email)
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEqual(slp.user, user)
        self.assertEqual(slp.referrer_url, '')
        self.assertEqual(slp.referring_user, referring_user)
    def test_get_success(self):
        response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}))
        self.assertEqual(response.status_code, 200)
    def test_get_fails_invalid_id(self):
        response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : 'foo'}))
        self.assertEqual(response.status_code, 404)
    def test_get_fails_no_such_user(self):
        response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : 1000}))
        self.assertEqual(response.status_code, 404)
    def test_post_success_creates_new_user(self):
        post_data = {'email' : 'foo@example.com'}
        self._set_session(**{referring_user_id_session_key: self.user1.id})
        self._assert_preconditions()
        response = self.client.post(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}), post_data, follow=True)
        self._assert_user_created(response, post_data['email'], referring_user=self.user1)
    def test_post_success_creates_new_user_bad_referring_used_id(self):
        post_data = {'email' : 'foo@example.com'}
        # A stale/nonexistent referring user id must not block signup; the
        # profile simply records no referring user.
        self._set_session(**{referring_user_id_session_key: 1000})
        self._assert_preconditions()
        response = self.client.post(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}), post_data, follow=True)
        self._assert_user_created(response, post_data['email'], referring_user=None)
| {
"content_hash": "abdb37c6e7766b75c817aad7e9f526ea",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 134,
"avg_line_length": 35.6036866359447,
"alnum_prop": 0.7312969194926223,
"repo_name": "elricgit/django-social-launch",
"id": "52007d4b7eb014b60c4469384c473c38f1c6535b",
"size": "7742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_social_launch/tests/test_urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20954"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, print_function
import collections
import enum
import math
import os
import signal
import six
import subprocess
import sys
import warnings
from multiprocessing.pool import ThreadPool, RUN
from six.moves import zip, queue
from threading import Thread, Event
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import (syspath, command_output, displayable_path,
py3_path, cpu_count)
# Utilities.
class ReplayGainError(Exception):
    """A non-fatal analysis error, local to a single track or album,
    raised by one of the backends.
    """
class FatalReplayGainError(Exception):
    """An unrecoverable error raised by one of the backends."""
class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
    """An unrecoverable error raised when the GStreamerBackend cannot
    load the GStreamer plugins it requires.
    """
def call(args, **kwargs):
    """Run an external command and return its captured output.

    Wraps `command_output`, translating failures into ReplayGainError
    so callers only have to handle one exception type.
    """
    try:
        output = command_output(args, **kwargs)
    except subprocess.CalledProcessError as exc:
        message = u"{0} exited with status {1}".format(args[0], exc.returncode)
        raise ReplayGainError(message)
    except UnicodeEncodeError:
        # Python 2's subprocess on Windows can fail to encode Unicode
        # filenames. See:
        # https://github.com/google-code-export/beets/issues/499
        raise ReplayGainError(u"argument encoding failed")
    return output
def after_version(version_a, version_b):
    """Return True when dotted version string `version_a` is greater
    than or equal to `version_b` (numeric, component-wise comparison).
    """
    def _as_tuple(version):
        # "1.2.3" -> (1, 2, 3); tuples compare lexicographically.
        return tuple(int(component) for component in version.split('.'))

    return _as_tuple(version_a) >= _as_tuple(version_b)
def db_to_lufs(db):
    """Convert a dB value (relative to the 107 dB reference) to LUFS.

    According to https://wiki.hydrogenaud.io/index.php?title=
    ReplayGain_2.0_specification#Reference_level
    """
    return -107 + db
def lufs_to_db(db):
    """Convert a LUFS value to dB (relative to the 107 dB reference).

    According to https://wiki.hydrogenaud.io/index.php?title=
    ReplayGain_2.0_specification#Reference_level
    """
    return 107 + db
# Backend base and plumbing classes.

# A single replay-gain measurement, for one track or one whole album.
# gain: in LU to reference level
# peak: part of full scale (FS is 1.0)
Gain = collections.namedtuple("Gain", "gain peak")

# The result of analysing a complete album.
# album_gain: Gain object
# track_gains: list of Gain objects
AlbumGain = collections.namedtuple("AlbumGain", "album_gain track_gains")
class Peak(enum.Enum):
    """Peak measurement method: no peak measurement, true peak, or
    sample peak. The member names double as values for ffmpeg's
    ``ebur128=peak=`` filter option.
    """
    none = 0
    true = 1
    sample = 2
class Backend(object):
    """An abstract class representing engine for calculating RG values.
    """

    # Whether this backend may safely be run from multiple threads.
    do_parallel = False

    def __init__(self, config, log):
        """Initialize the backend with the configuration view for the
        plugin.
        """
        self._log = log

    def compute_track_gain(self, items, target_level, peak):
        """Computes the track gain of the given tracks, returns a list
        of Gain objects.
        """
        raise NotImplementedError()

    def compute_album_gain(self, items, target_level, peak):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        raise NotImplementedError()
# ffmpeg backend
class FfmpegBackend(Backend):
    """A replaygain backend using ffmpeg's ebur128 filter."""

    do_parallel = True

    def __init__(self, config, log):
        """Verify that ffmpeg is installed and supports the ebur128
        filter; raise FatalReplayGainError otherwise.
        """
        super(FfmpegBackend, self).__init__(config, log)
        self._ffmpeg_path = "ffmpeg"

        # check that ffmpeg is installed
        try:
            ffmpeg_version_out = call([self._ffmpeg_path, "-version"])
        except OSError:
            raise FatalReplayGainError(
                u"could not find ffmpeg at {0}".format(self._ffmpeg_path)
            )
        # ebur128 is available either via the external libebur128
        # library or natively from libavfilter 6.67.100 onwards.
        incompatible_ffmpeg = True
        for line in ffmpeg_version_out.stdout.splitlines():
            if line.startswith(b"configuration:"):
                if b"--enable-libebur128" in line:
                    incompatible_ffmpeg = False
            if line.startswith(b"libavfilter"):
                version = line.split(b" ", 1)[1].split(b"/", 1)[0].split(b".")
                version = tuple(map(int, version))
                if version >= (6, 67, 100):
                    incompatible_ffmpeg = False
        if incompatible_ffmpeg:
            # Fixed: the original message concatenated literals without
            # separating spaces ("ReplayGain.calculation", "orthe").
            raise FatalReplayGainError(
                u"Installed FFmpeg version does not support ReplayGain "
                u"calculation. Either libavfilter version 6.67.100 or "
                u"above or the --enable-libebur128 configuration option "
                u"is required."
            )

    def compute_track_gain(self, items, target_level, peak):
        """Computes the track gain of the given tracks, returns a list
        of Gain objects (the track gains).
        """
        gains = []
        for item in items:
            gains.append(
                self._analyse_item(
                    item,
                    target_level,
                    peak,
                    count_blocks=False,
                )[0]  # take only the gain, discarding number of gating blocks
            )
        return gains

    def compute_album_gain(self, items, target_level, peak):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        target_level_lufs = db_to_lufs(target_level)

        # analyse tracks
        # list of track Gain objects
        track_gains = []
        # maximum peak
        album_peak = 0
        # sum of BS.1770 gating block powers
        sum_powers = 0
        # total number of BS.1770 gating blocks
        n_blocks = 0

        for item in items:
            track_gain, track_n_blocks = self._analyse_item(
                item, target_level, peak
            )
            track_gains.append(track_gain)

            # album peak is maximum track peak
            album_peak = max(album_peak, track_gain.peak)

            # prepare album_gain calculation
            # total number of blocks is sum of track blocks
            n_blocks += track_n_blocks

            # convert `LU to target_level` -> LUFS
            track_loudness = target_level_lufs - track_gain.gain
            # This reverses ITU-R BS.1770-4 p. 6 equation (5) to convert
            # from loudness to power. The result is the average gating
            # block power.
            track_power = 10**((track_loudness + 0.691) / 10)

            # Weight that average power by the number of gating blocks to
            # get the sum of all their powers. Add that to the sum of all
            # block powers in this album.
            sum_powers += track_power * track_n_blocks

        # calculate album gain
        if n_blocks > 0:
            # compare ITU-R BS.1770-4 p. 6 equation (5)
            # Album gain is the replaygain of the concatenation of all tracks.
            album_gain = -0.691 + 10 * math.log10(sum_powers / n_blocks)
        else:
            album_gain = -70
        # convert LUFS -> `LU to target_level`
        album_gain = target_level_lufs - album_gain

        self._log.debug(
            u"{0}: gain {1} LU, peak {2}"
            .format(items, album_gain, album_peak)
        )

        return AlbumGain(Gain(album_gain, album_peak), track_gains)

    def _construct_cmd(self, item, peak_method):
        """Construct the shell command to analyse items."""
        return [
            self._ffmpeg_path,
            "-nostats",
            "-hide_banner",
            "-i",
            item.path,
            "-map",
            "a:0",
            "-filter",
            "ebur128=peak={0}".format(peak_method),
            "-f",
            "null",
            "-",
        ]

    def _analyse_item(self, item, target_level, peak, count_blocks=True):
        """Analyse item. Return a pair of a Gain object and the number
        of gating blocks above the threshold.
        If `count_blocks` is False, the number of gating blocks returned
        will be 0.
        """
        target_level_lufs = db_to_lufs(target_level)
        peak_method = peak.name

        # call ffmpeg
        self._log.debug(u"analyzing {0}".format(item))
        cmd = self._construct_cmd(item, peak_method)
        self._log.debug(
            u'executing {0}', u' '.join(map(displayable_path, cmd))
        )
        output = call(cmd).stderr.splitlines()

        # parse output
        if peak == Peak.none:
            peak = 0
        else:
            # The summary is printed at the end of the output, so search
            # backwards from the last line.
            line_peak = self._find_line(
                output,
                " {0} peak:".format(peak_method.capitalize()).encode(),
                start_line=len(output) - 1, step_size=-1,
            )
            peak = self._parse_float(
                output[self._find_line(
                    output, b" Peak:",
                    line_peak,
                )]
            )
            # convert TPFS -> part of FS
            peak = 10**(peak / 20)

        line_integrated_loudness = self._find_line(
            output, b" Integrated loudness:",
            start_line=len(output) - 1, step_size=-1,
        )
        gain = self._parse_float(
            output[self._find_line(
                output, b" I:",
                line_integrated_loudness,
            )]
        )
        # convert LUFS -> LU from target level
        gain = target_level_lufs - gain

        # count BS.1770 gating blocks
        n_blocks = 0
        if count_blocks:
            gating_threshold = self._parse_float(
                output[self._find_line(
                    output, b" Threshold:",
                    start_line=line_integrated_loudness,
                )]
            )
            for line in output:
                # Momentary loudness ("M:") lines report one gating
                # block each; count those above the threshold.
                if not line.startswith(b"[Parsed_ebur128"):
                    continue
                if line.endswith(b"Summary:"):
                    continue
                line = line.split(b"M:", 1)
                if len(line) < 2:
                    continue
                if self._parse_float(b"M: " + line[1]) >= gating_threshold:
                    n_blocks += 1
            self._log.debug(
                u"{0}: {1} blocks over {2} LUFS"
                .format(item, n_blocks, gating_threshold)
            )

        self._log.debug(
            u"{0}: gain {1} LU, peak {2}"
            .format(item, gain, peak)
        )

        return Gain(gain, peak), n_blocks

    def _find_line(self, output, search, start_line=0, step_size=1):
        """Return index of line beginning with `search`.
        Begins searching at index `start_line` in `output`; a negative
        `step_size` searches backwards.
        """
        end_index = len(output) if step_size > 0 else -1
        for i in range(start_line, end_index, step_size):
            if output[i].startswith(search):
                return i
        raise ReplayGainError(
            u"ffmpeg output: missing {0} after line {1}"
            .format(repr(search), start_line)
        )

    def _parse_float(self, line):
        """Extract a float from a key value pair in `line`.
        This format is expected: /[^:]:[[:space:]]*value.*/, where `value` is
        the float.
        """
        # extract value
        value = line.split(b":", 1)
        if len(value) < 2:
            raise ReplayGainError(
                u"ffmpeg output: expected key value pair, found {0}"
                .format(line)
            )
        value = value[1].lstrip()
        # strip unit
        value = value.split(b" ", 1)[0]
        # cast value to float
        try:
            return float(value)
        except ValueError:
            raise ReplayGainError(
                u"ffmpeg output: expected float value, found {0}"
                .format(value)
            )
# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
    """Backend that computes ReplayGain values by shelling out to the
    mp3gain or aacgain command-line tool.
    """

    do_parallel = True

    def __init__(self, config, log):
        """Locate the gain tool, either from the `command` config value
        or by searching $PATH for mp3gain/aacgain.
        """
        super(CommandBackend, self).__init__(config, log)
        config.add({
            'command': u"",
            'noclip': True,
        })

        self.command = config["command"].as_str()

        if self.command:
            # Explicit executable path.
            if not os.path.isfile(self.command):
                raise FatalReplayGainError(
                    u'replaygain command does not exist: {0}'.format(
                        self.command)
                )
        else:
            # Check whether the program is in $PATH.
            for cmd in ('mp3gain', 'aacgain'):
                try:
                    call([cmd, '-v'])
                    self.command = cmd
                except OSError:
                    pass
        if not self.command:
            raise FatalReplayGainError(
                u'no replaygain command found: install mp3gain or aacgain'
            )

        self.noclip = config['noclip'].get(bool)

    def compute_track_gain(self, items, target_level, peak):
        """Computes the track gain of the given tracks, returns a list
        of Gain objects. `peak` is ignored: the tool reports sample
        peaks only.
        """
        supported_items = list(filter(self.format_supported, items))
        output = self.compute_gain(supported_items, target_level, False)
        return output

    def compute_album_gain(self, items, target_level, peak):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        # TODO: What should be done when not all tracks in the album are
        # supported?
        supported_items = list(filter(self.format_supported, items))
        if len(supported_items) != len(items):
            self._log.debug(u'tracks are of unsupported format')
            return AlbumGain(None, [])

        output = self.compute_gain(supported_items, target_level, True)
        return AlbumGain(output[-1], output[:-1])

    def format_supported(self, item):
        """Checks whether the given item is supported by the selected tool.
        """
        if 'mp3gain' in self.command and item.format != 'MP3':
            return False
        elif 'aacgain' in self.command and item.format not in ('MP3', 'AAC'):
            return False
        return True

    def compute_gain(self, items, target_level, is_album):
        """Computes the track or album gain of a list of items and
        returns a list of Gain objects as given by `parse_tool_output`.
        When computing album gain, the last element of the list is the
        album gain.
        """
        # (A stray docstring-like string literal that previously sat
        # here as dead code has been folded into the docstring above.)
        if len(items) == 0:
            self._log.debug(u'no supported tracks to analyze')
            return []

        # Construct shell command. The "-o" option makes the output
        # easily parseable (tab-delimited). "-s s" forces gain
        # recalculation even if tags are already present and disables
        # tag-writing; this turns the mp3gain/aacgain tool into a gain
        # calculator rather than a tag manipulator because we take care
        # of changing tags ourselves.
        cmd = [self.command, '-o', '-s', 's']
        if self.noclip:
            # Adjust to avoid clipping.
            cmd = cmd + ['-k']
        else:
            # Disable clipping warning.
            cmd = cmd + ['-c']
        # The tool assumes an 89 dB reference level, so express the
        # target as an offset from that.
        cmd = cmd + ['-d', str(int(target_level - 89))]
        cmd = cmd + [syspath(i.path) for i in items]

        self._log.debug(u'analyzing {0} files', len(items))
        self._log.debug(u"executing {0}", " ".join(map(displayable_path, cmd)))
        output = call(cmd).stdout
        self._log.debug(u'analysis finished')
        return self.parse_tool_output(output,
                                      len(items) + (1 if is_album else 0))

    def parse_tool_output(self, text, num_lines):
        """Given the tab-delimited output from an invocation of mp3gain
        or aacgain, parse the text and return a list of Gain objects,
        one per analyzed file. (The header line is skipped; peaks are
        normalized from 16-bit sample values to a fraction of full
        scale.)
        """
        out = []
        for line in text.split(b'\n')[1:num_lines + 1]:
            parts = line.split(b'\t')
            if len(parts) != 6 or parts[0] == b'File':
                self._log.debug(u'bad tool output: {0}', text)
                raise ReplayGainError(u'mp3gain failed')
            d = {
                'file': parts[0],
                'mp3gain': int(parts[1]),
                'gain': float(parts[2]),
                'peak': float(parts[3]) / (1 << 15),
                'maxgain': int(parts[4]),
                'mingain': int(parts[5]),
            }
            out.append(Gain(d['gain'], d['peak']))
        return out
# GStreamer-based backend.
class GStreamerBackend(Backend):
    """Backend that computes ReplayGain values with GStreamer's
    ``rganalysis`` element, driven through PyGObject.
    """

    def __init__(self, config, log):
        super(GStreamerBackend, self).__init__(config, log)
        self._import_gst()

        # Initialize a GStreamer pipeline of the form filesrc ->
        # decodebin -> audioconvert -> audioresample -> rganalysis ->
        # fakesink. The connection between decodebin and audioconvert is
        # handled dynamically after decodebin figures out the type of
        # the input file.
        self._src = self.Gst.ElementFactory.make("filesrc", "src")
        self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
        self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
        self._res = self.Gst.ElementFactory.make("audioresample", "res")
        self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")

        if self._src is None or self._decbin is None or self._conv is None \
                or self._res is None or self._rg is None:
            raise FatalGstreamerPluginReplayGainError(
                u"Failed to load required GStreamer plugins"
            )

        # We check which files need gain ourselves, so all files given
        # to rganalysis should have their gain computed, even if it
        # already exists.
        self._rg.set_property("forced", True)
        self._sink = self.Gst.ElementFactory.make("fakesink", "sink")

        self._pipe = self.Gst.Pipeline()
        self._pipe.add(self._src)
        self._pipe.add(self._decbin)
        self._pipe.add(self._conv)
        self._pipe.add(self._res)
        self._pipe.add(self._rg)
        self._pipe.add(self._sink)

        # Static links; decodebin -> audioconvert is linked later via
        # the pad-added signal.
        self._src.link(self._decbin)
        self._conv.link(self._res)
        self._res.link(self._rg)
        self._rg.link(self._sink)

        self._bus = self._pipe.get_bus()
        self._bus.add_signal_watch()
        self._bus.connect("message::eos", self._on_eos)
        self._bus.connect("message::error", self._on_error)
        self._bus.connect("message::tag", self._on_tag)
        # Needed for handling the dynamic connection between decodebin
        # and audioconvert
        self._decbin.connect("pad-added", self._on_pad_added)
        self._decbin.connect("pad-removed", self._on_pad_removed)

        self._main_loop = self.GLib.MainLoop()

        self._files = []

    def _import_gst(self):
        """Import the necessary GObject-related modules and assign `Gst`
        and `GObject` fields on this object.
        """
        try:
            import gi
        except ImportError:
            raise FatalReplayGainError(
                u"Failed to load GStreamer: python-gi not found"
            )

        try:
            gi.require_version('Gst', '1.0')
        except ValueError as e:
            raise FatalReplayGainError(
                u"Failed to load GStreamer 1.0: {0}".format(e)
            )

        from gi.repository import GObject, Gst, GLib
        # Calling GObject.threads_init() is not needed for
        # PyGObject 3.10.2+
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            GObject.threads_init()
        Gst.init([sys.argv[0]])

        self.GObject = GObject
        self.GLib = GLib
        self.Gst = Gst

    def compute(self, files, target_level, album):
        """Run the pipeline over `files`, collecting per-file tags into
        `self._file_tags`. When `album` is true, rganalysis is told the
        track count so it also emits album gain/peak on the last track.
        """
        self._error = None
        self._files = list(files)

        if len(self._files) == 0:
            return

        self._file_tags = collections.defaultdict(dict)

        self._rg.set_property("reference-level", target_level)

        if album:
            self._rg.set_property("num-tracks", len(self._files))

        if self._set_first_file():
            self._main_loop.run()
            if self._error is not None:
                raise self._error

    def compute_track_gain(self, items, target_level, peak):
        """Compute per-track gain; returns a list of Gain objects."""
        self.compute(items, target_level, False)
        if len(self._file_tags) != len(items):
            raise ReplayGainError(u"Some tracks did not receive tags")

        ret = []
        for item in items:
            ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
                            self._file_tags[item]["TRACK_PEAK"]))

        return ret

    def compute_album_gain(self, items, target_level, peak):
        """Compute album gain; returns an AlbumGain object."""
        items = list(items)
        self.compute(items, target_level, True)
        if len(self._file_tags) != len(items):
            raise ReplayGainError(u"Some items in album did not receive tags")

        # Collect track gains.
        track_gains = []
        for item in items:
            try:
                gain = self._file_tags[item]["TRACK_GAIN"]
                peak = self._file_tags[item]["TRACK_PEAK"]
            except KeyError:
                raise ReplayGainError(u"results missing for track")
            track_gains.append(Gain(gain, peak))

        # Get album gain information from the last track.
        last_tags = self._file_tags[items[-1]]
        try:
            gain = last_tags["ALBUM_GAIN"]
            peak = last_tags["ALBUM_PEAK"]
        except KeyError:
            raise ReplayGainError(u"results missing for album")

        return AlbumGain(Gain(gain, peak), track_gains)

    def close(self):
        # Stop watching the bus so the pipeline can be garbage-collected.
        self._bus.remove_signal_watch()

    def _on_eos(self, bus, message):
        # A file finished playing in all elements of the pipeline. The
        # RG tags have already been propagated. If we don't have a next
        # file, we stop processing.
        if not self._set_next_file():
            self._pipe.set_state(self.Gst.State.NULL)
            self._main_loop.quit()

    def _on_error(self, bus, message):
        """Record a pipeline error and stop the main loop; the stored
        exception is re-raised by `compute`.
        """
        self._pipe.set_state(self.Gst.State.NULL)
        self._main_loop.quit()
        err, debug = message.parse_error()
        f = self._src.get_property("location")
        # A GStreamer error, either an unsupported format or a bug.
        self._error = ReplayGainError(
            u"Error {0!r} - {1!r} on file {2!r}".format(err, debug, f)
        )

    def _on_tag(self, bus, message):
        """Collect replay-gain tags emitted by rganalysis for the file
        currently being processed.
        """
        tags = message.parse_tag()

        def handle_tag(taglist, tag, userdata):
            # The rganalysis element provides both the existing tags for
            # files and the new computes tags. In order to ensure we
            # store the computed tags, we overwrite the RG values of
            # received a second time.
            if tag == self.Gst.TAG_TRACK_GAIN:
                self._file_tags[self._file]["TRACK_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_TRACK_PEAK:
                self._file_tags[self._file]["TRACK_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_GAIN:
                self._file_tags[self._file]["ALBUM_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_PEAK:
                self._file_tags[self._file]["ALBUM_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_REFERENCE_LEVEL:
                self._file_tags[self._file]["REFERENCE_LEVEL"] = \
                    taglist.get_double(tag)[1]

        tags.foreach(handle_tag, None)

    def _set_first_file(self):
        """Load the first file into the pipeline and start playing.
        Returns False when there is nothing to process.
        """
        if len(self._files) == 0:
            return False

        self._file = self._files.pop(0)
        self._pipe.set_state(self.Gst.State.NULL)
        self._src.set_property("location", py3_path(syspath(self._file.path)))
        self._pipe.set_state(self.Gst.State.PLAYING)
        return True

    def _set_file(self):
        """Initialize the filesrc element with the next file to be analyzed.
        """
        # No more files, we're done
        if len(self._files) == 0:
            return False

        self._file = self._files.pop(0)

        # Ensure the filesrc element received the paused state of the
        # pipeline in a blocking manner
        self._src.sync_state_with_parent()
        self._src.get_state(self.Gst.CLOCK_TIME_NONE)

        # Ensure the decodebin element receives the paused state of the
        # pipeline in a blocking manner
        self._decbin.sync_state_with_parent()
        self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)

        # Disconnect the decodebin element from the pipeline, set its
        # state to READY to to clear it.
        self._decbin.unlink(self._conv)
        self._decbin.set_state(self.Gst.State.READY)

        # Set a new file on the filesrc element, can only be done in the
        # READY state
        self._src.set_state(self.Gst.State.READY)
        self._src.set_property("location", py3_path(syspath(self._file.path)))

        self._decbin.link(self._conv)
        self._pipe.set_state(self.Gst.State.READY)

        return True

    def _set_next_file(self):
        """Set the next file to be analyzed while keeping the pipeline
        in the PAUSED state so that the rganalysis element can correctly
        handle album gain.
        """
        # A blocking pause
        self._pipe.set_state(self.Gst.State.PAUSED)
        self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)

        # Try setting the next file
        ret = self._set_file()
        if ret:
            # Seek to the beginning in order to clear the EOS state of the
            # various elements of the pipeline
            self._pipe.seek_simple(self.Gst.Format.TIME,
                                   self.Gst.SeekFlags.FLUSH,
                                   0)
            self._pipe.set_state(self.Gst.State.PLAYING)

        return ret

    def _on_pad_added(self, decbin, pad):
        # Complete the dynamic decodebin -> audioconvert link once
        # decodebin has determined the input format.
        sink_pad = self._conv.get_compatible_pad(pad, None)
        assert(sink_pad is not None)
        pad.link(sink_pad)

    def _on_pad_removed(self, decbin, pad):
        # Called when the decodebin element is disconnected from the
        # rest of the pipeline while switching input files
        peer = pad.get_peer()
        assert(peer is None)
class AudioToolsBackend(Backend):
    """ReplayGain backend that uses `Python Audio Tools
    <http://audiotools.sourceforge.net/>`_ and its capabilities to read more
    file formats and compute ReplayGain values using its replaygain module.
    """

    def __init__(self, config, log):
        super(AudioToolsBackend, self).__init__(config, log)
        self._import_audiotools()

    def _import_audiotools(self):
        """Check whether it's possible to import the necessary modules.
        There is no check on the file formats at runtime.
        :raises :exc:`FatalReplayGainError`: if the modules cannot be imported
        """
        try:
            import audiotools
            import audiotools.replaygain
        except ImportError:
            raise FatalReplayGainError(
                u"Failed to load audiotools: audiotools not found"
            )
        self._mod_audiotools = audiotools
        self._mod_replaygain = audiotools.replaygain

    def open_audio_file(self, item):
        """Open the file to read the PCM stream from the using
        ``item.path``.
        :return: the audiofile instance
        :rtype: :class:`audiotools.AudioFile`
        :raises :exc:`ReplayGainError`: if the file is not found or the
        file format is not supported
        """
        try:
            audiofile = self._mod_audiotools.open(py3_path(syspath(item.path)))
        except IOError:
            raise ReplayGainError(
                u"File {} was not found".format(item.path)
            )
        except self._mod_audiotools.UnsupportedFile:
            raise ReplayGainError(
                u"Unsupported file type {}".format(item.format)
            )
        return audiofile

    def init_replaygain(self, audiofile, item):
        """Return an initialized :class:`audiotools.replaygain.ReplayGain`
        instance, which requires the sample rate of the song(s) on which
        the ReplayGain values will be computed. The item is passed in case
        the sample rate is invalid to log the stored item sample rate.
        :return: initialized replaygain object
        :rtype: :class:`audiotools.replaygain.ReplayGain`
        :raises: :exc:`ReplayGainError` if the sample rate is invalid
        """
        try:
            rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
        except ValueError:
            # (An unreachable `return` that followed this raise has been
            # removed.)
            raise ReplayGainError(
                u"Unsupported sample rate {}".format(item.samplerate))
        return rg

    def compute_track_gain(self, items, target_level, peak):
        """Compute ReplayGain values for the requested items.
        :return list: list of :class:`Gain` objects
        """
        return [self._compute_track_gain(item, target_level) for item in items]

    def _with_target_level(self, gain, target_level):
        """Return `gain` relative to `target_level`.
        Assumes `gain` is relative to 89 db.
        """
        return gain + (target_level - 89)

    def _title_gain(self, rg, audiofile, target_level):
        """Get the gain result pair from PyAudioTools using the `ReplayGain`
        instance `rg` for the given `audiofile`.
        Wraps `rg.title_gain(audiofile.to_pcm())` and throws a
        `ReplayGainError` when the library fails.
        """
        try:
            # The method needs an audiotools.PCMReader instance that can
            # be obtained from an audiofile instance.
            gain, peak = rg.title_gain(audiofile.to_pcm())
        except ValueError as exc:
            # `audiotools.replaygain` can raise a `ValueError` if the sample
            # rate is incorrect.
            self._log.debug(u'error in rg.title_gain() call: {}', exc)
            raise ReplayGainError(u'audiotools audio data error')
        return self._with_target_level(gain, target_level), peak

    def _compute_track_gain(self, item, target_level):
        """Compute ReplayGain value for the requested item.
        :rtype: :class:`Gain`
        """
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)

        # Each call to title_gain on a ReplayGain object returns peak and gain
        # of the track.
        rg_track_gain, rg_track_peak = self._title_gain(
            rg, audiofile, target_level
        )

        self._log.debug(u'ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}',
                        item.artist, item.title, rg_track_gain, rg_track_peak)
        return Gain(gain=rg_track_gain, peak=rg_track_peak)

    def compute_album_gain(self, items, target_level, peak):
        """Compute ReplayGain values for the requested album and its items.
        :rtype: :class:`AlbumGain`
        """
        # Materialize the items once so a generator argument is not
        # exhausted by the first access and indexing is safe.
        items = list(items)

        # The first item is taken and opened to get the sample rate to
        # initialize the replaygain object. The object is used for all the
        # tracks in the album to get the album values.
        item = items[0]
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)

        track_gains = []
        for item in items:
            audiofile = self.open_audio_file(item)
            rg_track_gain, rg_track_peak = self._title_gain(
                rg, audiofile, target_level
            )
            track_gains.append(
                Gain(gain=rg_track_gain, peak=rg_track_peak)
            )
            self._log.debug(u'ReplayGain for track {0}: {1:.2f}, {2:.2f}',
                            item, rg_track_gain, rg_track_peak)

        # After getting the values for all tracks, it's possible to get the
        # album values.
        rg_album_gain, rg_album_peak = rg.album_gain()
        rg_album_gain = self._with_target_level(rg_album_gain, target_level)
        self._log.debug(u'ReplayGain for album {0}: {1:.2f}, {2:.2f}',
                        items[0].album, rg_album_gain, rg_album_peak)

        return AlbumGain(
            Gain(gain=rg_album_gain, peak=rg_album_peak),
            track_gains=track_gains
        )
class ExceptionWatcher(Thread):
    """Monitors a queue for exceptions asynchronously.
    Once an exception occurs, raise it and execute a callback.
    """

    def __init__(self, queue, callback):
        """`queue` delivers ``(type, value, traceback)`` triples;
        `callback` is invoked once before the exception is re-raised.
        """
        self._queue = queue
        self._callback = callback
        self._stopevent = Event()
        Thread.__init__(self)

    def run(self):
        while not self._stopevent.is_set():
            try:
                # Block briefly on the queue instead of spinning with
                # `get_nowait` (the previous busy-wait burned CPU); wake
                # up periodically to re-check `_stopevent`.
                exc = self._queue.get(timeout=0.1)
            except queue.Empty:
                continue
            self._callback()
            six.reraise(exc[0], exc[1], exc[2])

    def join(self, timeout=None):
        # Signal the polling loop to exit before joining the thread.
        self._stopevent.set()
        Thread.join(self, timeout)
# Main plugin logic.
class ReplayGainPlugin(BeetsPlugin):
"""Provides ReplayGain analysis.
"""
backends = {
"command": CommandBackend,
"gstreamer": GStreamerBackend,
"audiotools": AudioToolsBackend,
"ffmpeg": FfmpegBackend,
}
peak_methods = {
"true": Peak.true,
"sample": Peak.sample,
}
    def __init__(self):
        """Read and validate configuration, register import listeners,
        and instantiate the selected analysis backend.
        """
        super(ReplayGainPlugin, self).__init__()

        # default backend is 'command' for backward-compatibility.
        self.config.add({
            'overwrite': False,
            'auto': True,
            'backend': u'command',
            'threads': cpu_count(),
            'parallel_on_import': False,
            'per_disc': False,
            'peak': 'true',
            'targetlevel': 89,
            'r128': ['Opus'],
            'r128_targetlevel': lufs_to_db(-23),
        })

        self.overwrite = self.config['overwrite'].get(bool)
        self.per_disc = self.config['per_disc'].get(bool)

        # Remember which backend is used for CLI feedback
        self.backend_name = self.config['backend'].as_str()

        if self.backend_name not in self.backends:
            raise ui.UserError(
                u"Selected ReplayGain backend {0} is not supported. "
                u"Please select one of: {1}".format(
                    self.backend_name,
                    u', '.join(self.backends.keys())
                )
            )

        peak_method = self.config["peak"].as_str()
        if peak_method not in self.peak_methods:
            raise ui.UserError(
                u"Selected ReplayGain peak method {0} is not supported. "
                u"Please select one of: {1}".format(
                    peak_method,
                    u', '.join(self.peak_methods.keys())
                )
            )
        self._peak_method = self.peak_methods[peak_method]

        # On-import analysis.
        if self.config['auto']:
            self.register_listener('import_begin', self.import_begin)
            self.register_listener('import', self.import_end)
            self.import_stages = [self.imported]

        # Formats to use R128.
        self.r128_whitelist = self.config['r128'].as_str_seq()

        # Backend construction may probe for external tools/libraries;
        # surface any failure as a user-facing error.
        try:
            self.backend_instance = self.backends[self.backend_name](
                self.config, self._log
            )
        except (ReplayGainError, FatalReplayGainError) as e:
            raise ui.UserError(
                u'replaygain initialization failed: {0}'.format(e))
    def should_use_r128(self, item):
        """Checks the plugin setting to decide whether the calculation
        should be done using the EBU R128 standard and use R128_ tags instead.
        """
        return item.format in self.r128_whitelist
    def track_requires_gain(self, item):
        """Return True when `item` is missing the gain tags appropriate
        to its format (R128_* or rg_*), or when `overwrite` is set.
        """
        return self.overwrite or \
            (self.should_use_r128(item) and not item.r128_track_gain) or \
            (not self.should_use_r128(item) and
                (not item.rg_track_gain or not item.rg_track_peak))
    def album_requires_gain(self, album):
        """Return True when any item of `album` is missing the gain tags
        appropriate to its format, or when `overwrite` is set.
        """
        # Skip calculating gain only when *all* files don't need
        # recalculation. This way, if any file among an album's tracks
        # needs recalculation, we still get an accurate album gain
        # value.
        return self.overwrite or \
            any([self.should_use_r128(item) and
                 (not item.r128_track_gain or not item.r128_album_gain)
                 for item in album.items()]) or \
            any([not self.should_use_r128(item) and
                 (not item.rg_album_gain or not item.rg_album_peak)
                 for item in album.items()])
    def _store(self, item):
        """Store an item to the database.
        When testing, item.store() sometimes fails non-destructively with
        sqlite.OperationalError.
        This method is here to be patched to a retry-once helper function
        in test_replaygain.py, so that it can still fail appropriately
        outside of these tests.
        """
        item.store()
    def store_track_gain(self, item, track_gain):
        """Write a track's rg_* gain/peak fields and persist the item."""
        item.rg_track_gain = track_gain.gain
        item.rg_track_peak = track_gain.peak
        self._store(item)

        self._log.debug(u'applied track gain {0} LU, peak {1} of FS',
                        item.rg_track_gain, item.rg_track_peak)

    def store_album_gain(self, item, album_gain):
        """Write the album's rg_* gain/peak fields onto `item` and
        persist it.
        """
        item.rg_album_gain = album_gain.gain
        item.rg_album_peak = album_gain.peak
        self._store(item)

        self._log.debug(u'applied album gain {0} LU, peak {1} of FS',
                        item.rg_album_gain, item.rg_album_peak)

    def store_track_r128_gain(self, item, track_gain):
        """Write a track's R128_ gain field (no peak for R128) and
        persist the item.
        """
        item.r128_track_gain = track_gain.gain
        self._store(item)

        self._log.debug(u'applied r128 track gain {0} LU',
                        item.r128_track_gain)

    def store_album_r128_gain(self, item, album_gain):
        """Write the album's R128_ gain field onto `item` and persist
        it.
        """
        item.r128_album_gain = album_gain.gain
        self._store(item)

        self._log.debug(u'applied r128 album gain {0} LU',
                        item.r128_album_gain)
    def tag_specific_values(self, items):
        """Return some tag specific values.
        Returns a tuple (store_track_gain, store_album_gain, target_level,
        peak_method).
        """
        # If any item in the batch is R128, the whole batch is handled
        # with R128 tags and the R128 target level.
        if any([self.should_use_r128(item) for item in items]):
            store_track_gain = self.store_track_r128_gain
            store_album_gain = self.store_album_r128_gain
            target_level = self.config['r128_targetlevel'].as_number()
            peak = Peak.none  # R128_* tags do not store the track/album peak
        else:
            store_track_gain = self.store_track_gain
            store_album_gain = self.store_album_gain
            target_level = self.config['targetlevel'].as_number()
            peak = self._peak_method

        return store_track_gain, store_album_gain, target_level, peak
def handle_album(self, album, write, force=False):
"""Compute album and track replay gain store it in all of the
album's items.
If ``write`` is truthy then ``item.write()`` is called for each
item. If replay gain information is already present in all
items, nothing is done.
"""
if not force and not self.album_requires_gain(album):
self._log.info(u'Skipping album {0}', album)
return
if (any([self.should_use_r128(item) for item in album.items()]) and not
all(([self.should_use_r128(item) for item in album.items()]))):
self._log.error(
u"Cannot calculate gain for album {0} (incompatible formats)",
album)
return
self._log.info(u'analyzing {0}', album)
tag_vals = self.tag_specific_values(album.items())
store_track_gain, store_album_gain, target_level, peak = tag_vals
discs = dict()
if self.per_disc:
for item in album.items():
if discs.get(item.disc) is None:
discs[item.disc] = []
discs[item.disc].append(item)
else:
discs[1] = album.items()
for discnumber, items in discs.items():
def _store_album(album_gain):
if not album_gain or not album_gain.album_gain \
or len(album_gain.track_gains) != len(items):
# In some cases, backends fail to produce a valid
# `album_gain` without throwing FatalReplayGainError
# => raise non-fatal exception & continue
raise ReplayGainError(
u"ReplayGain backend `{}` failed "
u"for some tracks in album {}"
.format(self.backend_name, album)
)
for item, track_gain in zip(items,
album_gain.track_gains):
store_track_gain(item, track_gain)
store_album_gain(item, album_gain.album_gain)
if write:
item.try_write()
self._log.debug(u'done analyzing {0}', item)
try:
self._apply(
self.backend_instance.compute_album_gain, args=(),
kwds={
"items": [i for i in items],
"target_level": target_level,
"peak": peak
},
callback=_store_album
)
except ReplayGainError as e:
self._log.info(u"ReplayGain error: {0}", e)
except FatalReplayGainError as e:
raise ui.UserError(
u"Fatal replay gain error: {0}".format(e))
def handle_track(self, item, write, force=False):
"""Compute track replay gain and store it in the item.
If ``write`` is truthy then ``item.write()`` is called to write
the data to disk. If replay gain information is already present
in the item, nothing is done.
"""
if not force and not self.track_requires_gain(item):
self._log.info(u'Skipping track {0}', item)
return
tag_vals = self.tag_specific_values([item])
store_track_gain, store_album_gain, target_level, peak = tag_vals
def _store_track(track_gains):
if not track_gains or len(track_gains) != 1:
# In some cases, backends fail to produce a valid
# `track_gains` without throwing FatalReplayGainError
# => raise non-fatal exception & continue
raise ReplayGainError(
u"ReplayGain backend `{}` failed for track {}"
.format(self.backend_name, item)
)
store_track_gain(item, track_gains[0])
if write:
item.try_write()
self._log.debug(u'done analyzing {0}', item)
try:
self._apply(
self.backend_instance.compute_track_gain, args=(),
kwds={
"items": [item],
"target_level": target_level,
"peak": peak,
},
callback=_store_track
)
except ReplayGainError as e:
self._log.info(u"ReplayGain error: {0}", e)
except FatalReplayGainError as e:
raise ui.UserError(u"Fatal replay gain error: {0}".format(e))
def _has_pool(self):
"""Check whether a `ThreadPool` is running instance in `self.pool`
"""
if hasattr(self, 'pool'):
if isinstance(self.pool, ThreadPool) and self.pool._state == RUN:
return True
return False
def open_pool(self, threads):
"""Open a `ThreadPool` instance in `self.pool`
"""
if not self._has_pool() and self.backend_instance.do_parallel:
self.pool = ThreadPool(threads)
self.exc_queue = queue.Queue()
signal.signal(signal.SIGINT, self._interrupt)
self.exc_watcher = ExceptionWatcher(
self.exc_queue, # threads push exceptions here
self.terminate_pool # abort once an exception occurs
)
self.exc_watcher.start()
def _apply(self, func, args, kwds, callback):
if self._has_pool():
def catch_exc(func, exc_queue, log):
"""Wrapper to catch raised exceptions in threads
"""
def wfunc(*args, **kwargs):
try:
return func(*args, **kwargs)
except ReplayGainError as e:
log.info(e.args[0]) # log non-fatal exceptions
except Exception:
exc_queue.put(sys.exc_info())
return wfunc
# Wrap function and callback to catch exceptions
func = catch_exc(func, self.exc_queue, self._log)
callback = catch_exc(callback, self.exc_queue, self._log)
self.pool.apply_async(func, args, kwds, callback)
else:
callback(func(*args, **kwds))
def terminate_pool(self):
"""Terminate the `ThreadPool` instance in `self.pool`
(e.g. stop execution in case of exception)
"""
# Don't call self._as_pool() here,
# self.pool._state may not be == RUN
if hasattr(self, 'pool') and isinstance(self.pool, ThreadPool):
self.pool.terminate()
self.pool.join()
# self.exc_watcher.join()
def _interrupt(self, signal, frame):
try:
self._log.info('interrupted')
self.terminate_pool()
exit(0)
except SystemExit:
# Silence raised SystemExit ~ exit(0)
pass
def close_pool(self):
"""Close the `ThreadPool` instance in `self.pool` (if there is one)
"""
if self._has_pool():
self.pool.close()
self.pool.join()
self.exc_watcher.join()
def import_begin(self, session):
"""Handle `import_begin` event -> open pool
"""
threads = self.config['threads'].get(int)
if self.config['parallel_on_import'] \
and self.config['auto'] \
and threads:
self.open_pool(threads)
def import_end(self, paths):
"""Handle `import` event -> close pool
"""
self.close_pool()
def imported(self, session, task):
"""Add replay gain info to items or albums of ``task``.
"""
if self.config['auto']:
if task.is_album:
self.handle_album(
task.album,
self.config['auto'].get(bool),
self.config['overwrite'].get(bool)
)
else:
self.handle_track(
task.item,
self.config['auto'].get(bool),
self.config['overwrite'].get(bool)
)
    def commands(self):
        """Return the "replaygain" ui subcommand.

        The subcommand analyzes the albums/items matched by the query
        arguments, optionally in parallel (``--threads``), and writes or
        skips tag writing per ``-w``/``-W``.
        """
        def func(lib, opts, args):
            # Analyze everything matched by the query; interrupts
            # (Ctrl-C) are silenced so partial runs exit cleanly.
            try:
                write = ui.should_write(opts.write)
                force = opts.force
                # Bypass self.open_pool() if called with `--threads 0`
                if opts.threads != 0:
                    threads = opts.threads or self.config['threads'].get(int)
                    self.open_pool(threads)
                if opts.album:
                    albums = lib.albums(ui.decargs(args))
                    self._log.info(
                        "Analyzing {} albums ~ {} backend..."
                        .format(len(albums), self.backend_name)
                    )
                    for album in albums:
                        self.handle_album(album, write, force)
                else:
                    items = lib.items(ui.decargs(args))
                    self._log.info(
                        "Analyzing {} tracks ~ {} backend..."
                        .format(len(items), self.backend_name)
                    )
                    for item in items:
                        self.handle_track(item, write, force)
                # Wait for any in-flight pool work before returning.
                self.close_pool()
            except (SystemExit, KeyboardInterrupt):
                # Silence interrupt exceptions
                pass
        cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain')
        cmd.parser.add_album_option()
        cmd.parser.add_option(
            "-t", "--threads", dest="threads", type=int,
            help=u'change the number of threads, \
            defaults to maximum available processors'
        )
        cmd.parser.add_option(
            "-f", "--force", dest="force", action="store_true", default=False,
            help=u"analyze all files, including those that "
            "already have ReplayGain metadata")
        cmd.parser.add_option(
            "-w", "--write", default=None, action="store_true",
            help=u"write new metadata to files' tags")
        cmd.parser.add_option(
            "-W", "--nowrite", dest="write", action="store_false",
            help=u"don't write metadata (opposite of -w)")
        cmd.func = func
        return [cmd]
| {
"content_hash": "a513141601df05e3a561dd550c27f2d3",
"timestamp": "",
"source": "github",
"line_count": 1374,
"max_line_length": 79,
"avg_line_length": 36.1863173216885,
"alnum_prop": 0.5532984714400644,
"repo_name": "shamangeorge/beets",
"id": "5060c8efe7e0e5ac14713f26ab6e17e46ebd8939",
"size": "50428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beetsplug/replaygain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3539"
},
{
"name": "HTML",
"bytes": "7094"
},
{
"name": "JavaScript",
"bytes": "86092"
},
{
"name": "Python",
"bytes": "2027754"
},
{
"name": "Shell",
"bytes": "7448"
}
],
"symlink_target": ""
} |
"""Utilities for configuring platform specific installation."""
import os
import re
import shutil
from googlecloudsdk.core.util import platforms
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.credentials import gce as c_gce
# pylint:disable=superfluous-parens
# pylint:disable=unused-argument
def _UpdatePathForWindows(bin_path):
  """Update the Windows system path to include bin_path.

  Args:
    bin_path: str, The absolute path to the directory that will contain
      Cloud SDK binaries.
  """
  # pylint:disable=g-import-not-at-top, we want to only attempt these imports
  # on windows.
  try:
    import win32con
    import win32gui
    try:
      # Python 3
      import winreg
    except ImportError:
      # Python 2
      import _winreg as winreg
  except ImportError:
    # Without pywin32/winreg we cannot touch the registry; fall back to
    # telling the user what to add to PATH manually.
    print("""\
The installer is unable to automatically update your system PATH. Please add
  {path}
to your system PATH to enable easy use of the Cloud SDK Command Line Tools.
""".format(path=bin_path))
    return
  def GetEnv(name):
    # Read `name` from the user's persistent Environment registry key;
    # a missing value reads as ''.
    root = winreg.HKEY_CURRENT_USER
    subkey = 'Environment'
    key = winreg.OpenKey(root, subkey, 0, winreg.KEY_READ)
    try:
      value, _ = winreg.QueryValueEx(key, name)
    # pylint:disable=undefined-variable, This variable is defined in windows.
    except WindowsError:
      return ''
    return value
  def SetEnv(name, value):
    # Persist the value and broadcast WM_SETTINGCHANGE so running
    # shells/Explorer notice the new environment.
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Environment', 0,
                         winreg.KEY_ALL_ACCESS)
    winreg.SetValueEx(key, name, 0, winreg.REG_EXPAND_SZ, value)
    winreg.CloseKey(key)
    win32gui.SendMessage(
        win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 'Environment')
    return value
  def Remove(paths, value):
    # Remove every occurrence of value from the list, in place.
    while value in paths:
      paths.remove(value)
  def PrependEnv(name, values):
    # Move (or insert) each value to the front of the ';'-separated list.
    paths = GetEnv(name).split(';')
    for value in values:
      if value in paths:
        Remove(paths, value)
      paths.insert(0, value)
    SetEnv(name, ';'.join(paths))
  PrependEnv('Path', [bin_path])
  print("""\
The following directory has been added to your PATH.
  {bin_path}
Create a new command shell for the changes to take effect.
""".format(bin_path=bin_path))
def _GetRcData(comment, rc_path, rc_data, pattern=None):
"""Generates the comment and `source rc_path` lines.
Args:
comment: The shell comment string that precedes the source line.
rc_path: The path of the rc file to source.
rc_data: The current comment and source rc lines or None.
pattern: A regex pattern that matches comment, None for exact match on
comment.
Returns:
The comment and `source rc_path` lines to be inserted into a shell rc file.
"""
if not pattern:
pattern = re.escape(comment)
subre = re.compile(r'\n*' + pattern + r'\n.*$', re.MULTILINE)
line = "{comment}\nsource '{rc_path}'\n".format(
comment=comment, rc_path=rc_path)
filtered_data = subre.sub('', rc_data)
rc_data = '{filtered_data}\n{line}'.format(
filtered_data=filtered_data,
line=line)
return rc_data
class _RcPaths(object):
"""Pathnames for the updateable rc file and files it may source."""
def __init__(self, shell, rc_path, sdk_root):
self.rc_path = rc_path
self.completion = os.path.join(
sdk_root, 'completion.{shell}.inc'.format(shell=shell))
self.path = os.path.join(
sdk_root, 'path.{shell}.inc'.format(shell=shell))
def _GetPreferredShell(path, default='bash'):
"""Returns the preferred shell name based on the base file name in path.
Args:
path: str, The file path to check.
default: str, The default value to return if a preferred name cannot be
determined.
Returns:
The preferred user shell name or default if none can be determined.
"""
name = os.path.basename(path)
for shell in ('bash', 'zsh', 'ksh'):
if shell in name:
return shell
return default
def _GetShellRcFileName(shell, host_os):
"""Returns the RC file name for shell and host_os.
Args:
shell: str, The shell base name.
host_os: str, The host os identification string.
Returns:
The shell RC file name, '.bashrc' by default.
"""
if shell == 'ksh':
return os.environ.get('ENV', None) or '.kshrc'
elif shell != 'bash':
return '.{shell}rc'.format(shell=shell)
elif host_os == platforms.OperatingSystem.LINUX:
if c_gce.Metadata().connected:
return '.bash_profile'
elif host_os == platforms.OperatingSystem.MACOSX:
return '.bash_profile'
elif host_os == platforms.OperatingSystem.MSYS:
return '.profile'
return '.bashrc'
def _GetRcPaths(command_completion, path_update, rc_path, sdk_root, host_os):
  """Returns an _RcPaths object for the preferred user shell.

  Args:
    command_completion: bool, Whether or not to do command completion. If None,
      ask.
    path_update: bool, Whether or not to update PATH. If None, ask.
    rc_path: str, The path to the rc file to update. If None, ask.
    sdk_root: str, The path to the Cloud SDK root.
    host_os: str, The host os identification string.

  Returns:
    An _RcPaths() object for the preferred user shell.
  """
  # An initial guess on the preferred user shell based on the environment.
  preferred_shell = _GetPreferredShell(os.environ.get('SHELL', '/bin/sh'))
  if not (command_completion or path_update):
    # Nothing to insert, so no rc file needs updating.
    rc_path = None
  elif not rc_path:
    default_rc = os.path.expanduser(
        os.path.join('~', _GetShellRcFileName(preferred_shell, host_os)))
    answer = console_io.PromptResponse((
        'The Google Cloud SDK installer will now prompt you to update an rc '
        'file to bring the Google Cloud CLIs into your environment.\n\n'
        'Enter a path to an rc file to update, or leave blank to use '
        '[{rc_path}]: ').format(rc_path=default_rc))
    rc_path = os.path.expanduser(answer) if answer else default_rc
  if rc_path:
    # Check the rc_path for a better hint at the user preferred shell.
    preferred_shell = _GetPreferredShell(rc_path, default=preferred_shell)
  return _RcPaths(preferred_shell, rc_path, sdk_root)
def UpdateRC(command_completion, path_update, rc_path, bin_path, sdk_root):
  """Update the system path to include bin_path.

  Args:
    command_completion: bool, Whether or not to do command completion. If None,
      ask.
    path_update: bool, Whether or not to update PATH. If None, ask.
    rc_path: str, The path to the rc file to update. If None, ask.
    bin_path: str, The absolute path to the directory that will contain
      Cloud SDK binaries.
    sdk_root: str, The path to the Cloud SDK root.
  """
  host_os = platforms.OperatingSystem.Current()
  if host_os == platforms.OperatingSystem.WINDOWS:
    # Windows edits the registry %PATH% instead of an rc file; command
    # completion is not offered there.
    if path_update is None:
      path_update = console_io.PromptContinue(
          prompt_string='Update %PATH% to include Cloud SDK binaries?')
    if path_update:
      _UpdatePathForWindows(bin_path)
    return
  if command_completion is None:
    if path_update is None:  # Ask only one question if both were not set.
      path_update = console_io.PromptContinue(
          prompt_string=('\nModify profile to update your $PATH '
                         'and enable shell command completion?'))
      command_completion = path_update
    else:
      command_completion = console_io.PromptContinue(
          prompt_string=('\nModify profile to enable shell command '
                         'completion?'))
  elif path_update is None:
    path_update = console_io.PromptContinue(
        prompt_string=('\nModify profile to update your $PATH?'))
  rc_paths = _GetRcPaths(command_completion, path_update, rc_path, sdk_root,
                         host_os)
  if rc_paths.rc_path:
    if os.path.exists(rc_paths.rc_path):
      with open(rc_paths.rc_path) as rc_file:
        rc_data = rc_file.read()
        # Keep the original contents so we can detect a no-op update.
        cached_rc_data = rc_data
    else:
      rc_data = ''
      cached_rc_data = ''
    if path_update:
      rc_data = _GetRcData('# The next line updates PATH for the Google Cloud'
                           ' SDK.', rc_paths.path, rc_data)
    if command_completion:
      rc_data = _GetRcData('# The next line enables shell command completion'
                           ' for gcloud.', rc_paths.completion, rc_data,
                           pattern='# The next line enables [a-z][a-z]*'
                           'completion for gcloud.')
    if cached_rc_data == rc_data:
      # Nothing changed; don't rewrite or back up the rc file.
      print('No changes necessary for [{rc}].'.format(rc=rc_paths.rc_path))
      return
    if os.path.exists(rc_paths.rc_path):
      # Preserve the user's previous rc file before overwriting it.
      rc_backup = rc_paths.rc_path + '.backup'
      print('Backing up [{rc}] to [{backup}].'.format(
          rc=rc_paths.rc_path, backup=rc_backup))
      shutil.copyfile(rc_paths.rc_path, rc_backup)
    with open(rc_paths.rc_path, 'w') as rc_file:
      rc_file.write(rc_data)
    print("""\
[{rc_path}] has been updated.
Start a new shell for the changes to take effect.
""".format(rc_path=rc_paths.rc_path))
  # If a feature was declined (or no rc file chosen), tell the user how to
  # enable it manually later.
  if not command_completion:
    print("""\
Source [{rc}]
in your profile to enable shell command completion for gcloud.
""".format(rc=rc_paths.completion))
  if not path_update:
    print("""\
Source [{rc}]
in your profile to add the Google Cloud SDK command line tools to your $PATH.
""".format(rc=rc_paths.path))
| {
"content_hash": "888c4c045e1fb8cd8e1c8e9e35f0e04d",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 79,
"avg_line_length": 32.47386759581882,
"alnum_prop": 0.6606223175965665,
"repo_name": "wemanuel/smry",
"id": "6bceea6635b175f7069a99069355b1ed6e330263",
"size": "9371",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "smry/server-auth/ls/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/util/platforms_install.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.