| code (stringlengths: 2-1.05M) | repo_name (stringlengths: 5-104) | path (stringlengths: 4-251) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int32: 2-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_hosts_facts
short_description: Retrieve facts about one or more oVirt hosts
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt hosts."
notes:
- "This module creates a new top-level C(ovirt_hosts) fact, which
contains a list of hosts."
options:
pattern:
description:
- "Search term which is accepted by oVirt search backend."
- "For example to search host X from datacenter Y use following pattern:
name=X and datacenter=Y"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all hosts whose names start with C(host) and
# belong to data center C(west):
- ovirt_hosts_facts:
pattern: name=host* and datacenter=west
- debug:
var: ovirt_hosts
'''
RETURN = '''
ovirt_hosts:
description: "List of dictionaries describing the hosts. Host attribues are mapped to dictionary keys,
all hosts attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/host."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
hosts_service = connection.system_service().hosts_service()
hosts = hosts_service.list(search=module.params['pattern'])
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_hosts=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in hosts
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| bjolivot/ansible | lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py | Python | gpl-3.0 | 3,456 |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2005 Nuxeo SARL <http://nuxeo.com>
#
# Authors : Sean Gillies <sgillies@frii.com>
# Julien Anguenot <ja@nuxeo.com>
#
# Contact email: sgillies@frii.com
# =============================================================================
"""
API for Web Map Service (WMS) methods and metadata.
Currently supports only version 1.1.1 of the WMS protocol.
"""
import cgi
import urllib2
from urllib import urlencode
import warnings
from etree import etree
from .util import openURL, testXMLValue, extract_xml_list, xmltag_split
from fgdc import Metadata
from iso import MD_Metadata
class ServiceException(Exception):
"""WMS ServiceException
Attributes:
message -- short error message
xml -- full xml error message from server
"""
def __init__(self, message, xml):
self.message = message
self.xml = xml
def __str__(self):
return repr(self.message)
class CapabilitiesError(Exception):
pass
class WebMapService(object):
"""Abstraction for OGC Web Map Service (WMS).
Implements IWebMapService.
"""
def __getitem__(self,name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[name]
else:
raise KeyError, "No content named %s" % name
def __init__(self, url, version='1.1.1', xml=None,
username=None, password=None, parse_remote_metadata=False
):
"""Initialize."""
self.url = url
self.username = username
self.password = password
self.version = version
self._capabilities = None
# Authentication handled by Reader
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
if xml: # read from stored xml
self._capabilities = reader.readString(xml)
else: # read from server
self._capabilities = reader.read(self.url)
# avoid building capabilities metadata if the response is a ServiceExceptionReport
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
# build metadata objects
self._buildMetadata(parse_remote_metadata)
def _getcapproperty(self):
if not self._capabilities:
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
self._capabilities = ServiceMetadata(reader.read(self.url))
return self._capabilities
def _buildMetadata(self, parse_remote_metadata=False):
''' set up capabilities metadata objects '''
#serviceIdentification metadata
serviceelem=self._capabilities.find('Service')
self.identification=ServiceIdentification(serviceelem, self.version)
#serviceProvider metadata
self.provider=ServiceProvider(serviceelem)
#serviceOperations metadata
self.operations=[]
for elem in self._capabilities.find('Capability/Request')[:]:
self.operations.append(OperationMetadata(elem))
#serviceContents metadata: our assumption is that services use a top-level
#layer as a metadata organizer, nothing more.
self.contents={}
caps = self._capabilities.find('Capability')
#recursively gather content metadata for all layer elements.
#To the WebMapService.contents store only metadata of named layers.
def gather_layers(parent_elem, parent_metadata):
for index, elem in enumerate(parent_elem.findall('Layer')):
cm = ContentMetadata(elem, parent=parent_metadata, index=index+1, parse_remote_metadata=parse_remote_metadata)
if cm.id:
if cm.id in self.contents:
warnings.warn('Content metadata for layer "%s" already exists. Using child layer' % cm.id)
self.contents[cm.id] = cm
gather_layers(elem, cm)
gather_layers(caps, None)
#exceptions
self.exceptions = [f.text for f \
in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items=[]
for item in self.contents:
items.append((item,self.contents[item]))
return items
def getcapabilities(self):
"""Request and return capabilities document from the WMS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
u = self._open(reader.capabilities_url(self.url))
# check for service exceptions, and return
if u.info().gettype() == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = str(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getmap(self, layers=None, styles=None, srs=None, bbox=None,
format=None, size=None, time=None, transparent=False,
bgcolor='#FFFFFF',
exceptions='application/vnd.ogc.se_xml',
method='Get',
**kwargs
):
"""Request and return an image from the WMS as a file-like object.
Parameters
----------
layers : list
List of content layer names.
styles : list
Optional list of named styles, must be the same length as the
layers list.
srs : string
A spatial reference system identifier.
bbox : tuple
(left, bottom, right, top) in srs units.
format : string
Output image format such as 'image/jpeg'.
size : tuple
(width, height) in pixels.
transparent : bool
Optional. Transparent background if True.
bgcolor : string
Optional. Image background color.
method : string
Optional. HTTP DCP method name: Get or Post.
**kwargs : extra arguments
anything else e.g. vendor specific parameters
Example
-------
>>> wms = WebMapService('http://giswebservices.massgis.state.ma.us/geoserver/wms', version='1.1.1')
>>> img = wms.getmap(layers=['massgis:GISDATA.SHORELINES_ARC'],\
styles=[''],\
srs='EPSG:4326',\
bbox=(-70.8, 42, -70, 42.8),\
size=(300, 300),\
format='image/jpeg',\
transparent=True)
>>> out = open('example.jpg', 'wb')
>>> out.write(img.read())
>>> out.close()
"""
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetMap').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'version': self.version, 'request': 'GetMap'}
# check layers and styles
assert len(layers) > 0
request['layers'] = ','.join(layers)
if styles:
assert len(styles) == len(layers)
request['styles'] = ','.join(styles)
else:
request['styles'] = ''
# size
request['width'] = str(size[0])
request['height'] = str(size[1])
request['srs'] = str(srs)
request['bbox'] = ','.join([repr(x) for x in bbox])
request['format'] = str(format)
request['transparent'] = str(transparent).upper()
request['bgcolor'] = '0x' + bgcolor[1:7]
request['exceptions'] = str(exceptions)
if time is not None:
request['time'] = str(time)
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
data = urlencode(request)
u = openURL(base_url, data, method, username = self.username, password = self.password)
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = unicode(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getServiceXML(self):
xml = None
if self._capabilities is not None:
xml = etree.tostring(self._capabilities)
return xml
def getfeatureinfo(self):
raise NotImplementedError
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class ServiceIdentification(object):
''' Implements IServiceIdentificationMetadata '''
def __init__(self, infoset, version):
self._root=infoset
self.type = testXMLValue(self._root.find('Name'))
self.version = version
self.title = testXMLValue(self._root.find('Title'))
self.abstract = testXMLValue(self._root.find('Abstract'))
self.keywords = extract_xml_list(self._root.findall('KeywordList/Keyword'))
self.accessconstraints = testXMLValue(self._root.find('AccessConstraints'))
self.fees = testXMLValue(self._root.find('Fees'))
class ServiceProvider(object):
''' Implements IServiceProviderMetadata '''
def __init__(self, infoset):
self._root=infoset
name=self._root.find('ContactInformation/ContactPersonPrimary/ContactOrganization')
if name is not None:
self.name=name.text
else:
self.name=None
self.url=self._root.find('OnlineResource').attrib.get('{http://www.w3.org/1999/xlink}href', '')
#contact metadata
contact = self._root.find('ContactInformation')
## sometimes there is a contact block that is empty, so make
## sure there are children to parse
if contact is not None and contact[:] != []:
self.contact = ContactMetadata(contact)
else:
self.contact = None
def getContentByName(self, name):
"""Return a named content item."""
for item in self.contents:
if item.name == name:
return item
raise KeyError, "No content named %s" % name
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class ContentMetadata:
"""
Abstraction for WMS layer metadata.
Implements IContentMetadata.
"""
def __init__(self, elem, parent=None, index=0, parse_remote_metadata=False, timeout=30):
if elem.tag != 'Layer':
raise ValueError('%s should be a Layer' % (elem,))
self.parent = parent
if parent:
self.index = "%s.%d" % (parent.index, index)
else:
self.index = str(index)
self.id = self.name = testXMLValue(elem.find('Name'))
# layer attributes
self.queryable = int(elem.attrib.get('queryable', 0))
self.cascaded = int(elem.attrib.get('cascaded', 0))
self.opaque = int(elem.attrib.get('opaque', 0))
self.noSubsets = int(elem.attrib.get('noSubsets', 0))
self.fixedWidth = int(elem.attrib.get('fixedWidth', 0))
self.fixedHeight = int(elem.attrib.get('fixedHeight', 0))
# title is mandatory property
self.title = None
title = testXMLValue(elem.find('Title'))
if title is not None:
self.title = title.strip()
self.abstract = testXMLValue(elem.find('Abstract'))
# bboxes
b = elem.find('BoundingBox')
self.boundingBox = None
if b is not None:
try: #sometimes the SRS attribute is (wrongly) not provided
srs=b.attrib['SRS']
except KeyError:
srs=None
self.boundingBox = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
srs,
)
elif self.parent:
if hasattr(self.parent, 'boundingBox'):
self.boundingBox = self.parent.boundingBox
# ScaleHint
sh = elem.find('ScaleHint')
self.scaleHint = None
if sh is not None:
self.scaleHint = {'min': sh.attrib['min'], 'max': sh.attrib['max']}
attribution = elem.find('Attribution')
if attribution is not None:
self.attribution = dict()
title = attribution.find('Title')
url = attribution.find('OnlineResource')
logo = attribution.find('LogoURL')
if title is not None:
self.attribution['title'] = title.text
if url is not None:
self.attribution['url'] = url.attrib['{http://www.w3.org/1999/xlink}href']
if logo is not None:
self.attribution['logo_size'] = (int(logo.attrib['width']), int(logo.attrib['height']))
self.attribution['logo_url'] = logo.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
b = elem.find('LatLonBoundingBox')
if b is not None:
self.boundingBoxWGS84 = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
)
elif self.parent:
self.boundingBoxWGS84 = self.parent.boundingBoxWGS84
else:
self.boundingBoxWGS84 = None
#SRS options
self.crsOptions = []
#Copy any parent SRS options (they are inheritable properties)
if self.parent:
self.crsOptions = list(self.parent.crsOptions)
#Look for SRS option attached to this layer
if elem.find('SRS') is not None:
## some servers found in the wild use a single SRS
## tag containing a whitespace separated list of SRIDs
## instead of several SRS tags. hence the inner loop
for srslist in map(lambda x: x.text, elem.findall('SRS')):
if srslist:
for srs in srslist.split():
self.crsOptions.append(srs)
#Get rid of duplicate entries
self.crsOptions = list(set(self.crsOptions))
#Set self.crsOptions to None if the layer (and parents) had no SRS options
if len(self.crsOptions) == 0:
#raise ValueError('%s no SRS available!?' % (elem,))
#Comment by D Lowe.
#Do not raise ValueError as it is possible that a layer is purely a parent layer and does not have SRS specified. Instead set crsOptions to None
# Comment by Jachym:
# Do not set it to None, but to [], which will make the code
# work further. Fixed by anthonybaxter
self.crsOptions=[]
#Styles
self.styles = {}
#Copy any parent styles (they are inheritable properties)
if self.parent:
self.styles = self.parent.styles.copy()
#Get the styles for this layer (items with the same name are replaced)
for s in elem.findall('Style'):
name = s.find('Name')
title = s.find('Title')
if name is None or title is None:
raise ValueError('%s missing name or title' % (s,))
style = { 'title' : title.text }
# legend url
legend = s.find('LegendURL/OnlineResource')
if legend is not None:
style['legend'] = legend.attrib['{http://www.w3.org/1999/xlink}href']
self.styles[name.text] = style
# keywords
self.keywords = [f.text for f in elem.findall('KeywordList/Keyword')]
# timepositions - times for which data is available.
self.timepositions=None
self.defaulttimeposition = None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='time':
if extent.text:
self.timepositions=extent.text.split(',')
self.defaulttimeposition = extent.attrib.get("default")
break
# Elevations - available vertical levels
self.elevations=None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='elevation':
if extent.text:
self.elevations=extent.text.split(',')
break
# MetadataURLs
self.metadataUrls = []
for m in elem.findall('MetadataURL'):
metadataUrl = {
'type': testXMLValue(m.attrib['type'], attrib=True),
'format': testXMLValue(m.find('Format')),
'url': testXMLValue(m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href'], attrib=True)
}
if metadataUrl['url'] is not None and parse_remote_metadata: # download URL
try:
content = urllib2.urlopen(metadataUrl['url'], timeout=timeout)
doc = etree.parse(content)
if metadataUrl['type'] is not None:
if metadataUrl['type'] == 'FGDC':
metadataUrl['metadata'] = Metadata(doc)
if metadataUrl['type'] == 'TC211':
metadataUrl['metadata'] = MD_Metadata(doc)
except Exception, err:
metadataUrl['metadata'] = None
self.metadataUrls.append(metadataUrl)
# DataURLs
self.dataUrls = []
for m in elem.findall('DataURL'):
dataUrl = {
'format': m.find('Format').text.strip(),
'url': m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
}
self.dataUrls.append(dataUrl)
self.layers = []
for child in elem.findall('Layer'):
self.layers.append(ContentMetadata(child, self))
def __str__(self):
return 'Layer Name: %s Title: %s' % (self.name, self.title)
class OperationMetadata:
"""Abstraction for WMS OperationMetadata.
Implements IOperationMetadata.
"""
def __init__(self, elem):
"""."""
self.name = xmltag_split(elem.tag)
# formatOptions
self.formatOptions = [f.text for f in elem.findall('Format')]
self.methods = []
for verb in elem.findall('DCPType/HTTP/*'):
url = verb.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type' : xmltag_split(verb.tag), 'url': url})
class ContactMetadata:
"""Abstraction for contact details advertised in GetCapabilities.
"""
def __init__(self, elem):
name = elem.find('ContactPersonPrimary/ContactPerson')
if name is not None:
self.name=name.text
else:
self.name=None
email = elem.find('ContactElectronicMailAddress')
if email is not None:
self.email=email.text
else:
self.email=None
self.address = self.city = self.region = None
self.postcode = self.country = None
address = elem.find('ContactAddress')
if address is not None:
street = address.find('Address')
if street is not None: self.address = street.text
city = address.find('City')
if city is not None: self.city = city.text
region = address.find('StateOrProvince')
if region is not None: self.region = region.text
postcode = address.find('PostCode')
if postcode is not None: self.postcode = postcode.text
country = address.find('Country')
if country is not None: self.country = country.text
organization = elem.find('ContactPersonPrimary/ContactOrganization')
if organization is not None: self.organization = organization.text
else:self.organization = None
position = elem.find('ContactPosition')
if position is not None: self.position = position.text
else: self.position = None
class WMSCapabilitiesReader:
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='1.1.1', url=None, un=None, pw=None):
"""Initialize"""
self.version = version
self._infoset = None
self.url = url
self.username = un
self.password = pw
#if self.username and self.password:
## Provide login information in order to use the WMS server
## Create an OpenerDirector with support for Basic HTTP
## Authentication...
#passman = HTTPPasswordMgrWithDefaultRealm()
#passman.add_password(None, self.url, self.username, self.password)
#auth_handler = HTTPBasicAuthHandler(passman)
#opener = build_opener(auth_handler)
#self._open = opener.open
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WMS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
def read(self, service_url):
"""Get and parse a WMS capabilities document, returning an
elementtree instance
service_url is the base url, to which is appended the service,
version, and request parameters
"""
getcaprequest = self.capabilities_url(service_url)
#now split it up again to use the generic openURL function...
spliturl=getcaprequest.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username = self.username, password = self.password)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WMS capabilities document, returning an elementtree instance
string should be an XML capabilities document
"""
if not isinstance(st, str):
raise ValueError("String must be of type string, not %s" % type(st))
return etree.fromstring(st)
| herow/planning_qgis | python/ext-libs/owslib/wms.py | Python | gpl-2.0 | 23,814 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Marathi language.
.. seealso:: http://en.wikipedia.org/wiki/Marathi_language
"""
from translate.lang import common
class mr(common.Common):
"""This class represents Marathi."""
ignoretests = ["startcaps", "simplecaps"]
| biswajitsahu/kuma | vendor/packages/translate/lang/mr.py | Python | mpl-2.0 | 1,018 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Nadam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
from tensorflow.python.training import training_ops
class NadamOptimizer(adam.AdamOptimizer):
"""Optimizer that implements the Nadam algorithm.
See [Dozat, T., 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
"""
def _apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.apply_adam(
var,
m,
v,
math_ops.cast(beta1_power, var.dtype.base_dtype),
math_ops.cast(beta2_power, var.dtype.base_dtype),
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._beta1_t, var.dtype.base_dtype),
math_ops.cast(self._beta2_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking,
use_nesterov=True).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.resource_apply_adam(
var.handle,
m.handle,
v.handle,
math_ops.cast(beta1_power, grad.dtype.base_dtype),
math_ops.cast(beta2_power, grad.dtype.base_dtype),
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking,
use_nesterov=True)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# m_bar = (1 - beta1) * g_t + beta1 * m_t
m_bar = m_scaled_g_values + beta1_t * m_t
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(
var, lr * m_bar / (v_sqrt + epsilon_t), use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_bar, v_t])
| nburn42/tensorflow | tensorflow/contrib/opt/python/training/nadam_optimizer.py | Python | apache-2.0 | 4,164 |
"""
Generic character device support for serial, parallel, channel, and console
http://libvirt.org/formatdomain.html#elementCharSerial
"""
from virttest.libvirt_xml.devices import base
class CharacterBase(base.TypedDeviceBase):
__slots__ = ('sources', 'targets')
# Not overriding __init__ because ABC cannot hide device_tag as expected
# Accessors just wrap private helpers in UntypedDeviceBase class
def get_sources(self):
"""
Return a list of dictionaries containing each source's attributes.
"""
return self._get_list('source')
def set_sources(self, value):
"""
Set all sources to the value list of dictionaries of source attributes.
"""
self._set_list('source', value)
def del_sources(self):
"""
Remove the list of dictionaries containing each source's attributes.
"""
self._del_list('source')
def get_targets(self):
"""
Return a list of dictionaries containing each target's attributes.
"""
return self._get_list('target')
def set_targets(self, value):
"""
Set all targets to the value list of dictionaries of target attributes.
"""
self._set_list('target', value)
def del_targets(self):
"""
Remove the list of dictionaries containing each target's attributes.
"""
self._del_list('target')
# Some convenience methods so appending to sources/targets is easier
def add_source(self, **attributes):
"""
Convenience method for appending a source from dictionary of attributes
"""
self._add_item('sources', **attributes)
def add_target(self, **attributes):
"""
Convenience method for appending a target from dictionary of attributes
"""
self._add_item('targets', **attributes)
def update_source(self, index, **attributes):
"""
Convenience method for merging values into a source's attributes
"""
self._update_item('sources', index, **attributes)
def update_target(self, index, **attributes):
"""
Convenience method for merging values into a target's attributes
"""
self._update_item('targets', index, **attributes)
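# A hedged usage sketch (hypothetical subclass and attribute values, not part
# of the original file): a concrete character device built on CharacterBase
# could be populated with
#   dev.add_source(mode='bind', path='/tmp/serial.sock')
#   dev.add_target(type='virtio', port='1')
# mirroring libvirt's <source> and <target> child elements.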
| CongSmile/virt-test | virttest/libvirt_xml/devices/character.py | Python | gpl-2.0 | 2,306 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
| InakiZabala/odoomrp-wip | stock_move_purchase_price/__init__.py | Python | agpl-3.0 | 894 |
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
Basic Usage:
>>> import markdown
>>> text = '''Title: A Test Doc.
... Author: Waylan Limberg
... John Doe
... Blank_Data:
...
... The body. This is paragraph one.
... '''
>>> md = markdown.Markdown(['meta'])
>>> print md.convert(text)
<p>The body. This is paragraph one.</p>
>>> print md.Meta
{u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
Make sure text without Meta Data still works (markdown < 1.6b returns a <p>).
>>> text = ' Some Code - not extra lines of meta data.'
>>> md = markdown.Markdown(['meta'])
>>> print md.convert(text)
<pre><code>Some Code - not extra lines of meta data.
</code></pre>
>>> md.Meta
{}
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
Project website: <http://packages.python.org/Markdown/meta_data.html>
Contact: markdown@freewisdom.org
License: BSD (see ../LICENSE.md for details)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
import re
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
class MetaExtension (Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta", MetaPreprocessor(md), "_begin")
class MetaPreprocessor(Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
while 1:
line = lines.pop(0)
if line.strip() == '':
break # blank line - done
m1 = META_RE.match(line)
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.markdown.Meta = meta
return lines
def makeExtension(configs={}):
return MetaExtension(configs=configs)
| frankiecjunle/yunblog | venv/lib/python2.7/site-packages/markdown/extensions/meta.py | Python | mit | 2,757 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class asset_asset_report(osv.osv):
_name = "asset.asset.report"
_description = "Assets Analysis"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'purchase_date': fields.date('Purchase Date', readonly=True),
'depreciation_date': fields.date('Depreciation Date', readonly=True),
'asset_id': fields.many2one('account.asset.asset', string='Asset', readonly=True),
'asset_category_id': fields.many2one('account.asset.category',string='Asset category'),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'state': fields.selection([('draft','Draft'),('open','Running'),('close','Close')], 'Status', readonly=True),
'depreciation_value': fields.float('Amount of Depreciation Lines', readonly=True),
'move_check': fields.boolean('Posted', readonly=True),
'nbr': fields.integer('# of Depreciation Lines', readonly=True),
'gross_value': fields.float('Gross Amount', readonly=True),
'posted_value': fields.float('Posted Amount', readonly=True),
'unposted_value': fields.float('Unposted Amount', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'asset_asset_report')
cr.execute("""
create or replace view asset_asset_report as (
select
min(dl.id) as id,
dl.name as name,
dl.depreciation_date as depreciation_date,
a.purchase_date as purchase_date,
(CASE WHEN dlmin.id = min(dl.id)
THEN a.purchase_value
ELSE 0
END) as gross_value,
dl.amount as depreciation_value,
(CASE WHEN dl.move_check
THEN dl.amount
ELSE 0
END) as posted_value,
(CASE WHEN NOT dl.move_check
THEN dl.amount
ELSE 0
END) as unposted_value,
dl.asset_id as asset_id,
dl.move_check as move_check,
a.category_id as asset_category_id,
a.partner_id as partner_id,
a.state as state,
count(dl.*) as nbr,
a.company_id as company_id
from account_asset_depreciation_line dl
left join account_asset_asset a on (dl.asset_id=a.id)
left join (select min(d.id) as id,ac.id as ac_id from account_asset_depreciation_line as d inner join account_asset_asset as ac ON (ac.id=d.asset_id) group by ac_id) as dlmin on dlmin.ac_id=a.id
group by
dl.amount,dl.asset_id,dl.depreciation_date,dl.name,
a.purchase_date, dl.move_check, a.state, a.category_id, a.partner_id, a.company_id,
a.purchase_value, a.id, a.salvage_value, dlmin.id
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ncliam/serverpos | openerp/addons/account_asset/report/account_asset_report.py | Python | agpl-3.0 | 4,260 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from cStringIO import StringIO
from struct import pack,unpack
from thrift.Thrift import TException
class TTransportException(TException):
"""Custom Transport Exception class"""
UNKNOWN = 0
NOT_OPEN = 1
ALREADY_OPEN = 2
TIMED_OUT = 3
END_OF_FILE = 4
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
class TTransportBase:
"""Base class for Thrift transport layer."""
def isOpen(self):
pass
def open(self):
pass
def close(self):
pass
def read(self, sz):
pass
def readAll(self, sz):
buff = ''
have = 0
while (have < sz):
chunk = self.read(sz-have)
have += len(chunk)
buff += chunk
if len(chunk) == 0:
raise EOFError()
return buff
def write(self, buf):
pass
def flush(self):
pass
# This class should be thought of as an interface.
class CReadableTransport:
"""base class for transports that are readable from C"""
# TODO(dreiss): Think about changing this interface to allow us to use
# a (Python, not c) StringIO instead, because it allows
# you to write after reading.
# NOTE: This is a classic class, so properties will NOT work
# correctly for setting.
@property
def cstringio_buf(self):
"""A cStringIO buffer that contains the current chunk we are reading."""
pass
def cstringio_refill(self, partialread, reqlen):
"""Refills cstringio_buf.
Returns the currently used buffer (which can but need not be the same as
the old cstringio_buf). partialread is what the C code has read from the
buffer, and should be inserted into the buffer before any more reads. The
return value must be a new, not borrowed reference. Something along the
lines of self._buf should be fine.
If reqlen bytes can't be read, throw EOFError.
"""
pass
class TServerTransportBase:
"""Base class for Thrift server transports."""
def listen(self):
pass
def accept(self):
pass
def close(self):
pass
class TTransportFactoryBase:
"""Base class for a Transport Factory"""
def getTransport(self, trans):
return trans
class TBufferedTransportFactory:
"""Factory transport that builds buffered transports"""
def getTransport(self, trans):
buffered = TBufferedTransport(trans)
return buffered
class TBufferedTransport(TTransportBase,CReadableTransport):
"""Class that wraps another transport and buffers its I/O."""
DEFAULT_BUFFER = 4096
def __init__(self, trans):
self.__trans = trans
self.__wbuf = StringIO()
self.__rbuf = StringIO("")
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.__rbuf = StringIO(self.__trans.read(max(sz, self.DEFAULT_BUFFER)))
return self.__rbuf.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
out = self.__wbuf.getvalue()
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
self.__trans.write(out)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, partialread, reqlen):
retstring = partialread
if reqlen < self.DEFAULT_BUFFER:
# try to make a read of as much as we can.
retstring += self.__trans.read(self.DEFAULT_BUFFER)
# but make sure we do read reqlen bytes.
if len(retstring) < reqlen:
retstring += self.__trans.readAll(reqlen - len(retstring))
self.__rbuf = StringIO(retstring)
return self.__rbuf
class TMemoryBuffer(TTransportBase, CReadableTransport):
"""Wraps a cStringIO object as a TTransport.
NOTE: Unlike the C++ version of this class, you cannot write to it
then immediately read from it. If you want to read from a
TMemoryBuffer, you must pass a string to the constructor.
TODO(dreiss): Make this work like the C++ version.
"""
def __init__(self, value=None):
"""value -- a value to read from for stringio
If value is set, this will be a transport for reading,
otherwise, it is for writing"""
if value is not None:
self._buffer = StringIO(value)
else:
self._buffer = StringIO()
def isOpen(self):
return not self._buffer.closed
def open(self):
pass
def close(self):
self._buffer.close()
def read(self, sz):
return self._buffer.read(sz)
def write(self, buf):
self._buffer.write(buf)
def flush(self):
pass
def getvalue(self):
return self._buffer.getvalue()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self._buffer
def cstringio_refill(self, partialread, reqlen):
# only one shot at reading...
raise EOFError()
class TFramedTransportFactory:
"""Factory transport that builds framed transports"""
def getTransport(self, trans):
framed = TFramedTransport(trans)
return framed
class TFramedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and frames its I/O when writing."""
def __init__(self, trans,):
self.__trans = trans
self.__rbuf = StringIO()
self.__wbuf = StringIO()
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.readFrame()
return self.__rbuf.read(sz)
def readFrame(self):
buff = self.__trans.readAll(4)
sz, = unpack('!i', buff)
self.__rbuf = StringIO(self.__trans.readAll(sz))
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
wout = self.__wbuf.getvalue()
wsz = len(wout)
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive copies
buf = pack("!i", wsz) + wout
self.__trans.write(buf)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, prefix, reqlen):
# self.__rbuf will already be empty here because fastbinary doesn't
# ask for a refill until the previous buffer is empty. Therefore,
# we can start reading new frames immediately.
while len(prefix) < reqlen:
self.readFrame()
prefix += self.__rbuf.getvalue()
self.__rbuf = StringIO(prefix)
return self.__rbuf
class TFileObjectTransport(TTransportBase):
"""Wraps a file-like object to make it work as a Thrift transport."""
def __init__(self, fileobj):
self.fileobj = fileobj
def isOpen(self):
return True
def close(self):
self.fileobj.close()
def read(self, sz):
return self.fileobj.read(sz)
def write(self, buf):
self.fileobj.write(buf)
def flush(self):
self.fileobj.flush()
| cconrad/evernotebackup | thrift/transport/TTransport.py | Python | bsd-2-clause | 8,204 |
# -*- coding: utf-8 -*-
import mass_mailing
import mass_mailing_stats
import mail_mail
import mail_thread
import res_config
| ovnicraft/openerp-restaurant | mass_mailing/models/__init__.py | Python | agpl-3.0 | 125 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import logging
import sys
from lib.pageframe import PFNCounts
from lib.policy import PolicySet
from lib.subcommand import SubCommand
LOGGER = logging.getLogger('dmprof')
class PolicyCommands(SubCommand):
def __init__(self, command):
super(PolicyCommands, self).__init__(
'Usage: %%prog %s [-p POLICY] <first-dump> [shared-first-dumps...]' %
command)
self._parser.add_option('-p', '--policy', type='string', dest='policy',
help='profile with POLICY', metavar='POLICY')
self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
metavar='/path/on/target@/path/on/host[:...]',
help='Read files in /path/on/host/ instead of '
'files in /path/on/target/.')
self._parser.add_option('--timestamp', dest='timestamp',
action='store_true', help='Use timestamp.')
self._timestamp = False
def _set_up(self, sys_argv):
options, args = self._parse_args(sys_argv, 1)
dump_path = args[1]
shared_first_dump_paths = args[2:]
alternative_dirs_dict = {}
if options.alternative_dirs:
for alternative_dir_pair in options.alternative_dirs.split(':'):
target_path, host_path = alternative_dir_pair.split('@', 1)
alternative_dirs_dict[target_path] = host_path
(bucket_set, dumps) = SubCommand.load_basic_files(
dump_path, True, alternative_dirs=alternative_dirs_dict)
self._timestamp = options.timestamp
pfn_counts_dict = {}
for shared_first_dump_path in shared_first_dump_paths:
shared_dumps = SubCommand._find_all_dumps(shared_first_dump_path)
for shared_dump in shared_dumps:
pfn_counts = PFNCounts.load(shared_dump)
if pfn_counts.pid not in pfn_counts_dict:
pfn_counts_dict[pfn_counts.pid] = []
pfn_counts_dict[pfn_counts.pid].append(pfn_counts)
policy_set = PolicySet.load(SubCommand._parse_policy_list(options.policy))
return policy_set, dumps, pfn_counts_dict, bucket_set
def _apply_policy(self, dump, pfn_counts_dict, policy, bucket_set,
first_dump_time):
"""Aggregates the total memory size of each component.
Iterate through all stacktraces and attribute them to one of the components
based on the policy. It is important to apply the policy in the right order.
Args:
dump: A Dump object.
pfn_counts_dict: A dict mapping a pid to a list of PFNCounts.
policy: A Policy object.
bucket_set: A BucketSet object.
first_dump_time: An integer representing time when the first dump is
dumped.
Returns:
A dict mapping components and their corresponding sizes.
"""
LOGGER.info(' %s' % dump.path)
all_pfn_dict = {}
if pfn_counts_dict:
LOGGER.info(' shared with...')
for pid, pfnset_list in pfn_counts_dict.iteritems():
closest_pfnset_index = None
closest_pfnset_difference = 1024.0
for index, pfnset in enumerate(pfnset_list):
time_difference = pfnset.time - dump.time
if time_difference >= 3.0:
break
elif ((time_difference < 0.0 and pfnset.reason != 'Exiting') or
(0.0 <= time_difference and time_difference < 3.0)):
closest_pfnset_index = index
closest_pfnset_difference = time_difference
elif time_difference < 0.0 and pfnset.reason == 'Exiting':
closest_pfnset_index = None
break
if closest_pfnset_index:
for pfn, count in pfnset_list[closest_pfnset_index].iter_pfn:
all_pfn_dict[pfn] = all_pfn_dict.get(pfn, 0) + count
LOGGER.info(' %s (time difference = %f)' %
(pfnset_list[closest_pfnset_index].path,
closest_pfnset_difference))
else:
LOGGER.info(' (no match with pid:%d)' % pid)
sizes = dict((c, 0) for c in policy.components)
PolicyCommands._accumulate_malloc(dump, policy, bucket_set, sizes)
verify_global_stats = PolicyCommands._accumulate_maps(
dump, all_pfn_dict, policy, bucket_set, sizes)
# TODO(dmikurube): Remove the verifying code when GLOBAL_STATS is removed.
# http://crbug.com/245603.
for verify_key, verify_value in verify_global_stats.iteritems():
dump_value = dump.global_stat('%s_committed' % verify_key)
if dump_value != verify_value:
LOGGER.warn('%25s: %12d != %d (%d)' % (
verify_key, dump_value, verify_value, dump_value - verify_value))
sizes['mmap-no-log'] = (
dump.global_stat('profiled-mmap_committed') -
sizes['mmap-total-log'])
sizes['mmap-total-record'] = dump.global_stat('profiled-mmap_committed')
sizes['mmap-total-record-vm'] = dump.global_stat('profiled-mmap_virtual')
sizes['tc-no-log'] = (
dump.global_stat('profiled-malloc_committed') -
sizes['tc-total-log'])
sizes['tc-total-record'] = dump.global_stat('profiled-malloc_committed')
sizes['tc-unused'] = (
sizes['mmap-tcmalloc'] -
dump.global_stat('profiled-malloc_committed'))
if sizes['tc-unused'] < 0:
LOGGER.warn(' Assuming tc-unused=0 as it is negative: %d (bytes)' %
sizes['tc-unused'])
sizes['tc-unused'] = 0
sizes['tc-total'] = sizes['mmap-tcmalloc']
# TODO(dmikurube): global_stat will be deprecated.
# See http://crbug.com/245603.
for key, value in {
'total': 'total_committed',
'filemapped': 'file_committed',
'absent': 'absent_committed',
'file-exec': 'file-exec_committed',
'file-nonexec': 'file-nonexec_committed',
'anonymous': 'anonymous_committed',
'stack': 'stack_committed',
'other': 'other_committed',
'unhooked-absent': 'nonprofiled-absent_committed',
'total-vm': 'total_virtual',
'filemapped-vm': 'file_virtual',
'anonymous-vm': 'anonymous_virtual',
'other-vm': 'other_virtual' }.iteritems():
if key in sizes:
sizes[key] = dump.global_stat(value)
if 'mustbezero' in sizes:
removed_list = (
'profiled-mmap_committed',
'nonprofiled-absent_committed',
'nonprofiled-anonymous_committed',
'nonprofiled-file-exec_committed',
'nonprofiled-file-nonexec_committed',
'nonprofiled-stack_committed',
'nonprofiled-other_committed')
sizes['mustbezero'] = (
dump.global_stat('total_committed') -
sum(dump.global_stat(removed) for removed in removed_list))
if 'total-exclude-profiler' in sizes:
sizes['total-exclude-profiler'] = (
dump.global_stat('total_committed') -
(sizes['mmap-profiler'] + sizes['mmap-type-profiler']))
if 'hour' in sizes:
sizes['hour'] = (dump.time - first_dump_time) / 60.0 / 60.0
if 'minute' in sizes:
sizes['minute'] = (dump.time - first_dump_time) / 60.0
if 'second' in sizes:
if self._timestamp:
sizes['second'] = datetime.datetime.fromtimestamp(dump.time).isoformat()
else:
sizes['second'] = dump.time - first_dump_time
return sizes
@staticmethod
def _accumulate_malloc(dump, policy, bucket_set, sizes):
for bucket_id, _, committed, _, _ in dump.iter_stacktrace:
bucket = bucket_set.get(bucket_id)
if not bucket or bucket.allocator_type == 'malloc':
component_match = policy.find_malloc(bucket)
elif bucket.allocator_type == 'mmap':
continue
else:
assert False
sizes[component_match] += committed
assert not component_match.startswith('mmap-')
if component_match.startswith('tc-'):
sizes['tc-total-log'] += committed
else:
sizes['other-total-log'] += committed
@staticmethod
def _accumulate_maps(dump, pfn_dict, policy, bucket_set, sizes):
# TODO(dmikurube): Remove the dict when GLOBAL_STATS is removed.
# http://crbug.com/245603.
global_stats = {
'total': 0,
'file-exec': 0,
'file-nonexec': 0,
'anonymous': 0,
'stack': 0,
'other': 0,
'nonprofiled-file-exec': 0,
'nonprofiled-file-nonexec': 0,
'nonprofiled-anonymous': 0,
'nonprofiled-stack': 0,
'nonprofiled-other': 0,
'profiled-mmap': 0,
}
for key, value in dump.iter_map:
# TODO(dmikurube): Remove the subtotal code when GLOBAL_STATS is removed.
# It's temporary verification code for transition described in
# http://crbug.com/245603.
committed = 0
if 'committed' in value[1]:
committed = value[1]['committed']
global_stats['total'] += committed
key = 'other'
name = value[1]['vma']['name']
if name.startswith('/'):
if value[1]['vma']['executable'] == 'x':
key = 'file-exec'
else:
key = 'file-nonexec'
elif name == '[stack]':
key = 'stack'
elif name == '':
key = 'anonymous'
global_stats[key] += committed
if value[0] == 'unhooked':
global_stats['nonprofiled-' + key] += committed
if value[0] == 'hooked':
global_stats['profiled-mmap'] += committed
if value[0] == 'unhooked':
if pfn_dict and dump.pageframe_length:
for pageframe in value[1]['pageframe']:
component_match = policy.find_unhooked(value, pageframe, pfn_dict)
sizes[component_match] += pageframe.size
else:
component_match = policy.find_unhooked(value)
sizes[component_match] += int(value[1]['committed'])
elif value[0] == 'hooked':
if pfn_dict and dump.pageframe_length:
for pageframe in value[1]['pageframe']:
component_match, _ = policy.find_mmap(
value, bucket_set, pageframe, pfn_dict)
sizes[component_match] += pageframe.size
assert not component_match.startswith('tc-')
if component_match.startswith('mmap-'):
sizes['mmap-total-log'] += pageframe.size
else:
sizes['other-total-log'] += pageframe.size
else:
component_match, _ = policy.find_mmap(value, bucket_set)
sizes[component_match] += int(value[1]['committed'])
if component_match.startswith('mmap-'):
sizes['mmap-total-log'] += int(value[1]['committed'])
else:
sizes['other-total-log'] += int(value[1]['committed'])
else:
LOGGER.error('Unrecognized mapping status: %s' % value[0])
return global_stats
class CSVCommand(PolicyCommands):
def __init__(self):
super(CSVCommand, self).__init__('csv')
def do(self, sys_argv):
policy_set, dumps, pfn_counts_dict, bucket_set = self._set_up(sys_argv)
return self._output(
policy_set, dumps, pfn_counts_dict, bucket_set, sys.stdout)
def _output(self, policy_set, dumps, pfn_counts_dict, bucket_set, out):
max_components = 0
for label in policy_set:
max_components = max(max_components, len(policy_set[label].components))
for label in sorted(policy_set):
components = policy_set[label].components
if len(policy_set) > 1:
out.write('%s%s\n' % (label, ',' * (max_components - 1)))
out.write('%s%s\n' % (
','.join(components), ',' * (max_components - len(components))))
LOGGER.info('Applying a policy %s to...' % label)
for index, dump in enumerate(dumps):
if index == 0:
first_dump_time = dump.time
component_sizes = self._apply_policy(
dump, pfn_counts_dict, policy_set[label], bucket_set,
first_dump_time)
s = []
for c in components:
if c in ('hour', 'minute', 'second'):
if isinstance(component_sizes[c], str):
s.append('%s' % component_sizes[c])
else:
s.append('%05.5f' % (component_sizes[c]))
else:
s.append('%05.5f' % (component_sizes[c] / 1024.0 / 1024.0))
out.write('%s%s\n' % (
','.join(s), ',' * (max_components - len(components))))
bucket_set.clear_component_cache()
return 0
class JSONCommand(PolicyCommands):
def __init__(self):
super(JSONCommand, self).__init__('json')
def do(self, sys_argv):
policy_set, dumps, pfn_counts_dict, bucket_set = self._set_up(sys_argv)
return self._output(
policy_set, dumps, pfn_counts_dict, bucket_set, sys.stdout)
def _output(self, policy_set, dumps, pfn_counts_dict, bucket_set, out):
json_base = {
'version': 'JSON_DEEP_2',
'policies': {},
}
for label in sorted(policy_set):
json_base['policies'][label] = {
'legends': policy_set[label].components,
'snapshots': [],
}
LOGGER.info('Applying a policy %s to...' % label)
for index, dump in enumerate(dumps):
if index == 0:
first_dump_time = dump.time
component_sizes = self._apply_policy(
dump, pfn_counts_dict, policy_set[label], bucket_set,
first_dump_time)
component_sizes['dump_path'] = dump.path
component_sizes['dump_time'] = datetime.datetime.fromtimestamp(
dump.time).strftime('%Y-%m-%d %H:%M:%S')
json_base['policies'][label]['snapshots'].append(component_sizes)
bucket_set.clear_component_cache()
json.dump(json_base, out, indent=2, sort_keys=True)
return 0
class ListCommand(PolicyCommands):
def __init__(self):
super(ListCommand, self).__init__('list')
def do(self, sys_argv):
policy_set, dumps, pfn_counts_dict, bucket_set = self._set_up(sys_argv)
return self._output(
policy_set, dumps, pfn_counts_dict, bucket_set, sys.stdout)
def _output(self, policy_set, dumps, pfn_counts_dict, bucket_set, out):
for label in sorted(policy_set):
LOGGER.info('Applying a policy %s to...' % label)
for dump in dumps:
component_sizes = self._apply_policy(
dump, pfn_counts_dict, policy_set[label], bucket_set, dump.time)
out.write('%s for %s:\n' % (label, dump.path))
for c in policy_set[label].components:
if c in ['hour', 'minute', 'second']:
out.write('%40s %12.3f\n' % (c, component_sizes[c]))
else:
out.write('%40s %12d\n' % (c, component_sizes[c]))
bucket_set.clear_component_cache()
return 0
| GladeRom/android_external_chromium_org | tools/deep_memory_profiler/subcommands/policies.py | Python | bsd-3-clause | 14,719 |
"""Test safe_exec.py"""
import hashlib
import os
import os.path
import random
import textwrap
import unittest
from nose.plugins.skip import SkipTest
from capa.safe_exec import safe_exec, update_hash
from codejail.safe_exec import SafeExecException
from codejail.jail_code import is_configured
class TestSafeExec(unittest.TestCase):
def test_set_values(self):
g = {}
safe_exec("a = 17", g)
self.assertEqual(g['a'], 17)
def test_division(self):
g = {}
# Future division: 1/2 is 0.5.
safe_exec("a = 1/2", g)
self.assertEqual(g['a'], 0.5)
def test_assumed_imports(self):
g = {}
# Math is always available.
safe_exec("a = int(math.pi)", g)
self.assertEqual(g['a'], 3)
def test_random_seeding(self):
g = {}
r = random.Random(17)
rnums = [r.randint(0, 999) for _ in xrange(100)]
# Without a seed, the results are unpredictable
safe_exec("rnums = [random.randint(0, 999) for _ in xrange(100)]", g)
self.assertNotEqual(g['rnums'], rnums)
# With a seed, the results are predictable
safe_exec("rnums = [random.randint(0, 999) for _ in xrange(100)]", g, random_seed=17)
self.assertEqual(g['rnums'], rnums)
def test_random_is_still_importable(self):
g = {}
r = random.Random(17)
rnums = [r.randint(0, 999) for _ in xrange(100)]
# With a seed, the results are predictable even from the random module
safe_exec(
"import random\n"
"rnums = [random.randint(0, 999) for _ in xrange(100)]\n",
g, random_seed=17)
self.assertEqual(g['rnums'], rnums)
def test_python_lib(self):
pylib = os.path.dirname(__file__) + "/test_files/pylib"
g = {}
safe_exec(
"import constant; a = constant.THE_CONST",
g, python_path=[pylib]
)
def test_raising_exceptions(self):
g = {}
with self.assertRaises(SafeExecException) as cm:
safe_exec("1/0", g)
self.assertIn("ZeroDivisionError", cm.exception.message)
class TestSafeOrNot(unittest.TestCase):
def test_cant_do_something_forbidden(self):
# Can't test for forbiddenness if CodeJail isn't configured for python.
if not is_configured("python"):
raise SkipTest
g = {}
with self.assertRaises(SafeExecException) as cm:
safe_exec("import os; files = os.listdir('/')", g)
self.assertIn("OSError", cm.exception.message)
self.assertIn("Permission denied", cm.exception.message)
def test_can_do_something_forbidden_if_run_unsafely(self):
g = {}
safe_exec("import os; files = os.listdir('/')", g, unsafely=True)
self.assertEqual(g['files'], os.listdir('/'))
class DictCache(object):
"""A cache implementation over a simple dict, for testing."""
def __init__(self, d):
self.cache = d
def get(self, key):
# Actual cache implementations have limits on key length
assert len(key) <= 250
return self.cache.get(key)
def set(self, key, value):
# Actual cache implementations have limits on key length
assert len(key) <= 250
self.cache[key] = value
class TestSafeExecCaching(unittest.TestCase):
"""Test that caching works on safe_exec."""
def test_cache_miss_then_hit(self):
g = {}
cache = {}
# Cache miss
safe_exec("a = int(math.pi)", g, cache=DictCache(cache))
self.assertEqual(g['a'], 3)
# A result has been cached
self.assertEqual(cache.values()[0], (None, {'a': 3}))
# Fiddle with the cache, then try it again.
cache[cache.keys()[0]] = (None, {'a': 17})
g = {}
safe_exec("a = int(math.pi)", g, cache=DictCache(cache))
self.assertEqual(g['a'], 17)
def test_cache_large_code_chunk(self):
# Caching used to die on memcache with more than 250 bytes of code.
# Check that it doesn't any more.
code = "a = 0\n" + ("a += 1\n" * 12345)
g = {}
cache = {}
safe_exec(code, g, cache=DictCache(cache))
self.assertEqual(g['a'], 12345)
def test_cache_exceptions(self):
# Used to be that running code that raised an exception didn't cache
# the result. Check that now it does.
code = "1/0"
g = {}
cache = {}
with self.assertRaises(SafeExecException):
safe_exec(code, g, cache=DictCache(cache))
# The exception should be in the cache now.
self.assertEqual(len(cache), 1)
cache_exc_msg, cache_globals = cache.values()[0]
self.assertIn("ZeroDivisionError", cache_exc_msg)
# Change the value stored in the cache, the result should change.
cache[cache.keys()[0]] = ("Hey there!", {})
with self.assertRaises(SafeExecException):
safe_exec(code, g, cache=DictCache(cache))
self.assertEqual(len(cache), 1)
cache_exc_msg, cache_globals = cache.values()[0]
self.assertEqual("Hey there!", cache_exc_msg)
# Change it again, now no exception!
cache[cache.keys()[0]] = (None, {'a': 17})
safe_exec(code, g, cache=DictCache(cache))
self.assertEqual(g['a'], 17)
def test_unicode_submission(self):
# Check that using non-ASCII unicode does not raise an encoding error.
# Try several non-ASCII unicode characters
for code in [129, 500, 2**8 - 1, 2**16 - 1]:
code_with_unichr = unicode("# ") + unichr(code)
try:
safe_exec(code_with_unichr, {}, cache=DictCache({}))
except UnicodeEncodeError:
self.fail("Tried executing code with non-ASCII unicode: {0}".format(code))
class TestUpdateHash(unittest.TestCase):
"""Test the safe_exec.update_hash function to be sure it canonicalizes properly."""
def hash_obj(self, obj):
"""Return the md5 hash that `update_hash` makes us."""
md5er = hashlib.md5()
update_hash(md5er, obj)
return md5er.hexdigest()
def equal_but_different_dicts(self):
"""
Make two equal dicts with different key order.
Simple literals won't do it. Filling one and then shrinking it will
make them different.
"""
d1 = {k:1 for k in "abcdefghijklmnopqrstuvwxyz"}
d2 = dict(d1)
for i in xrange(10000):
d2[i] = 1
for i in xrange(10000):
del d2[i]
# Check that our dicts are equal, but with different key order.
self.assertEqual(d1, d2)
self.assertNotEqual(d1.keys(), d2.keys())
return d1, d2
def test_simple_cases(self):
h1 = self.hash_obj(1)
h10 = self.hash_obj(10)
hs1 = self.hash_obj("1")
self.assertNotEqual(h1, h10)
self.assertNotEqual(h1, hs1)
def test_list_ordering(self):
h1 = self.hash_obj({'a': [1,2,3]})
h2 = self.hash_obj({'a': [3,2,1]})
self.assertNotEqual(h1, h2)
def test_dict_ordering(self):
d1, d2 = self.equal_but_different_dicts()
h1 = self.hash_obj(d1)
h2 = self.hash_obj(d2)
self.assertEqual(h1, h2)
def test_deep_ordering(self):
d1, d2 = self.equal_but_different_dicts()
o1 = {'a':[1, 2, [d1], 3, 4]}
o2 = {'a':[1, 2, [d2], 3, 4]}
h1 = self.hash_obj(o1)
h2 = self.hash_obj(o2)
self.assertEqual(h1, h2)
class TestRealProblems(unittest.TestCase):
def test_802x(self):
code = textwrap.dedent("""\
import math
import random
import numpy
e=1.602e-19 #C
me=9.1e-31 #kg
mp=1.672e-27 #kg
eps0=8.854e-12 #SI units
mu0=4e-7*math.pi #SI units
Rd1=random.randrange(1,30,1)
Rd2=random.randrange(30,50,1)
Rd3=random.randrange(50,70,1)
Rd4=random.randrange(70,100,1)
Rd5=random.randrange(100,120,1)
Vd1=random.randrange(1,20,1)
Vd2=random.randrange(20,40,1)
Vd3=random.randrange(40,60,1)
#R=[0,10,30,50,70,100] #Ohm
#V=[0,12,24,36] # Volt
R=[0,Rd1,Rd2,Rd3,Rd4,Rd5] #Ohms
V=[0,Vd1,Vd2,Vd3] #Volts
#here the currents IL and IR are defined as in figure ps3_p3_fig2
a=numpy.array([ [ R[1]+R[4]+R[5],R[4] ],[R[4], R[2]+R[3]+R[4] ] ])
b=numpy.array([V[1]-V[2],-V[3]-V[2]])
x=numpy.linalg.solve(a,b)
IL='%.2e' % x[0]
IR='%.2e' % x[1]
ILR='%.2e' % (x[0]+x[1])
def sign(x):
return abs(x)/x
RW="Rightwards"
LW="Leftwards"
UW="Upwards"
DW="Downwards"
I1='%.2e' % abs(x[0])
I1d=LW if sign(x[0])==1 else RW
I1not=LW if I1d==RW else RW
I2='%.2e' % abs(x[1])
I2d=RW if sign(x[1])==1 else LW
I2not=LW if I2d==RW else RW
I3='%.2e' % abs(x[1])
I3d=DW if sign(x[1])==1 else UW
I3not=DW if I3d==UW else UW
I4='%.2e' % abs(x[0]+x[1])
I4d=UW if sign(x[1]+x[0])==1 else DW
I4not=DW if I4d==UW else UW
I5='%.2e' % abs(x[0])
I5d=RW if sign(x[0])==1 else LW
I5not=LW if I5d==RW else RW
VAP=-x[0]*R[1]-(x[0]+x[1])*R[4]
VPN=-V[2]
VGD=+V[1]-x[0]*R[1]+V[3]+x[1]*R[2]
aVAP='%.2e' % VAP
aVPN='%.2e' % VPN
aVGD='%.2e' % VGD
""")
g = {}
safe_exec(code, g)
self.assertIn("aVAP", g)
|
abhinavp13/IITBX-edx-platform-dev
|
common/lib/capa/capa/safe_exec/tests/test_safe_exec.py
|
Python
|
agpl-3.0
| 9,805
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import cinder
class BackupOverviewTab(tabs.Tab):
name = _("Overview")
slug = "overview"
template_name = ("project/volumes/backups/"
"_detail_overview.html")
def get_context_data(self, request):
try:
backup = self.tab_group.kwargs['backup']
try:
volume = cinder.volume_get(request, backup.volume_id)
except Exception:
volume = None
return {'backup': backup,
'volume': volume}
except Exception:
redirect = reverse('horizon:project:volumes:index')
exceptions.handle(self.request,
_('Unable to retrieve backup details.'),
redirect=redirect)
class BackupDetailTabs(tabs.TabGroup):
slug = "backup_details"
tabs = (BackupOverviewTab,)
|
FNST-OpenStack/horizon
|
openstack_dashboard/dashboards/project/volumes/backups/tabs.py
|
Python
|
apache-2.0
| 1,613
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils._text import to_native
from ansible.module_utils.common.validation import check_type_str
TEST_CASES = (
('string', 'string'),
(100, '100'),
(1.5, '1.5'),
({'k1': 'v1'}, "{'k1': 'v1'}"),
([1, 2, 'three'], "[1, 2, 'three']"),
((1, 2,), '(1, 2)'),
)
@pytest.mark.parametrize('value, expected', TEST_CASES)
def test_check_type_str(value, expected):
assert expected == check_type_str(value)
@pytest.mark.parametrize('value, expected', TEST_CASES[1:])
def test_check_type_str_no_conversion(value, expected):
with pytest.raises(TypeError) as e:
check_type_str(value, allow_conversion=False)
assert 'is not a string and conversion is not allowed' in to_native(e.value)
|
aperigault/ansible
|
test/units/module_utils/common/validation/test_check_type_str.py
|
Python
|
gpl-3.0
| 994
|
# Helper module for a test_reflect test
1/0
|
mzdaniel/oh-mainline
|
vendor/packages/twisted/twisted/test/reflect_helper_ZDE.py
|
Python
|
agpl-3.0
| 46
|
# (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import math
def issubset(a, b):
return set(a) <= set(b)
def issuperset(a, b):
return set(a) >= set(b)
def isnotanumber(x):
try:
return math.isnan(x)
except TypeError:
return False
def contains(seq, value):
    '''The ``in`` test with its operands reversed, allowing use as a test in filters like ``selectattr``
.. versionadded:: 2.8
'''
return value in seq
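# Illustrative usage of these tests in a Jinja2 expression (the data below is
# hypothetical, not part of this module):
#   {{ [1, 2] is issubset([1, 2, 3]) }}   -> True
#   {{ [1, 2, 3] is superset([1, 2]) }}   -> True
#   {{ [80, 443] is contains(80) }}       -> True
#   {{ 1.0 is nan }}                      -> False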
class TestModule:
''' Ansible math jinja2 tests '''
def tests(self):
return {
# set theory
'issubset': issubset,
'subset': issubset,
'issuperset': issuperset,
'superset': issuperset,
'contains': contains,
# numbers
'isnan': isnotanumber,
'nan': isnotanumber,
}
|
alxgu/ansible
|
lib/ansible/plugins/test/mathstuff.py
|
Python
|
gpl-3.0
| 1,561
|
# Util.py - Python extension for perf trace, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
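# For example, nsecs_str(1234567890) returns "    1.234567890": whole seconds
# padded to a width of 5, then the nanosecond remainder zero-padded to 9 digits.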
def clear_term():
print("\x1b[H\x1b[2J")
|
droidzone/Supernova-Kernel
|
tools/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
Python
|
gpl-2.0
| 686
|
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from selenium.common import exceptions
class NaturalNonVisibleElementsTest(base_test.WebDriverBaseTest):
def test_0x0_pixel_element_is_not_visible(self):
self.driver.get(self.webserver.where_is("element_state/res/0x0-pixels.html"))
el = self.driver.find_element_by_css_selector("div")
self.assertFalse(el.is_displayed())
def test_0x0_pixel_text_node_is_visible(self):
self.driver.get(self.webserver.where_is("element_state/res/0x0-pixels-text-node.html"))
el = self.driver.find_element_by_css_selector("p")
self.assertTrue(el.is_displayed())
def test_1x1_pixel_element(self):
self.driver.get(self.webserver.where_is("element_state/res/1x1-pixels.html"))
el = self.driver.find_element_by_css_selector("p")
self.assertTrue(el.is_displayed())
    def test_zero_sized_element_is_shown_if_descendant_has_size(self):
self.driver.get(self.webserver.where_is("element_state/res/zero-sized-element-with-sizable-decendant.html"))
parent = self.driver.find_element_by_css_selector("#parent")
child = self.driver.find_element_by_css_selector("#child")
self.assertTrue(parent.is_displayed())
self.assertTrue(child.is_displayed())
def test_input_type_hidden_is_never_visible(self):
self.driver.get(self.webserver.where_is("element_state/res/input-type-hidden.html"))
input = self.driver.find_element_by_css_selector("input")
self.assertFalse(input.is_displayed())
def test_input_morphs_into_hidden(self):
self.driver.get(self.webserver.where_is("element_state/res/input-morphs-into-hidden.html"))
input = self.driver.find_element_by_css_selector("input")
self.assertFalse(input.is_displayed())
def test_parent_node_visible_when_all_children_are_absolutely_positioned_and_overflow_is_hidden(self):
pass
def test_parent_of_absolutely_positioned_elements_visible_where_ancestor_overflow_is_hidden(self):
"""When a parent's ancestor hides any overflow, absolutely positioned child elements are
still visible. The parent container is also considered visible by webdriver for this
reason because it is interactable."""
self.driver.get(self.webserver.where_is("element_state/res/absolute-children-ancestor-hidden-overflow.html"))
children = self.driver.find_elements_by_css_selector(".child")
assert all(child.is_displayed() for child in children)
parent = self.driver.find_element_by_css_selector("#parent")
assert parent.is_displayed()
def test_element_hidden_by_overflow_x_is_not_visible(self):
# TODO(andreastt): This test should probably be split in three. Also it's making two
# assertions.
pages = ["element_state/res/x-hidden-y-hidden.html",
"element_state/res/x-hidden-y-scroll.html",
"element_state/res/x-hidden-y-auto.html"]
for page in pages:
self.driver.get(self.webserver.where_is(page))
right = self.driver.find_element_by_css_selector("#right")
bottom_right = self.driver.find_element_by_css_selector("#bottom-right")
self.assertFalse(right.is_displayed())
self.assertFalse(bottom_right.is_displayed())
def test_element_hidden_by_overflow_y_is_not_visible(self):
# TODO(andreastt): This test should probably be split in three. Also it's making two
# assertions.
pages = ["element_state/res/x-hidden-y-hidden.html",
"element_state/res/x-scroll-y-hidden.html",
"element_state/res/x-auto-y-hidden.html"]
for page in pages:
self.driver.get(self.webserver.where_is(page))
bottom = self.driver.find_element_by_css_selector("#bottom")
bottom_right = self.driver.find_element_by_css_selector("#bottom-right")
self.assertFalse(bottom.is_displayed())
self.assertFalse(bottom_right.is_displayed())
def test_parent_node_visible_when_all_children_are_absolutely_position_and_overflow_is_hidden(self):
pass
def test_element_scrollable_by_overflow_x_is_visible(self):
pass
def test_element_scrollable_by_overflow_y_is_visible(self):
pass
def test_element_scrollable_by_overflow_x_and_y_is_visible(self):
pass
def test_element_outside_viewport(self):
self.driver.get(self.webserver.where_is("element_state/res/element-outside-viewport.html"))
hidden = self.driver.find_element_by_css_selector("div")
self.assertFalse(hidden.is_displayed())
def test_element_dynamically_moved_outside_viewport(self):
self.driver.get(self.webserver.where_is("element_state/res/element-dynamically-moved-outside-viewport.html"))
hidden = self.driver.find_element_by_css_selector("div")
self.assertFalse(hidden.is_displayed())
def test_element_hidden_by_other_element(self):
self.driver.get(self.webserver.where_is("element_state/res/element-hidden-by-other-element.html"))
overlay = self.driver.find_element_by_css_selector("#overlay")
hidden = self.driver.find_element_by_css_selector("#hidden")
self.assertTrue(overlay.is_displayed())
self.assertFalse(hidden.is_displayed())
def test_element_partially_hidden_by_other_element(self):
self.driver.get(self.webserver.where_is("element_state/res/element-partially-hidden-by-other-element.html"))
partial = self.driver.find_element_by_css_selector("#partial")
self.assertTrue(partial.is_displayed())
def test_element_hidden_by_z_index(self):
self.driver.get(self.webserver.where_is("element_state/res/element-hidden-by-z-index.html"))
overlay = self.driver.find_element_by_css_selector("#overlay")
hidden = self.driver.find_element_by_css_selector("#hidden")
self.assertTrue(overlay.is_displayed())
self.assertFalse(hidden.is_displayed())
def test_element_moved_outside_viewport_by_transform(self):
self.driver.get(self.webserver.where_is("element_state/res/element-moved-outside-viewport-by-transform.html"))
el = self.driver.find_element_by_css_selector("div")
self.assertFalse(el.is_displayed())
def test_element_moved_behind_other_element_by_transform(self):
self.driver.get(self.webserver.where_is("element_state/res/element-moved-behind-other-element-by-transform.html"))
overlay = self.driver.find_element_by_css_selector("#overlay")
hidden = self.driver.find_element_by_css_selector("#hidden")
self.assertTrue(overlay.is_displayed())
self.assertFalse(hidden.is_displayed())
def test_text_with_same_color_as_background(self):
self.driver.get(self.webserver.where_is("element_state/res/text-with-same-color-as-background.html"))
p = self.driver.find_element_by_css_selector("p")
self.assertFalse(p.is_displayed())
def test_text_with_same_color_as_parent_background(self):
self.driver.get(self.webserver.where_is("element_state/res/text-with-same-color-as-parent-background.html"))
p = self.driver.find_element_by_css_selector("p")
self.assertFalse(p.is_displayed())
def test_text_with_matching_color_and_background(self):
self.driver.get(self.webserver.where_is("element_state/res/text-with-matching-color-and-background.html"))
p = self.driver.find_element_by_css_selector("p")
self.assertTrue(p.is_displayed())
def test_element_with_same_color_as_background(self):
self.driver.get(self.webserver.where_is("element_state/res/element-with-same-color-as-background.html"))
el = self.driver.find_element_by_css_selector("div")
self.assertFalse(el.is_displayed())
def test_element_with_same_color_as_parent_background(self):
self.driver.get(self.webserver.where_is("element_state/res/element-with-same-color-as-parent-background.html"))
hidden = self.driver.find_element_by_css_selector("#hidden")
self.assertFalse(hidden.is_displayed())
class BodyElementIsAlwaysDisplayedTest(base_test.WebDriverBaseTest):
def assert_body_is_displayed_on(self, page):
self.driver.get(self.webserver.where_is(page))
body = self.driver.find_element_by_css_selector("body")
assert body.is_displayed()
def test_implicit(self):
self.assert_body_is_displayed_on("element_state/res/body_implicit.html")
def test_empty(self):
self.assert_body_is_displayed_on("element_state/res/body_empty.html")
def test_visibility_hidden(self):
self.assert_body_is_displayed_on("element_state/res/body_visibility_hidden.html")
def test_overflow_hidden(self):
self.assert_body_is_displayed_on("element_state/res/body_overflow_hidden.html")
class DisplayTest(base_test.WebDriverBaseTest):
def test_display_block(self):
self.driver.get(self.webserver.where_is("element_state/res/display-block.html"))
el = self.driver.find_element_by_css_selector("p")
self.assertTrue(el.is_displayed())
def test_display_none(self):
self.driver.get(self.webserver.where_is("element_state/res/display-none.html"))
el = self.driver.find_element_by_css_selector("p")
self.assertFalse(el.is_displayed())
def test_display_none_hides_child_node(self):
self.driver.get(self.webserver.where_is("element_state/res/display-none-child.html"))
parent = self.driver.find_element_by_css_selector("#parent")
child = self.driver.find_element_by_css_selector("#child")
self.assertFalse(parent.is_displayed())
self.assertFalse(child.is_displayed())
def test_display_none_hides_child_node_link(self):
self.driver.get(self.webserver.where_is("element_state/res/display-none-child-link.html"))
child = self.driver.find_element_by_css_selector("#child")
self.assertFalse(child.is_displayed())
def test_display_none_hides_child_node_paragraph(self):
self.driver.get(self.webserver.where_is("element_state/res/display-none-child-paragraph.html"))
child = self.driver.find_element_by_css_selector("#child")
self.assertFalse(child.is_displayed())
    def test_display_none_on_parent_takes_precedence(self):
self.driver.get(self.webserver.where_is("element_state/res/display-none-parent-presedence.html"))
child = self.driver.find_element_by_css_selector("#child")
self.assertFalse(child.is_displayed())
    def test_display_none_on_parent_takes_precedence_over_visibility_visible(self):
self.driver.get(self.webserver.where_is("element_state/res/display-none-parent-presedence-visibility.html"))
child = self.driver.find_element_by_css_selector("#child")
self.assertFalse(child.is_displayed())
def test_display_none_hidden_dynamically(self):
self.driver.get(self.webserver.where_is("element_state/res/display-none-dynamic.html"))
hidden = self.driver.find_element_by_css_selector("#hidden")
self.assertFalse(hidden.is_displayed())
class VisibilityTest(base_test.WebDriverBaseTest):
def test_element_state_hidden(self):
self.driver.get(self.webserver.where_is("element_state/res/visibility-hidden.html"))
el = self.driver.find_element_by_css_selector("p")
self.assertFalse(el.is_displayed())
def test_element_state_visible(self):
self.driver.get(self.webserver.where_is("element_state/res/visibility-visible.html"))
el = self.driver.find_element_by_css_selector("p")
self.assertTrue(el.is_displayed())
def test_visibility_hidden_hides_child_node(self):
self.driver.get(self.webserver.where_is("element_state/res/visibility-child.html"))
parent = self.driver.find_element_by_css_selector("#parent")
child = self.driver.find_element_by_css_selector("#child")
self.assertFalse(parent.is_displayed())
self.assertFalse(child.is_displayed())
def test_visibility_hidden_hides_child_node_link(self):
self.driver.get(self.webserver.where_is("element_state/res/visibility-child-link.html"))
parent = self.driver.find_element_by_css_selector("#parent")
child = self.driver.find_element_by_css_selector("#child")
self.assertFalse(parent.is_displayed())
self.assertFalse(child.is_displayed())
def test_visibility_hidden_hides_child_node_paragraph(self):
self.driver.get(self.webserver.where_is("element_state/res/visibility-child-paragraph.html"))
parent = self.driver.find_element_by_css_selector("#parent")
child = self.driver.find_element_by_css_selector("#child")
self.assertFalse(parent.is_displayed())
self.assertFalse(child.is_displayed())
def test_visibility_hidden_on_child_takes_precedence(self):
self.driver.get(self.webserver.where_is("element_state/res/visibility-child-presedence.html"))
child = self.driver.find_element_by_css_selector("#child")
self.assertTrue(child.is_displayed())
def test_visibility_hidden_on_parent_takes_precedence_over_display_block(self):
pass
def test_visibility_hidden_set_dynamically(self):
pass
def test_should_show_element_not_visible_with_hidden_attribute(self):
self.driver.get(self.webserver.where_is("element_state/res/hidden.html"))
singleHidden = self.driver.find_element_by_css_selector('#singleHidden')
self.assertFalse(singleHidden.is_displayed())
def test_should_show_element_not_visible_when_parent_element_has_hidden_attribute(self):
self.driver.get(self.webserver.where_is("element_state/res/hidden.html"))
child = self.driver.find_element_by_css_selector('#child')
self.assertFalse(child.is_displayed())
class VisibilityInteractionTest(base_test.WebDriverBaseTest):
def test_input_hidden_is_unclickable(self):
self.driver.get(self.webserver.where_is("element_state/res/input-type-hidden-unclickable.html"))
input = self.driver.find_element_by_css_selector("input")
with self.assertRaises(exceptions.ElementNotVisibleException):
input.click()
def test_hidden_input_checkbox_is_untogglable(self):
self.driver.get(self.webserver.where_is("element_state/res/hidden-input-type-checkbox-untogglable.html"))
checkbox = self.driver.find_element_by_css_selector("input")
with self.assertRaises(exceptions.ElementNotVisibleException):
checkbox.click()
def test_typing_in_hidden_input_is_impossible(self):
self.driver.get(self.webserver.where_is("element_state/res/hidden-input-type-text-writing.html"))
textfield = self.driver.find_element_by_css_selector("input")
with self.assertRaises(exceptions.ElementNotVisibleException):
textfield.send_keys("Koha is a popular Indian cheese")
class OpacityTest(base_test.WebDriverBaseTest):
pass
if __name__ == "__main__":
unittest.main()
|
youtube/cobalt
|
third_party/web_platform_tests/webdriver/element_state/visibility_test.py
|
Python
|
bsd-3-clause
| 15,305
|
# -*- coding: utf-8 -*-
import openerp
from openerp import http
from openerp.http import request
import openerp.addons.website_sale.controllers.main
class website_sale(openerp.addons.website_sale.controllers.main.website_sale):
@http.route(['/shop/payment'], type='http', auth="public", website=True)
def payment(self, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.website.sale_get_order(context=context)
carrier_id = post.get('carrier_id')
if carrier_id:
carrier_id = int(carrier_id)
if order:
request.registry['sale.order']._check_carrier_quotation(cr, uid, order, force_carrier_id=carrier_id, context=context)
if carrier_id:
return request.redirect("/shop/payment")
res = super(website_sale, self).payment(**post)
return res
def order_lines_2_google_api(self, order_lines):
""" Transforms a list of order lines into a dict for google analytics """
order_lines_not_delivery = [line for line in order_lines if not line.is_delivery]
return super(website_sale, self).order_lines_2_google_api(order_lines_not_delivery)
|
mycodeday/crm-platform
|
website_sale_delivery/controllers/main.py
|
Python
|
gpl-3.0
| 1,205
|
class WiggleParser(object):
"""
Warning - this does not implement the full specification!
"""
def entries(self, input_fh):
track_name = None
replicon = None
span = None
pos_value_pairs = []
for line in input_fh:
row = line[:-1].split()
if len(row) == 0:
continue
if row[0].startswith("track"):
track_name = self._track_name(row)
elif row[0].startswith("variableStep"):
if replicon:
prev_replicon = replicon
prev_span = span
prev_pos_value_pairs = pos_value_pairs
replicon = self._replicon(row)
span = None
pos_value_pairs = []
yield WiggleEntry(
track_name, prev_replicon, prev_span,
prev_pos_value_pairs)
else:
replicon = self._replicon(row)
else:
pos_value_pairs.append([int(row[0]), float(row[1])])
yield WiggleEntry(track_name, replicon, span, pos_value_pairs)
def _replicon(self, row):
return self._attrs_and_values(row)["chrom"]
def _track_name(self, row):
return self._attrs_and_values(row)["name"]
def _attrs_and_values(self, row):
attrs_and_values = {}
for attr_and_value in row:
if "=" not in attr_and_value:
continue
attr, value = attr_and_value.split("=")
value = value.replace("\"", "")
attrs_and_values[attr] = value
return attrs_and_values
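    # Illustrative example (values are hypothetical): a declaration row such as
    #   ["variableStep", "chrom=chr1", "span=1"]
    # is parsed by _attrs_and_values() into {"chrom": "chr1", "span": "1"},
    # from which _replicon() picks out "chr1".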
class WiggleEntry(object):
def __init__(self, track_name, replicon, span, pos_value_pairs):
self.track_name = track_name
self.replicon = replicon
self.span = span
self.pos_value_pairs = pos_value_pairs
class WiggleWriter(object):
def __init__(self, track_str, fh):
self._fh = fh
self._fh.write(("track type=wiggle_0 name=\"{}\"\n".format(track_str)))
def write_replicons_coverages(
self, replicon_str, coverages, discard_zeros=True, factor=1.0):
self._fh.write("variableStep chrom={} span=1\n".format(
replicon_str))
        # Filter out values of 0 and multiply the remaining ones by
        # the given factor. pos is increased by 1 as a translation
        # from a 0-based system (Python list) to a 1-based system
        # (wiggle) takes place.
self._fh.write(
"\n".join(["{} {}".format(pos + 1, coverage * factor)
for pos, coverage in
filter(lambda pos_and_cov: pos_and_cov[1] != 0.0,
enumerate(coverages))]) + "\n")
def close_file(self):
self._fh.close()
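# A minimal usage sketch of the writer above (file name and values are
# hypothetical):
#
#   with open("coverage.wig", "w") as fh:
#       writer = WiggleWriter("my_track", fh)
#       writer.write_replicons_coverages("chr1", [0.0, 3.0, 5.0], factor=2.0)
#       # writes "variableStep chrom=chr1 span=1", then "2 6.0" and "3 10.0"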
|
tbischler/PEAKachu
|
peakachulib/wiggle.py
|
Python
|
isc
| 2,906
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import tempfile
import shutil
import os
from contextlib import contextmanager
from importlib import import_module
from psycopg2.extras import register_composite, CompositeCaster
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from django.test import TestCase
from django.db import connection
from django.db.migrations.loader import MigrationLoader
from django.apps import apps
from django.core.management import call_command
from django.conf import settings
from django.test.utils import extend_sys_path
from test_app.models import Book
from migrate_sql.config import SQLItem
class TupleComposite(CompositeCaster):
"""
Loads composite type object as tuple.
"""
def make(self, values):
return tuple(values)
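    # TupleComposite is hooked in below (see check_type) through psycopg2's
    # register_composite, e.g.:
    #   register_composite('book', cursor.cursor, factory=TupleComposite)
    # so that fetching a 'book' composite value yields a plain tuple.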
def module_dir(module):
"""
Find the name of the directory that contains a module, if possible.
    Raise ValueError otherwise, e.g. for namespace packages that are split
over several directories.
"""
# Convert to list because _NamespacePath does not support indexing on 3.3.
paths = list(getattr(module, '__path__', []))
if len(paths) == 1:
return paths[0]
else:
filename = getattr(module, '__file__', None)
if filename is not None:
return os.path.dirname(filename)
raise ValueError("Cannot determine directory containing %s" % module)
def item(name, version, dependencies=None):
"""
    Creates a mock SQL item represented by a PostgreSQL composite type.
    Returns:
        (SQLItem): Resulting composite type:
           * sql = CREATE TYPE <name> AS (
               [<dep1> <dep1_type>, ..., <depN> <depN_type>], arg1 int, arg2 int, .., argN int);
             dependencies are arguments; version determines the number of extra int arguments
             (version = 1 means one int argument).
           * reverse_sql = DROP TYPE <name>.
"""
dependencies = dependencies or ()
args = ', '.join(['{name}{ver} {name}'.format(name=dep[1], ver=version)
for dep in dependencies] + ['arg{i} int'.format(i=i + 1)
for i in range(version)])
sql, reverse_sql = ('CREATE TYPE {name} AS ({args}); -- {ver}'.format(
name=name, args=args, ver=version),
'DROP TYPE {}'.format(name))
return SQLItem(name, sql, reverse_sql, dependencies=dependencies)
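# Illustrative example of the mock item above (values follow directly from the
# format strings): item('book', 2, [('test_app', 'rating')]) produces an SQLItem
# whose sql is
#   "CREATE TYPE book AS (rating2 rating, arg1 int, arg2 int); -- 2"
# and whose reverse_sql is "DROP TYPE book".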
def contains_ordered(lst, order):
"""
Checks if `order` sequence exists in `lst` in the defined order.
"""
prev_idx = -1
try:
for item in order:
idx = lst.index(item)
if idx <= prev_idx:
return False
prev_idx = idx
except ValueError:
return False
return True
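# For example, contains_ordered([1, 2, 3, 4], [2, 4]) is True, while
# contains_ordered([1, 2, 3, 4], [4, 2]) and contains_ordered([1, 2], [5]) are False.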
def mig_name(name):
"""
    Returns name[0] (app name) and the first 4 letters of the migration name (name[1]).
"""
return name[0], name[1][:4]
def run_query(sql, params=None):
cursor = connection.cursor()
cursor.execute(sql, params=params)
return cursor.fetchall()
class BaseMigrateSQLTestCase(TestCase):
"""
Tests `migrate_sql` using sample PostgreSQL functions and their body/argument changes.
"""
def setUp(self):
super(BaseMigrateSQLTestCase, self).setUp()
self.config = import_module('test_app.sql_config')
self.config2 = import_module('test_app2.sql_config')
self.out = StringIO()
def tearDown(self):
super(BaseMigrateSQLTestCase, self).tearDown()
if hasattr(self.config, 'sql_items'):
delattr(self.config, 'sql_items')
if hasattr(self.config2, 'sql_items'):
delattr(self.config2, 'sql_items')
def check_migrations_content(self, expected):
"""
Check content (operations) of migrations.
"""
loader = MigrationLoader(None, load=True)
available = loader.disk_migrations.keys()
for expc_mig, (check_exists, dependencies, op_groups) in expected.items():
key = next((mig for mig in available if mig_name(mig) == mig_name(expc_mig)), None)
if check_exists:
self.assertIsNotNone(key, 'Expected migration {} not found.'.format(expc_mig))
else:
self.assertIsNone(key, 'Unexpected migration {} was found.'.format(expc_mig))
continue
migration = loader.disk_migrations[key]
self.assertEqual({mig_name(dep) for dep in migration.dependencies}, set(dependencies))
mig_ops = [(op.__class__.__name__, op.name) for op in migration.operations]
for op_group in op_groups:
self.assertTrue(contains_ordered(mig_ops, op_group))
@contextmanager
def temporary_migration_module(self, app_label='test_app', module=None):
"""
Allows testing management commands in a temporary migrations module.
The migrations module is used as a template for creating the temporary
migrations module. If it isn't provided, the application's migrations
module is used, if it exists.
Returns the filesystem path to the temporary migrations module.
"""
temp_dir = tempfile.mkdtemp()
try:
target_dir = tempfile.mkdtemp(dir=temp_dir)
with open(os.path.join(target_dir, '__init__.py'), 'w'):
pass
target_migrations_dir = os.path.join(target_dir, 'migrations')
if module is None:
module = apps.get_app_config(app_label).name + '.migrations'
try:
source_migrations_dir = module_dir(import_module(module))
except (ImportError, ValueError):
pass
else:
shutil.copytree(source_migrations_dir, target_migrations_dir)
with extend_sys_path(temp_dir):
new_module = os.path.basename(target_dir) + '.migrations'
new_setting = settings.MIGRATION_MODULES.copy()
new_setting[app_label] = new_module
with self.settings(MIGRATION_MODULES=new_setting):
yield target_migrations_dir
finally:
shutil.rmtree(temp_dir)
class MigrateSQLTestCase(BaseMigrateSQLTestCase):
SQL_V1 = (
# sql
[("""
CREATE OR REPLACE FUNCTION top_books()
RETURNS SETOF test_app_book AS $$
BEGIN
RETURN QUERY SELECT * FROM test_app_book ab WHERE ab.rating > %s
ORDER BY ab.rating DESC;
END;
$$ LANGUAGE plpgsql;
""", [5])],
# reverse sql
'DROP FUNCTION top_books()',
)
SQL_V2 = (
# sql
[("""
CREATE OR REPLACE FUNCTION top_books(min_rating int = %s)
RETURNS SETOF test_app_book AS $$
BEGIN
RETURN QUERY EXECUTE 'SELECT * FROM test_app_book ab
WHERE ab.rating > $1 AND ab.published
ORDER BY ab.rating DESC'
USING min_rating;
END;
$$ LANGUAGE plpgsql;
""", [5])],
# reverse sql
'DROP FUNCTION top_books(int)',
)
SQL_V3 = (
# sql
[("""
CREATE OR REPLACE FUNCTION top_books()
RETURNS SETOF test_app_book AS $$
DECLARE
min_rating int := %s;
BEGIN
RETURN QUERY EXECUTE 'SELECT * FROM test_app_book ab
WHERE ab.rating > $1 AND ab.published
ORDER BY ab.rating DESC'
USING min_rating;
END;
$$ LANGUAGE plpgsql;
""", [5])],
# reverse sql
'DROP FUNCTION top_books()',
)
def setUp(self):
super(MigrateSQLTestCase, self).setUp()
books = (
Book(name="Clone Wars", author="John Ben", rating=4, published=True),
Book(name="The mysterious dog", author="John Ben", rating=6, published=True),
Book(name="HTML 5", author="John Ben", rating=9, published=True),
Book(name="Management", author="John Ben", rating=8, published=False),
Book(name="Python 3", author="John Ben", rating=3, published=False),
)
Book.objects.bulk_create(books)
def check_run_migrations(self, migrations):
"""
Launch migrations requested and compare results.
"""
for migration, expected in migrations:
call_command('migrate', 'test_app', migration, stdout=self.out)
if expected:
result = run_query('SELECT name FROM top_books()')
self.assertEqual(result, expected)
else:
result = run_query("SELECT COUNT(*) FROM pg_proc WHERE proname = 'top_books'")
self.assertEqual(result, [(0,)])
def check_migrations(self, content, results, migration_module=None, app_label='test_app'):
"""
Checks migrations content and results after being run.
"""
with self.temporary_migration_module(module=migration_module):
call_command('makemigrations', app_label, stdout=self.out)
self.check_migrations_content(content)
call_command('migrate', app_label, stdout=self.out)
self.check_run_migrations(results)
def test_migration_add(self):
"""
Items newly created should be properly persisted into migrations and created in database.
"""
sql, reverse_sql = self.SQL_V1
self.config.sql_items = [SQLItem('top_books', sql, reverse_sql)]
expected_content = {
('test_app', '0002'): (
True,
[('test_app', '0001')],
[[('CreateSQL', 'top_books')]],
),
}
expected_results = (
('0002', [('HTML 5',), ('Management',), ('The mysterious dog',)]),
)
self.check_migrations(expected_content, expected_results)
def test_migration_change(self):
"""
Items changed should properly persist changes into migrations and alter database.
"""
sql, reverse_sql = self.SQL_V2
self.config.sql_items = [SQLItem('top_books', sql, reverse_sql)]
expected_content = {
('test_app', '0003'): (
True,
[('test_app', '0002')],
[[('ReverseAlterSQL', 'top_books'), ('AlterSQL', 'top_books')]],
),
}
expected_results = (
('0003', [('HTML 5',), ('The mysterious dog',)]),
('0002', [('HTML 5',), ('Management',), ('The mysterious dog',)]),
('0001', None),
)
self.check_migrations(expected_content, expected_results, 'test_app.migrations_change')
def test_migration_replace(self):
"""
        Items changed with `replace` = True should properly persist changes into migrations and
        replace the object in the database without reversing it first.
"""
sql, reverse_sql = self.SQL_V3
self.config.sql_items = [SQLItem('top_books', sql, reverse_sql, replace=True)]
expected_content = {
('test_app', '0003'): (
True,
[('test_app', '0002')],
[[('AlterSQL', 'top_books')]],
),
}
expected_results = (
('0003', [('HTML 5',), ('The mysterious dog',)]),
('0002', [('HTML 5',), ('Management',), ('The mysterious dog',)]),
('0001', None),
('0002', [('HTML 5',), ('Management',), ('The mysterious dog',)]),
)
self.check_migrations(expected_content, expected_results, 'test_app.migrations_change')
def test_migration_delete(self):
"""
Items deleted should properly embed deletion into migration and run backward SQL in DB.
"""
self.config.sql_items = []
expected_content = {
('test_app', '0003'): (
True,
[('test_app', '0002')],
[[('DeleteSQL', 'top_books')]],
),
}
expected_results = (
('0003', None),
)
self.check_migrations(expected_content, expected_results, 'test_app.migrations_change')
def test_migration_recreate(self):
"""
Items created after deletion should properly embed recreation into migration and alter DB.
"""
sql, reverse_sql = self.SQL_V2
self.config.sql_items = [SQLItem('top_books', sql, reverse_sql)]
expected_content = {
('test_app', '0004'): (
True,
[('test_app', '0003')],
[[('CreateSQL', 'top_books')]],
),
}
expected_results = (
('0003', None),
('0002', [('HTML 5',), ('Management',), ('The mysterious dog',)]),
)
self.check_migrations(expected_content, expected_results, 'test_app.migrations_recreate')
class SQLDependenciesTestCase(BaseMigrateSQLTestCase):
"""
Tests SQL item dependencies system.
"""
# Expected datasets (input and output) for different migration states.
# When migration is run, database is checked against expected result.
    # Key = name of migration (app, name), value is a list of:
    # * SQL arguments passed to PostgreSQL's ROW
    # * composite type to cast the ROW built above into.
    # * dependency types (registered via psycopg2 `register_composite`)
    # * expected result after fetching the built ROW from the database.
RESULTS_EXPECTED = {
('test_app', '0004'): [
# product check
("(('(1, 2)', '(3)', 4, 5), (('(6, 7)', '(8)', 9, 10), 11), '(12)', 13)",
'product',
['product', 'book', 'author',
'rating', 'sale', 'edition'],
(((1, 2), (3,), 4, 5), (((6, 7), (8,), 9, 10), 11), (12,), 13)),
# narration check
("('(1, 2)', ('(3, 4)', '(5)', 6, 7), 8)",
'narration',
['narration', 'book', 'sale', 'rating'],
((1, 2), ((3, 4), (5,), 6, 7), 8)),
],
('test_app', '0002'): [
# narration check
("('(1)', '(2)', 3)",
'narration',
['rating', 'book', 'sale', 'narration'],
((1,), (2,), 3)),
],
('test_app2', 'zero'): [
# edition check
(None, 'edition', [], None),
# ratings check
(None, 'ratings', [], None),
],
('test_app', '0005'): [
# narration check
("(1)", 'edition', ['edition'], (1,)),
# product check
(None, 'product', [], None),
],
('test_app2', '0003'): [
# sale check
(None, 'sale', [], None),
],
}
def check_type(self, repr_sql, fetch_type, known_types, expect):
"""
Checks composite type structure and format.
"""
cursor = connection.cursor()
if repr_sql:
for _type in known_types:
register_composite(str(_type), cursor.cursor, factory=TupleComposite)
sql = 'SELECT ROW{repr_sql}::{ftype}'.format(repr_sql=repr_sql, ftype=fetch_type)
cursor.execute(sql)
result = cursor.fetchone()[0]
self.assertEqual(result, expect)
else:
result = run_query("SELECT COUNT(*) FROM pg_type WHERE typname = %s",
[fetch_type])
self.assertEqual(result, [(0,)])
def check_migrations(self, content, migrations, module=None, module2=None):
"""
Checks migrations content and result after being run.
"""
with self.temporary_migration_module(app_label='test_app', module=module):
with self.temporary_migration_module(app_label='test_app2', module=module2):
call_command('makemigrations', stdout=self.out)
self.check_migrations_content(content)
for app_label, migration in migrations:
call_command('migrate', app_label, migration, stdout=self.out)
check_cases = self.RESULTS_EXPECTED[(app_label, migration)]
for check_case in check_cases:
self.check_type(*check_case)
def test_deps_create(self):
"""
Creating a graph of items with dependencies should embed relations in migrations.
"""
self.config.sql_items = [
item('rating', 1),
item('book', 1),
item('narration', 1, [('test_app2', 'sale'), ('test_app', 'book')]),
]
self.config2.sql_items = [item('sale', 1)]
expected_content = {
('test_app2', '0001'): (
True,
[],
[[('CreateSQL', 'sale')]],
),
('test_app', '0002'): (
True,
[('test_app2', '0001'), ('test_app', '0001')],
[[('CreateSQL', 'rating')],
[('CreateSQL', 'book'), ('CreateSQL', 'narration')]],
),
}
migrations = (
('test_app', '0002'),
)
self.check_migrations(expected_content, migrations)
def test_deps_update(self):
"""
Updating a graph of items with dependencies should embed relation changes in migrations.
"""
self.config.sql_items = [
item('rating', 1),
item('edition', 1),
item('author', 1, [('test_app', 'book')]),
item('narration', 1, [('test_app2', 'sale'), ('test_app', 'book')]),
item('book', 2, [('test_app2', 'sale'), ('test_app', 'rating')]),
item('product', 1,
[('test_app', 'book'), ('test_app', 'author'), ('test_app', 'edition')]),
]
self.config2.sql_items = [item('sale', 2)]
expected_content = {
('test_app', '0003'): (
True,
[('test_app', '0002')],
[[('CreateSQL', 'edition')],
[('ReverseAlterSQL', 'narration'), ('ReverseAlterSQL', 'book')]],
),
('test_app2', '0002'): (
True,
[('test_app', '0003'), ('test_app2', '0001')],
[[('ReverseAlterSQL', 'sale'), ('AlterSQL', 'sale')]],
),
('test_app', '0004'): (
True,
[('test_app2', '0002'), ('test_app', '0003')],
[[('AlterSQL', 'book'), ('CreateSQL', 'author'), ('CreateSQL', 'product')],
[('AlterSQL', 'book'), ('AlterSQL', 'narration')],
[('AlterSQL', 'book'), ('AlterSQLState', u'book')]],
),
}
migrations = (
('test_app', '0004'),
('test_app', '0002'),
('test_app', '0004'),
)
self.check_migrations(
expected_content, migrations,
module='test_app.migrations_deps_update', module2='test_app2.migrations_deps_update',
)
def test_deps_circular(self):
"""
        Graphs whose items form a circular dependency should raise an error.
"""
from django.db.migrations.graph import CircularDependencyError
self.config.sql_items = [
item('narration', 1, [('test_app2', 'sale'), ('test_app', 'book')]),
item('book', 2, [('test_app2', 'sale'), ('test_app', 'narration')]),
]
self.config2.sql_items = [item('sale', 1)]
with self.assertRaises(CircularDependencyError):
self.check_migrations(
{}, (),
module='test_app.migrations_deps_update',
module2='test_app2.migrations_deps_update',
)
def test_deps_no_changes(self):
"""
        If no changes are made to the structure of the sql config, no migrations should be created.
"""
self.config.sql_items = [
item('rating', 1),
item('book', 1),
item('narration', 1, [('test_app2', 'sale'), ('test_app', 'book')]),
]
self.config2.sql_items = [item('sale', 1)]
expected_content = {
('test_app', '0003'): (False, [], []),
('test_app2', '0002'): (False, [], []),
}
migrations = ()
self.check_migrations(
expected_content, migrations,
module='test_app.migrations_deps_update', module2='test_app2.migrations_deps_update',
)
def test_deps_delete(self):
"""
        A graph from which some items are removed along with their dependencies should reflect
        the changes in migrations.
"""
self.config.sql_items = [
item('rating', 1),
item('edition', 1),
]
self.config2.sql_items = []
expected_content = {
('test_app', '0005'): (
True,
[('test_app', '0004')],
[[('DeleteSQL', 'narration'), ('DeleteSQL', 'book')],
[('DeleteSQL', 'product'), ('DeleteSQL', 'author'), ('DeleteSQL', 'book')]],
),
('test_app2', '0003'): (
True,
[('test_app', '0005'), ('test_app2', '0002')],
[[('DeleteSQL', 'sale')]],
),
}
migrations = (
('test_app', '0005'),
('test_app', '0002'),
('test_app2', 'zero'),
('test_app', '0005'),
('test_app2', '0003'),
('test_app', '0004'),
)
self.check_migrations(
expected_content, migrations,
module='test_app.migrations_deps_delete', module2='test_app2.migrations_deps_delete',
)
|
klichukb/django-migrate-sql
|
tests/test_app/test_migrations.py
|
Python
|
isc
| 21,936
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyOffers documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 31 19:11:54 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import sphinx_rtd_theme
from pyoffers import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "PyOffers"
copyright = "2016, Dmitry Dygalo"
author = "Dmitry Dygalo"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = release = __version__
# The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'PyOffers v0.4.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "PyOffersdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "PyOffers.tex", "PyOffers Documentation", "Dmitry Dygalo", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pyoffers", "PyOffers Documentation", [author], 1)]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"PyOffers",
"PyOffers Documentation",
author,
"PyOffers",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
Stranger6667/pyoffers
|
docs/conf.py
|
Python
|
mit
| 9,995
|
from .base import Entity, SystemBase
from .component import ComponentBase, MultiComponentBase
from .managers import EntityManager, SystemManager
|
Kromey/roglick
|
roglick/engine/ecs/__init__.py
|
Python
|
mit
| 143
|
# List all images with query string available
def list_all_images_subparser(subparser):
list_all_images = subparser.add_parser(
'list-all-images',
description=('***List all'
' images of'
' producers/consumers'
' account'),
help=('List all'
' images of'
' producers/consumers'
' account'))
group_key = list_all_images.add_mutually_exclusive_group(required=True)
group_key.add_argument('-pn',
'--producer-username',
help="Producer\'s(source account) username")
group_key.add_argument('-cn',
'--consumer-username',
dest='producer_username',
metavar='CONSUMER_USERNAME',
help="Consumer\'s(destination account) username")
group_apikey = list_all_images.add_mutually_exclusive_group(required=True)
group_apikey.add_argument('-pa',
'--producer-apikey',
help="Producer\'s(source account) apikey")
group_apikey.add_argument('-ca',
'--consumer-apikey',
dest='producer_apikey',
metavar='CONSUMER_APIKEY',
help="Consumer\'s(destination account) apikey")
list_all_images.add_argument('--visibility',
nargs=1,
help='[shared][private][public]')
list_all_images.add_argument('--member-status',
nargs=1,
help='[pending][accepted][rejected][all]')
list_all_images.add_argument('--owner',
nargs=1,
help='Owner Id')
|
dman777/image_share
|
subparsers/list_all_images.py
|
Python
|
mit
| 2,198
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import MutableMapping
class BaseClass(MutableMapping): # pragma: no cover
def __init__(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __getitem__(self, key):
return self.__dict__[key]
def __delitem__(self, key):
del self.__dict__[key]
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
# def __repr__(self):
# return self.__str__()
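# A small illustrative sketch of the mapping behaviour above (values are
# hypothetical): attribute storage and item access share the same dict.
#
#   obj = BaseClass(name="Show", year=2014)
#   obj["year"]      # -> 2014
#   obj.name         # -> "Show"
#   len(obj)         # -> 2
#   del obj["year"]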
|
mikejarrett/tvdb-api
|
tvdb/models/common.py
|
Python
|
mit
| 652
|
import os
import re
import shutil
import sys
import tempfile
from typing import Any
from typing import cast
from typing import List
from typing import Tuple
import sqlalchemy
from sqlalchemy import testing
from sqlalchemy.testing import config
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
def _file_combinations(dirname):
path = os.path.join(os.path.dirname(__file__), dirname)
files = []
for f in os.listdir(path):
if f.endswith(".py"):
files.append(os.path.join(os.path.dirname(__file__), dirname, f))
for extra_dir in testing.config.options.mypy_extra_test_paths:
if extra_dir and os.path.isdir(extra_dir):
for f in os.listdir(os.path.join(extra_dir, dirname)):
if f.endswith(".py"):
files.append(os.path.join(extra_dir, dirname, f))
return files
def _incremental_dirs():
path = os.path.join(os.path.dirname(__file__), "incremental")
files = []
for d in os.listdir(path):
if os.path.isdir(os.path.join(path, d)):
files.append(
os.path.join(os.path.dirname(__file__), "incremental", d)
)
for extra_dir in testing.config.options.mypy_extra_test_paths:
if extra_dir and os.path.isdir(extra_dir):
for d in os.listdir(os.path.join(extra_dir, "incremental")):
if os.path.isdir(os.path.join(path, d)):
files.append(os.path.join(extra_dir, "incremental", d))
return files
@testing.add_to_marker.mypy
class MypyPluginTest(fixtures.TestBase):
__tags__ = ("mypy",)
__requires__ = ("no_sqlalchemy2_stubs",)
@testing.fixture(scope="function")
def per_func_cachedir(self):
for item in self._cachedir():
yield item
@testing.fixture(scope="class")
def cachedir(self):
for item in self._cachedir():
yield item
def _cachedir(self):
sqlalchemy_path = os.path.dirname(os.path.dirname(sqlalchemy.__file__))
        # For a pytest run from my local ./lib/, I need mypy_path.
        # For a tox run where SQLAlchemy is in site-packages, mypy complains
        # "../python3.10/site-packages is in the MYPYPATH. Please remove it."
        # Previously, when we used sqlalchemy2-stubs, it was simply installed
        # as a dependency, which is why mypy_path wasn't needed then, but I
        # like to be able to run the test suite from the local ./lib/ as well.
if "site-packages" not in sqlalchemy_path:
mypy_path = f"mypy_path={sqlalchemy_path}"
else:
mypy_path = ""
with tempfile.TemporaryDirectory() as cachedir:
with open(
os.path.join(cachedir, "sqla_mypy_config.cfg"), "w"
) as config_file:
config_file.write(
f"""
[mypy]\n
plugins = sqlalchemy.ext.mypy.plugin\n
show_error_codes = True\n
{mypy_path}
disable_error_code = no-untyped-call
[mypy-sqlalchemy.*]
ignore_errors = True
"""
)
with open(
os.path.join(cachedir, "plain_mypy_config.cfg"), "w"
) as config_file:
config_file.write(
f"""
[mypy]\n
show_error_codes = True\n
{mypy_path}
disable_error_code = var-annotated,no-untyped-call
[mypy-sqlalchemy.*]
ignore_errors = True
"""
)
yield cachedir
@testing.fixture()
def mypy_runner(self, cachedir):
from mypy import api
def run(path, use_plugin=True, incremental=False):
args = [
"--strict",
"--raise-exceptions",
"--cache-dir",
cachedir,
"--config-file",
os.path.join(
cachedir,
"sqla_mypy_config.cfg"
if use_plugin
else "plain_mypy_config.cfg",
),
]
args.append(path)
return api.run(args)
return run
@testing.combinations(
*[
(pathname, testing.exclusions.closed())
for pathname in _incremental_dirs()
],
argnames="pathname",
)
@testing.requires.patch_library
def test_incremental(self, mypy_runner, per_func_cachedir, pathname):
import patch
cachedir = per_func_cachedir
dest = os.path.join(cachedir, "mymodel")
os.mkdir(dest)
patches = set()
print("incremental test: %s" % pathname)
for fname in os.listdir(pathname):
if fname.endswith(".py"):
shutil.copy(
os.path.join(pathname, fname), os.path.join(dest, fname)
)
print("copying to: %s" % os.path.join(dest, fname))
elif fname.endswith(".testpatch"):
patches.add(fname)
for patchfile in [None] + sorted(patches):
if patchfile is not None:
print("Applying patchfile %s" % patchfile)
patch_obj = patch.fromfile(os.path.join(pathname, patchfile))
                assert patch_obj.apply(1, dest), (
                    "patchfile %s failed" % patchfile
                )
print("running mypy against %s" % dest)
result = mypy_runner(
dest,
use_plugin=True,
incremental=True,
)
eq_(
result[2],
0,
msg="Failure after applying patch %s: %s"
% (patchfile, result[0]),
)
@testing.combinations(
*(
cast(
List[Tuple[Any, ...]],
[
("w_plugin", os.path.basename(path), path, True)
for path in _file_combinations("plugin_files")
],
)
+ cast(
List[Tuple[Any, ...]],
[
("plain", os.path.basename(path), path, False)
for path in _file_combinations("plain_files")
],
)
),
argnames="filename,path,use_plugin",
id_="isaa",
)
def test_files(self, mypy_runner, filename, path, use_plugin):
expected_messages = []
expected_re = re.compile(r"\s*# EXPECTED(_MYPY)?(_RE)?(_TYPE)?: (.+)")
py_ver_re = re.compile(r"^#\s*PYTHON_VERSION\s?>=\s?(\d+\.\d+)")
with open(path) as file_:
current_assert_messages = []
for num, line in enumerate(file_, 1):
m = py_ver_re.match(line)
if m:
major, _, minor = m.group(1).partition(".")
if sys.version_info < (int(major), int(minor)):
config.skip_test(
"Requires python >= %s" % (m.group(1))
)
continue
if line.startswith("# NOPLUGINS"):
use_plugin = False
continue
m = expected_re.match(line)
if m:
is_mypy = bool(m.group(1))
is_re = bool(m.group(2))
is_type = bool(m.group(3))
expected_msg = re.sub(r"# noqa ?.*", "", m.group(4))
if is_type:
is_mypy = is_re = True
expected_msg = f'Revealed type is "{expected_msg}"'
current_assert_messages.append(
(is_mypy, is_re, expected_msg.strip())
)
elif current_assert_messages:
expected_messages.extend(
(num, is_mypy, is_re, expected_msg)
for (
is_mypy,
is_re,
expected_msg,
) in current_assert_messages
)
current_assert_messages[:] = []
result = mypy_runner(path, use_plugin=use_plugin)
if expected_messages:
eq_(result[2], 1, msg=result)
output = []
raw_lines = result[0].split("\n")
while raw_lines:
e = raw_lines.pop(0)
if re.match(r".+\.py:\d+: error: .*", e):
output.append(("error", e))
elif re.match(
r".+\.py:\d+: note: +(?:Possible overload|def ).*", e
):
while raw_lines:
ol = raw_lines.pop(0)
if not re.match(r".+\.py:\d+: note: +def \[.*", ol):
break
elif re.match(
r".+\.py:\d+: note: .*(?:perhaps|suggestion)", e, re.I
):
pass
elif re.match(r".+\.py:\d+: note: .*", e):
output.append(("note", e))
for num, is_mypy, is_re, msg in expected_messages:
msg = msg.replace("'", '"')
prefix = "[SQLAlchemy Mypy plugin] " if not is_mypy else ""
for idx, (typ, errmsg) in enumerate(output):
if is_re:
if re.match(
fr".*{filename}\:{num}\: {typ}\: {prefix}{msg}", # noqa E501
errmsg,
):
break
elif (
f"{filename}:{num}: {typ}: {prefix}{msg}"
in errmsg.replace("'", '"')
):
break
else:
continue
del output[idx]
if output:
print("messages from mypy that were not consumed:")
print("\n".join(msg for _, msg in output))
assert False, "errors and/or notes remain, see stdout"
else:
if result[2] != 0:
print(result[0])
eq_(result[2], 0, msg=result)
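    # Illustrative test-file annotation (hypothetical file contents, not part of
    # this suite): the EXPECTED markers parsed by expected_re above precede the
    # statement they describe, e.g.
    #
    #   # EXPECTED_MYPY: Incompatible types in assignment (expression has type "str", variable has type "int")
    #   x: int = "foo"
    #
    #   # EXPECTED_TYPE: builtins.int
    #   reveal_type(x)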
|
sqlalchemy/sqlalchemy
|
test/ext/mypy/test_mypy_plugin_py3k.py
|
Python
|
mit
| 10,474
|
# coding=utf-8
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404, Http404, redirect
from django.views.decorators.csrf import csrf_exempt
def default(request):
return render_to_response('configurator.html', {}, context_instance=RequestContext(request))
|
antofik/Wartech
|
WartechWeb/configurator/views.py
|
Python
|
mit
| 354
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
# Problem URL:
https://www.codewars.com/kata/56747fd5cb988479af000028/train/python
'''
import time
import unittest
class TestCases(unittest.TestCase):
def test1(self):self.assertEqual(get_middle("test"),"es")
def test2(self):self.assertEqual(get_middle("testing"),"t")
def test3(self):self.assertEqual(get_middle("middle"),"dd")
def test4(self):self.assertEqual(get_middle("A"),"A")
def test5(self):self.assertEqual(get_middle("of"),"of")
def get_middle(s):
while len(s) > 2:
s = s[1:-1]
return s
if __name__ == '__main__':
unittest.main()
# Timing test:
# start = time.clock()
# for i in range(100000):
# a = sum_pairs([20, -13, 40], -7)
# b = sum_pairs([20, -13, 40, 23, 122, 492, 324, -245, 58, -132, -49, 942], -7)
# end = time.clock()
# print(end - start)
'''
Reference solutions:
def get_middle(s):
return s[(len(s)-1)/2:len(s)/2+1]
def get_middle(s):
i = (len(s) - 1) // 2
return s[i:-i] or s
'''
|
karchi/codewars_kata
|
已完成/Get the Middle Character.py
|
Python
|
mit
| 1,079
|
#!/usr/bin/env python
# $Id: mergedInputExample.py 545 2012-01-18 06:10:03Z cvermilion $
#----------------------------------------------------------------------
# Copyright (c) 2010-12, Pierre-Antoine Delsart, Kurtis Geerlings, Joey Huston,
# Brian Martin, and Christopher Vermilion
#
#----------------------------------------------------------------------
# This file is part of SpartyJet.
#
# SpartyJet is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# SpartyJet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SpartyJet; if not, write to the Free Software
# Foundation, Inc.:
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#----------------------------------------------------------------------
from spartyjet import *
#===============================================
# Example showing the use of several input files whose events are merged.
# inputExample.py gives an example where the results for each file are separate.
def shortName(path):
from os.path import basename
b = basename(path)
return b.split('.')[0] # take everything up to first period
input = SJ.MultiInput()
fileList = ['../data/J2_clusters_wPile.root', '../data/J2_clusters_wPile2.root']
for f in fileList:
print "Opening: ", f
input_i = createNtupleInputMaker(f, inputprefix='Cluster')
input_i.set_name(shortName(f))
input.addInputMaker(input_i)
builder = SJ.JetBuilder(SJ.INFO)
builder.configure_input(input)
outfile = '../data/output/J2_clusters_wPile_merged.root'
builder.configure_output('SpartyJet_Tree', outfile)
builder.add_default_analysis(SJ.FastJet.FastJetFinder('Kt4', fastjet.kt_algorithm, 0.4))
#builder.process_events() # all events or use -1
builder.process_events(20) # first 20 events (will go through files in order until 20 events reached)
# Save this script in the ROOT file (needs to go after process_events or it
# gets over-written!)
writeCurrentFile(outfile)
|
mickypaganini/SSI2016-jet-clustering
|
spartyjet-4.0.2_mac/examples_py/mergedInputExample.py
|
Python
|
mit
| 2,382
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class SecurityRule(SubResource):
"""Network security rule.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:param description: A description for this rule. Restricted to 140 chars.
:type description: str
:param protocol: Required. Network protocol this rule applies to. Possible
values are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp',
'*'
:type protocol: str or
~azure.mgmt.network.v2017_09_01.models.SecurityRuleProtocol
:param source_port_range: The source port or range. Integer or range
     between 0 and 65535. Asterisk '*' can also be used to match all ports.
:type source_port_range: str
:param destination_port_range: The destination port or range. Integer or
     range between 0 and 65535. Asterisk '*' can also be used to match all
ports.
:type destination_port_range: str
    :param source_address_prefix: The CIDR or source IP range. Asterisk '*' can
also be used to match all source IPs. Default tags such as
'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
this is an ingress rule, specifies where network traffic originates from.
:type source_address_prefix: str
:param source_address_prefixes: The CIDR or source IP ranges.
:type source_address_prefixes: list[str]
:param source_application_security_groups: The application security group
specified as source.
:type source_application_security_groups:
list[~azure.mgmt.network.v2017_09_01.models.ApplicationSecurityGroup]
:param destination_address_prefix: The destination address prefix. CIDR or
     destination IP range. Asterisk '*' can also be used to match all
     destination IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
'Internet' can also be used.
:type destination_address_prefix: str
:param destination_address_prefixes: The destination address prefixes.
CIDR or destination IP ranges.
:type destination_address_prefixes: list[str]
:param destination_application_security_groups: The application security
group specified as destination.
:type destination_application_security_groups:
list[~azure.mgmt.network.v2017_09_01.models.ApplicationSecurityGroup]
:param source_port_ranges: The source port ranges.
:type source_port_ranges: list[str]
:param destination_port_ranges: The destination port ranges.
:type destination_port_ranges: list[str]
:param access: Required. The network traffic is allowed or denied.
Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow',
'Deny'
:type access: str or
~azure.mgmt.network.v2017_09_01.models.SecurityRuleAccess
:param priority: The priority of the rule. The value can be between 100
and 4096. The priority number must be unique for each rule in the
collection. The lower the priority number, the higher the priority of the
rule.
:type priority: int
:param direction: Required. The direction of the rule. The direction
     specifies whether the rule will be evaluated on incoming or outgoing traffic.
Possible values are: 'Inbound' and 'Outbound'. Possible values include:
'Inbound', 'Outbound'
:type direction: str or
~azure.mgmt.network.v2017_09_01.models.SecurityRuleDirection
:param provisioning_state: The provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'protocol': {'required': True},
'access': {'required': True},
'direction': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
'source_address_prefixes': {'key': 'properties.sourceAddressPrefixes', 'type': '[str]'},
'source_application_security_groups': {'key': 'properties.sourceApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
'destination_address_prefixes': {'key': 'properties.destinationAddressPrefixes', 'type': '[str]'},
'destination_application_security_groups': {'key': 'properties.destinationApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
'source_port_ranges': {'key': 'properties.sourcePortRanges', 'type': '[str]'},
'destination_port_ranges': {'key': 'properties.destinationPortRanges', 'type': '[str]'},
'access': {'key': 'properties.access', 'type': 'str'},
'priority': {'key': 'properties.priority', 'type': 'int'},
'direction': {'key': 'properties.direction', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, protocol, access, direction, id: str=None, description: str=None, source_port_range: str=None, destination_port_range: str=None, source_address_prefix: str=None, source_address_prefixes=None, source_application_security_groups=None, destination_address_prefix: str=None, destination_address_prefixes=None, destination_application_security_groups=None, source_port_ranges=None, destination_port_ranges=None, priority: int=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(SecurityRule, self).__init__(id=id, **kwargs)
self.description = description
self.protocol = protocol
self.source_port_range = source_port_range
self.destination_port_range = destination_port_range
self.source_address_prefix = source_address_prefix
self.source_address_prefixes = source_address_prefixes
self.source_application_security_groups = source_application_security_groups
self.destination_address_prefix = destination_address_prefix
self.destination_address_prefixes = destination_address_prefixes
self.destination_application_security_groups = destination_application_security_groups
self.source_port_ranges = source_port_ranges
self.destination_port_ranges = destination_port_ranges
self.access = access
self.priority = priority
self.direction = direction
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
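# Illustrative construction (not part of the generated SDK code): protocol,
# access and direction are the required keyword-only parameters validated above.
#
#   rule = SecurityRule(
#       protocol='Tcp', access='Allow', direction='Inbound',
#       source_address_prefix='*', source_port_range='*',
#       destination_address_prefix='*', destination_port_range='22',
#       priority=100, name='allow-ssh',
#   )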
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/security_rule_py3.py
|
Python
|
mit
| 7,664
|
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg (hagberg@lanl.gov)
# Pieter Swart (swart@lanl.gov)
# Sasha Gutfraind (ag362@cornell.edu)
"""Degree centrality measures."""
import networkx as nx
from networkx.utils.decorators import not_implemented_for
__all__ = ['degree_centrality',
'in_degree_centrality',
'out_degree_centrality']
def degree_centrality(G):
"""Compute the degree centrality for nodes.
The degree centrality for a node v is the fraction of nodes it
is connected to.
Parameters
----------
G : graph
A networkx graph
Returns
-------
nodes : dictionary
Dictionary of nodes with degree centrality as the value.
See Also
--------
betweenness_centrality, load_centrality, eigenvector_centrality
Notes
-----
The degree centrality values are normalized by dividing by the maximum
possible degree in a simple graph n-1 where n is the number of nodes in G.
For multigraphs or graphs with self loops the maximum degree might
be higher than n-1 and values of degree centrality greater than 1
are possible.
"""
if len(G) <= 1:
return {n: 1 for n in G}
s = 1.0 / (len(G) - 1.0)
centrality = {n: d * s for n, d in G.degree()}
return centrality
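# Illustrative example (not part of the NetworkX source): in a path graph
# 0-1-2-3 the degrees are 1, 2, 2, 1 and n - 1 == 3, so:
#
#   >>> G = nx.path_graph(4)
#   >>> degree_centrality(G)          # doctest: +SKIP
#   {0: 0.333..., 1: 0.666..., 2: 0.666..., 3: 0.333...}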
@not_implemented_for('undirected')
def in_degree_centrality(G):
"""Compute the in-degree centrality for nodes.
The in-degree centrality for a node v is the fraction of nodes its
incoming edges are connected to.
Parameters
----------
G : graph
A NetworkX graph
Returns
-------
nodes : dictionary
Dictionary of nodes with in-degree centrality as values.
Raises
------
NetworkXNotImplemented:
If G is undirected.
See Also
--------
degree_centrality, out_degree_centrality
Notes
-----
The degree centrality values are normalized by dividing by the maximum
possible degree in a simple graph n-1 where n is the number of nodes in G.
For multigraphs or graphs with self loops the maximum degree might
be higher than n-1 and values of degree centrality greater than 1
are possible.
"""
if len(G) <= 1:
return {n: 1 for n in G}
s = 1.0 / (len(G) - 1.0)
centrality = {n: d * s for n, d in G.in_degree()}
return centrality
@not_implemented_for('undirected')
def out_degree_centrality(G):
"""Compute the out-degree centrality for nodes.
The out-degree centrality for a node v is the fraction of nodes its
outgoing edges are connected to.
Parameters
----------
G : graph
A NetworkX graph
Returns
-------
nodes : dictionary
Dictionary of nodes with out-degree centrality as values.
Raises
------
NetworkXNotImplemented:
If G is undirected.
See Also
--------
degree_centrality, in_degree_centrality
Notes
-----
The degree centrality values are normalized by dividing by the maximum
possible degree in a simple graph n-1 where n is the number of nodes in G.
For multigraphs or graphs with self loops the maximum degree might
be higher than n-1 and values of degree centrality greater than 1
are possible.
"""
if len(G) <= 1:
return {n: 1 for n in G}
s = 1.0 / (len(G) - 1.0)
centrality = {n: d * s for n, d in G.out_degree()}
return centrality
|
sserrot/champion_relationships
|
venv/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py
|
Python
|
mit
| 3,604
|
import datetime
import logging
import os
import re
import types
from copy import copy
from django.conf import settings
from django.template.defaultfilters import linebreaks as django_linebreaks,\
escapejs as django_escapejs, pluralize as django_pluralize,\
date as django_date
from django.utils.encoding import force_text
from django.utils.timesince import timesince
from django_jinja import library
from jinja2 import Markup
from sorl.thumbnail.shortcuts import get_thumbnail
from typogrify.filters import typogrify as dj_typogrify,\
smartypants as dj_smartypants
from source.articles.models import Article
logger = logging.getLogger('base.helpers')
@library.filter
def typogrify(string):
return Markup(dj_typogrify(string))
@library.filter
def smartypants(string):
return Markup(dj_smartypants(string))
@library.filter
def linebreaks(string):
return django_linebreaks(string)
@library.filter
def escapejs(string):
return django_escapejs(string)
@library.global_function
def get_timestamp():
return datetime.datetime.now()
@library.filter
def dj_pluralize(string, arg='s'):
return django_pluralize(string, arg)
@library.global_function
def dj_date(value, format_string):
return django_date(value, format_string)
@library.global_function
def thumbnail(source, *args, **kwargs):
im = get_thumbnail(source, *args, **kwargs)
return im.name
@library.filter
def dj_intcomma(value):
"""
https://github.com/django/django/blob/master/django/contrib/humanize/templatetags/humanize.py
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = force_text(value)
    new = re.sub(r"^(-?\d+)(\d{3})", r'\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return dj_intcomma(new)
@library.filter
def simple_timesince(value):
now = datetime.datetime.now()
try:
difference = now - value
except:
return value
if difference <= datetime.timedelta(minutes=1):
return 'just now'
return '%(time)s ago' % {'time': timesince(value).split(', ')[0]}
@library.filter
def simple_datesince(value):
today = datetime.datetime.now().date()
try:
difference = today - value
except:
return value
if difference < datetime.timedelta(days=1):
return 'today'
return '%(days)s ago' % {'days': timesince(value).split(', ')[0]}
# FORM RENDERING
# https://github.com/kmike/django-widget-tweaks/blob/master/widget_tweaks/templatetags/widget_tweaks.py
def _process_field_attributes(field, attr, process):
# split attribute name and value from 'attr:value' string
params = attr.split(':', 1)
attribute = params[0]
value = params[1] if len(params) == 2 else ''
field = copy(field)
# decorate field.as_widget method with updated attributes
old_as_widget = field.as_widget
def as_widget(self, widget=None, attrs=None, only_initial=False):
attrs = attrs or {}
process(widget or self.field.widget, attrs, attribute, value)
html = old_as_widget(widget, attrs, only_initial)
self.as_widget = old_as_widget
return html
field.as_widget = types.MethodType(as_widget, field)
return field
@library.filter
def append_attr(field, attr):
def process(widget, attrs, attribute, value):
if attrs.get(attribute):
attrs[attribute] += ' ' + value
elif widget.attrs.get(attribute):
attrs[attribute] = widget.attrs[attribute] + ' ' + value
else:
attrs[attribute] = value
return _process_field_attributes(field, attr, process)
@library.filter
def add_class(field, css_class):
return append_attr(field, 'class:' + css_class)
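# Illustrative template usage (the field name is assumed, not part of the
# original source); these are django-jinja filters, so in a Jinja2 template:
#
#   {{ form.email|add_class('input-lg') }}
#   {{ form.email|append_attr('placeholder:you@example.com') }}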
@library.global_function
def get_random_articles(num, recent_days=None):
random_articles = Article.live_objects.filter(show_in_lists=True)
if recent_days:
cutoff = datetime.datetime.today() - datetime.timedelta(recent_days)
if random_articles.filter(pubdate__gte=cutoff).count() > 0:
random_articles = random_articles.filter(pubdate__gte=cutoff)
random_articles = random_articles.order_by('?')
try:
if num == 1:
return random_articles[0]
else:
return random_articles[:num]
except:
return None
|
OpenNews/opennews-source
|
source/base/helpers.py
|
Python
|
mit
| 4,414
|
# coding: utf-8
from __future__ import absolute_import
import application
app = application.create_app()
|
PostmonAPI/correios.postmon.com.br
|
main.py
|
Python
|
mit
| 107
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-02 12:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("Artifactorial", "0002_use_biginteger_for_quota")]
operations = [
migrations.AlterField(
model_name="directory",
name="path",
field=models.CharField(max_length=300, unique=True),
)
]
|
ivoire/Artifactorial
|
Artifactorial/migrations/0003_make_directory_path_unique.py
|
Python
|
mit
| 422
|
def difftest():
a = {1, 2, 3, 4, 5}
b = {5, 6, 7, 8, 9, 10}
print(a)
print(b)
c = a.difference(b)
print(c)
if __name__ == '__main__':
    # difference() (like union()) is non-destructive, so the result is lost unless it is assigned to another object.
difftest()
|
ekazyam/study
|
パーフェクトPython/pp_088_difference差集合.py
|
Python
|
mit
| 285
|
__author__ = 'casper'
|
caspervg/geontology
|
tests/__init__.py
|
Python
|
mit
| 22
|
'''
Created on May 16, 2013
@author: aryaveer
'''
from django.forms.util import ErrorList
from django import forms
from courses.models import Role
class DivErrorList(ErrorList):
def __unicode__(self):
return self.as_divs()
def as_divs(self):
if not self: return u''
return u'<div class="alert alert-error">%s</div>' % ''.join([u'<div class="error">%s</div>' % e for e in self])
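    # Illustrative output (not part of the original source): for an error list
    # containing ['This field is required.'], as_divs() renders
    #   <div class="alert alert-error"><div class="error">This field is required.</div></div>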
class RoleForm(forms.Form):
roles = (
('S','Student'),
('T', 'Teacher')
)
role = forms.ChoiceField(choices=roles)
def __init__(self, *args, **kwargs):
kwargs['error_class']=DivErrorList
super(RoleForm, self).__init__(*args, **kwargs)
class JoinCourseForm(forms.Form):
choices = [('S', 'Student'),]
name_or_id = forms.CharField(label="Course Name or course code")
role = forms.ChoiceField(
choices=choices,
label="Join as",
)
def __init__(self, *args, **kwargs):
kwargs['error_class']=DivErrorList
super(JoinCourseForm, self).__init__(*args, **kwargs)
    def clean_name_or_id(self):
        # self.course_class and self.current_user are not defined on the form;
        # they are expected to be attached externally (e.g. by the view) before
        # validation runs.
        # form field has been cleaned and value is stored in self.cleaned_data
        data = self.cleaned_data['name_or_id']
try:
self.course = self.course_class.objects.get(name=data)
except self.course_class.DoesNotExist:
raise forms.ValidationError(["This course does not exist."])
return data
def clean(self):
cleaned_data = super(JoinCourseForm, self).clean()
if self.errors: return cleaned_data
has_joined = Role.objects.filter(user=self.current_user, course=self.course, role=cleaned_data.get('role')).count()
if bool(has_joined):
self._errors['name_or_id'] = self.error_class(["You have already joined this course"])
elif self.current_user == self.course.creater:
self._errors['name_or_id'] = self.error_class(["You are instructor of this course. You cannot join"])
return cleaned_data
|
kartikshah1/Test
|
courses/forms.py
|
Python
|
mit
| 2,090
|
#pragma repy
if callfunc == "initialize":
# first, create the file, we'll read...
fobj = open("junk_test.out","w")
fobj.close()
# Now the actual test...
try:
fobj = open("junk_test.out", 'r')
fobj.flush()
# flush on a read-only file should be a no-op
finally:
fobj.close()
|
sburnett/seattle
|
repy/tests/ut_repytests_testfileflushnooponreadonly.py
|
Python
|
mit
| 303
|
import asyncio
import logging
import random
from typing import Optional
import sortedcontainers
from gear import Database
from hailtop import aiotools
from hailtop.utils import (
AsyncWorkerPool,
Notice,
WaitableSharedPool,
periodically_call,
retry_long_running,
run_if_changed,
secret_alnum_string,
time_msecs,
)
from ...batch_configuration import STANDING_WORKER_MAX_IDLE_TIME_MSECS
from ...inst_coll_config import PoolConfig
from ...utils import Box, ExceededSharesCounter
from ..instance import Instance
from ..job import schedule_job
from ..resource_manager import CloudResourceManager
from .base import InstanceCollection, InstanceCollectionManager
log = logging.getLogger('pool')
class Pool(InstanceCollection):
@staticmethod
async def create(
app,
db: Database, # BORROWED
inst_coll_manager: InstanceCollectionManager,
resource_manager: CloudResourceManager,
machine_name_prefix: str,
config: PoolConfig,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager,
) -> 'Pool':
pool = Pool(
app, db, inst_coll_manager, resource_manager, machine_name_prefix, config, async_worker_pool, task_manager
)
log.info(f'initializing {pool}')
async for record in db.select_and_fetchall(
'''
SELECT instances.*, instances_free_cores_mcpu.free_cores_mcpu
FROM instances
INNER JOIN instances_free_cores_mcpu
ON instances.name = instances_free_cores_mcpu.name
WHERE removed = 0 AND inst_coll = %s;
''',
(pool.name,),
):
pool.add_instance(Instance.from_record(app, pool, record))
return pool
def __init__(
self,
app,
db: Database, # BORROWED
inst_coll_manager: InstanceCollectionManager,
resource_manager: CloudResourceManager,
machine_name_prefix: str,
config: PoolConfig,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager, # BORROWED
):
super().__init__(
db,
inst_coll_manager,
resource_manager,
config.cloud,
config.name,
machine_name_prefix,
is_pool=True,
max_instances=config.max_instances,
max_live_instances=config.max_live_instances,
task_manager=task_manager,
)
self.app = app
self.inst_coll_manager = inst_coll_manager
global_scheduler_state_changed: Notice = self.app['scheduler_state_changed']
self.scheduler_state_changed = global_scheduler_state_changed.subscribe()
self.scheduler = PoolScheduler(self.app, self, async_worker_pool, task_manager)
self.healthy_instances_by_free_cores = sortedcontainers.SortedSet(key=lambda instance: instance.free_cores_mcpu)
self.worker_type = config.worker_type
self.worker_cores = config.worker_cores
self.worker_local_ssd_data_disk = config.worker_local_ssd_data_disk
self.worker_external_ssd_data_disk_size_gb = config.worker_external_ssd_data_disk_size_gb
self.enable_standing_worker = config.enable_standing_worker
self.standing_worker_cores = config.standing_worker_cores
self.boot_disk_size_gb = config.boot_disk_size_gb
self.data_disk_size_gb = config.data_disk_size_gb
self.data_disk_size_standing_gb = config.data_disk_size_standing_gb
task_manager.ensure_future(self.control_loop())
@property
def local_ssd_data_disk(self) -> bool:
return self.worker_local_ssd_data_disk
def _default_location(self) -> str:
return self.inst_coll_manager.location_monitor.default_location()
def config(self):
return {
'name': self.name,
'worker_type': self.worker_type,
'worker_cores': self.worker_cores,
'boot_disk_size_gb': self.boot_disk_size_gb,
'worker_local_ssd_data_disk': self.worker_local_ssd_data_disk,
'worker_external_ssd_data_disk_size_gb': self.worker_external_ssd_data_disk_size_gb,
'enable_standing_worker': self.enable_standing_worker,
'standing_worker_cores': self.standing_worker_cores,
'max_instances': self.max_instances,
'max_live_instances': self.max_live_instances,
}
def configure(self, pool_config: PoolConfig):
assert self.name == pool_config.name
assert self.cloud == pool_config.cloud
assert self.worker_type == pool_config.worker_type
self.worker_cores = pool_config.worker_cores
self.worker_local_ssd_data_disk = pool_config.worker_local_ssd_data_disk
self.worker_external_ssd_data_disk_size_gb = pool_config.worker_external_ssd_data_disk_size_gb
self.enable_standing_worker = pool_config.enable_standing_worker
self.standing_worker_cores = pool_config.standing_worker_cores
self.boot_disk_size_gb = pool_config.boot_disk_size_gb
self.data_disk_size_gb = pool_config.data_disk_size_gb
self.data_disk_size_standing_gb = pool_config.data_disk_size_standing_gb
self.max_instances = pool_config.max_instances
self.max_live_instances = pool_config.max_live_instances
def adjust_for_remove_instance(self, instance):
super().adjust_for_remove_instance(instance)
if instance in self.healthy_instances_by_free_cores:
self.healthy_instances_by_free_cores.remove(instance)
def adjust_for_add_instance(self, instance):
super().adjust_for_add_instance(instance)
if instance.state == 'active' and instance.failed_request_count <= 1:
self.healthy_instances_by_free_cores.add(instance)
    def get_instance(self, user, cores_mcpu):
        # Best fit: pick the healthy instance with the fewest free cores that
        # can still hold the job; 'ci' jobs are pinned to the default location.
        i = self.healthy_instances_by_free_cores.bisect_key_left(cores_mcpu)
while i < len(self.healthy_instances_by_free_cores):
instance = self.healthy_instances_by_free_cores[i]
assert cores_mcpu <= instance.free_cores_mcpu
if user != 'ci' or (user == 'ci' and instance.location == self._default_location()):
return instance
i += 1
return None
async def create_instance(
self,
cores: int,
data_disk_size_gb: int,
max_idle_time_msecs: Optional[int] = None,
location: Optional[str] = None,
):
machine_type = self.resource_manager.machine_type(cores, self.worker_type, self.worker_local_ssd_data_disk)
_, _ = await self._create_instance(
app=self.app,
cores=cores,
machine_type=machine_type,
job_private=False,
location=location,
preemptible=True,
max_idle_time_msecs=max_idle_time_msecs,
local_ssd_data_disk=self.worker_local_ssd_data_disk,
data_disk_size_gb=data_disk_size_gb,
boot_disk_size_gb=self.boot_disk_size_gb,
)
async def create_instances_from_ready_cores(self, ready_cores_mcpu, location=None):
n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
if location is None:
live_free_cores_mcpu = self.live_free_cores_mcpu
else:
live_free_cores_mcpu = self.live_free_cores_mcpu_by_location[location]
        # Ceiling division: the number of whole workers needed to cover the
        # ready cores that the currently live free cores cannot absorb.
        instances_needed = (ready_cores_mcpu - live_free_cores_mcpu + (self.worker_cores * 1000) - 1) // (
            self.worker_cores * 1000
        )
instances_needed = min(
instances_needed,
self.max_live_instances - n_live_instances,
self.max_instances - self.n_instances,
# 20 queries/s; our GCE long-run quota
300,
# n * 16 cores / 15s = excess_scheduling_rate/s = 10/s => n ~= 10
10,
)
if instances_needed > 0:
log.info(f'creating {instances_needed} new instances')
# parallelism will be bounded by thread pool
await asyncio.gather(
*[
self.create_instance(
cores=self.worker_cores, data_disk_size_gb=self.data_disk_size_gb, location=location
)
for _ in range(instances_needed)
]
)
async def create_instances(self):
if self.app['frozen']:
log.info(f'not creating instances for {self}; batch is frozen')
return
ready_cores_mcpu_per_user = self.db.select_and_fetchall(
'''
SELECT user,
CAST(COALESCE(SUM(ready_cores_mcpu), 0) AS SIGNED) AS ready_cores_mcpu
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user;
''',
(self.name,),
)
if ready_cores_mcpu_per_user is None:
ready_cores_mcpu_per_user = {}
else:
ready_cores_mcpu_per_user = {r['user']: r['ready_cores_mcpu'] async for r in ready_cores_mcpu_per_user}
ready_cores_mcpu = sum(ready_cores_mcpu_per_user.values())
free_cores_mcpu = sum([worker.free_cores_mcpu for worker in self.healthy_instances_by_free_cores])
free_cores = free_cores_mcpu / 1000
log.info(
f'{self} n_instances {self.n_instances} {self.n_instances_by_state}'
f' free_cores {free_cores} live_free_cores {self.live_free_cores_mcpu / 1000}'
f' ready_cores {ready_cores_mcpu / 1000}'
)
if ready_cores_mcpu > 0 and free_cores < 500:
await self.create_instances_from_ready_cores(ready_cores_mcpu)
default_location = self._default_location()
ci_ready_cores_mcpu = ready_cores_mcpu_per_user.get('ci', 0)
if ci_ready_cores_mcpu > 0 and self.live_free_cores_mcpu_by_location[default_location] == 0:
await self.create_instances_from_ready_cores(ci_ready_cores_mcpu, location=default_location)
n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
if self.enable_standing_worker and n_live_instances == 0 and self.max_instances > 0:
await self.create_instance(
cores=self.standing_worker_cores,
data_disk_size_gb=self.data_disk_size_standing_gb,
max_idle_time_msecs=STANDING_WORKER_MAX_IDLE_TIME_MSECS,
)
async def control_loop(self):
await periodically_call(15, self.create_instances)
def __str__(self):
return f'pool {self.name}'
class PoolScheduler:
def __init__(
self,
app,
pool: Pool,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager, # BORROWED
):
self.app = app
self.scheduler_state_changed = pool.scheduler_state_changed
self.db: Database = app['db']
self.pool = pool
self.async_worker_pool = async_worker_pool
self.exceeded_shares_counter = ExceededSharesCounter()
task_manager.ensure_future(
retry_long_running('schedule_loop', run_if_changed, self.scheduler_state_changed, self.schedule_loop_body)
)
async def compute_fair_share(self):
free_cores_mcpu = sum([worker.free_cores_mcpu for worker in self.pool.healthy_instances_by_free_cores])
user_running_cores_mcpu = {}
user_total_cores_mcpu = {}
result = {}
pending_users_by_running_cores = sortedcontainers.SortedSet(key=lambda user: user_running_cores_mcpu[user])
allocating_users_by_total_cores = sortedcontainers.SortedSet(key=lambda user: user_total_cores_mcpu[user])
records = self.db.execute_and_fetchall(
'''
SELECT user,
CAST(COALESCE(SUM(n_ready_jobs), 0) AS SIGNED) AS n_ready_jobs,
CAST(COALESCE(SUM(ready_cores_mcpu), 0) AS SIGNED) AS ready_cores_mcpu,
CAST(COALESCE(SUM(n_running_jobs), 0) AS SIGNED) AS n_running_jobs,
CAST(COALESCE(SUM(running_cores_mcpu), 0) AS SIGNED) AS running_cores_mcpu
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user
HAVING n_ready_jobs + n_running_jobs > 0;
''',
(self.pool.name,),
"compute_fair_share",
)
async for record in records:
user = record['user']
user_running_cores_mcpu[user] = record['running_cores_mcpu']
user_total_cores_mcpu[user] = record['running_cores_mcpu'] + record['ready_cores_mcpu']
pending_users_by_running_cores.add(user)
record['allocated_cores_mcpu'] = 0
result[user] = record
def allocate_cores(user, mark):
result[user]['allocated_cores_mcpu'] = int(mark - user_running_cores_mcpu[user] + 0.5)
mark = 0
while free_cores_mcpu > 0 and (pending_users_by_running_cores or allocating_users_by_total_cores):
lowest_running = None
lowest_total = None
if pending_users_by_running_cores:
lowest_running_user = pending_users_by_running_cores[0]
lowest_running = user_running_cores_mcpu[lowest_running_user]
if lowest_running == mark:
pending_users_by_running_cores.remove(lowest_running_user)
allocating_users_by_total_cores.add(lowest_running_user)
continue
if allocating_users_by_total_cores:
lowest_total_user = allocating_users_by_total_cores[0]
lowest_total = user_total_cores_mcpu[lowest_total_user]
if lowest_total == mark:
allocating_users_by_total_cores.remove(lowest_total_user)
allocate_cores(lowest_total_user, mark)
continue
allocation = min([c for c in [lowest_running, lowest_total] if c is not None])
n_allocating_users = len(allocating_users_by_total_cores)
cores_to_allocate = n_allocating_users * (allocation - mark)
if cores_to_allocate > free_cores_mcpu:
mark += int(free_cores_mcpu / n_allocating_users + 0.5)
free_cores_mcpu = 0
break
mark = allocation
free_cores_mcpu -= cores_to_allocate
for user in allocating_users_by_total_cores:
allocate_cores(user, mark)
return result
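    # Illustrative walk-through of the fair-share loop above (numbers assumed,
    # not part of the original source): with 6000 free mcpu, user A running
    # 1000 / ready 8000 and user B running 3000 / ready 1000, the water line
    # rises until B is capped at its total demand (4000 mcpu, i.e. 1000 mcpu
    # allocated) and the remaining free cores go to A (5000 mcpu allocated),
    # bringing A's running + allocated total to 6000 mcpu.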
async def schedule_loop_body(self):
if self.app['frozen']:
log.info(f'not scheduling any jobs for {self.pool}; batch is frozen')
return True
log.info(f'schedule {self.pool}: starting')
start = time_msecs()
n_scheduled = 0
user_resources = await self.compute_fair_share()
total = sum(resources['allocated_cores_mcpu'] for resources in user_resources.values())
if not total:
log.info(f'schedule {self.pool}: no allocated cores')
should_wait = True
return should_wait
        # Each user gets roughly 300 scheduling slots, split in proportion to
        # their allocated cores, with a floor of 20 so small users still progress.
        user_share = {
            user: max(int(300 * resources['allocated_cores_mcpu'] / total + 0.5), 20)
            for user, resources in user_resources.items()
        }
async def user_runnable_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT batches.id, batches_cancelled.id IS NOT NULL AS cancelled, userdata, user, format_version
FROM batches
LEFT JOIN batches_cancelled
ON batches.id = batches_cancelled.id
WHERE user = %s AND `state` = 'running';
''',
(user,),
"user_runnable_jobs__select_running_batches",
):
async for record in self.db.select_and_fetchall(
'''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_inst_coll_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 1 AND inst_coll = %s
LIMIT %s;
''',
(batch['id'], self.pool.name, remaining.value),
"user_runnable_jobs__select_ready_always_run_jobs",
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
if not batch['cancelled']:
async for record in self.db.select_and_fetchall(
'''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0 AND inst_coll = %s AND cancelled = 0
LIMIT %s;
''',
(batch['id'], self.pool.name, remaining.value),
"user_runnable_jobs__select_ready_jobs_batch_not_cancelled",
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, resources in user_resources.items():
allocated_cores_mcpu = resources['allocated_cores_mcpu']
if allocated_cores_mcpu == 0:
continue
scheduled_cores_mcpu = 0
share = user_share[user]
remaining = Box(share)
async for record in user_runnable_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
attempt_id = secret_alnum_string(6)
record['attempt_id'] = attempt_id
if scheduled_cores_mcpu + record['cores_mcpu'] > allocated_cores_mcpu:
if random.random() > self.exceeded_shares_counter.rate():
self.exceeded_shares_counter.push(True)
self.scheduler_state_changed.set()
break
self.exceeded_shares_counter.push(False)
instance = self.pool.get_instance(user, record['cores_mcpu'])
if instance:
instance.adjust_free_cores_in_memory(-record['cores_mcpu'])
scheduled_cores_mcpu += record['cores_mcpu']
n_scheduled += 1
async def schedule_with_error_handling(app, record, id, instance):
try:
await schedule_job(app, record, instance)
except Exception:
log.info(f'scheduling job {id} on {instance} for {self.pool}', exc_info=True)
await waitable_pool.call(schedule_with_error_handling, self.app, record, id, instance)
remaining.value -= 1
if remaining.value <= 0:
should_wait = False
break
await waitable_pool.wait()
end = time_msecs()
log.info(f'schedule: attempted to schedule {n_scheduled} jobs in {end - start}ms for {self.pool}')
return should_wait
|
hail-is/hail
|
batch/batch/driver/instance_collection/pool.py
|
Python
|
mit
| 19,341
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-04 22:35
from __future__ import unicode_literals
from django.db import migrations
from coordinators.auth_migration_utils import get_coordinators_group, get_permissions
def add_campaignlocationshiftparticipation_rights(apps, schema_editor):
coordinators_group = get_coordinators_group(apps)
permissions = get_permissions(apps,
('campaigns', 'campaignlocationshiftparticipation',
('add', 'change', 'delete')))
coordinators_group.permissions.add(*permissions)
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('campaigns', '0014_campaignlocationshiftparticipation'),
('coordinators', '0002_auto_20161127_1403'),
]
operations = [
migrations.RunPython(add_campaignlocationshiftparticipation_rights),
]
|
mrts/foodbank-campaign
|
src/coordinators/migrations/0003_auto_20161205_0035.py
|
Python
|
mit
| 890
|
"""
Tools for modeling unit generators (UGens).
"""
from .basic import Mix, MulAdd, Sum3, Sum4
from .beq import (
BAllPass,
BBandPass,
BBandStop,
BHiCut,
BHiPass,
BHiShelf,
BLowCut,
BLowPass,
BLowShelf,
BPeakEQ,
)
from .bufio import BufRd, BufWr, ClearBuf, LocalBuf, MaxLocalBufs, PlayBuf, RecordBuf
from .chaos import (
CuspL,
CuspN,
FBSineC,
FBSineL,
FBSineN,
GbmanL,
GbmanN,
HenonC,
HenonL,
HenonN,
LatoocarfianC,
LatoocarfianL,
LatoocarfianN,
LinCongC,
LinCongL,
LinCongN,
LorenzL,
QuadC,
QuadL,
QuadN,
StandardL,
StandardN,
)
from .convolution import Convolution, Convolution2, Convolution2L, Convolution3
from .delay import (
AllpassC,
AllpassL,
AllpassN,
BufAllpassC,
BufAllpassL,
BufAllpassN,
BufCombC,
BufCombL,
BufCombN,
BufDelayC,
BufDelayL,
BufDelayN,
CombC,
CombL,
CombN,
DelTapRd,
DelTapWr,
Delay1,
Delay2,
DelayC,
DelayL,
DelayN,
)
from .demand import (
DUGen,
Dbrown,
Dbufrd,
Dbufwr,
Demand,
DemandEnvGen,
Dgeom,
Dibrown,
Diwhite,
Drand,
Dreset,
Dseq,
Dser,
Dseries,
Dshuf,
Dstutter,
Dswitch,
Dswitch1,
Dunique,
Duty,
Dwhite,
Dwrand,
Dxrand,
)
from .diskio import DiskIn, DiskOut, VDiskIn
from .dynamics import Amplitude, Compander, CompanderD, Limiter, Normalizer
from .envelopes import (
Done,
EnvGen,
Free,
FreeSelf,
FreeSelfWhenDone,
Linen,
Pause,
PauseSelf,
PauseSelfWhenDone,
)
from .ffsinosc import Blip, FSinOsc, Klank, Pulse, Saw
from .filters import (
APF,
BPF,
BPZ2,
BRF,
BRZ2,
FOS,
HPF,
HPZ1,
HPZ2,
LPF,
LPZ1,
LPZ2,
RHPF,
RLPF,
SOS,
Changed,
Decay,
Decay2,
DetectSilence,
Filter,
Formlet,
Integrator,
Lag,
Lag2,
Lag2UD,
Lag3,
Lag3UD,
LagUD,
LeakDC,
Median,
MidEQ,
MoogFF,
OnePole,
OneZero,
Ramp,
Ringz,
Slew,
Slope,
TwoPole,
TwoZero,
)
from .gendyn import Gendy1, Gendy2, Gendy3
from .granular import GrainBuf, GrainIn, PitchShift, Warp1
from .hilbert import FreqShift, Hilbert, HilbertFIR
from .info import (
BlockSize,
BufChannels,
BufDur,
BufFrames,
BufRateScale,
BufSampleRate,
BufSamples,
ControlDur,
ControlRate,
NodeID,
NumAudioBuses,
NumBuffers,
NumControlBuses,
NumInputBuses,
NumOutputBuses,
NumRunningSynths,
RadiansPerSample,
SampleDur,
SampleRate,
SubsampleOffset,
)
from .inout import (
In,
InFeedback,
LocalIn,
LocalOut,
OffsetOut,
Out,
ReplaceOut,
SoundIn,
XOut,
)
from .lines import A2K, DC, K2A, AmpComp, AmpCompA, LinExp, LinLin, Line, Silence, XLine
from .mac import KeyState, MouseButton, MouseX, MouseY
from .ml import (
MFCC,
BeatTrack,
BeatTrack2,
KeyTrack,
Loudness,
Onsets,
SpecCentroid,
SpecFlatness,
SpecPcile,
)
from .noise import (
BrownNoise,
ClipNoise,
CoinGate,
Crackle,
Dust,
Dust2,
ExpRand,
GrayNoise,
Hasher,
IRand,
LFClipNoise,
LFDClipNoise,
LFDNoise0,
LFDNoise1,
LFDNoise3,
LFNoise0,
LFNoise1,
LFNoise2,
LinRand,
Logistic,
MantissaMask,
NRand,
PinkNoise,
Rand,
RandID,
RandSeed,
TExpRand,
TIRand,
TRand,
TWindex,
WhiteNoise,
)
from .osc import (
COsc,
DegreeToKey,
Impulse,
Index,
LFCub,
LFGauss,
LFPar,
LFPulse,
LFSaw,
LFTri,
Select,
SinOsc,
SyncSaw,
VOsc,
VOsc3,
VarSaw,
Vibrato,
WrapIndex,
)
from .panning import (
Balance2,
BiPanB2,
DecodeB2,
Pan2,
Pan4,
PanAz,
PanB,
PanB2,
Rotate2,
Splay,
XFade2,
)
from .physical import Ball, Pluck, Spring, TBall
from .pv import (
FFT,
IFFT,
PV_Add,
PV_BinScramble,
PV_BinShift,
PV_BinWipe,
PV_BrickWall,
PV_ChainUGen,
PV_ConformalMap,
PV_Conj,
PV_Copy,
PV_CopyPhase,
PV_Diffuser,
PV_Div,
PV_HainsworthFoote,
PV_JensenAndersen,
PV_LocalMax,
PV_MagAbove,
PV_MagBelow,
PV_MagClip,
PV_MagDiv,
PV_MagFreeze,
PV_MagMul,
PV_MagNoise,
PV_MagShift,
PV_MagSmear,
PV_MagSquared,
PV_Max,
PV_Min,
PV_Mul,
PV_PhaseShift,
PV_PhaseShift90,
PV_PhaseShift270,
PV_RandComb,
PV_RandWipe,
PV_RectComb,
PV_RectComb2,
RunningSum,
)
from .reverb import FreeVerb
from .safety import CheckBadValues, Sanitize
from .triggers import (
Clip,
Fold,
Gate,
InRange,
Latch,
LeastChange,
MostChange,
Peak,
PeakFollower,
Phasor,
Poll,
RunningMax,
RunningMin,
Schmidt,
SendPeakRMS,
SendTrig,
Sweep,
TDelay,
ToggleFF,
Trig,
Trig1,
Wrap,
ZeroCrossing,
)
__all__ = [
"A2K",
"APF",
"AllpassC",
"AllpassL",
"AllpassN",
"AmpComp",
"AmpCompA",
"Amplitude",
"BAllPass",
"BBandPass",
"BBandStop",
"BHiCut",
"BHiPass",
"BHiShelf",
"BLowCut",
"BLowPass",
"BLowShelf",
"BPF",
"BPZ2",
"BPeakEQ",
"BRF",
"BRZ2",
"Balance2",
"Ball",
"BeatTrack",
"BeatTrack2",
"BiPanB2",
"Blip",
"BlockSize",
"BrownNoise",
"BufAllpassC",
"BufAllpassL",
"BufAllpassN",
"BufChannels",
"BufCombC",
"BufCombL",
"BufCombN",
"BufDelayC",
"BufDelayL",
"BufDelayN",
"BufDur",
"BufFrames",
"BufRateScale",
"BufRd",
"BufSampleRate",
"BufSamples",
"BufWr",
"COsc",
"Changed",
"CheckBadValues",
"ClearBuf",
"Clip",
"ClipNoise",
"CoinGate",
"CombC",
"CombL",
"CombN",
"Compander",
"CompanderD",
"ControlDur",
"ControlRate",
"Convolution",
"Convolution2",
"Convolution2L",
"Convolution3",
"Crackle",
"CuspL",
"CuspN",
"DC",
"DUGen",
"Dbrown",
"Dbufrd",
"Dbufwr",
"Decay",
"Decay2",
"DecodeB2",
"DegreeToKey",
"DelTapRd",
"DelTapWr",
"Delay1",
"Delay2",
"DelayC",
"DelayL",
"DelayN",
"Demand",
"DemandEnvGen",
"DetectSilence",
"Dgeom",
"Dibrown",
"DiskIn",
"DiskOut",
"Diwhite",
"Done",
"Drand",
"Dreset",
"Dseq",
"Dser",
"Dseries",
"Dshuf",
"Dstutter",
"Dswitch",
"Dswitch1",
"Dunique",
"Dust",
"Dust2",
"Duty",
"Dwhite",
"Dwrand",
"Dxrand",
"EnvGen",
"ExpRand",
"FBSineC",
"FBSineL",
"FBSineN",
"FFT",
"FOS",
"FSinOsc",
"Filter",
"Fold",
"Formlet",
"Free",
"FreeSelf",
"FreeSelfWhenDone",
"FreeVerb",
"FreqShift",
"Gate",
"GbmanL",
"GbmanN",
"Gendy1",
"Gendy2",
"Gendy3",
"GrainBuf",
"GrainIn",
"GrayNoise",
"HPF",
"HPZ1",
"HPZ2",
"Hasher",
"HenonC",
"HenonL",
"HenonN",
"Hilbert",
"HilbertFIR",
"IFFT",
"IRand",
"Impulse",
"In",
"InFeedback",
"InRange",
"Index",
"Integrator",
"K2A",
"KeyState",
"KeyTrack",
"Klank",
"LFClipNoise",
"LFCub",
"LFDClipNoise",
"LFDNoise0",
"LFDNoise1",
"LFDNoise3",
"LFGauss",
"LFNoise0",
"LFNoise1",
"LFNoise2",
"LFPar",
"LFPulse",
"LFSaw",
"LFTri",
"LPF",
"LPZ1",
"LPZ2",
"Lag",
"Lag2",
"Lag2UD",
"Lag3",
"Lag3UD",
"LagUD",
"Latch",
"LatoocarfianC",
"LatoocarfianL",
"LatoocarfianN",
"LeakDC",
"LeastChange",
"Limiter",
"LinCongC",
"LinCongL",
"LinCongN",
"LinExp",
"LinLin",
"LinRand",
"Line",
"Linen",
"LocalBuf",
"LocalIn",
"LocalOut",
"Logistic",
"LorenzL",
"Loudness",
"MFCC",
"MantissaMask",
"MaxLocalBufs",
"Median",
"MidEQ",
"Mix",
"MoogFF",
"MostChange",
"MouseButton",
"MouseX",
"MouseY",
"MulAdd",
"NRand",
"NodeID",
"Normalizer",
"NumAudioBuses",
"NumBuffers",
"NumControlBuses",
"NumInputBuses",
"NumOutputBuses",
"NumRunningSynths",
"OffsetOut",
"OnePole",
"OneZero",
"Onsets",
"Out",
"PV_Add",
"PV_BinScramble",
"PV_BinShift",
"PV_BinWipe",
"PV_BrickWall",
"PV_ChainUGen",
"PV_ConformalMap",
"PV_Conj",
"PV_Copy",
"PV_CopyPhase",
"PV_Diffuser",
"PV_Div",
"PV_HainsworthFoote",
"PV_JensenAndersen",
"PV_LocalMax",
"PV_MagAbove",
"PV_MagBelow",
"PV_MagClip",
"PV_MagDiv",
"PV_MagFreeze",
"PV_MagMul",
"PV_MagNoise",
"PV_MagShift",
"PV_MagSmear",
"PV_MagSquared",
"PV_Max",
"PV_Min",
"PV_Mul",
"PV_PhaseShift",
"PV_PhaseShift270",
"PV_PhaseShift90",
"PV_RandComb",
"PV_RandWipe",
"PV_RectComb",
"PV_RectComb2",
"Pan2",
"Pan4",
"PanAz",
"PanB",
"PanB2",
"Pause",
"PauseSelf",
"PauseSelfWhenDone",
"Peak",
"PeakFollower",
"Phasor",
"PinkNoise",
"PitchShift",
"PlayBuf",
"Pluck",
"Poll",
"Pulse",
"QuadC",
"QuadL",
"QuadN",
"RHPF",
"RLPF",
"RadiansPerSample",
"Ramp",
"Rand",
"RandID",
"RandSeed",
"RecordBuf",
"ReplaceOut",
"Ringz",
"Rotate2",
"RunningMax",
"RunningMin",
"RunningSum",
"SOS",
"SampleDur",
"SampleRate",
"Sanitize",
"Saw",
"Schmidt",
"Select",
"SendPeakRMS",
"SendTrig",
"Silence",
"SinOsc",
"Slew",
"Slope",
"SoundIn",
"SpecCentroid",
"SpecFlatness",
"SpecPcile",
"Splay",
"Spring",
"StandardL",
"StandardN",
"SubsampleOffset",
"Sum3",
"Sum4",
"Sweep",
"SyncSaw",
"TBall",
"TDelay",
"TExpRand",
"TIRand",
"TRand",
"TWindex",
"ToggleFF",
"Trig",
"Trig1",
"TwoPole",
"TwoZero",
"VDiskIn",
"VOsc",
"VOsc3",
"VarSaw",
"Vibrato",
"Warp1",
"WhiteNoise",
"Wrap",
"WrapIndex",
"XFade2",
"XLine",
"XOut",
"ZeroCrossing",
]
|
josiah-wolf-oberholtzer/supriya
|
supriya/ugens/__init__.py
|
Python
|
mit
| 10,418
|
Midan_Metal_Musketoon0 = [[148, 0, 110, 0, 123, 162, 73, 2.64], u'Midan Metal Musketoon+CRIT12+CRIT12', 1, 14581]
Midan_Metal_Musketoon1 = [[148, 12, 98, 0, 123, 162, 73, 2.64], u'Midan Metal Musketoon+CRIT12+ACC12', 1, 14581]
Midan_Metal_Musketoon2 = [[148, 24, 86, 0, 123, 162, 73, 2.64], u'Midan Metal Musketoon+ACC12+ACC12', 1, 14581]
Augmented_Torrent_Musketoon0 = [[141, 0, 120, 80, 24, 154, 72, 2.64], u'Augmented Torrent Musketoon+SS12+SS12', 1, 14395]
Augmented_Torrent_Musketoon1 = [[141, 12, 120, 80, 12, 154, 72, 2.64], u'Augmented Torrent Musketoon+SS12+ACC12', 1, 14395]
Augmented_Torrent_Musketoon2 = [[141, 24, 120, 80, 0, 154, 72, 2.64], u'Augmented Torrent Musketoon+ACC12+ACC12', 1, 14395]
Arm = [ Midan_Metal_Musketoon0, Augmented_Torrent_Musketoon0, Augmented_Torrent_Musketoon1, Augmented_Torrent_Musketoon2, ]
Midan_Goggles_of_Aiming0 = [[81, 48, 69, 0, 24, 88, 0, 0], u'Midan Goggles of Aiming+SS12+SS12', 1, 14591]
Midan_Goggles_of_Aiming1 = [[81, 60, 69, 0, 12, 88, 0, 0], u'Midan Goggles of Aiming+SS12+ACC12', 1, 14591]
Midan_Goggles_of_Aiming2 = [[81, 69, 69, 0, 0, 88, 0, 0], u'Midan Goggles of Aiming+ACC12+ACC9', 1, 14591]
Augmented_Torrent_Mask_of_Aiming0 = [[81, 69, 24, 0, 48, 88, 0, 0], u'Augmented Torrent Mask of Aiming+CRIT12+CRIT12', 1, 14405]
Head = [ Midan_Goggles_of_Aiming1, Augmented_Torrent_Mask_of_Aiming0, ]
Midan_Coat_of_Aiming0 = [[131, 0, 102, 0, 112, 143, 0, 0], u'Midan Coat of Aiming+CRIT12+CRIT12', 1, 14598]
Midan_Coat_of_Aiming1 = [[131, 12, 90, 0, 112, 143, 0, 0], u'Midan Coat of Aiming+CRIT12+ACC12', 1, 14598]
Midan_Coat_of_Aiming2 = [[131, 24, 78, 0, 112, 143, 0, 0], u'Midan Coat of Aiming+ACC12+ACC12', 1, 14598]
Augmented_Torrent_Tabard_of_Aiming0 = [[131, 112, 24, 75, 0, 143, 0, 0], u'Augmented Torrent Tabard of Aiming+CRIT12+CRIT12', 1, 14412]
Body = [ Midan_Coat_of_Aiming0, Augmented_Torrent_Tabard_of_Aiming0, ]
Midan_Gloves_of_Aiming0 = [[81, 0, 24, 66, 48, 88, 0, 0], u'Midan Gloves of Aiming+CRIT12+CRIT12', 1, 14605]
Midan_Gloves_of_Aiming1 = [[81, 12, 12, 66, 48, 88, 0, 0], u'Midan Gloves of Aiming+CRIT12+ACC12', 1, 14605]
Midan_Gloves_of_Aiming2 = [[81, 24, 0, 66, 48, 88, 0, 0], u'Midan Gloves of Aiming+ACC12+ACC12', 1, 14605]
Augmented_Torrent_Armguards_of_Aiming0 = [[81, 48, 69, 0, 24, 88, 0, 0], u'Augmented Torrent Armguards of Aiming+SS12+SS12', 1, 14419]
Augmented_Torrent_Armguards_of_Aiming1 = [[81, 60, 69, 0, 12, 88, 0, 0], u'Augmented Torrent Armguards of Aiming+SS12+ACC12', 1, 14419]
Augmented_Torrent_Armguards_of_Aiming2 = [[81, 69, 69, 0, 0, 88, 0, 0], u'Augmented Torrent Armguards of Aiming+ACC12+ACC9', 1, 14419]
Hands = [ Midan_Gloves_of_Aiming0, Midan_Gloves_of_Aiming1, Midan_Gloves_of_Aiming2, Augmented_Torrent_Armguards_of_Aiming2, ]
Midan_Belt_of_Aiming0 = [[61, 0, 51, 34, 12, 66, 0, 0], u'Midan Belt of Aiming+SS12', 1, 14612]
Midan_Belt_of_Aiming1 = [[61, 12, 51, 34, 0, 66, 0, 0], u'Midan Belt of Aiming+ACC12', 1, 14612]
Augmented_Torrent_Belt_of_Aiming0 = [[61, 36, 12, 0, 51, 66, 0, 0], u'Augmented Torrent Belt of Aiming+CRIT12', 1, 14426]
Augmented_Torrent_Belt_of_Aiming1 = [[61, 48, 0, 0, 51, 66, 0, 0], u'Augmented Torrent Belt of Aiming+ACC12', 1, 14426]
Waist = [ Midan_Belt_of_Aiming1, Augmented_Torrent_Belt_of_Aiming0, Augmented_Torrent_Belt_of_Aiming1, ]
Midan_Poleyns_of_Aiming0 = [[131, 112, 24, 75, 0, 143, 0, 0], u'Midan Poleyns of Aiming+CRIT12+CRIT12', 1, 14619]
Augmented_Torrent_Tights_of_Aiming0 = [[131, 0, 102, 0, 112, 143, 0, 0], u'Augmented Torrent Tights of Aiming+CRIT12+CRIT12', 1, 14433]
Augmented_Torrent_Tights_of_Aiming1 = [[131, 12, 90, 0, 112, 143, 0, 0], u'Augmented Torrent Tights of Aiming+CRIT12+ACC12', 1, 14433]
Augmented_Torrent_Tights_of_Aiming2 = [[131, 24, 78, 0, 112, 143, 0, 0], u'Augmented Torrent Tights of Aiming+ACC12+ACC12', 1, 14433]
Legs = [ Midan_Poleyns_of_Aiming0, Augmented_Torrent_Tights_of_Aiming0, ]
Midan_Boots_of_Aiming0 = [[81, 69, 24, 0, 48, 88, 0, 0], u'Midan Boots of Aiming+CRIT12+CRIT12', 1, 14626]
Augmented_Torrent_Boots_of_Aiming0 = [[81, 0, 69, 66, 0, 88, 0, 0], u'Augmented Torrent Boots of Aiming+CRIT12+CRIT9', 1, 14440]
Augmented_Torrent_Boots_of_Aiming1 = [[81, 12, 60, 66, 0, 88, 0, 0], u'Augmented Torrent Boots of Aiming+CRIT12+ACC12', 1, 14440]
Augmented_Torrent_Boots_of_Aiming2 = [[81, 24, 48, 66, 0, 88, 0, 0], u'Augmented Torrent Boots of Aiming+ACC12+ACC12', 1, 14440]
Feet = [ Midan_Boots_of_Aiming0, Augmented_Torrent_Boots_of_Aiming1, Augmented_Torrent_Boots_of_Aiming0, Augmented_Torrent_Boots_of_Aiming2, ]
Midan_Neckband_of_Aiming0 = [[61, 36, 51, 0, 12, 0, 0, 0], u'Midan Neckband of Aiming+SS12', 1, 14637]
Midan_Neckband_of_Aiming1 = [[61, 48, 51, 0, 0, 0, 0, 0], u'Midan Neckband of Aiming+ACC12', 1, 14637]
Augmented_Primal_Choker_of_Aiming0 = [[61, 51, 12, 0, 36, 0, 0, 0], u'Augmented Primal Choker of Aiming+CRIT12', 1, 14451]
Necklace = [ Midan_Neckband_of_Aiming1, Augmented_Primal_Choker_of_Aiming0, ]
Midan_Earrings_of_Aiming0 = [[61, 51, 12, 0, 36, 0, 0, 0], u'Midan Earrings of Aiming+CRIT12', 1, 14632]
Augmented_Primal_Earrings_of_Aiming0 = [[61, 0, 51, 34, 12, 0, 0, 0], u'Augmented Primal Earrings of Aiming+SS12', 1, 14446]
Augmented_Primal_Earrings_of_Aiming1 = [[61, 12, 51, 34, 0, 0, 0, 0], u'Augmented Primal Earrings of Aiming+ACC12', 1, 14446]
Earrings = [ Midan_Earrings_of_Aiming0, Augmented_Primal_Earrings_of_Aiming1, ]
Midan_Bracelets_of_Aiming0 = [[61, 0, 12, 49, 36, 0, 0, 0], u'Midan Bracelets of Aiming+CRIT12', 1, 14642]
Midan_Bracelets_of_Aiming1 = [[61, 12, 0, 49, 36, 0, 0, 0], u'Midan Bracelets of Aiming+ACC12', 1, 14642]
Augmented_Primal_Bracelet_of_Aiming0 = [[61, 51, 48, 0, 0, 0, 0, 0], u'Augmented Primal Bracelet of Aiming+CRIT12', 1, 14456]
Bracelets = [ Midan_Bracelets_of_Aiming0, Midan_Bracelets_of_Aiming1, Augmented_Primal_Bracelet_of_Aiming0, ]
Midan_Ring_of_Aiming0 = [[61, 36, 12, 0, 51, 0, 0, 0], u'Midan Ring of Aiming+CRIT12', 1, 14647]
Midan_Ring_of_Aiming1 = [[61, 48, 0, 0, 51, 0, 0, 0], u'Midan Ring of Aiming+ACC12', 1, 14647]
Augmented_Primal_Ring_of_Aiming0 = [[61, 0, 48, 49, 0, 0, 0, 0], u'Augmented Primal Ring of Aiming+CRIT12', 1, 14461]
Augmented_Primal_Ring_of_Aiming1 = [[61, 12, 36, 49, 0, 0, 0, 0], u'Augmented Primal Ring of Aiming+ACC12', 1, 14461]
Ring = [ Midan_Ring_of_Aiming0, Augmented_Primal_Ring_of_Aiming0, ]
|
jrlusby/xiv-bard-calc
|
inventories/staffbis.py
|
Python
|
mit
| 6,323
|
import os
from flask import current_app
from .remote import remote
def exec_remote_wmt(host, uuid, which_wmt_exe='wmt-exe', username=None,
password=None, extra_args=None):
extra_args = extra_args or []
which_wmt_slave = os.path.join(os.path.dirname(which_wmt_exe), 'wmt-slave')
cmd = ' '.join([which_wmt_exe, uuid,
'--server-url=%s' % current_app.config['WMT_SERVER_URL'],
'--with-wmt-slave=%s' % which_wmt_slave,
'--daemon'] + extra_args)
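    # NOTE: the command assembled above is immediately discarded by the
    # hard-coded override on the next line.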
cmd = 'df -h'
return remote(host, cmd, username, password, prompt=False)
|
mcflugen/wmt-rest
|
wmt/flask/tasks.py
|
Python
|
mit
| 620
|
import unittest
class TestExtractUtil(unittest.TestCase):
def test_xyz(self):
self.assertEqual(True, True)
|
scruwys/and-the-award-goes-to
|
tests/test_extract_util.py
|
Python
|
mit
| 121
|
# coding=utf-8
"""
stream.varint
~~~~~~~~~~~~~
Encode and decode an integer up to 64 bit to/from 'Varint'. See Google
Protobuf library documentation for more details about Varints.
:copyright: (c) 2017 by Ali Ghaffaari.
:license: MIT, see LICENSE for more details.
"""
import sys
import click
from google.protobuf.internal.decoder import _DecodeVarint as decodeVarint
from google.protobuf.internal.encoder import _EncodeVarint as encodeVarint
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
def cli():
"""Varint encoder/decoder."""
pass
@cli.command('encode')
@click.argument('integer', nargs=1, type=int)
def cmd_encode(integer):
"""Encode an integer up to 64 bit to Varint."""
encode(integer)
@cli.command('decode')
@click.argument('input_file', nargs=1, type=click.File('rb'))
def cmd_decode(input_file):
"""Decode an integer up to 64 bit from Varint."""
decode(input_file)
def encode(value):
"""Output the encoded value to the standard output.
Args:
value (int): the integer to be encoded.
"""
encodeVarint(sys.stdout.buffer.write, value, True)
def decode(input_file):
"""Output the decoded value to the standard output.
Args:
input_file (file handler): input file handler.
"""
print(decodeVarint(input_file.read(), 0)[0])
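if __name__ == '__main__':
    # Illustrative round-trip check (not part of the original module): 300
    # encodes to the two varint bytes b'\xac\x02' and decodes back again.
    import io
    _buf = io.BytesIO()
    encodeVarint(_buf.write, 300, True)
    assert decodeVarint(_buf.getvalue(), 0)[0] == 300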
|
cartoonist/pystream-protobuf
|
stream/varint.py
|
Python
|
mit
| 1,366
|
"""wolfpass.py is a project example using resources from Wolf Pass"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from numpy import load as npload
from .base import BaseExample, exampleproperty
from ..data import DataArray
from ..line import Line, Mesh1D
from ..point import Mesh0D, Point
from ..project import Project
from ..surface import Mesh2D, Surface
from ..texture import Texture2DImage
from ..volume import Mesh3DGrid, Volume
class Wolfpass(BaseExample):
"""Wolfpass example
This module provides example data for a geology exploration project.
"""
@exampleproperty
def filenames(self):
return [
'AG_gpt.line.npy',
'AS_ppm.line.npy',
'AU_gpt.line.npy',
'CU_pct.line.npy',
'CU_pct.vol.npy',
'CU_pct_0.75_1.0_t.cusurf.npy',
'CU_pct_0.75_1.0_v.cusurf.npy',
'CU_pct_1.0_1.25_t.cusurf.npy',
'CU_pct_1.0_1.25_v.cusurf.npy',
'CU_pct_1.25_1.5_t.cusurf.npy',
'CU_pct_1.25_1.5_v.cusurf.npy',
'CU_pct_gt_1.5_t.cusurf.npy',
'CU_pct_gt_1.5_v.cusurf.npy',
'CU_pct_lt_0.75_t.cusurf.npy',
'CU_pct_lt_0.75_v.cusurf.npy',
'Density.line.npy',
'MO_ppm.line.npy',
'Recov.line.npy',
'S_pct.line.npy',
'basement_t.lithsurf.npy',
'basement_v.lithsurf.npy',
'boreholes_s.line.npy',
'boreholes_v.line.npy',
'dacite_data.line.npy',
'dacite_t.lithsurf.npy',
'dacite_v.lithsurf.npy',
'diorite_early_t.lithsurf.npy',
'diorite_early_v.lithsurf.npy',
'diorite_late_t.lithsurf.npy',
'diorite_late_v.lithsurf.npy',
'dist_to_borehole.lithsurf.npy',
'dist_to_borehole.vol.npy',
'drill_loc_v.point.npy',
'elevation.toposurf.npy',
'lithology.xsurf.npy',
'maxdepth.point.npy',
'ovb_t.lithsurf.npy',
'ovb_v.lithsurf.npy',
'section_number.xsurf.npy',
'topo_t.toposurf.npy',
'topo_v.toposurf.npy',
'topography.png',
'trench.point.npy',
'vol_h1.vol.npy',
'vol_h2.vol.npy',
'vol_h3.vol.npy',
'vol_x0.vol.npy',
'xsect_t.xsurf.npy',
'xsect_v.xsurf.npy',
]
@exampleproperty
def drill_vertices(self):
"""drill point vertices"""
return npload(Wolfpass.fetch_data(filename='drill_loc_v.point.npy',
download_if_missing=False,
verbose=False))
@exampleproperty
def drill_data(self):
"""dictionry of drill point data"""
if getattr(self, '_drill_data', None) is None:
self._drill_data = dict()
for npyfile in self.filenames:
if not npyfile.endswith('.point.npy'):
continue
if npyfile.endswith('_v.point.npy'):
continue
self._drill_data[npyfile.split('.')[0]] = npload(
Wolfpass.fetch_data(filename=npyfile,
download_if_missing=False,
verbose=False)
)
return self._drill_data
@exampleproperty
def borehole_vertices(self):
"""borehole line vertices"""
return npload(Wolfpass.fetch_data(filename='boreholes_v.line.npy',
download_if_missing=False,
verbose=False))
@exampleproperty
def borehole_segments(self):
"""borehole segment vertex indices"""
return npload(Wolfpass.fetch_data(filename='boreholes_s.line.npy',
download_if_missing=False,
verbose=False))
@exampleproperty
def borehole_data(self):
"""dictionary of borehole data"""
if getattr(self, '_borehole_data', None) is None:
self._borehole_data = dict()
for npyfile in self.filenames:
if not npyfile.endswith('.line.npy'):
continue
if (npyfile.endswith('_v.line.npy') or
npyfile.endswith('_s.line.npy')):
continue
self._borehole_data[npyfile.split('.')[0]] = npload(
Wolfpass.fetch_data(filename=npyfile,
download_if_missing=False,
verbose=False)
)
return self._borehole_data
@exampleproperty
def cu_names(self):
"""list of names for the different cu pct surfaces"""
return [fname[:-13] for fname in self.filenames
if fname.endswith('_v.cusurf.npy')]
@exampleproperty
def cu_vertices(self):
"""list of cu pct surface vertices"""
return [npload(Wolfpass.fetch_data(filename=prefix + '_v.cusurf.npy',
download_if_missing=False,
verbose=False))
for prefix in self.cu_names]
@exampleproperty
def cu_triangles(self):
"""list of cu pct surface triangles"""
return [npload(Wolfpass.fetch_data(filename=prefix + '_t.cusurf.npy',
download_if_missing=False,
verbose=False))
for prefix in self.cu_names]
@exampleproperty
def lith_names(self):
"""list of names for the different lithology surfaces"""
return [fname[:-15] for fname in self.filenames
if fname.endswith('_v.lithsurf.npy')]
@exampleproperty
def lith_vertices(self):
"""list of lithology surface vertices"""
return [npload(Wolfpass.fetch_data(filename=prefix + '_v.lithsurf.npy',
download_if_missing=False,
verbose=False))
for prefix in self.lith_names]
@exampleproperty
def lith_triangles(self):
"""list of lithology surface triangles"""
return [npload(Wolfpass.fetch_data(filename=prefix + '_t.lithsurf.npy',
download_if_missing=False,
verbose=False))
for prefix in self.lith_names]
@exampleproperty
def lith_diorite_early_data(self):
"""data for early diorite surface"""
return npload(Wolfpass.fetch_data(
filename='dist_to_borehole.lithsurf.npy',
download_if_missing=False,
verbose=False
))
@exampleproperty
def topo_vertices(self):
"""topography vertices"""
return npload(Wolfpass.fetch_data(filename='topo_v.toposurf.npy',
download_if_missing=False,
verbose=False))
@exampleproperty
def topo_triangles(self):
"""topography triangles"""
return npload(Wolfpass.fetch_data(filename='topo_t.toposurf.npy',
download_if_missing=False,
verbose=False))
@exampleproperty
def topo_image(self):
"""surface image PNG"""
return Wolfpass.fetch_data(filename='topography.png',
download_if_missing=False,
verbose=False)
@exampleproperty
def topo_image_orientation(self):
"""surface image O, U, and V"""
return dict(
O=[443200., 491750, 0],
U=[4425., 0, 0],
V=[0., 3690, 0]
)
@exampleproperty
def topo_data(self):
"""elevation data"""
return npload(Wolfpass.fetch_data(filename='elevation.toposurf.npy',
download_if_missing=False,
verbose=False))
@exampleproperty
def xsect_vertices(self):
"""cross section vertices"""
return npload(Wolfpass.fetch_data(filename='xsect_v.xsurf.npy',
download_if_missing=False,
verbose=False))
@exampleproperty
def xsect_triangles(self):
"""cross section triangles"""
return npload(Wolfpass.fetch_data(filename='xsect_t.xsurf.npy',
download_if_missing=False,
verbose=False))
@exampleproperty
def xsect_data(self):
"""dictionary of cross section data"""
if getattr(self, '_xsect_data', None) is None:
self._xsect_data = dict()
for npyfile in self.filenames:
if not npyfile.endswith('.xsurf.npy'):
continue
if (npyfile.endswith('_v.xsurf.npy') or
npyfile.endswith('_t.xsurf.npy')):
continue
self._xsect_data[npyfile.split('.')[0]] = npload(
Wolfpass.fetch_data(filename=npyfile,
download_if_missing=False,
verbose=False)
)
return self._xsect_data
@exampleproperty
def lith_tensor(self):
"""h1, h2, h3 dictionary for lith volume"""
return dict(
h1=npload(Wolfpass.fetch_data(filename='vol_h1.vol.npy',
download_if_missing=False,
verbose=False)),
h2=npload(Wolfpass.fetch_data(filename='vol_h2.vol.npy',
download_if_missing=False,
verbose=False)),
h3=npload(Wolfpass.fetch_data(filename='vol_h3.vol.npy',
download_if_missing=False,
verbose=False))
)
@exampleproperty
def lith_origin(self):
"""x0 for lith volume"""
return npload(Wolfpass.fetch_data(filename='vol_x0.vol.npy',
download_if_missing=False,
verbose=False))
@exampleproperty
def lith_data(self):
"""dictionary of data for lith volume"""
if getattr(self, '_lith_data', None) is None:
self._lith_data = dict()
for npyfile in self.filenames:
if not npyfile.endswith('.vol.npy'):
continue
if npyfile.startswith('vol_'):
continue
self._lith_data[npyfile.split('.')[0]] = npload(
Wolfpass.fetch_data(filename=npyfile,
download_if_missing=False,
verbose=False)
).flatten()
return self._lith_data
@classmethod
def get_project(self):
"""Return a project with all the Wolf Pass data"""
proj = Project(
title='Wolf Pass'
)
self._add_points(proj)
self._add_lines(proj)
self._add_cu_surf(proj)
self._add_lith_surf(proj)
self._add_topo(proj)
self._add_xsect(proj)
self._add_lith_vol(proj)
return proj
@classmethod
def get_project_topo(self):
"""Return a project with Wolf Pass topography data"""
proj = Project(
title='Topography',
description='Topography, surface imagery, and drill locations'
)
self._add_points(proj)
self._add_topo(proj)
return proj
@classmethod
def get_project_dacite(self):
"""Return a project with Wolf Pass dacite data"""
proj = Project(
title='Wolf Pass',
description='Boreholes and dacite formation'
)
self._add_lines(proj)
self._add_lith_surf(proj, ['ovb', 'dacite'])
return proj
@classmethod
def _add_points(self, proj):
Point(
project=proj,
mesh=Mesh0D(
vertices=self.drill_vertices
),
data=[
dict(
location='N',
data=DataArray(
title=k,
array=self.drill_data[k]
)
) for k in self.drill_data
],
title='Borehole Drill Locations'
)
@classmethod
def _add_lines(self, proj):
Line(
project=proj,
mesh=Mesh1D(
vertices=self.borehole_vertices,
segments=self.borehole_segments
),
data=[
dict(
location='CC',
data=DataArray(
title=k,
array=self.borehole_data[k]
)
) for k in self.borehole_data
],
title='Boreholes'
)
@classmethod
def _add_cu_surf(self, proj):
for i, prefix in enumerate(self.cu_names):
Surface(
project=proj,
mesh=Mesh2D(
vertices=self.cu_vertices[i],
triangles=self.cu_triangles[i]
),
title=prefix
)
@classmethod
def _add_lith_surf(self, proj, include=None):
for i, prefix in enumerate(self.lith_names):
if include is not None and prefix not in include:
continue
if prefix == 'diorite_early':
lith_data = [dict(
location='N',
data=DataArray(
title='Distance to Borehole',
array=self.lith_diorite_early_data
)
)]
else:
lith_data = []
Surface(
project=proj,
mesh=Mesh2D(
vertices=self.lith_vertices[i],
triangles=self.lith_triangles[i]
),
data=lith_data,
title=prefix
)
@classmethod
def _add_topo(self, proj):
Surface(
project=proj,
mesh=Mesh2D(
vertices=self.topo_vertices,
triangles=self.topo_triangles
),
data=dict(
location='N',
data=DataArray(
title='Elevation',
array=self.topo_data
)
),
textures=Texture2DImage(
image=self.topo_image,
**self.topo_image_orientation
),
title='Topography Surface'
)
@classmethod
def _add_xsect(self, proj):
Surface(
project=proj,
mesh=Mesh2D(
vertices=self.xsect_vertices,
triangles=self.xsect_triangles
),
data=[
dict(
location='CC',
data=DataArray(
title=k,
array=self.xsect_data[k]
)
) for k in self.xsect_data
],
title='Cross-Sections'
)
@classmethod
def _add_lith_vol(self, proj):
Volume(
project=proj,
mesh=Mesh3DGrid(
x0=self.lith_origin,
**self.lith_tensor
),
data=[
dict(
location='CC',
data=DataArray(
title=k,
array=self.lith_data[k]
)
) for k in self.lith_data
],
title='Lithology Volume'
)
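# Illustrative usage (not part of the original module; assumes the example
# .npy/.png files are already cached locally, since every fetch_data call
# above passes download_if_missing=False):
#
#     topo_proj = Wolfpass.get_project_topo()   # topography + drill locations
#     full_proj = Wolfpass.get_project()        # every Wolf Pass resource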
|
3ptscience/steno3dpy
|
steno3d/examples/wolfpass.py
|
Python
|
mit
| 16,441
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import palettable
import richardsplot as rplot
x = np.random.random(size=1000)*6000+3000
y = np.random.normal(loc=0,scale=0.25,size=1000)*1.0e-17
ind = np.argsort(x)
xplot = x[ind]
yplot = y[ind]/1.0e-17
plt.figure(figsize=(8,8))
plt.xlim(3000,9000)
#plt.ylim(-1,1)
plt.xlabel("Wavelength ($\mu$m)")
plt.ylabel("Flux Density ($10^{-17}$ ergs s$^{-1}$ cm$^{-2}$ \AA$^{-1}$)")
plt.plot(xplot,yplot,color=rplot.csdark[2],label="Test1")
#plt.show()
plt.tight_layout(pad=0.1)
plt.savefig("testplot1.pdf")
|
gtrichards/richardsplot
|
testplot1.py
|
Python
|
mit
| 579
|
# -*- coding: utf-8 -*-
import dis
def disassemble(co, lasti=-1):
"""
Disassemble a code object.
:param co: code object
    :type co: code
    :param lasti: last instruction executed (optional)
    :type lasti: int
    :returns: disassembled instructions as a list of dicts
    :rtype: list
"""
result = []
code = co.co_code
labels = dis.findlabels(code)
linestarts = dict(dis.findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
instruction = {}
c = code[i]
op = ord(c)
if i in linestarts:
instruction['linestarts'] = linestarts[i]
instruction['lasti'] = (i == lasti)
instruction['labelled'] = (i in labels)
instruction['i'] = i
instruction['opname'] = dis.opname[op]
i += 1
if op >= dis.HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i += 2
if op == dis.EXTENDED_ARG:
extended_arg = oparg * 65536
instruction['oparg'] = {
'count': oparg
}
if op in dis.hasconst:
instruction['oparg']['type'] = 'consts'
instruction['oparg']['val'] = co.co_consts[oparg]
elif op in dis.hasname:
instruction['oparg']['type'] = 'names'
instruction['oparg']['val'] = co.co_names[oparg]
elif op in dis.hasjrel:
instruction['oparg']['type'] = 'jump'
instruction['oparg']['val'] = i + oparg
elif op in dis.haslocal:
instruction['oparg']['type'] = 'varnames'
instruction['oparg']['val'] = co.co_varnames[oparg]
elif op in dis.hascompare:
instruction['oparg']['type'] = 'compare'
instruction['oparg']['val'] = dis.cmp_op[oparg]
elif op in dis.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
instruction['oparg']['type'] = 'free'
instruction['oparg']['val'] = free[oparg]
result.append(instruction)
return result
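# Illustrative usage (not part of the original module; Python 2 only, since
# the loop above applies ord() to single-byte slices of co_code):
#
#     def add(a, b):
#         return a + b
#
#     for instr in disassemble(add.__code__):
#         print(instr['i'], instr['opname'], instr.get('oparg'))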
|
linkdd/errcorrect
|
errcorrect/disassembler.py
|
Python
|
mit
| 2,234
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
# Copyright (C) 2021 TU Wien.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REST API module for Invenio."""
from __future__ import absolute_import, print_function
from datetime import timezone
from flask import Response, abort, current_app, g, jsonify, make_response, \
request
from flask.views import MethodView
from werkzeug.exceptions import HTTPException
from .errors import RESTException, SameContentException
def create_api_errorhandler(**kwargs):
r"""Create an API error handler.
E.g. register a 404 error:
.. code-block:: python
app.errorhandler(404)(create_api_errorhandler(
status=404, message='Not Found'))
:param \*\*kwargs: It contains the ``'status'`` and the ``'message'``
to describe the error.
"""
def api_errorhandler(e):
if isinstance(e, RESTException):
return e.get_response()
elif isinstance(e, HTTPException) and e.description:
kwargs['message'] = e.description
if kwargs.get('status', 400) >= 500 and hasattr(g, 'sentry_event_id'):
kwargs['error_id'] = str(g.sentry_event_id)
return make_response(jsonify(kwargs), kwargs['status'])
return api_errorhandler
class ContentNegotiatedMethodView(MethodView):
"""MethodView with content negotiation.
Dispatch HTTP requests as MethodView does and build responses using the
registered serializers. It chooses the right serializer using the request's
accept type. It also provides a helper method for handling ETags.
"""
def __init__(self, serializers=None, method_serializers=None,
serializers_query_aliases=None, default_media_type=None,
default_method_media_type=None, *args, **kwargs):
"""Register the serializing functions.
Serializing functions will receive all named and non named arguments
provided to ``make_response`` or returned by request handling methods.
Recommended prototype is: ``serializer(data, code=200, headers=None)``
and it should return :class:`flask.Response` instances.
Serializing functions can also be overridden by setting
``self.serializers``.
:param serializers: A mapping from mediatype to a serializer function.
:param method_serializers: A mapping of HTTP method name (GET, PUT,
PATCH, POST, DELETE) -> dict(mediatype -> serializer function). If
set, it overrides the serializers dict.
:param serializers_query_aliases: A mapping of values of the defined
query arg (see `config.REST_MIMETYPE_QUERY_ARG_NAME`) to valid
mimetypes: dict(alias -> mimetype).
:param default_media_type: Default media type used if no accept type
has been provided and global serializers are used for the request.
Can be None if there is only one global serializer or None. This
media type is used for method serializers too if
``default_method_media_type`` is not set.
:param default_method_media_type: Default media type used if no accept
            type has been provided and specific method serializers are used
            for the request. Can be ``None`` if the method has only one
            serializer (or none).
"""
super(ContentNegotiatedMethodView, self).__init__(*args, **kwargs)
self.serializers = serializers or None
self.default_media_type = default_media_type
self.default_method_media_type = default_method_media_type or {}
# set default default media_types if none has been given
if self.serializers and not self.default_media_type:
if len(self.serializers) == 1:
self.default_media_type = next(iter(self.serializers.keys()))
elif len(self.serializers) > 1:
raise ValueError('Multiple serializers with no default media'
' type')
# set method serializers
self.method_serializers = ({key.upper(): func for key, func in
method_serializers.items()} if
method_serializers else {})
# set serializer aliases
self.serializers_query_aliases = serializers_query_aliases or {}
# create default method media_types dict if none has been given
if self.method_serializers and not self.default_method_media_type:
self.default_method_media_type = {}
for http_method, meth_serial in self.method_serializers.items():
if len(self.method_serializers[http_method]) == 1:
self.default_method_media_type[http_method] = \
next(iter(self.method_serializers[http_method].keys()))
elif len(self.method_serializers[http_method]) > 1:
# try to use global default media type
if default_media_type in \
self.method_serializers[http_method]:
self.default_method_media_type[http_method] = \
default_media_type
else:
raise ValueError('Multiple serializers for method {0}'
'with no default media type'.format(
http_method))
def get_method_serializers(self, http_method):
"""Get request method serializers + default media type.
Grab serializers from ``method_serializers`` if defined, otherwise
returns the default serializers. Uses GET serializers for HEAD requests
if no HEAD serializers were specified.
The method also determines the default media type.
:param http_method: HTTP method as a string.
:returns: Tuple of serializers and default media type.
"""
if http_method == 'HEAD' and 'HEAD' not in self.method_serializers:
http_method = 'GET'
return (
self.method_serializers.get(http_method, self.serializers),
self.default_method_media_type.get(
http_method, self.default_media_type)
)
def _match_serializers_by_query_arg(self, serializers):
"""Match serializer by query arg."""
# if the format query argument is present, match the serializer
arg_name = current_app.config.get('REST_MIMETYPE_QUERY_ARG_NAME')
if arg_name:
arg_value = request.args.get(arg_name, None)
if arg_value is None:
return None
# Search for the serializer matching the format
try:
return serializers[
self.serializers_query_aliases[arg_value]]
except KeyError: # either no serializer for this format
return None
return None
def _match_serializers_by_accept_headers(self, serializers,
default_media_type):
"""Match serializer by `Accept` headers."""
# Bail out fast if no accept headers were given.
if len(request.accept_mimetypes) == 0:
return serializers[default_media_type]
# Determine best match based on quality.
best_quality = -1
best = None
has_wildcard = False
for client_accept, quality in request.accept_mimetypes:
if quality <= best_quality:
continue
if client_accept == '*/*':
has_wildcard = True
for s in serializers:
if s in ['*/*', client_accept] and quality > 0:
best_quality = quality
best = s
        # If no match was found but a wildcard exists, then use the default
        # media type.
if best is None and has_wildcard:
best = default_media_type
if best is not None:
return serializers[best]
return None
def match_serializers(self, serializers, default_media_type):
"""Choose serializer for a given request based on query arg or headers.
Checks if query arg `format` (by default) is present and tries to match
the serializer based on the arg value, by resolving the mimetype mapped
to the arg value.
Otherwise, chooses the serializer by retrieving the best quality
`Accept` headers and matching its value (mimetype).
:param serializers: Dictionary of serializers.
:param default_media_type: The default media type.
:returns: Best matching serializer based on `format` query arg first,
then client `Accept` headers or None if no matching serializer.
"""
return self._match_serializers_by_query_arg(serializers) or self.\
_match_serializers_by_accept_headers(serializers,
default_media_type)
def make_response(self, *args, **kwargs):
"""Create a Flask Response.
Dispatch the given arguments to the serializer best matching the
current request's Accept header.
:return: The response created by the serializing function.
:rtype: :class:`flask.Response`
:raises werkzeug.exceptions.NotAcceptable: If no media type
matches current Accept header.
"""
serializer = self.match_serializers(
*self.get_method_serializers(request.method))
if serializer:
return serializer(*args, **kwargs)
abort(406)
def dispatch_request(self, *args, **kwargs):
"""Dispatch current request.
Dispatch the current request using
:class:`flask.views.MethodView` `dispatch_request()` then, if the
result is not already a :py:class:`flask.Response`, search for the
serializing function which matches the best the current request's
Accept header and use it to build the :py:class:`flask.Response`.
:rtype: :class:`flask.Response`
:raises werkzeug.exceptions.NotAcceptable: If no media type matches
current Accept header.
:returns: The response returned by the request handler or created by
the serializing function.
"""
result = super(ContentNegotiatedMethodView, self).dispatch_request(
*args, **kwargs
)
if isinstance(result, Response):
return result
elif isinstance(result, (list, tuple)):
return self.make_response(*result)
else:
return self.make_response(result)
def check_etag(self, etag, weak=False):
"""Validate the given ETag with current request conditions.
Compare the given ETag to the ones in the request header If-Match
and If-None-Match conditions.
The result is unspecified for requests having If-Match and
If-None-Match being both set.
:param str etag: The ETag of the current resource. For PUT and PATCH
it is the one before any modification of the resource. This ETag
            will be tested against the If-Match and If-None-Match header
            conditions. The given ETag
should not be quoted.
:raises werkzeug.exceptions.PreconditionFailed: If the
condition is not met.
:raises invenio_rest.errors.SameContentException: If the
            request is GET or HEAD and the If-None-Match condition is not
met.
"""
# bool(:py:class:`werkzeug.datastructures.ETags`) is not consistent
# in Python 3. bool(Etags()) == True even though it is empty.
if len(request.if_match.as_set(include_weak=weak)) > 0 or \
request.if_match.star_tag:
contains_etag = (request.if_match.contains_weak(etag) if weak
else request.if_match.contains(etag))
if not contains_etag and '*' not in request.if_match:
abort(412)
if len(request.if_none_match.as_set(include_weak=weak)) > 0 or \
request.if_none_match.star_tag:
contains_etag = (request.if_none_match.contains_weak(etag) if weak
else request.if_none_match.contains(etag))
if contains_etag or '*' in request.if_none_match:
if request.method in ('GET', 'HEAD'):
raise SameContentException(etag)
else:
abort(412)
def check_if_modified_since(self, dt, etag=None):
"""Validate If-Modified-Since with current request conditions."""
dt = dt.replace(microsecond=0)
# since Werkzeug v2.0, request-related datetime values are
        # timezone-aware, which requires compared dates to be timezone-aware as well
if request.if_modified_since and request.if_modified_since.tzinfo and \
not dt.tzinfo:
dt = dt.replace(tzinfo=timezone.utc)
if request.if_modified_since and dt <= request.if_modified_since:
raise SameContentException(etag, last_modified=dt)
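# Illustrative subclass (not part of the original module) showing how the
# negotiation hooks above are typically wired up; ``record_to_json`` and
# ``record_to_xml`` are hypothetical serializer callables that return
# flask.Response objects:
#
#     class RecordResource(ContentNegotiatedMethodView):
#         def __init__(self, **kwargs):
#             super(RecordResource, self).__init__(
#                 serializers={
#                     'application/json': record_to_json,
#                     'application/xml': record_to_xml,
#                 },
#                 default_media_type='application/json',
#                 **kwargs)
#
#         def get(self, record_id):
#             return {'id': record_id}  # serialized by the matching serializer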
|
inveniosoftware/invenio-rest
|
invenio_rest/views.py
|
Python
|
mit
| 13,273
|
#! -*- coding: utf-8 -*-
import os
import re
__all__ = ['splitext', 'joinext', 'list_dir']
def splitext(path):
"""Get the extension from a path"""
root, ext = os.path.splitext(path)
return (root, ext) if not ext else (root, ext[1:])
def joinext(name, ext, sep='.'):
"""
>>> joinext('a', 'txt')
'a.txt'
"""
return sep.join([name, ext])
def list_dir(root, file_type='f', pattern=None, path=True, recursive=True):
"""
@root: root dir
@file_type: file type,
'f' --> general file,
'd' --> dir,
'a' --> file and dir
@pattern: search pattern
@path: if True, return path else name
@recursive: if recursive is True, list files or subdirs recursively
Return a generator of subdirs or files
"""
for dirpath, subdirs, filenames in os.walk(root):
subs = []
if file_type == 'f':
subs = filenames
elif file_type == 'd':
subs = subdirs
elif file_type == 'a':
subs.extend(subdirs)
subs.extend(filenames)
else:
break
# dirname or filename filter
for sub in subs:
_sub = os.path.join(dirpath, sub)
if pattern is not None:
if re.search(pattern, _sub) is None:
continue
if path:
# abspath
yield os.path.abspath(_sub)
else:
# name
yield sub
# recursive
if not recursive:
break
if __name__ == '__main__':
pass
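# Illustrative usage (not part of the original module):
#
#     for py_file in list_dir('.', file_type='f', pattern=r'\.py$'):
#         print(py_file)          # absolute paths of matching files
#     joinext('report', 'txt')    # -> 'report.txt'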
|
yang3yen/pykit
|
pylib/path.py
|
Python
|
mit
| 1,587
|
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
l = len(s)
r = 0
i = j = 0
m = [None] * 256
while j < l:
c = ord(s[j])
if m[c] is not None:
r = max(j - i, r)
while i <= m[c]:
m[ord(s[i])] = None
i += 1
m[c] = j
j += 1
r = max(j - i, r)
return r
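if __name__ == '__main__':
    # Illustrative checks (not part of the original solution).
    assert Solution().lengthOfLongestSubstring("abcabcbb") == 3  # "abc"
    assert Solution().lengthOfLongestSubstring("bbbbb") == 1     # "b"
    assert Solution().lengthOfLongestSubstring("pwwkew") == 3    # "wke"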
|
foomango/leetcode
|
src/python/3_longest_substring_without_repeating_characters/solution.py
|
Python
|
mit
| 507
|
# Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from ..lib.handler import Handler
from ..lib import p, dmd, ui, util
mode_labels = (
"Manual",
"Repeat",
"Cycle",
)
class PulseTest(Handler):
timer = None
def __init__(self, name, icon, devices):
self.image = ui.Image(icon, left=0)
self.devices = util.Cycle(devices)
self.interval = 0.75
super(PulseTest, self).__init__(name)
def setup(self):
self.display = ui.Panel(name=self.name)
self.mode_label = ui.Text("Mode", font="t5b")
self.name_label = ui.Text("Name", font="t5cp", case="full")
self.action_label = ui.Text("Action", font="t5cp", fill=0x02,
padding=[1, 5], enabled=False)
ui.valign((self.mode_label, self.name_label, self.action_label))
self.display.add((self.image, self.mode_label, self.name_label,
self.action_label))
self.modes = util.Cycle(mode_labels)
self.on("switch_service_exit", self.exit)
self.on("switch_service_up", self.next)
self.on("switch_service_down", self.previous)
self.on("switch_service_enter", self.next_mode)
self.on("switch_start_button", self.manual_action)
def on_enable(self):
self.update_mode()
def update_mode(self):
mode = self.modes.get()
self.mode_label.show(mode)
self.update_selection()
def update_selection(self):
device = self.devices.get()
self.name_label.show(device.label)
if self.modes.get() in ("Repeat", "Cycle"):
self.schedule_action()
else:
self.cancel(self.timer)
def next(self):
p.mixer.play("service_select")
self.devices.get().disable()
self.devices.next()
self.update_selection()
def previous(self):
p.mixer.play("service_select")
self.devices.get().disable()
self.devices.previous()
self.update_selection()
def next_mode(self):
p.mixer.play("service_enter")
mode = self.modes.next()
self.update_mode()
def schedule_action(self):
self.cancel(self.timer)
self.timer = self.wait(self.interval, self.scheduled_action)
def scheduled_action(self):
if self.modes.get() == "Cycle":
self.devices.next()
device = self.devices.get()
self.name_label.show(device.label)
self.pulse()
self.schedule_action()
def pulse(self):
self.devices.get().pulse()
self.action_label.show("Pulse", 1.0)
def manual_action(self):
if self.modes.get() == "Manual":
self.pulse()
def exit(self):
self.disable()
p.mixer.play("service_exit")
    def on_disable(self):
        self.cancel(self.timer)
        p.modes["service"].resume()
|
town-hall-pinball/project-omega
|
pin/service/pulse_test.py
|
Python
|
mit
| 3,939
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import absolute_import, unicode_literals
import cPickle
import uuid
from datetime import datetime, timedelta
from flask import flash, request
from flask.sessions import SessionInterface, SessionMixin
from markupsafe import Markup
from werkzeug.datastructures import CallbackDict
from werkzeug.utils import cached_property
from indico.core.config import config
from indico.legacy.common.cache import GenericCache
from indico.modules.users import User
from indico.util.date_time import get_display_tz
from indico.util.decorators import cached_writable_property
from indico.util.i18n import _, set_best_lang
class BaseSession(CallbackDict, SessionMixin):
def __init__(self, initial=None, sid=None, new=False):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.sid = sid
self.new = new
self.modified = False
defaults = self._get_defaults()
if defaults:
self.update(defaults)
def _get_defaults(self):
# Note: This is called before there is a DB connection available!
return None
# Hey, if you intend on adding a custom property to this class:
# - Only do it if you need logic behind it. Otherwise use the dict API!
# - Even if you do need logic, keep it to core stuff. Otherwise it probably does not belong here!
# - Always prefix the dict keys backing a property with an underscore (to prevent clashes with externally-set items)
# - When you store something like the avatar that involves a DB lookup, use cached_writable_property
class IndicoSession(BaseSession):
@cached_writable_property('_user')
def user(self):
user_id = self.get('_user_id')
user = User.get(user_id) if user_id is not None else None
if user and user.is_deleted:
merged_into_user = user.merged_into_user
user = None
# If the user is deleted and the request is likely to be seen by
# the user, we forcefully log him out and inform him about it.
if not request.is_xhr and request.blueprint != 'assets':
self.clear()
if merged_into_user:
msg = _('Your profile has been merged into <strong>{}</strong>. Please log in using that profile.')
flash(Markup(msg).format(merged_into_user.full_name), 'warning')
else:
flash(_('Your profile has been deleted.'), 'error')
elif user and user.is_blocked:
user = None
if not request.is_xhr and request.blueprint != 'assets':
self.clear()
flash(_('Your Indico profile has been blocked.'), 'error')
return user
@user.setter
def user(self, user):
if user is None:
self.pop('_user_id', None)
else:
self['_user_id'] = user.id
self._refresh_sid = True
@property
def lang(self):
return self.get('_lang') or set_best_lang(check_session=False)
@lang.setter
def lang(self, lang):
self['_lang'] = lang
@cached_property
def csrf_token(self):
if '_csrf_token' not in self:
if not self.csrf_protected:
# don't store a token in the session if we don't really need CSRF protection
return '00000000-0000-0000-0000-000000000000'
self['_csrf_token'] = str(uuid.uuid4())
return self['_csrf_token']
@property
def csrf_protected(self):
return self.user is not None
@property
def timezone(self):
if '_timezone' in self:
return self['_timezone']
if '_user_id' not in self:
return 'LOCAL'
return config.DEFAULT_TIMEZONE
@timezone.setter
def timezone(self, tz):
self['_timezone'] = tz
@property
def tzinfo(self):
"""The tzinfo of the user's current timezone.
This should only be used in places where no other timezone
such as from an event or category is available.
"""
return get_display_tz(as_timezone=True)
class IndicoSessionInterface(SessionInterface):
pickle_based = True
serializer = cPickle
session_class = IndicoSession
temporary_session_lifetime = timedelta(days=7)
def __init__(self):
self.storage = GenericCache('flask-session')
def generate_sid(self):
return str(uuid.uuid4())
def get_cookie_secure(self, app):
return request.is_secure
def get_storage_lifetime(self, app, session):
# Permanent sessions are stored for exactly the same duration as the session id cookie.
# "Temporary" session are stored for a period that is not too short/long as some people
# close their browser very rarely and thus shouldn't be logged out that often.
if session.permanent:
return app.permanent_session_lifetime
else:
return self.temporary_session_lifetime
def should_refresh_session(self, app, session):
if session.new or '_expires' not in session:
return False
threshold = self.get_storage_lifetime(app, session) / 2
return session['_expires'] - datetime.now() < threshold
def should_refresh_sid(self, app, session):
if not session.new and self.get_cookie_secure(app) and not session.get('_secure'):
return True
if getattr(session, '_refresh_sid', False):
return True
return False
def open_session(self, app, request):
sid = request.cookies.get(app.session_cookie_name)
if not sid:
return self.session_class(sid=self.generate_sid(), new=True)
data = self.storage.get(sid)
if data is not None:
return self.session_class(self.serializer.loads(data), sid=sid)
return self.session_class(sid=self.generate_sid(), new=True)
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
secure = self.get_cookie_secure(app)
refresh_sid = self.should_refresh_sid(app, session)
if not session and not session.new:
# empty session, delete it from storage and cookie
self.storage.delete(session.sid)
response.delete_cookie(app.session_cookie_name, domain=domain)
return
if not refresh_sid and not session.modified and not self.should_refresh_session(app, session):
# If the session has not been modified we only store if it needs to be refreshed
return
if config.SESSION_LIFETIME > 0:
# Setting session.permanent marks the session as modified so we only set it when we
# are saving the session anyway!
session.permanent = True
storage_ttl = self.get_storage_lifetime(app, session)
cookie_lifetime = self.get_expiration_time(app, session)
session['_expires'] = datetime.now() + storage_ttl
if refresh_sid:
self.storage.delete(session.sid)
session.sid = self.generate_sid()
session['_secure'] = request.is_secure
self.storage.set(session.sid, self.serializer.dumps(dict(session)), storage_ttl)
response.set_cookie(app.session_cookie_name, session.sid, expires=cookie_lifetime, httponly=True,
secure=secure)
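# Illustrative wiring (not part of the original module): like any Flask
# session interface, the class above is installed on the application object.
#
#     app.session_interface = IndicoSessionInterface()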
|
mic4ael/indico
|
indico/web/flask/session.py
|
Python
|
mit
| 7,625
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Mark Maday
# Copyright (c) 2015 Mark Maday
#
# License: MIT
#
"""This module exports the Htmlhint plugin class."""
import logging
import sublime
from SublimeLinter.lint import LintMatch, NodeLinter
logger = logging.getLogger("SublimeLinter.plugin.htmlhint")
class Htmlhint(NodeLinter):
"""Provides an interface to htmlhint."""
cmd = ("htmlhint", "--format", "json", "--nocolor", "stdin")
defaults = {"selector": "text.html"}
def find_errors(self, output):
"""
Override find_errors, parsing output json into json_object.
Calls parse_message for each error found.
"""
output_json = sublime.decode_value(output)
logger.debug('output_json:"%s", file: "%s"', output_json, self.filename)
for file in output_json:
for message in file["messages"]:
yield self.parse_message(message)
def parse_message(self, message):
"""Parse message object into standard elements of an error and return them."""
error_message = message["message"]
line = message["line"] - 1
col = message["col"]
error_type = message["type"]
        # messages of type "info" produce no lint match and are skipped
        if error_type == "info":
            return None
logger.info(
'message -- msg:"%s", line:%s, col:%s, type: %s, message_obj:%s',
error_message,
line,
col,
error_type,
message,
)
return LintMatch(
filename=self.filename,
line=line,
col=col,
error_type=error_type,
code=message.get("rule", {}).get("id", ""),
message=error_message,
match=str(message),
)
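# Illustrative htmlhint JSON message consumed by parse_message above (not part
# of the original module; the field values are made up):
#
#     {"message": "Tag must be paired.", "line": 10, "col": 4,
#      "type": "error", "rule": {"id": "tag-pair"}}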
|
mmaday/SublimeLinter-contrib-htmlhint
|
linter.py
|
Python
|
mit
| 1,850
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class GetLocalEngineRequest(AbstractModel):
"""GetLocalEngine请求参数结构体
"""
def __init__(self):
r"""
        :param Key: Authorization credential obtained after purchasing the service, used to validate the request
:type Key: str
"""
self.Key = None
def _deserialize(self, params):
self.Key = params.get("Key")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class GetLocalEngineResponse(AbstractModel):
"""GetLocalEngine返回参数结构体
"""
def __init__(self):
r"""
        :param Status: API call status; 200 on success, 400 on failure
        :type Status: int
        :param Info: API call description; "scan success" on success, "scan error" on failure
        :type Info: str
        :param Data: Download URL of the local engine
        :type Data: str
        :param RequestId: Unique request ID returned with every request; provide it when troubleshooting this request.
:type RequestId: str
"""
self.Status = None
self.Info = None
self.Data = None
self.RequestId = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.Info = params.get("Info")
self.Data = params.get("Data")
self.RequestId = params.get("RequestId")
class GetScanResultRequest(AbstractModel):
"""GetScanResult请求参数结构体
"""
def __init__(self):
r"""
        :param Key: Authorization credential obtained after purchasing the service, used to validate the request
        :type Key: str
        :param Md5: MD5 whose scan result is requested (a single MD5 only)
:type Md5: str
"""
self.Key = None
self.Md5 = None
def _deserialize(self, params):
self.Key = params.get("Key")
self.Md5 = params.get("Md5")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class GetScanResultResponse(AbstractModel):
"""GetScanResult返回参数结构体
"""
def __init__(self):
r"""
        :param Status: API call status; 200 on success, 400 on failure
        :type Status: int
        :param Info: API call description; "scan success" on success, "scan error" on failure
        :type Info: str
        :param Data: Result details containing the md5, scan_status and virus_name fields. virus_name is the detection name: "torjan.**" for a malicious sample, "." when the sample is clean, "" when there is no detection record and the sample must be uploaded for scanning.
        scan_status is the sample state: -1 no detection record, upload the sample for scanning; 0 scan in progress; 1 scan finished, clean; 2 scan finished, malicious; 3 sample download failed.
        :type Data: str
        :param RequestId: Unique request ID returned with every request; provide it when troubleshooting this request.
:type RequestId: str
"""
self.Status = None
self.Info = None
self.Data = None
self.RequestId = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.Info = params.get("Info")
self.Data = params.get("Data")
self.RequestId = params.get("RequestId")
class ScanFileHashRequest(AbstractModel):
"""ScanFileHash请求参数结构体
"""
def __init__(self):
r"""
        :param Key: Authorization credential obtained after purchasing the service, used to validate the request
        :type Key: str
        :param Md5s: MD5 value(s) to query (single or multiple, comma-separated)
        :type Md5s: str
        :param WithCategory: Reserved field, defaults to 0
        :type WithCategory: str
        :param SensitiveLevel: Strictness control field, defaults to 10 (5 lenient, 10 standard, 15 strict)
:type SensitiveLevel: str
"""
self.Key = None
self.Md5s = None
self.WithCategory = None
self.SensitiveLevel = None
def _deserialize(self, params):
self.Key = params.get("Key")
self.Md5s = params.get("Md5s")
self.WithCategory = params.get("WithCategory")
self.SensitiveLevel = params.get("SensitiveLevel")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class ScanFileHashResponse(AbstractModel):
"""ScanFileHash返回参数结构体
"""
def __init__(self):
r"""
        :param Status: API call status; 200 on success, 400 on failure
        :type Status: int
        :param Info: API call description; "scan success" on success, "scan error" on failure
        :type Info: str
        :param Data: Cloud-scan result details with md5, return_state, virus_state and virus_name separated by commas.
        return_state is the query status: -1/0 failure, 1/2 success.
        virus_state is the file status: 0 file not found, 1 clean, 2 malicious, 3 unknown, 4 infectious, 5 low-confidence clean.
        :type Data: str
        :param RequestId: Unique request ID returned with every request; provide it when troubleshooting this request.
:type RequestId: str
"""
self.Status = None
self.Info = None
self.Data = None
self.RequestId = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.Info = params.get("Info")
self.Data = params.get("Data")
self.RequestId = params.get("RequestId")
class ScanFileRequest(AbstractModel):
"""ScanFile请求参数结构体
"""
def __init__(self):
r"""
        :param Key: Authorization credential obtained after purchasing the service, used to validate the request
        :type Key: str
        :param Sample: Download URL of the file
        :type Sample: str
        :param Md5: MD5 of the file
:type Md5: str
"""
self.Key = None
self.Sample = None
self.Md5 = None
def _deserialize(self, params):
self.Key = params.get("Key")
self.Sample = params.get("Sample")
self.Md5 = params.get("Md5")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class ScanFileResponse(AbstractModel):
"""ScanFile返回参数结构体
"""
def __init__(self):
r"""
        :param Status: API call status; 200 on success, 400 on failure
        :type Status: int
        :param Info: API call description; "success" on success, "invalid request" on failure
        :type Info: str
        :param Data: Returns "success" when the asynchronous scan task is submitted successfully
        :type Data: str
        :param RequestId: Unique request ID returned with every request; provide it when troubleshooting this request.
:type RequestId: str
"""
self.Status = None
self.Info = None
self.Data = None
self.RequestId = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.Info = params.get("Info")
self.Data = params.get("Data")
self.RequestId = params.get("RequestId")
|
tzpBingo/github-trending
|
codespace/python/tencentcloud/tav/v20190118/models.py
|
Python
|
mit
| 8,548
|
import os
from pyven.utils.utils import str_to_file, file_to_str
class Style(object):
DIR = os.path.join(os.environ.get('PVN_HOME'), 'report', 'css')
COUNT = 0
SINGLETON = None
def __init__(self, name='default'):
Style.COUNT += 1
self.name = name
self.line = {'div_style' : 'line',\
'part_style' : 'linePart',\
'error' : 'error',\
'warning' : 'warning'}
self.lines = {'div_style' : 'lines'}
self.status = {'div_style' : 'status',\
'span_style' : 'status',\
'success' : 'success',\
'failure' : 'failure',\
'unknown' : 'unknown'}
self.title = {'title_style' : 'title'}
self.property = {'p_style' : 'property'}
self.properties = {'div_style' : 'properties'}
self.listing = {'div_style' : 'listing'}
self.summary = {'div_style' : 'summary'}
self.platform = {'div_style' : 'platform'}
self.step = {'div_style' : 'step'}
self.reportable = {'div_style' : 'reportable'}
self.line_separator = 'line-separator'
@staticmethod
def get():
if Style.COUNT == 0 or Style.SINGLETON is None:
Style.SINGLETON = Style()
return Style.SINGLETON
def inline_inserter(function):
def _intern(self):
str = "<!--/* <![CDATA[ */"
try:
str += function(self)
finally:
str += "/* ]]> */-->"
return str
return _intern
@inline_inserter
def write(self):
if not os.path.isdir(Style.DIR):
os.makedirs(Style.DIR)
style_file = os.path.join(Style.DIR, self.name + '.css')
if not os.path.isfile(style_file):
self.name = 'default'
self.generate_default()
return self.default()
else:
return file_to_str(style_file)
def generate_default(self):
if not os.path.isdir(Style.DIR):
os.makedirs(Style.DIR)
str_to_file(self.default(), os.path.join(Style.DIR, 'default.css'))
def default(self):
css = 'h1' + """
{
font-size : 32px;
color : #4d4d4d;
font-weight : bold;
font-family: Arial;
}
"""
        css += '.' + self.line_separator + """
{
height : 1px;
background : #FFFFFF;
border-bottom : 1px solid #FFFFFF;
}
"""
css += 'h2' + """
{
font-size : 20px;
color : #0047b3;
font-weight : bold;
font-family: Arial;
}
"""
css += 'a' + """
{
font-size : 16px;
font-family: Arial;
}
"""
css += '.' + self.listing['div_style'] + """
{
margin : 3px 25px;
padding-left : 25px;
padding-bottom : 5px;
padding-top : 5px;
border : 1px solid #d9d9d9;
}
"""
css += '.' + self.properties['div_style'] + """
{
margin-bottom : 15px;
padding-left : 10px;
}
"""
css += '.' + self.property['p_style'] + """
{
margin : 2px;
font-size : 16px;
color : #66a3ff;
font-family: Arial;
}
"""
css += '.' + self.status['success'] + """
{
color : #00b33c;
}
"""
css += '.' + self.status['failure'] + """
{
color : #990000;
}
"""
css += '.' + self.status['unknown'] + """
{
color : #666666;
}
"""
css += '.' + self.status['span_style'] + """
{
font-size : 16px;
font-family: Arial;
font-weight : bold;
}
"""
css += '.' + self.line['error'] + """
{
color : #990000;
border-color : #ffcccc;
}
"""
css += '.' + self.line['warning'] + """
{
color : #cc4400;
border-color : #ffc299;
}
"""
css += '.' + self.line['part_style'] + """
{
font-size : 16px;
font-family: Arial;
}
"""
css += '.' + self.line['div_style'] + """
{
margin-bottom : 2px;
margin-left : 20px;
margin-right : 20px;
padding : 4px;
border-width : 1px;
border-style : dotted;
}
"""
return css
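# Illustrative usage (not part of the original module; requires the PVN_HOME
# environment variable to point at a writable directory):
#
#     style = Style.get()          # lazily-created singleton
#     inline_css = style.write()   # default CSS wrapped in CDATA markers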
|
mgaborit/pyven
|
source/pyven/reporting/style.py
|
Python
|
mit
| 5,043
|
# -*- coding: iso-8859-1 -*-
try:
import Xlib.display
import Xlib.X
import Xlib.XK
import Xlib.error
import Xlib.ext.xtest
haveXlib = True
except ImportError:
haveXlib = False
try:
    # TODO - refactor this to avoid namespace pollution
from ctypes import *
import win32con
haveWindows = True
except ImportError:
haveWindows = False
class Action():
def __init__(self,xLibKeyCode=-1,win32KeyCode=-1,shiftDown=False,controlDown=False,altDown=False,extend=False,mouseButton=False,mouseMovement=False):
self.xLibKeyCode = xLibKeyCode
self.win32KeyCode = win32KeyCode
self.shiftDown = shiftDown
self.controlDown = controlDown
self.altDown = altDown
self.extend = extend
self.mouseButton = mouseButton
self.mouseMovement = mouseMovement
self.isDown = False
actions = {}
if haveWindows:
user32 = windll.user32
PUL = POINTER(c_ulong)
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_SCANCODE = 0x0008
MOUSEEVENTF_LEFTDOWN = 0x0002
MOUSEEVENTF_LEFTUP = 0x0004
MOUSEEVENTF_RIGHTDOWN = 0x0008
MOUSEEVENTF_RIGHTUP = 0x0010
MOUSEEVENTF_MIDDLEDOWN = 0x0020
MOUSEEVENTF_MIDDLEUP = 0x0040
MOUSEEVENTF_RELATIVE = 0x0000
MOUSEEVENTF_MOVE = 0x0001
MOUSEEVENTF_ABSOLUTE = 0x8000
MOUSEEVENTF_WHEEL = 0x0080
MOUSEEVENTF_XDOWN = 0x0100
MOUSEEVENTF_XUP = 0x0200
INPUT_MOUSE = 0x0000
INPUT_KEYBOARD = 0x0001
INPUT_HARDWARE = 0x0002
class KeyBdInput(Structure):
_fields_ = [("wVk", c_ushort),
("wScan", c_ushort),
("dwFlags", c_ulong),
("time", c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(Structure):
_fields_ = [("uMsg", c_ulong),
("wParamL", c_short),
("wParamH", c_ushort)]
class MouseInput(Structure):
_fields_ = [("dx", c_long),
("dy", c_long),
("mouseData", c_ulong),
("dwFlags", c_ulong),
("time",c_ulong),
("dwExtraInfo", PUL)]
class Input_I(Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(Structure):
_fields_ = [("type", c_ulong),
("ii", Input_I)]
class POINT(Structure):
_fields_ = [("x", c_ulong),
("y", c_ulong)]
nums = "0123456789"
chars = "abcdefghijklmnopqrstuvwxyz"
for i in range(len(nums)):
actions[nums[i]] = Action(win32KeyCode = 0x30 + i)
for i in range(len(chars)):
actions[chars[i]] = Action(win32KeyCode = 0x41 + i )
actions["Arrow Left"] = Action(win32KeyCode = 0x25,extend=True )
actions["Arrow Right"] = Action(win32KeyCode = 0x27,extend=True )
actions["Arrow Up"] = Action(win32KeyCode = 0x26,extend=True )
actions["Arrow Down"] = Action(win32KeyCode = 0x28,extend=True )
actions["F1"] = Action(win32KeyCode = 0x70,extend=True )
actions["F2"] = Action(win32KeyCode = 0x71,extend=True )
actions["F3"] = Action(win32KeyCode = 0x72,extend=True )
actions["F4"] = Action(win32KeyCode = 0x73,extend=True )
actions["F5"] = Action(win32KeyCode = 0x74,extend=True )
actions["F6"] = Action(win32KeyCode = 0x75,extend=True )
actions["F7"] = Action(win32KeyCode = 0x76,extend=True )
actions["F8"] = Action(win32KeyCode = 0x77,extend=True )
actions["F9"] = Action(win32KeyCode = 0x78,extend=True )
actions["F10"] = Action(win32KeyCode = 0x79,extend=True )
actions["F11"] = Action(win32KeyCode = 0x7a,extend=True )
actions["F12"] = Action(win32KeyCode = 0x7b,extend=True )
actions["Enter"] = Action(win32KeyCode = 0x0d,extend=True )
actions["Space"] = Action(win32KeyCode = 0x20,extend=True )
actions["Backspace"] = Action(win32KeyCode = 0x08,extend=True )
actions["Page Up"] = Action(win32KeyCode = 0x21,extend=True )
actions["Page Down"] = Action(win32KeyCode = 0x22,extend=True )
actions["Home"] = Action(win32KeyCode = 0x24,extend=True )
actions["End"] = Action(win32KeyCode = 0x23,extend=True )
actions["Insert"] = Action(win32KeyCode = 0x2d,extend=True )
actions["Delete"] = Action(win32KeyCode = 0x2e,extend=True )
actions["Mouse Button Left"] = Action(win32KeyCode = 0x1,mouseButton=True )
actions["Mouse Button Right"] = Action(win32KeyCode = 0x2,mouseButton=True )
actions["Mouse Button Middle"] = Action(win32KeyCode = 0x3,mouseButton=True )
actions["Mouse Move Slow Left"] = Action(win32KeyCode = (-5,0),mouseMovement=True)
actions["Mouse Move Slow Right"] = Action(win32KeyCode = (5,0),mouseMovement=True)
actions["Mouse Move Slow Up"] = Action(win32KeyCode = (0,-5),mouseMovement=True)
actions["Mouse Move Slow Down"] = Action(win32KeyCode = (0,5),mouseMovement=True)
actions["Mouse Move Fast Left"] = Action(win32KeyCode = (-50,0),mouseMovement=True)
actions["Mouse Move Fast Right"] = Action(win32KeyCode = (50,0),mouseMovement=True)
actions["Mouse Move Fast Up"] = Action(win32KeyCode = (0,-50),mouseMovement=True)
actions["Mouse Move Fast Down"] = Action(win32KeyCode = (0,50),mouseMovement=True)
if haveXlib:
display = Xlib.display.Display()
for each in "1234567890abcdefghijklmnopqrstuvwxyz":
actions[each] = Action(xLibKeyCode= display.keysym_to_keycode(ord(each)))
#actions[each.upper()] = Action(xLibKeyCode= display.keysym_to_keycode(each),shiftDown=True)
#actions["Ctrl "+each] = Action(xLibKeyCode= display.keysym_to_keycode(each),controlDown=True)
#actions["Ctrl "+each.upper()] = Action(xLibKeyCode= display.keysym_to_keycode(each),shiftDown=True,controlDown=True)
#actions["Alt "+each] = Action(xLibKeyCode= display.keysym_to_keycode(each),altDown=True)
#actions["Alt "+each.upper()] = Action(xLibKeyCode= display.keysym_to_keycode(each),shiftDown=True,altDown=True)
#actions["Ctrl Alt "+each] = Action(xLibKeyCode= display.keysym_to_keycode(each),controlDown=True,altDown=True)
#actions["Ctrl Alt "+each.upper()] = Action(xLibKeyCode= display.keysym_to_keycode(each),shiftDown=True,controlDown=True,altDown=True)
actions["Arrow Left"] = Action(xLibKeyCode=113)
actions["Arrow Right"] = Action(xLibKeyCode=114)
actions["Arrow Up"] = Action(xLibKeyCode=111)
actions["Arrow Down"] = Action(xLibKeyCode=116)
actions["F1"] = Action(xLibKeyCode=67)
actions["F2"] = Action(xLibKeyCode=68)
actions["F3"] = Action(xLibKeyCode=69)
actions["F4"] = Action(xLibKeyCode=70)
actions["F5"] = Action(xLibKeyCode=71)
actions["F6"] = Action(xLibKeyCode=72)
actions["F7"] = Action(xLibKeyCode=73)
actions["F8"] = Action(xLibKeyCode=74)
actions["F9"] = Action(xLibKeyCode=75)
actions["F10"] = Action(xLibKeyCode=76)
actions["F11"] = Action(xLibKeyCode=95)
actions["F12"] = Action(xLibKeyCode=96)
actions["Enter"] = Action(xLibKeyCode=36)
actions["Space"] = Action(xLibKeyCode=65)
actions["Backspace"] = Action(xLibKeyCode=22)
actions["Page Up"] = Action(xLibKeyCode=112)
actions["Page Down"] = Action(xLibKeyCode=117)
actions["Home"] = Action(xLibKeyCode=110)
actions["End"] = Action(xLibKeyCode=115)
actions["Insert"] = Action(xLibKeyCode=118)
actions["Delete"] = Action(xLibKeyCode=119)
actions["Mouse Button Left"] = Action(xLibKeyCode = 1,mouseButton=True )
actions["Mouse Button Right"] = Action(xLibKeyCode = 3,mouseButton=True )
actions["Mouse Button Middle"] = Action(xLibKeyCode = 2,mouseButton=True )
actions["Mouse Move Slow Left"] = Action(xLibKeyCode = (-5,0),mouseMovement=True)
actions["Mouse Move Slow Right"] = Action(xLibKeyCode = (5,0),mouseMovement=True)
actions["Mouse Move Slow Up"] = Action(xLibKeyCode = (0,-5),mouseMovement=True)
actions["Mouse Move Slow Down"] = Action(xLibKeyCode = (0,5),mouseMovement=True)
actions["Mouse Move Fast Left"] = Action(xLibKeyCode = (-50,0),mouseMovement=True)
actions["Mouse Move Fast Right"] = Action(xLibKeyCode = (50,0),mouseMovement=True)
actions["Mouse Move Fast Up"] = Action(xLibKeyCode = (0,-50),mouseMovement=True)
actions["Mouse Move Fast Down"] = Action(xLibKeyCode = (0,50),mouseMovement=True)
class XLibInputFaker():
def __init__(self):
self.actions = actions
InputFaker.__init__(self)
self.display = Xlib.display.Display()
def keyHold(self,key):
if not(self.actions[key].isDown):
if self.actions[key].mouseButton:
self.mouseButtonHold(self.actions[key].xLibKeyCode)
self.actions[key].isDown = True
elif self.actions[key].mouseMovement:
self.mouseMove(self.actions[key].xLibKeyCode[0],self.actions[key].xLibKeyCode[1])
else:
if self.actions[key].shiftDown:
Xlib.ext.xtest.fake_input(self.display,Xlib.X.KeyPress, 50)
if self.actions[key].controlDown:
Xlib.ext.xtest.fake_input(self.display,Xlib.X.KeyPress, 37)
if self.actions[key].altDown:
Xlib.ext.xtest.fake_input(self.display,Xlib.X.KeyPress, 64)
Xlib.ext.xtest.fake_input(self.display,Xlib.X.KeyPress, self.actions[key].xLibKeyCode)
self.actions[key].isDown = True
def keyRelease(self,key):
if (self.actions[key].isDown):
if self.actions[key].mouseButton:
self.mouseButtonRelease(self.actions[key].xLibKeyCode)
self.actions[key].isDown = False
elif not(self.actions[key].mouseMovement):
if self.actions[key].shiftDown:
Xlib.ext.xtest.fake_input(self.display,Xlib.X.KeyRelease, 50)
if self.actions[key].controlDown:
Xlib.ext.xtest.fake_input(self.display,Xlib.X.KeyRelease, 37)
if self.actions[key].altDown:
Xlib.ext.xtest.fake_input(self.display,Xlib.X.KeyRelease, 64)
Xlib.ext.xtest.fake_input(self.display,Xlib.X.KeyRelease, self.actions[key].xLibKeyCode)
self.actions[key].isDown = False
def keyPress(self,key):
self.keyHold(key)
self.keyRelease(key)
def mouseMove(self,x,y):
Xlib.ext.xtest.fake_input(self.display,Xlib.X.MotionNotify,True,x=x,y=y)
def mouseButtonHold(self,buttonNumber):
Xlib.ext.xtest.fake_input(self.display,Xlib.X.ButtonPress, buttonNumber)
def mouseButtonRelease(self,buttonNumber):
Xlib.ext.xtest.fake_input(self.display,Xlib.X.ButtonRelease, buttonNumber)
def mouseButtonPress(self,buttonNumber):
self.mouseButtonHold(buttonNumber)
self.mouseButtonRelease(buttonNumber)
def flush(self):
self.display.sync()
class WindowsInputFaker():
def __init__(self):
self.actions = actions
InputFaker.__init__(self)
self.inputItem = {}
self.inputItem["mouse"] = []
self.inputItem["keyboard"] = []
def keyHold(self,key):
if not(self.actions[key].isDown):
if self.actions[key].mouseButton:
self.mouseButtonHold(self.actions[key].win32KeyCode)
self.actions[key].isDown = True
elif self.actions[key].mouseMovement:
self.mouseMove(self.actions[key].win32KeyCode[0],self.actions[key].win32KeyCode[1])
else:
if self.actions[key].shiftDown:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(win32con.VK_SHIFT, 0), KEYEVENTF_SCANCODE, 0, pointer(extra))
if self.actions[key].controlDown:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(win32con.VK_CONTROL, 0), KEYEVENTF_SCANCODE, 0, pointer(extra))
if self.actions[key].altDown:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
                    self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(win32con.VK_MENU, 0), KEYEVENTF_SCANCODE, 0, pointer(extra))  # VK_MENU is the Alt key; win32con has no VK_ALT
if self.actions[key].extend == True:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(self.actions[key].win32KeyCode, 0), KEYEVENTF_EXTENDEDKEY, 0, pointer(extra))
else:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(self.actions[key].win32KeyCode, 0), KEYEVENTF_SCANCODE, 0, pointer(extra))
self.actions[key].isDown = True
def keyRelease(self,key):
if (self.actions[key].isDown):
if self.actions[key].mouseButton:
self.mouseButtonRelease(self.actions[key].win32KeyCode)
self.actions[key].isDown = False
elif not(self.actions[key].mouseMovement):
if self.actions[key].extend == True:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(self.actions[key].win32KeyCode, 0), KEYEVENTF_EXTENDEDKEY | KEYEVENTF_KEYUP, 0, pointer(extra))
else:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(self.actions[key].win32KeyCode, 0), KEYEVENTF_SCANCODE | KEYEVENTF_KEYUP, 0, pointer(extra))
if self.actions[key].altDown:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
                    self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(win32con.VK_MENU, 0), KEYEVENTF_SCANCODE | KEYEVENTF_KEYUP, 0, pointer(extra))  # VK_MENU is the Alt key; win32con has no VK_ALT
if self.actions[key].controlDown:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(win32con.VK_CONTROL, 0), KEYEVENTF_SCANCODE | KEYEVENTF_KEYUP, 0, pointer(extra))
if self.actions[key].shiftDown:
extra = c_ulong(0)
self.inputItem["keyboard"].append(Input_I())
self.inputItem["keyboard"][-1].ki = KeyBdInput(0, user32.MapVirtualKeyA(win32con.VK_SHIFT, 0), KEYEVENTF_SCANCODE | KEYEVENTF_KEYUP, 0, pointer(extra))
self.actions[key].isDown = False
def keyPress(self,key):
self.keyHold(key)
self.keyRelease(key)
def mouseMove(self,x,y):
extra = c_ulong(0)
self.inputItem["mouse"].append(Input_I())
self.inputItem["mouse"][-1].mi = MouseInput(x, y, 0, MOUSEEVENTF_MOVE, 0, pointer(extra))
def mouseButtonHold(self,buttonNumber):
if buttonNumber == 1:
mouseButton = MOUSEEVENTF_LEFTDOWN
elif buttonNumber == 2:
mouseButton = MOUSEEVENTF_RIGHTDOWN
elif buttonNumber == 3:
mouseButton = MOUSEEVENTF_MIDDLEDOWN
extra = c_ulong(0)
self.inputItem["mouse"].append(Input_I())
self.inputItem["mouse"][-1].mi = MouseInput(0, 0, 0, mouseButton, 0, pointer(extra))
def mouseButtonRelease(self,buttonNumber):
if buttonNumber == 1:
mouseButton = MOUSEEVENTF_LEFTUP
elif buttonNumber == 2:
mouseButton = MOUSEEVENTF_RIGHTUP
elif buttonNumber == 3:
mouseButton = MOUSEEVENTF_MIDDLEUP
extra = c_ulong(0)
self.inputItem["mouse"].append(Input_I())
self.inputItem["mouse"][-1].mi = MouseInput(0, 0, 0, mouseButton, 0, pointer(extra))
def mouseButtonPress(self,buttonNumber):
self.mouseButtonHold(buttonNumber)
self.mouseButtonRelease(buttonNumber)
def flush(self):
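        # Convert every queued mouse and keyboard event into a single INPUT array,
        # submit it with one SendInput call, then clear both queues.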
numItems = len(self.inputItem["mouse"]) + len(self.inputItem["keyboard"])
if numItems > 0:
FInputs = Input * numItems
x = FInputs()
counter = 0
for mouse in self.inputItem["mouse"]:
x[counter].type = INPUT_MOUSE
x[counter].ii = mouse
counter = counter + 1
for keyboard in self.inputItem["keyboard"]:
x[counter].type = INPUT_KEYBOARD
x[counter].ii = keyboard
counter = counter + 1
user32.SendInput(numItems, pointer(x), sizeof(x[0]))
self.inputItem["mouse"] = []
self.inputItem["keyboard"] = []
def InputFaker():
if haveXlib:
faker = XLibInputFaker()
return faker
elif haveWindows:
faker = WindowsInputFaker()
return faker
else:
return -1
if __name__ == "__main__":
import time
inputFaker = InputFaker()
if inputFaker != -1:
time.sleep(1)
inputFaker.mouseMove(50,50)
inputFaker.flush()
time.sleep(1)
inputFaker.mouseMove(-50,-50)
inputFaker.flush()
time.sleep(1)
inputFaker.mouseMove(50,50)
inputFaker.flush()
inputFaker.keyPress('w')
inputFaker.keyPress('a')
inputFaker.keyPress('s')
inputFaker.keyPress('d')
inputFaker.flush()
else:
print "Error: neither XLib nor win32 could be found"
|
Buggaboo/Triathlon
|
TriathlonBeta/OutputManager.py
|
Python
|
mit
| 18,021
|
#!/usr/bin/env python3
from accessoryFunctions.accessoryFunctions import MetadataObject
from geneseekr.geneseekr import GeneSeekr
from geneseekr.blast import BLAST
import multiprocessing
from glob import glob
from time import time
import pytest
import os
test_path = os.path.abspath(os.path.dirname(__file__))
__author__ = 'adamkoziol'
@pytest.fixture()
def variables():
v = MetadataObject()
datapath = os.path.join(test_path, 'testdata')
v.sequencepath = os.path.join(datapath, 'sequences')
v.targetpath = os.path.join(datapath, 'databases', 'card_aa')
v.reportpath = os.path.join(datapath, 'reports')
v.cutoff = 70
v.evalue = '1E-05'
v.align = False
v.unique = False
v.resfinder = False
v.virulencefinder = False
v.numthreads = multiprocessing.cpu_count()
v.start = time()
return v
def variable_update():
global method
method = method_init(variables())
@pytest.fixture()
def method_init(variables, analysistype, program, align, unique):
global method
variables.analysistype = analysistype
variables.program = program
variables.align = align
variables.unique = unique
method = BLAST(variables)
return method
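# Module-level BLAST object shared by the tests below: a blastx GeneSeekr run of the
# test sequences against the CARD amino-acid database, with align and unique enabled.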
blastx_method = method_init(variables(), 'geneseekr', 'blastx', True, True)
def test_parser():
assert os.path.basename(blastx_method.targets[0]) == 'amr.tfa'
def test_combined_files():
assert os.path.isfile(blastx_method.combinedtargets)
def test_strains():
assert os.path.isfile(blastx_method.strains[0])
def test_strain():
assert os.path.basename(blastx_method.strains[0]) == '2018-SEQ-0552.fasta'
def test_makeblastdb(variables):
global geneseekr
geneseekr = GeneSeekr()
geneseekr.makeblastdb(fasta=blastx_method.combinedtargets,
program=blastx_method.program)
assert os.path.isfile(os.path.join(variables.targetpath, 'combinedtargets.psq'))
def test_variable_populate():
global targetfolders
global targetfiles
global records
targetfolders, targetfiles, records = \
geneseekr.target_folders(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype)
def test_targetfolders():
assert os.path.basename(list(targetfolders)[0]) == 'card_aa'
def test_targetfiles():
assert targetfiles[0] == blastx_method.combinedtargets
def test_records():
assert records[targetfiles[0]]['yojI']
def test_blastx(variables):
global blastx_report
blastx_method.metadata = geneseekr.run_blast(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
program=blastx_method.program,
outfmt=blastx_method.outfmt,
evalue=blastx_method.evalue,
num_threads=blastx_method.cpus)
blastx_report = os.path.join(variables.reportpath, '2018-SEQ-0552_blastx_geneseekr.tsv')
assert os.path.isfile(blastx_report)
def test_enhance_report_parsing():
geneseekr.parseable_blast_outputs(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
fieldnames=blastx_method.fieldnames,
program=blastx_method.program)
header = open(blastx_report).readline()
assert header.split('\t')[0] == 'query_id'
def test_blastx_results():
with open(blastx_report) as blast_results:
next(blast_results)
data = blast_results.readline()
results = data.split('\t')
assert int(results[2]) >= 50
def test_blast_parse():
blastx_method.metadata = geneseekr.unique_parse_blast(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
fieldnames=blastx_method.fieldnames,
cutoff=blastx_method.cutoff,
program=blastx_method.program)
for sample in blastx_method.metadata:
assert sample.geneseekr.queryranges['Contig_54_76.3617'] == [[29664, 31283], [11054, 11845]]
def test_filter():
blastx_method.metadata = geneseekr.filter_unique(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype)
for sample in blastx_method.metadata:
assert sample.geneseekr.blastlist[0]['percentidentity'] >= 70
def test_dict_create():
blastx_method.metadata = geneseekr.dict_initialise(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype)
for sample in blastx_method.metadata:
assert type(sample.geneseekr.protseq) is dict
def test_target_folders():
global targetfolders, targetfiles, records
targetfolders, targetfiles, records = \
geneseekr.target_folders(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype)
assert records[targetfiles[0]]['yojI']
def test_report_creation():
blastx_method.metadata = geneseekr.reporter(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
reportpath=blastx_method.reportpath,
align=blastx_method.align,
records=records,
program=blastx_method.program,
cutoff=blastx_method.cutoff)
def test_report_csv():
global geneseekr_csv
geneseekr_csv = os.path.join(blastx_method.reportpath, 'geneseekr_blastx.csv')
assert os.path.isfile(geneseekr_csv)
def test_detailed_report_csv():
global geneseekr_detailed_csv
geneseekr_detailed_csv = os.path.join(blastx_method.reportpath, 'geneseekr_blastx_detailed.csv')
assert os.path.isfile(geneseekr_detailed_csv)
def test_report_xls():
global geneseekr_xls
geneseekr_xls = os.path.join(blastx_method.reportpath, 'geneseekr_blastx.xlsx')
assert os.path.isfile(geneseekr_xls)
def test_parse_results():
for sample in blastx_method.metadata:
assert sample.geneseekr.blastresults['OXA_12'] == 94.19
def test_aaseq():
for sample in blastx_method.metadata:
assert sample.geneseekr.blastlist[0]['query_sequence'][:5] == 'MELLS' or \
sample.geneseekr.blastlist[0]['query_sequence'][:5] == 'MSRIL'
def test_fasta_create(variables):
global fasta_file
geneseekr.export_fasta(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
reportpath=blastx_method.reportpath,
cutoff=blastx_method.cutoff,
program=blastx_method.program)
fasta_file = os.path.join(variables.reportpath, '2018-SEQ-0552_geneseekr.fasta')
assert os.path.isfile(fasta_file)
header = open(fasta_file, 'r').readline().rstrip()
assert header == '>2018-SEQ-0552_OXA_12'
def test_combined_targets_clean():
os.remove(blastx_method.combinedtargets)
def test_makeblastdb_clean(variables):
databasefiles = glob(os.path.join(variables.targetpath, 'combinedtargets.p*'))
for dbfile in databasefiles:
os.remove(dbfile)
def test_remove_blastx_report():
os.remove(blastx_report)
def test_remove_geneseekr_csv():
os.remove(geneseekr_csv)
def test_remove_fasta_file():
os.remove(fasta_file)
def test_removed_detailed_geneseekr_csv():
os.remove(geneseekr_detailed_csv)
def test_remove_geneseekr_xls():
os.remove(geneseekr_xls)
def test_remove_report_path():
os.rmdir(blastx_method.reportpath)
|
OLC-Bioinformatics/pythonGeneSeekr
|
tests/test_blastx.py
|
Python
|
mit
| 8,082
|
from easythreads import AsyncWorker
def task_a():
print 123
def task_b(n):
for i in range(n):
print 999
pool = AsyncWorker(10)
for _ in range(100):
pool.append(task_a)
for n in range(100):
    pool.append(lambda n=n: task_b(n))  # bind n per iteration; a bare lambda would capture the final value of n
pool.shutdown()
|
rfyiamcool/easythreads
|
test.py
|
Python
|
mit
| 280
|
from django.db import models
from django.contrib.auth.models import User
class Project(models.Model):
def __str__(self):
return self.name
name = models.CharField(max_length=200, blank=False, null=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
@staticmethod
def quick_create(name=None):
if name is None:
name = "Some Project"
return Project.objects.create(name=name)
class Repo(models.Model):
def __str__(self):
return self.name
project = models.ForeignKey(Project)
name = models.CharField(max_length=200, blank=False, null=False)
class Build(models.Model):
"""
curl -X POST /buidservice/{project}/ \
-d metric.name=
"""
def __str__(self):
return "{}.{}: #{}" . format (self.repo.project.name, self.repo.name, self.build_number)
repo = models.ForeignKey(Repo)
build_number = models.IntegerField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
@staticmethod
def record(build_number, project, repo, metrics=[], booleans=[], **kwargs):
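        # Get-or-create the parent Project and Repo rows, then persist a Build entry
        # for this build number (the metrics/booleans arguments are accepted but not
        # stored by this method).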
project, created = Project.objects.get_or_create(name=project)
repo, created = Repo.objects.get_or_create(project=project, name=repo)
build = Build()
build.repo = repo
build.build_number = build_number
build.save()
return build
class Smell(models.Model):
"""
{
"location": {
"path": "api/management/commands/load_live_data.py",
"lines": {
"end": "75",
"begin": "75"
}
},
"check_name": "Complexity",
"content": {
"body": "We encountered an error attempting to analyze this line."
},
"remediation_points": 1000000,
"description": "Error: Missing parentheses in call to 'print' (\u003cunknown\u003e, line 75)",
"categories": ["Bug Risk"],
"codeclimate_issue_type": "issue",
"fingerprint": "307abcdec8074d4d3d4cd04ec1d9d2cd",
"engine_name": "radon"
}
"""
check_name = models.CharField(max_length=200, blank=False, null=False)
content = models.TextField()
remediation = models.IntegerField()
description = models.CharField(max_length=200, blank=False, null=False)
issue_type = models.CharField(max_length=200, blank=False, null=False)
engine = models.CharField(max_length=200, blank=False, null=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class SmellCategories(models.Model):
smell = models.ForeignKey(Smell)
title = models.CharField(max_length=200, blank=False, null=False)
METRIC_TYPES = [
('default', None),
('percent', 'percent'),
]
class Metric(models.Model):
build = models.ForeignKey(Build, related_name='metrics')
key = models.CharField(max_length=200, blank=False, null=False)
value = models.CharField(max_length=200, blank=False, null=False)
type = models.CharField(max_length=200, blank=False, null=True, choices=METRIC_TYPES, default=None)
class BooleanMetric(models.Model):
build = models.ForeignKey(Build, related_name='booleans')
key = models.CharField(max_length=200, blank=False, null=False)
value = models.BooleanField(default=False)
|
TangentMicroServices/BuildService
|
api/models.py
|
Python
|
mit
| 3,443
|
"""Returns Component from YAML syntax.
name: myComponent
vars:
length: 3
info:
description: just a demo
polarization: TE
...
instances:
mzi:
component: mzi_phase_shifter
settings:
delta_length: ${vars.length}
length_x: 50
pads:
component: pad_array
settings:
n: 2
port_names:
- e4
placements:
mzi:
x: 0
pads:
y: 200
x: mzi,cc
ports:
o1: mzi,o1
o2: mzi,o2
routes:
electrical:
links:
mzi,etop_e1: pads,e4_0
mzi,etop_e2: pads,e4_1
settings:
layer: [31, 0]
width: 10
radius: 10
"""
import functools
import hashlib
import io
import json
import pathlib
import warnings
from typing import IO, Any, Callable, Dict, List, Optional, Union
import numpy as np
import omegaconf
from omegaconf import OmegaConf
from gdsfactory.add_pins import add_instance_label
from gdsfactory.cell import CACHE
from gdsfactory.component import Component, ComponentReference
from gdsfactory.components import factory
from gdsfactory.cross_section import cross_section_factory
from gdsfactory.routing.factories import routing_strategy as routing_strategy_factories
from gdsfactory.types import ComponentFactoryDict, CrossSectionFactory, Route
valid_placement_keys = ["x", "y", "dx", "dy", "rotation", "mirror", "port"]
valid_top_level_keys = [
"name",
"instances",
"placements",
"connections",
"ports",
"routes",
"vars",
"info",
]
valid_anchor_point_keywords = [
"ce",
"cw",
"nc",
"ne",
"nw",
"sc",
"se",
"sw",
"center",
"cc",
]
# refer to an (x,y) Point
valid_anchor_value_keywords = [
"south",
"west",
"east",
"north",
]
# refer to a singular (x or y) value
valid_anchor_keywords = valid_anchor_point_keywords + valid_anchor_value_keywords
# full set of valid anchor keywords (either referring to points or values)
valid_route_keys = [
"links",
"settings",
"routing_strategy",
]
# Recognized keys within a YAML route definition
def _get_anchor_point_from_name(
ref: ComponentReference, anchor_name: str
) -> Optional[np.ndarray]:
if anchor_name in valid_anchor_point_keywords:
pt = getattr(ref.size_info, anchor_name)
return pt
elif anchor_name in ref.ports:
return ref.ports[anchor_name].position
else:
return None
def _get_anchor_value_from_name(
ref: ComponentReference, anchor_name: str, return_value: str
) -> Optional[float]:
if anchor_name in valid_anchor_value_keywords:
v = getattr(ref.size_info, anchor_name)
return v
else:
anchor_point = _get_anchor_point_from_name(ref, anchor_name)
if anchor_point is None:
return None
if return_value == "x":
return anchor_point[0]
elif return_value == "y":
return anchor_point[1]
else:
raise ValueError("Expected x or y as return_value.")
def place(
placements_conf: Dict[str, Dict[str, Union[int, float, str]]],
connections_by_transformed_inst: Dict[str, Dict[str, str]],
instances: Dict[str, ComponentReference],
encountered_insts: List[str],
instance_name: Optional[str] = None,
all_remaining_insts: Optional[List[str]] = None,
) -> None:
"""Place instance_name with placements_conf config.
Args:
placements_conf: Dict of instance_name to placement (x, y, rotation ...)
connections_by_transformed_inst: Dict of connection attributes.
keyed by the name of the instance which should be transformed
instances: Dict of references
encountered_insts: list of encountered_instances
instance_name: instance_name to place
all_remaining_insts: list of all the remaining instances to place
            instances are popped from this list as they are placed
"""
if not all_remaining_insts:
return
if instance_name is None:
instance_name = all_remaining_insts.pop(0)
else:
all_remaining_insts.remove(instance_name)
if instance_name in encountered_insts:
encountered_insts.append(instance_name)
loop_str = " -> ".join(encountered_insts)
raise ValueError(
f"circular reference in placement for {instance_name}! Loop: {loop_str}"
)
encountered_insts.append(instance_name)
ref = instances[instance_name]
if instance_name in placements_conf:
placement_settings = placements_conf[instance_name] or {}
if not isinstance(placement_settings, omegaconf.DictConfig):
raise ValueError(
f"Invalid placement {placement_settings} from {valid_placement_keys}"
)
for k in placement_settings.keys():
if k not in valid_placement_keys:
raise ValueError(f"Invalid placement {k} from {valid_placement_keys}")
x = placement_settings.get("x")
y = placement_settings.get("y")
dx = placement_settings.get("dx")
dy = placement_settings.get("dy")
port = placement_settings.get("port")
rotation = placement_settings.get("rotation")
mirror = placement_settings.get("mirror")
if port:
a = _get_anchor_point_from_name(ref, port)
if a is None:
raise ValueError(
f"Port {port} is neither a valid port on {ref.parent.name}"
" nor a recognized anchor keyword.\n"
"Valid ports: \n"
f"{list(ref.ports.keys())}. \n"
"Valid keywords: \n"
f"{valid_anchor_point_keywords}",
)
ref.x -= a[0]
ref.y -= a[1]
if x:
if isinstance(x, str):
if not len(x.split(",")) == 2:
raise ValueError(
f"You can define x as `x: instaceName,portName` got `x: {x!r}`"
)
instance_name_ref, port_name = x.split(",")
if instance_name_ref in all_remaining_insts:
place(
placements_conf,
connections_by_transformed_inst,
instances,
encountered_insts,
instance_name_ref,
all_remaining_insts,
)
if instance_name_ref not in instances:
raise ValueError(
f"instance {instance_name_ref} not in {list(instances.keys())}."
f" You can define x as `x: instaceName,portName`, got x: {x!r}"
)
if (
port_name not in instances[instance_name_ref].ports
and port_name not in valid_anchor_keywords
):
raise ValueError(
f"port = `{port_name}` not in {list(instances[instance_name_ref].ports.keys())}"
f" or in valid anchors {valid_anchor_keywords} for {instance_name_ref}, "
f"you can define x as `x: instaceName,portName`, got `x: {x!r}`"
)
x = _get_anchor_value_from_name(
instances[instance_name_ref], port_name, "x"
)
ref.x += x
if y:
if isinstance(y, str):
if not len(y.split(",")) == 2:
raise ValueError(
f"You can define y as `y: instaceName,portName` got `y: {y!r}`"
)
instance_name_ref, port_name = y.split(",")
if instance_name_ref in all_remaining_insts:
place(
placements_conf,
connections_by_transformed_inst,
instances,
encountered_insts,
instance_name_ref,
all_remaining_insts,
)
if instance_name_ref not in instances:
raise ValueError(
f"{instance_name_ref} not in {list(instances.keys())}, "
f"you can define y as `y: instaceName,portName`, got `y: {y!r}`"
)
if (
port_name not in instances[instance_name_ref].ports
and port_name not in valid_anchor_keywords
):
raise ValueError(
f"port = {port_name} not in {list(instances[instance_name_ref].ports.keys())} "
f"or in valid anchors {valid_anchor_keywords} for {instance_name_ref}, "
f"you can define y as `y: instaceName,portName`, got `y: {y!r}`"
)
y = _get_anchor_value_from_name(
instances[instance_name_ref], port_name, "y"
)
ref.y += y
if dx:
ref.x += dx
if dy:
ref.y += dy
if mirror:
if mirror is True and port:
ref.reflect_h(x0=_get_anchor_value_from_name(ref, port, "x"))
elif mirror is True:
if x:
ref.reflect_h(x0=x)
else:
ref.reflect_h()
elif mirror is False:
pass
elif isinstance(mirror, str):
ref.reflect_h(port_name=mirror)
elif isinstance(mirror, (int, float)):
ref.reflect_h(x0=mirror)
else:
raise ValueError(
f"{mirror!r} can only be a port name {ref.ports.keys()}, "
"x value or True/False"
)
if rotation:
if port:
ref.rotate(rotation, center=_get_anchor_point_from_name(ref, port))
else:
x, y = ref.origin
ref.rotate(rotation, center=(x, y))
# ref.rotate(rotation, center=(ref.x, ref.y))
if instance_name in connections_by_transformed_inst:
conn_info = connections_by_transformed_inst[instance_name]
instance_dst_name = conn_info["instance_dst_name"]
if instance_dst_name in all_remaining_insts:
place(
placements_conf,
connections_by_transformed_inst,
instances,
encountered_insts,
instance_dst_name,
all_remaining_insts,
)
make_connection(instances=instances, **conn_info)
# placements_conf.pop(instance_name)
def transform_connections_dict(connections_conf: Dict[str, str]) -> Dict[str, Dict]:
"""Returns Dict with source_instance_name key and connection properties."""
if not connections_conf:
return {}
attrs_by_src_inst = {}
for port_src_string, port_dst_string in connections_conf.items():
instance_src_name, port_src_name = port_src_string.split(",")
instance_dst_name, port_dst_name = port_dst_string.split(",")
attrs_by_src_inst[instance_src_name] = {
"instance_src_name": instance_src_name,
"port_src_name": port_src_name,
"instance_dst_name": instance_dst_name,
"port_dst_name": port_dst_name,
}
return attrs_by_src_inst
def make_connection(
instance_src_name: str,
port_src_name: str,
instance_dst_name: str,
port_dst_name: str,
instances: Dict[str, ComponentReference],
) -> None:
instance_src_name = instance_src_name.strip()
instance_dst_name = instance_dst_name.strip()
port_src_name = port_src_name.strip()
port_dst_name = port_dst_name.strip()
assert (
instance_src_name in instances
), f"{instance_src_name} not in {list(instances.keys())}"
assert (
instance_dst_name in instances
), f"{instance_dst_name} not in {list(instances.keys())}"
instance_src = instances[instance_src_name]
instance_dst = instances[instance_dst_name]
assert port_src_name in instance_src.ports, (
f"{port_src_name} not in {list(instance_src.ports.keys())} for"
f" {instance_src_name} "
)
assert port_dst_name in instance_dst.ports, (
f"{port_dst_name} not in {list(instance_dst.ports.keys())} for"
f" {instance_dst_name}"
)
port_dst = instance_dst.ports[port_dst_name]
instance_src.connect(port=port_src_name, destination=port_dst)
sample_mmis = """
info:
polarization: te
wavelength: 1.55
description: just a demo on adding metadata
instances:
mmi_long:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 10
mmi_short:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 5
placements:
mmi_long:
rotation: 180
x: 100
y: 100
routes:
route_name1:
links:
mmi_short,o2: mmi_long,o1
ports:
o1: mmi_short,o1
o2: mmi_long,o2
o3: mmi_long,o3
"""
def from_yaml(
yaml_str: Union[str, pathlib.Path, IO[Any]],
component_factory: ComponentFactoryDict = factory,
routing_strategy: Dict[str, Callable] = routing_strategy_factories,
cross_section_factory: Dict[str, CrossSectionFactory] = cross_section_factory,
label_instance_function: Callable = add_instance_label,
**kwargs,
) -> Component:
"""Returns a Component defined in YAML file or string.
Args:
yaml: YAML IO describing Component file or string (with newlines)
(instances, placements, routes, ports, connections, names)
component_factory: dict of functions {factory_name: factory_function}
routing_strategy: for links
label_instance_function: to label each instance
kwargs: cache, prefix, autoname ... to pass to all factories
Returns:
Component
.. code::
valid properties:
name: Optional Component name
vars: Optional variables
info: Optional component info
description: just a demo
polarization: TE
...
instances:
name:
component: (ComponentFactory)
settings (Optional)
length: 10
...
placements:
x: Optional[float, str] str can be instanceName,portName
y: Optional[float, str]
rotation: Optional[float]
mirror: Optional[bool, float] float is x mirror axis
port: Optional[str] port anchor
connections (Optional): between instances
ports (Optional): ports to expose
routes (Optional): bundles of routes
routeName:
library: optical
links:
instance1,port1: instance2,port2
.. code::
vars:
length_mmi: 5
instances:
mmi_bot:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 10
mmi_top:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: ${vars.length_mmi}
placements:
mmi_top:
port: o1
x: 0
y: 0
mmi_bot:
port: o1
x: mmi_top,o2
y: mmi_top,o2
dx: 30
dy: -30
routes:
optical:
library: optical
links:
mmi_top,o3: mmi_bot,o1
"""
yaml_str = (
io.StringIO(yaml_str)
if isinstance(yaml_str, str) and "\n" in yaml_str
else yaml_str
)
conf = OmegaConf.load(yaml_str) # nicer loader than conf = yaml.safe_load(yaml_str)
for key in conf.keys():
assert key in valid_top_level_keys, f"{key} not in {list(valid_top_level_keys)}"
instances = {}
routes = {}
name = conf.get(
"name",
f"Unnamed_{hashlib.md5(json.dumps(OmegaConf.to_container(conf)).encode()).hexdigest()[:8]}",
)
if name in CACHE:
return CACHE[name]
else:
c = Component(name)
CACHE[name] = c
placements_conf = conf.get("placements")
routes_conf = conf.get("routes")
ports_conf = conf.get("ports")
connections_conf = conf.get("connections")
instances_dict = conf["instances"]
c.info = conf.get("info", omegaconf.DictConfig({}))
for instance_name in instances_dict:
instance_conf = instances_dict[instance_name]
component_type = instance_conf["component"]
assert (
component_type in component_factory
), f"{component_type} not in {list(component_factory.keys())}"
settings = instance_conf.get("settings", {})
settings = OmegaConf.to_container(settings, resolve=True) if settings else {}
settings.update(**kwargs)
if "cross_section" in settings:
name_or_dict = settings["cross_section"]
if isinstance(name_or_dict, str):
cross_section = cross_section_factory[name_or_dict]
elif isinstance(name_or_dict, dict):
name = name_or_dict.pop("function")
cross_section = functools.partial(
cross_section_factory[name], **name_or_dict
)
else:
raise ValueError(f"invalid type for cross_section={name_or_dict!r}")
settings["cross_section"] = cross_section
ci = component_factory[component_type](**settings)
ref = c << ci
instances[instance_name] = ref
placements_conf = dict() if placements_conf is None else placements_conf
connections_by_transformed_inst = transform_connections_dict(connections_conf)
components_to_place = set(placements_conf.keys())
components_with_placement_conflicts = components_to_place.intersection(
connections_by_transformed_inst.keys()
)
for instance_name in components_with_placement_conflicts:
placement_settings = placements_conf[instance_name]
if "x" in placement_settings or "y" in placement_settings:
warnings.warn(
f"YAML defined: ({', '.join(components_with_placement_conflicts)}) "
+ "with both connection and placement. Please use one or the other.",
)
all_remaining_insts = list(
set(placements_conf.keys()).union(set(connections_by_transformed_inst.keys()))
)
while all_remaining_insts:
place(
placements_conf=placements_conf,
connections_by_transformed_inst=connections_by_transformed_inst,
instances=instances,
encountered_insts=list(),
all_remaining_insts=all_remaining_insts,
)
for instance_name in instances_dict:
label_instance_function(
component=c, instance_name=instance_name, reference=instances[instance_name]
)
if routes_conf:
for route_alias in routes_conf:
route_names = []
ports1 = []
ports2 = []
routes_dict = routes_conf[route_alias]
for key in routes_dict.keys():
if key not in valid_route_keys:
raise ValueError(
f"{route_alias!r} key={key!r} not in {valid_route_keys}"
)
settings = routes_dict.pop("settings", {})
settings = (
OmegaConf.to_container(settings, resolve=True) if settings else {}
)
if "cross_section" in settings:
name_or_dict = settings["cross_section"]
if isinstance(name_or_dict, str):
cross_section = cross_section_factory[name_or_dict]
elif isinstance(name_or_dict, dict):
name = name_or_dict.pop("function")
cross_section = functools.partial(
cross_section_factory[name], **name_or_dict
)
else:
raise ValueError(f"invalid type for cross_section={name_or_dict}")
settings["cross_section"] = cross_section
routing_strategy_name = routes_dict.pop("routing_strategy", "get_bundle")
if routing_strategy_name not in routing_strategy:
raise ValueError(
f"function {routing_strategy_name!r} not in routing_strategy {list(routing_strategy.keys())}"
)
if "links" not in routes_dict:
raise ValueError(
f"You need to define links for the {route_alias!r} route"
)
links_dict = routes_dict["links"]
for port_src_string, port_dst_string in links_dict.items():
if ":" in port_src_string:
src, src0, src1 = [s.strip() for s in port_src_string.split(":")]
dst, dst0, dst1 = [s.strip() for s in port_dst_string.split(":")]
instance_src_name, port_src_name = [
s.strip() for s in src.split(",")
]
instance_dst_name, port_dst_name = [
s.strip() for s in dst.split(",")
]
src0 = int(src0)
src1 = int(src1)
dst0 = int(dst0)
dst1 = int(dst1)
if src1 > src0:
ports1names = [
f"{port_src_name}{i}" for i in range(src0, src1 + 1, 1)
]
else:
ports1names = [
f"{port_src_name}{i}" for i in range(src0, src1 - 1, -1)
]
if dst1 > dst0:
ports2names = [
f"{port_dst_name}{i}" for i in range(dst0, dst1 + 1, 1)
]
else:
ports2names = [
f"{port_dst_name}{i}" for i in range(dst0, dst1 - 1, -1)
]
assert len(ports1names) == len(ports2names)
route_names += [
f"{instance_src_name},{i}:{instance_dst_name},{j}"
for i, j in zip(ports1names, ports2names)
]
instance_src = instances[instance_src_name]
instance_dst = instances[instance_dst_name]
for port_src_name in ports1names:
assert port_src_name in instance_src.ports, (
f"{port_src_name} not in {list(instance_src.ports.keys())}"
f"for {instance_src_name} "
)
ports1.append(instance_src.ports[port_src_name])
for port_dst_name in ports2names:
assert port_dst_name in instance_dst.ports, (
f"{port_dst_name} not in {list(instance_dst.ports.keys())}"
f"for {instance_dst_name}"
)
ports2.append(instance_dst.ports[port_dst_name])
# print(ports1)
# print(ports2)
# print(route_names)
else:
instance_src_name, port_src_name = port_src_string.split(",")
instance_dst_name, port_dst_name = port_dst_string.split(",")
instance_src_name = instance_src_name.strip()
instance_dst_name = instance_dst_name.strip()
port_src_name = port_src_name.strip()
port_dst_name = port_dst_name.strip()
assert (
instance_src_name in instances
), f"{instance_src_name} not in {list(instances.keys())}"
assert (
instance_dst_name in instances
), f"{instance_dst_name} not in {list(instances.keys())}"
instance_src = instances[instance_src_name]
instance_dst = instances[instance_dst_name]
assert port_src_name in instance_src.ports, (
f"{port_src_name} not in {list(instance_src.ports.keys())} for"
f" {instance_src_name} "
)
assert port_dst_name in instance_dst.ports, (
f"{port_dst_name} not in {list(instance_dst.ports.keys())} for"
f" {instance_dst_name}"
)
ports1.append(instance_src.ports[port_src_name])
ports2.append(instance_dst.ports[port_dst_name])
route_name = f"{port_src_string}:{port_dst_string}"
route_names.append(route_name)
routing_function = routing_strategy[routing_strategy_name]
route_or_route_list = routing_function(
ports1=ports1,
ports2=ports2,
**settings,
)
# FIXME, be more consistent
if isinstance(route_or_route_list, list):
for route_name, route_dict in zip(route_names, route_or_route_list):
c.add(route_dict.references)
routes[route_name] = route_dict.length
elif isinstance(route_or_route_list, Route):
c.add(route_or_route_list.references)
routes[route_name] = route_or_route_list.length
else:
raise ValueError(f"{route_or_route_list} needs to be a Route or a list")
if ports_conf:
assert hasattr(ports_conf, "items"), f"{ports_conf} needs to be a dict"
for port_name, instance_comma_port in ports_conf.items():
if "," in instance_comma_port:
instance_name, instance_port_name = instance_comma_port.split(",")
instance_name = instance_name.strip()
instance_port_name = instance_port_name.strip()
assert (
instance_name in instances
), f"{instance_name} not in {list(instances.keys())}"
instance = instances[instance_name]
assert instance_port_name in instance.ports, (
f"{instance_port_name} not in {list(instance.ports.keys())} for"
f" {instance_name} "
)
c.add_port(port_name, port=instance.ports[instance_port_name])
else:
c.add_port(**instance_comma_port)
c.routes = routes
c.instances = instances
return c
if __name__ == "__main__":
# for k in factory.keys():
# print(k)
# print(c.settings["info"])
# from gdsfactory.tests.test_component_from_yaml import yaml_anchor
# c = from_yaml(yaml_anchor)
c = from_yaml(sample_mmis)
c.show()
# c = test_connections_regex()
# c = from_yaml(sample_regex_connections)
# c = from_yaml(sample_regex_connections_backwards)
# c = test_docstring_sample()
# c = test_connections()
# c = from_yaml(sample_mirror_simple)
# c = test_connections_2x2()
# c = test_connections_different_factory()
# test_connections_different_link_factory()
# test_connections_waypoints()
# test_mirror()
# c = from_yaml(sample_different_link_factory)
# c = test_mirror()
# c = from_yaml(sample_waypoints)
# c = from_yaml(sample_2x2_connections)
# c = from_yaml(sample_mmis)
# c = from_yaml(sample_connections)
# assert len(c.get_dependencies()) == 3
# test_component_from_yaml()
# test_component_from_yaml_with_routing()
# print(c.ports)
# c = gf.routing.add_fiber_array(c)
|
gdsfactory/gdsfactory
|
gdsfactory/read/from_yaml.py
|
Python
|
mit
| 28,215
|
# Encoding: UTF-8
''' FinancialSession implementation for Swedish Lansforsakringar Bank.
Uses LFABs REST API, based on Björn Sållarps work, with updated client key.
Copyleft 2012 Jacob Hansson <jakewins@gmail.com>
Released under the MIT license:
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import hashlib
from datetime import datetime
from bank.domain import FinancialSession, Account, Transaction
from bank.io.http import HttpJsonClient
class LFABAccount(Account):
@classmethod
def from_map(cls, data):
return cls(data['accountName'], data['accountNumber'], data['clearingNumber'], data['dispoibleAmount'], data['ledger'])
def __init__(self, name, number, clearing_number, balance, ledger):
super(LFABAccount, self).__init__(name, number, clearing_number)
self.balance = balance
self._ledger = ledger
class LFABTransaction(Transaction):
@classmethod
def from_map(cls, data):
# Note timestamp is in milliseconds, and yes, their serialization actually contains spelling errors
return cls(data['text'], data['ammount'], datetime.fromtimestamp(data['transactiondate'] / 1000))
class LFABSession(FinancialSession):
def __init__(self, ssn, pin):
self._ssn = str(ssn)
self._pin = str(pin)
self._client = HttpJsonClient("https://mobil.lansforsakringar.se/appoutlet")
self._client.add_persistent_header("Accept", "application/json,text/plain")
self._client.add_persistent_header("Accept-Charset", "utf-8")
self._client.add_persistent_header("Accept-Encoding", "gzip,deflate")
self._client.add_persistent_header('User-Agent', "lf-android-app")
def begin(self):
challenge = self._client.get('/security/client')
session_token = self._client.post('/security/client', self._create_challenge_reply(challenge))
# Set up session token header
self._client.add_persistent_header('Ctoken', session_token['token'])
self._client.add_persistent_header('DeviceId', '1a1805054248c4529340f4ee20bb1d1ec200a0b9') # TODO: Use a randomized ID instead?
# Log in
login_response = self._client.post('/security/user', {"ssn":self._ssn,"pin":self._pin})
# Utoken
self._client.add_persistent_header('Utoken', login_response['ticket'])
def get_accounts(self):
for response in [self._client.post('/account/bytype', {"accountType":"CHECKING"}),
self._client.post('/account/bytype', {"accountType":"SAVING"})]:
for account in response['accounts']:
yield LFABAccount.from_map(account)
def get_transactions(self, account):
if not isinstance(account, LFABAccount):
raise Exception("LFAB account required to list LFAB transactions")
payload = {"requestedPage":0,"ledger":account._ledger,"accountNumber":account.number}
current_response = None
while current_response is None or current_response['hasMore'] is True:
# Get a fresh page of transactions
current_response = self._client.post("/account/transaction", payload)
# Yield transactions
for tx_json in current_response['transactions']:
# Each tx looks like:
#{ "ammount" : -248.0, "text" : "Transaction message", "transactiondate" : 1319580000000 }
yield LFABTransaction.from_map(tx_json)
# Next page
payload['requestedPage'] += 1
def end(self):
self._client = None
# Internals
def _create_challenge_reply(self, challenge):
# Add magic number
number = challenge['number'] + 5616
# Convert to lower-case hex, stripping off the first two chars that hex() adds ('0x')
number = hex(number).lower()[2:]
# Sha-hash
number_hash = hashlib.sha1(number).hexdigest()
# Return response in appropriate format
return {
"originalChallenge" : challenge['number'],
"hash" : number_hash,
"challengePair" : challenge['numberPair']
}
|
jakewins/bank-api
|
bank/impl/lfab.py
|
Python
|
mit
| 5,331
|
name = "Killstreak Announcer"
author = "codesuela"
import base
class Bf3Mod(base.Bf3Mod):
def onLoad(self):
self.playerStreaks = {}
def playerOnKill(self, killer, killed, weapon, headshot):
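        # Track consecutive kills per player: a kill resets the victim's streak,
        # and milestone streak lengths trigger the server-wide announcements below.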
killer = str(killer).strip()
killed = str(killed).strip()
if killer and killed: # disregard environmental and admin induced deaths
if killer in self.playerStreaks:
self.playerStreaks[killer] += 1
else:
self.playerStreaks[killer] = 1
self.playerStreaks[killed] = 0
if(self.playerStreaks[killer] == 4):
self.actionHandler.sayAll("%s Mega Kill" % killer)
elif(self.playerStreaks[killer] == 5):
self.actionHandler.sayAll("%s Ultra Kill" % killer)
elif(self.playerStreaks[killer] == 6):
self.actionHandler.sayAll("%s Monster Kill" % killer)
elif(self.playerStreaks[killer] == 7):
for _ in range(2):
self.actionHandler.sayAll("%s Ludicrous Kill" % killer)
elif(self.playerStreaks[killer] == 8):
for _ in range(3):
self.actionHandler.sayAll(killer + " HOLY SHIT")
elif(self.playerStreaks[killer] > 8):
self.actionHandler.sayAll(killer + " IS LEGENDARY")
def playerOnLeave(self, name, pInfo):
try:
del(self.playerStreaks[name])
except KeyError:
pass
def serverOnRoundOver(self, winningTeam):
del(self.playerStreaks)
self.playerStreaks = {}
|
smowden/b3ef-Battlefield-3-RCON-autoadmin-framework
|
mods/killstreakannouncer.py
|
Python
|
mit
| 1,612
|
import subprocess
import os
import jsonasobj
import pandas as pd
import requests
from SPARQLWrapper import SPARQLWrapper, JSON
from ShExJSG import ShExC
from pyshex import PrefixLibrary, ShExEvaluator
from sparql_slurper import SlurpyGraph
from wikidataintegrator import wdi_core, wdi_helpers
from datetime import datetime
def run_shex_manifest():
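    # For each test case in the manifest: load its ShEx schema, run its SPARQL query
    # against the configured endpoint, and validate every returned item against the
    # schema, logging conforming items as INFO and failures as ERROR.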
manifest = jsonasobj.loads(requests.get(
"https://raw.githubusercontent.com/SuLab/Genewiki-ShEx/master/pathways/reactome/manifest.json").text)
for case in manifest:
if case.data.startswith("Endpoint:"):
sparql_endpoint = case.data.replace("Endpoint: ", "")
schema = requests.get(case.schemaURL).text
shex = ShExC(schema).schema
evaluator = ShExEvaluator(schema=shex, debug=True)
sparql_query = case.queryMap.replace("SPARQL '''", "").replace("'''@START", "")
df = wdi_core.WDItemEngine.execute_sparql_query(sparql_query)
for row in df["results"]["bindings"]:
wdid=row["item"]["value"]
slurpeddata = SlurpyGraph(sparql_endpoint)
try:
results = evaluator.evaluate(rdf=slurpeddata, focus=wdid, debug=False)
for result in results:
if result.result:
print(str(result.focus) + ": INFO")
msg = wdi_helpers.format_msg(wdid, wdid, None, 'CONFORMS', '')
wdi_core.WDItemEngine.log("INFO", msg)
else:
msg = wdi_helpers.format_msg(wdid, wdid, None, '', '')
wdi_core.WDItemEngine.log("ERROR", s)
except RuntimeError:
print("Continue after 1 minute, no validation happened on"+ wdid)
continue
__metadata__ = {
'name': 'PathwayBot',
'maintainer': 'Andra',
'tags': ['pathways', 'reactome'],
}
log_dir = "./logs"
run_id = datetime.now().strftime('%Y%m%d_%H:%M')
__metadata__['run_id'] = run_id
log_name = '{}-{}.log'.format(__metadata__['name'], run_id)
if wdi_core.WDItemEngine.logger is not None:
    wdi_core.WDItemEngine.logger.handlers = []  # reset any existing log handlers
wdi_core.WDItemEngine.setup_logging(log_dir=log_dir, log_name=log_name, header="",logger_name='reactome')
run_shex_manifest()
|
SuLab/scheduled-bots
|
scheduled_bots/reactome/checkShEx.py
|
Python
|
mit
| 2,396
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="heatmap.colorbar.title.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/heatmap/colorbar/title/font/_color.py
|
Python
|
mit
| 474
|
#!/usr/bin/env python
from pymongo import MongoClient
from time import time
from ..doc import Doc
from ..util import parmapper
from progressbar import ProgressBar, Bar, Counter, ETA, FileTransferSpeed, Percentage, Timer
from nel import logging
log = logging.getLogger()
class Process(object):
def __call__(self, doc):
"""Add annotations to doc and return it"""
raise NotImplementedError
class CorpusMapper(object):
"Load, process and store documents."
def __init__(self, **kwargs):
self.corpus_id = kwargs.pop('corpus')
self.tag_filter = kwargs.pop('tag')
self.output_corpus_id = kwargs.pop('output_corpus', None) or self.corpus_id
self.processes = kwargs.pop('processes')
if self.processes != None:
self.processes = int(self.processes)
self.recycle_interval = kwargs.pop('recycle')
if self.recycle_interval != None:
self.recycle_interval = int(self.recycle_interval)
def mapper(self, doc):
raise NotImplementedError
def process_document(self, doc):
try:
doc = self.mapper(doc)
except Exception, e:
log.warn('Error processing doc (%s): %s', doc.id, str(e))
raise
return doc
def get_corpus_filter(self):
flt = {}
if self.tag_filter != None:
flt['tag'] = self.tag_filter
return flt
def count_docs(self, corpus):
return corpus.count(self.get_corpus_filter())
def iter_docs(self, corpus):
cursor = corpus.find(self.get_corpus_filter(), modifiers={'$snapshot':True})
for json_doc in cursor:
yield Doc.obj(json_doc)
def iter_processed_docs(self, corpus):
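        # Run the mapper over each document, either in-process (processes == 1) or
        # fanned out across a parmapper worker pool.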
try:
if self.processes == 1:
for doc in self.iter_docs(corpus):
yield self.mapper(doc)
else:
with parmapper(self.mapper, nprocs=self.processes,recycle_interval=self.recycle_interval) as pm:
for _, doc in pm.consume(self.iter_docs(corpus)):
yield doc
except Exception as e:
log.warn('Exception during feature extraction: %s', str(e))
def __call__(self):
start_time = time()
client = MongoClient()
corpus = client.docs[self.corpus_id]
if self.corpus_id == self.output_corpus_id:
output_corpus = corpus
else:
log.warn('Writing over output corpus: %s', self.output_corpus_id)
output_corpus = client.docs[self.output_corpus_id]
output_corpus.drop()
total_docs = self.count_docs(corpus)
widgets = [
'Processed: ', Counter(), '/', str(total_docs), ' ',
'(', FileTransferSpeed(unit='d'), ') ',
Bar(marker='#', left='[', right=']'),
' ', Percentage(), ' ',
ETA(),
' (', Timer(format='Elapsed: %s'), ')'
]
log.info(
'Running %s-process doc mapper over %i docs from %s[%s] to %s',
'single' if self.processes==1 else 'multi',
total_docs,
self.corpus_id,
self.tag_filter or 'all',
self.output_corpus_id)
with ProgressBar(total_docs, widgets, redirect_stdout=self.processes != 1) as progress:
for i, doc in enumerate(self.iter_processed_docs(corpus)):
try:
output_corpus.save(doc.json())
progress.update(i)
except:
log.warn('Error saving processed document.')
raise
log.info('Done.')
@classmethod
def add_arguments(cls, p):
p.add_argument('--corpus', metavar='CORPUS', required=True)
p.add_argument('--tag', default=None, required=False, metavar='TAG_FILTER')
p.add_argument('--output-corpus', default=None, required=False, metavar='OUTPUT_CORPUS')
p.add_argument('--processes', default=None, required=False, type=int, metavar='PROCESS_COUNT')
p.add_argument('--recycle', default=None, required=False, metavar='WORKER_RECYCLE_INTERVAL')
p.set_defaults(cls=cls)
return p
class CorpusProcessor(CorpusMapper):
def __init__(self, **kwargs):
super(CorpusProcessor, self).__init__(**kwargs)
mapper_cls = kwargs.pop('mappercls')
mapper_args = {p:kwargs[p] for p in mapper_cls.__init__.__code__.co_varnames if p in kwargs}
self.mapper = mapper_cls(**mapper_args)
|
wikilinks/nel
|
nel/process/process.py
|
Python
|
mit
| 4,542
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVR
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
# plt.hold('on')  # hold() was removed from matplotlib; repeated plot calls overlay by default
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/examples/svm/plot_svm_regression.py
|
Python
|
mit
| 1,520
|
# -*- coding: utf-8 -*-
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Thread class for HostsBrute modules
"""
import threading
import Queue
import time
import copy
import pprint
import re
from requests.exceptions import ChunkedEncodingError, ConnectionError
from classes.Registry import Registry
from classes.threads.HttpThread import HttpThread
class HostsBruteThread(HttpThread):
""" Thread class for HostsBrute modules """
queue = None
method = None
url = None
mask_symbol = None
counter = None
retested_words = None
last_action = 0
def __init__(
self, queue, protocol, host, template, mask_symbol,
false_phrase, retest_codes, delay, ignore_words_re, counter, result):
threading.Thread.__init__(self)
self.retested_words = {}
self.queue = queue
self.protocol = protocol.lower()
self.host = host
self.template = template
self.mask_symbol = mask_symbol
self.counter = counter
self.result = result
self.done = False
self.false_phrase = false_phrase
self.retest_codes = list(set(retest_codes.split(','))) if len(retest_codes) else []
self.delay = int(delay)
self.retest_delay = int(Registry().get('config')['hosts_brute']['retest_delay'])
self.http = copy.deepcopy(Registry().get('http'))
self.logger = Registry().get('logger')
self.method = 'get'
self.ignore_words_re = False if not len(ignore_words_re) else re.compile(ignore_words_re)
self.retest_limit = int(Registry().get('config')['hosts_brute']['retest_limit'])
def run(self):
""" Run thread """
req_func = getattr(self.http, self.method)
need_retest = False
word = False
while not self.done:
self.last_action = int(time.time())
if self.delay:
time.sleep(self.delay)
try:
if not need_retest:
word = self.queue.get()
self.counter.up()
if not len(word.strip()) or (self.ignore_words_re and self.ignore_words_re.findall(word)):
continue
try:
hostname = self.template.replace(self.mask_symbol, word)
except UnicodeDecodeError:
self.logger.log(
"URL build error (UnicodeDecodeError) with word '{0}', skip it".format(pprint.pformat(word)),
_print=False
)
continue
try:
resp = req_func(self.protocol + "://" + self.host, headers={'host': hostname})
except ConnectionError:
need_retest = True
self.http.change_proxy()
continue
if self.is_retest_need(word, resp):
time.sleep(self.retest_delay)
need_retest = True
continue
search_scope = ""
for header in resp.headers:
search_scope += "{0}: {1}\r\n".format(header.title(), resp.headers[header])
search_scope += '\r\n\r\n' + resp.text
positive_item = False
if resp is not None and not search_scope.count(self.false_phrase):
self.result.append(hostname)
positive_item = True
self.log_item(word, resp, positive_item)
self.check_positive_limit_stop(self.result)
need_retest = False
except Queue.Empty:
self.done = True
break
except ChunkedEncodingError as e:
self.logger.ex(e)
except BaseException as e:
try:
if str(e).count('Cannot connect to proxy'):
need_retest = True
else:
self.logger.ex(e)
except UnicodeDecodeError:
pass
except UnboundLocalError:
self.logger.ex(e)
finally:
pass
|
hack4sec/ws-cli
|
classes/threads/HostsBruteThread.py
|
Python
|
mit
| 4,433
|
# -*- coding: utf-8 -*-
#
# Nirvana documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 14 09:20:10 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
from nirvana import __version__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo']
# sphinx.ext.todo configuration
todo_include_todos=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Nirvana'
copyright = u'2011, Nick Wilson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nirvanadoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Nirvana.tex', u'Nirvana Documentation',
u'Nick Wilson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nirvana', u'Nirvana Documentation',
[u'Nick Wilson'], 1)
]
|
njwilson/nirvana-python
|
docs/conf.py
|
Python
|
mit
| 7,175
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetNetworkProfile(Model):
"""Describes a virtual machine scale set network profile.
:param health_probe: A reference to a load balancer probe used to
determine the health of an instance in the virtual machine scale set. The
reference will be in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'.
:type health_probe:
~azure.mgmt.compute.v2017_03_30.models.ApiEntityReference
:param network_interface_configurations: The list of network
configurations.
:type network_interface_configurations:
list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetNetworkConfiguration]
"""
_attribute_map = {
'health_probe': {'key': 'healthProbe', 'type': 'ApiEntityReference'},
'network_interface_configurations': {'key': 'networkInterfaceConfigurations', 'type': '[VirtualMachineScaleSetNetworkConfiguration]'},
}
def __init__(self, health_probe=None, network_interface_configurations=None):
super(VirtualMachineScaleSetNetworkProfile, self).__init__()
self.health_probe = health_probe
self.network_interface_configurations = network_interface_configurations
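# Example (sketch): one way this model might be populated. The probe ID below is a
# made-up placeholder, and ApiEntityReference is assumed to be importable from the
# same models package; see the class docstring for the expected reference form.
#     from azure.mgmt.compute.v2017_03_30.models import ApiEntityReference
#     profile = VirtualMachineScaleSetNetworkProfile(
#         health_probe=ApiEntityReference(id='/subscriptions/<sub>/.../probes/<probe>'),
#         network_interface_configurations=[],
#     )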
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_scale_set_network_profile.py
|
Python
|
mit
| 1,812
|
"""
WSGI config for MusicMash project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MusicMash.settings")
application = get_wsgi_application()
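# A typical way to serve this application in production (assuming, for example,
# that gunicorn is installed) is:
#     gunicorn MusicMash.wsgi:application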
|
castle-c/djangoTutorial
|
MusicMash/wsgi.py
|
Python
|
mit
| 396
|
import json
import boto3
import hashlib
from shmenkins import logging, aws
from botocore.exceptions import ClientError
logger = logging.get_logger()
account = None # type: str
topic_build_status_changed = None # type: aws.Topic
cb = None
def handler(event: dict, ignored: dict) -> None:
global account
global topic_build_status_changed
global cb
if topic_build_status_changed is None:
aws_ctx = aws.Aws()
account = aws_ctx.get_account()
topic_build_status_changed = aws_ctx.get_topic("build_status_changed")
cb = boto3.client("codebuild")
handle(event)
def handle(event: dict) -> None:
logger.debug("Handling %s", str(event))
# parse input
sns_record = event["Records"][0]["Sns"]
build_scheduled_event = json.loads(sns_record["Message"])
url = build_scheduled_event["url"]
interaction_id = build_scheduled_event["interaction_id"]
try:
project_name = put_project(url)
build = cb_start_build(project_name)
print(str(build))
# publish event
topic_build_status_changed.publish({"interaction_id": interaction_id, "url": url, "status": "started"})
except Exception as e:
logger.error("Failed to start build; project_name=%s, url=%s", url, e)
topic_build_status_changed.publish({"interaction_id": interaction_id, "url": url, "status": "failed"})
logger.debug("Finished handling %s", str(event))
def put_project(url: str) -> str:
project_name = hashlib.sha256(url.encode()).hexdigest() # type: str
try:
# update the cb project without checking if it exists first
# most of the time the project is already there
cb_update_project(project_name, url)
except ClientError as e:
if is_resource_not_found_error(e):
logger.debug("Project not found; project_name=%s, url=%s", project_name, url)
cb_create_project(project_name, url)
else:
raise e
return project_name
def cb_start_build(project_name: str) -> dict:
logger.debug("Starting build; project_name=%s", project_name)
build = cb.start_build(projectName=project_name) # type: dict
logger.debug("Started build; project_name=%s, build=%s", project_name, build)
return build
def is_resource_not_found_error(e: ClientError) -> bool:
try:
return e.response["Error"]["Code"] == "ResourceNotFoundException"
    except Exception:
return False
def cb_create_project(project_name: str, url: str) -> None:
logger.debug("Creating build project; project_name=%s, url=%s", project_name, url)
project = cb.create_project(
name=project_name,
description=url,
source={"type": "GITHUB", "location": url},
artifacts={"type": "NO_ARTIFACTS"},
environment={"type": "LINUX_CONTAINER", "image": "rzhilkibaev/jst", "computeType": "BUILD_GENERAL1_SMALL"},
serviceRole="arn:aws:iam::" + account + ":role/cb_general"
)
logger.debug("Created build project; project_name=%s, url=%s, project=%s", project_name, url, str(project))
def cb_update_project(project_name: str, url: str) -> None:
logger.debug("Updating build project; project_name=%s, url=%s", project_name, url)
project = cb.update_project(
name=project_name,
description=url,
source={"type": "GITHUB", "location": url},
artifacts={"type": "NO_ARTIFACTS"},
environment={"type": "LINUX_CONTAINER", "image": "rzhilkibaev/jst", "computeType": "BUILD_GENERAL1_SMALL"},
serviceRole="arn:aws:iam::" + account + ":role/cb_general"
)
logger.debug("Updated build project; project_name=%s, url=%s, project=%s", project_name, url, str(project))
|
rzhilkibaev/shmenkins
|
lambda-py/builder/src/main.py
|
Python
|
mit
| 3,720
|
from django.contrib import admin
from models import Category,CodeSnippet,Project,Publication,Conference,ProjectAdmin,Code,Article,ArticleAdmin
class CategoryAdmin(admin.ModelAdmin):
fields = ('title',)
# Register your models here.
admin.site.register(Category,CategoryAdmin)
admin.site.register(CodeSnippet)
admin.site.register(Project,ProjectAdmin)
admin.site.register(Publication)
admin.site.register(Conference)
admin.site.register(Code)
admin.site.register(Article,ArticleAdmin)
|
sariyanidi/academics-webpage
|
mysite/blog/admin.py
|
Python
|
mit
| 491
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnServerConfigurationsOperations:
"""VpnServerConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
**kwargs: Any
) -> "_models.VpnServerConfiguration":
"""Retrieves the details of a VpnServerConfiguration.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being retrieved.
:type vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnServerConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.VpnServerConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
vpn_server_configuration_parameters: "_models.VpnServerConfiguration",
**kwargs: Any
) -> "_models.VpnServerConfiguration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_server_configuration_parameters, 'VpnServerConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
vpn_server_configuration_parameters: "_models.VpnServerConfiguration",
**kwargs: Any
) -> AsyncLROPoller["_models.VpnServerConfiguration"]:
"""Creates a VpnServerConfiguration resource if it doesn't exist else updates the existing
VpnServerConfiguration.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being created or
updated.
:type vpn_server_configuration_name: str
:param vpn_server_configuration_parameters: Parameters supplied to create or update
VpnServerConfiguration.
:type vpn_server_configuration_parameters: ~azure.mgmt.network.v2019_08_01.models.VpnServerConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnServerConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_08_01.models.VpnServerConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
vpn_server_configuration_name=vpn_server_configuration_name,
vpn_server_configuration_parameters=vpn_server_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
vpn_server_configuration_parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.VpnServerConfiguration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_server_configuration_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
vpn_server_configuration_parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.VpnServerConfiguration"]:
"""Updates VpnServerConfiguration tags.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being updated.
:type vpn_server_configuration_name: str
:param vpn_server_configuration_parameters: Parameters supplied to update
VpnServerConfiguration tags.
:type vpn_server_configuration_parameters: ~azure.mgmt.network.v2019_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnServerConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_08_01.models.VpnServerConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
vpn_server_configuration_name=vpn_server_configuration_name,
vpn_server_configuration_parameters=vpn_server_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a VpnServerConfiguration.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being deleted.
:type vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
vpn_server_configuration_name=vpn_server_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ListVpnServerConfigurationsResult"]:
"""Lists all the vpnServerConfigurations in a resource group.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnServerConfigurationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.ListVpnServerConfigurationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnServerConfigurationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnServerConfigurationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ListVpnServerConfigurationsResult"]:
"""Lists all the VpnServerConfigurations in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnServerConfigurationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.ListVpnServerConfigurationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnServerConfigurationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnServerConfigurationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnServerConfigurations'} # type: ignore
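# Sketch of typical usage. As the class docstring notes, this operations class is
# reached through the service client rather than instantiated directly; the
# attribute name `vpn_server_configurations` and the resource names below are
# illustrative assumptions.
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#     async def show_config(subscription_id):
#         async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
#             config = await client.vpn_server_configurations.get("my-rg", "my-vpn-config")
#             print(config.name)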
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/aio/operations/_vpn_server_configurations_operations.py
|
Python
|
mit
| 31,822
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import fcntl
import socket
import select
response = 'HTTP/1.1 200 OK\r\nConnection: Close\r\nContent-Length: 1\r\n\r\nA'
server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('0.0.0.0', 8080))
server.listen(32)
# set nonblocking
flags = fcntl.fcntl(server.fileno(), fcntl.F_GETFL)
fcntl.fcntl(server.fileno(), fcntl.F_SETFL, flags | os.O_NONBLOCK)
clients = set([])
while True:
try:
client, clientaddr = server.accept()
clients.add(client)
except Exception as e:
pass
for client in clients.copy():
try:
request = client.recv(4096)
            client.send(response.encode())  # encode so the payload is bytes on Python 3 as well
clients.remove(client)
client.close()
except Exception as e:
pass
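# Note: `select` is imported above but never used; this loop busy-polls accept()
# and recv() and silently swallows the resulting would-block errors. A sketch of
# how the same server could instead wait for readiness with select.select()
# (names here are illustrative, not part of the original script):
#     while True:
#         readable, _, _ = select.select([server] + list(clients), [], [])
#         for sock in readable:
#             if sock is server:
#                 conn, _ = sock.accept()
#                 clients.add(conn)
#             else:
#                 sock.recv(4096)
#                 sock.send(response.encode())
#                 clients.discard(sock)
#                 sock.close()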
|
chaleaoch/jianshu_repo
|
network_program/nonblocking.py
|
Python
|
mit
| 839
|
# -*- coding: utf-8 -*-
"""
Configuration
==============
Config file that will be used to specify constants employed in the code.
A normal user shouldn't interact with this file unless there is an extreme need to change the default behaviour.
For overwriting or adding new values to the config, the following should be done
.. code-block:: python
from experimentor.config import Config
Config.new_property = 'new_value'
Config.old_property = 'override value'
In this way, whenever the config is used in the Experimentor package, the new values will be available.
Remember that you are modifying the class properties, not an instance of the Config class, and that is why it is
inherited by the rest of the code.
.. warning:: If you need to change the values of the config, you should do it before the importing of the rest of
the Experimentor happens.
"""
import PyDAQmx as nidaq
class Config(object):
# Settings specific to the national instruments card. Not all experiments will need this block.
ni_buffer = 50000 # When acquiring in continuous mode, how big is the buffer.
ni_measure_mode = nidaq.DAQmx_Val_Diff
ni_trigger_edge = nidaq.DAQmx_Val_Rising
ni_read_timeout = 0
class Laser:
number_checks_per_scan = 10 # How many times it checks if the 1D scan is done.
class NI:
""" Default values for the National Instruments ADQ cards."""
class Output:
""" Output values """
class Analog:
""" Analog channels """
timeout = 0 # It does not timeout.
class Input:
""" Input values """
class Analog:
""" Analog channels """
freq = 1/1000 # kHz, in order to do an average of 10 measurements in 10ms.
num_points = 10
trigger = "" # Internal trigger
trigger_edge = nidaq.DAQmx_Val_Rising
measure_mode = nidaq.DAQmx_Val_Diff
cont_finite = nidaq.DAQmx_Val_FiniteSamps
read_timeout = 0
|
uetke/experimentor
|
experimentor/config/config.py
|
Python
|
mit
| 2,076
|
################################################################################
# Easy python interface for posting updates to a slack channel. Usage in README.
#
# Author: Carl Cortright
# Date: 12/20/2016
#
################################################################################
import requests
class slackwebhook:
webhook_url = ""
#
# Default initializer
#
def __init__(self, webhook_url):
self.webhook_url = webhook_url
#
# Post a simple update to slack
#
def post(self, text):
payload = { "text" : text}
status = self.__post_payload(payload)
return status
#
# Posts a richly formatted post to slack
#
def rich_format_post(self, fallback=None, text=None, pretext=None, color=None, title=None, value=None, short=None):
# Create a richly formatted payload
payload = {
"attachments":[
{
"fallback": fallback,
"text": text,
"pretext": pretext,
"color": color,
"fields":[
{
"title": title,
"value": value,
"short": short
}
]
}
]
}
status = self.__post_payload(payload)
return status
#
# Post a json payload to slack webhook url
#
def __post_payload(self, payload):
response = requests.post(self.webhook_url, json=payload)
if(response.status_code != 200):
print("ERROR: the url %s returned a %d response code." % (self.webhook_url, response.status_code))
return response.status_code
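# Example usage (sketch): the webhook URL below is a placeholder and has to be
# replaced with a real incoming-webhook URL generated in Slack.
#     hook = slackwebhook("https://hooks.slack.com/services/XXX/YYY/ZZZ")
#     hook.post("Build finished")
#     hook.rich_format_post(fallback="Build finished", text="Build finished",
#                           color="#36a64f", title="Status", value="success", short=True)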
|
carlcortright/slack-webhook
|
slackwebhook/slackwebhook.py
|
Python
|
mit
| 1,776
|
'''
The rule class.
@author: anze.vavpetic@ijs.si
'''
import json
from collections import defaultdict
from hedwig.core.predicate import UnaryPredicate, BinaryPredicate
from hedwig.core.example import Example
from hedwig.core.helpers import avg, std
from hedwig.core.settings import DEFAULT_ANNOTATION_NAME
class Rule:
'''
Represents a rule, along with its description, examples and statistics.
'''
def __init__(self, kb, predicates=[], target=None):
self.predicates = predicates
self.kb = kb
self.covered_examples = kb.get_full_domain()
self.target_type = kb.target_type
self.target = target
# Allow only unary predicates
for pred in predicates:
if isinstance(pred, UnaryPredicate):
self.covered_examples &= pred.domain[pred.input_var]
self.head_var = None
if self.predicates:
self.head_var = self.predicates[0].input_var
# Dictionary of predicates that share a certain variable
self.shared_var = {self.head_var: self.predicates}
# Predicates that currently can be specialized
self.latest_var = self.head_var
# Statistics
self.score = -1
self.coverage = -1
self.mean = -1
self.sd = -1
self.distribution = {}
self.__refresh_coverage()
self.__refresh_statistics()
# Validation
self.pval = -1
def clone(self):
'''
Returns a clone of this rule. The predicates themselves are NOT cloned.
'''
new_rule = Rule(self.kb, target=self.target)
new_rule.predicates = self.predicates[:]
new_rule.covered_examples = self.covered_examples
new_rule.latest_var = self.latest_var
new_rule.head_var = self.head_var
new_rule.shared_var = {}
for var in self.shared_var:
new_rule.shared_var[var] = self.shared_var[var][:]
return new_rule
def clone_negate(self, target_pred):
'''
        Returns a copy of this rule where 'target_pred' is negated.
'''
new_rule = self.clone()
# Create the instance of the child pred
producer_pred = target_pred.producer_predicate
var_name = target_pred.input_var
members = target_pred.domain[target_pred.input_var].copy()
members.invert()
neg_pred = UnaryPredicate(target_pred.label,
members,
self.kb,
producer_pred=producer_pred,
custom_var_name=var_name,
negated=True)
new_rule._replace_predicate(target_pred, neg_pred)
return new_rule
def clone_swap_with_subclass(self, target_pred, child_pred_label):
'''
Returns a copy of this rule where
'target_pred' is swapped for 'child_pred_label'.
'''
new_rule = self.clone()
# Create the instance of the child pred
producer_pred = target_pred.producer_predicate
var_name = target_pred.input_var
child_pred = UnaryPredicate(child_pred_label,
self.kb.get_members(child_pred_label),
self.kb,
producer_pred=producer_pred,
custom_var_name=var_name)
new_rule._replace_predicate(target_pred, child_pred)
return new_rule
def clone_append(self, predicate_label, producer_pred, bin=False):
'''
Returns a copy of this rule where 'predicate_label'
is appended to the rule.
'''
if not bin:
new_rule = self.clone()
predicate = UnaryPredicate(predicate_label,
self.kb.get_members(predicate_label),
self.kb,
producer_pred=producer_pred)
new_rule.predicates.append(predicate)
new_rule.shared_var[producer_pred.output_var].append(predicate)
else:
new_rule = self.clone()
predicate = BinaryPredicate(predicate_label,
self.kb.get_members(predicate_label),
self.kb,
producer_pred=producer_pred)
new_rule.predicates.append(predicate)
# Introduce new variable
new_rule.shared_var[predicate.output_var] = [predicate]
new_rule.shared_var[predicate.input_var].append(predicate)
new_rule.latest_var = predicate.output_var
new_rule.__refresh_coverage()
new_rule.__refresh_statistics()
return new_rule
def _replace_predicate(self, target, replacement):
'''
Replaces 'target' with 'replacement' in the rule.
'''
Rule.__replace(self.predicates, target, replacement)
self.covered_examples = self.covered_examples & \
replacement.domain[replacement.input_var]
# Reference possible consumers
replacement.consumer_predicate = target.consumer_predicate
# Update the backlinks
if replacement.producer_predicate:
replacement.producer_predicate.consumer_predicate = replacement
if replacement.consumer_predicate:
replacement.consumer_predicate.producer_predicate = replacement
# Update the shared var list
shared_list = self.shared_var[target.input_var]
Rule.__replace(shared_list, target, replacement)
# Recalc the covered examples and statistics
self.__refresh_coverage()
self.__refresh_statistics()
@staticmethod
def __replace(l, target, replacement):
idx = l.index(target)
l[idx] = replacement
def __refresh_coverage(self):
'''
Recalculates the covered examples.
'''
var = self.shared_var[self.head_var]
self.covered_examples = self.__covered_examples(var)
def __covered_examples(self, predicates):
'''
Recursively calculates the covered examples for a given set of
predicates that share a variable.
'''
covered_examples = self.kb.get_full_domain()
for pred in predicates:
if isinstance(pred, BinaryPredicate):
# Predicates that share the new variable, without 'pred'
shared = self.shared_var[pred.output_var][:]
shared.remove(pred)
existential_cov_examples = self.__covered_examples(shared)
reverse_members = self.kb.get_reverse_members(pred.label)
tmp_covered = self.kb.get_empty_domain()
# Calculate all examples that have a pair for this relation
for idx in self.kb.bits_to_indices(existential_cov_examples):
                    if idx in reverse_members:
tmp_covered |= reverse_members[idx]
covered_examples &= tmp_covered
else:
covered_examples &= pred.domain[pred.input_var]
return covered_examples
def __refresh_statistics(self):
'''
Recalculates the statistics for this rule.
'''
self.coverage = self.covered_examples.count()
indices = self.kb.bits_to_indices(self.covered_examples)
ex_scores = [self.kb.get_score(idx) for idx in indices]
if self.target_type == Example.Ranked:
self.mean = avg(ex_scores)
self.sd = std(ex_scores)
self.score = self.kb.score_fun(self)
else:
self.distribution = defaultdict(int)
for score in ex_scores:
self.distribution[score] += 1
self.score = self.kb.score_fun(self)
def similarity(self, rule):
'''
Calculates the similarity between this rule and 'rule'.
'''
intersection = (self.covered_examples & rule.covered_examples).count()
union = (self.covered_examples | rule.covered_examples).count()
if union == 0:
return 1
else:
return intersection/float(union)
def size(self):
'''
        Returns the number of conjuncts.
'''
return len(self.predicates)
def examples(self, positive_only=False):
'''
Returns the covered examples.
'''
indices = self.kb.bits_to_indices(self.covered_examples)
all_examples = [self.kb.examples[idx] for idx in indices]
if positive_only:
            return [ex for ex in all_examples if ex.score == self.target]
else:
return all_examples
@property
def positives(self):
return self.distribution[self.target]
def precision(self):
if self.coverage:
return self.positives / float(self.coverage)
else:
return 0
def rule_report(self, show_uris=False, latex=False):
'''
Rule as string with some statistics.
'''
if latex:
return self._latex_report()
else:
return self._plain_report(show_uris=show_uris)
def _plain_report(self, show_uris=False, human=lambda label, rule: label):
'''
Plain text rule report
'''
s = self._plain_conjunctions(show_uris=show_uris, human=human) + ' ' + \
self._plain_statistics()
return s
def _plain_conjunctions(self, show_uris=False,
human=lambda label, rule: label):
conjuncts = []
for pred in self.predicates:
label = pred.label
if '#' in label and not show_uris:
label = pred.label.split('#')[-1]
label = human(label, self)
if isinstance(pred, UnaryPredicate):
anno_names = self.kb.annotation_name.get(pred.label, [DEFAULT_ANNOTATION_NAME])
predicate_label = '_and_'.join(anno_names)
if pred.negated:
predicate_label = '~' + predicate_label
conj = '%s(%s, %s)' % (predicate_label, pred.input_var, label)
else:
conj = '%s(%s, %s)' % (label,
pred.input_var,
pred.output_var)
conjuncts.append(conj)
s = ', '.join(conjuncts)
return s
def _plain_statistics(self):
if self.target_type == Example.ClassLabeled:
stats = (self.coverage,
self.positives,
self.precision(),
self.kb.score_fun.__name__,
self.score,
self.pval)
return '[cov=%d, pos=%d, prec=%.3f, %s=%.3f, pval=%.3f]' % stats
else:
return '[size=%d, score=%.3f]' % (self.coverage, self.score)
def _latex_report(self):
'''
Latex rule report
'''
conjuncts = []
for pred in self.predicates:
label = pred.label
if '#' in label:
label = pred.label.split('#')[-1]
if isinstance(pred, UnaryPredicate):
if pred.negated:
label = r'$\neg$' + label
conj = '%s(%s)' % (label, pred.input_var)
else:
conj = '%s(%s, %s)' % (label,
pred.input_var,
pred.output_var)
conjuncts.append(conj)
s = r' $\wedge$ '.join(conjuncts)
return s
def __str__(self):
return self.rule_report(show_uris=False)
@staticmethod
def ruleset_report(rules, show_uris=False, latex=False,
human=lambda label, rule: label):
if latex:
return Rule._latex_ruleset_report(rules)
else:
return Rule._plain_ruleset_report(rules, show_uris=show_uris,
human=human)
@staticmethod
def _latex_ruleset_report(rules):
target, var = rules[0].target, rules[0].head_var
if target:
            head = r'%s(%s) $\leftarrow$ ' % (target, var)
else:
head = ''
_tex_report = \
r'\begin{tabular}{clccccc}\hline' + '\n' \
r'\textbf{\#} & \textbf{Rule} & \textbf{TP} & \textbf{FP} & \textbf{Precision} & \textbf{Lift} & \textbf{p-value}\\\hline' + '\n'
for i, rule in enumerate(sorted(rules, key=lambda r: r.score, reverse=True)):
rule_report = rule._latex_report()
stats = (i+1,
head + rule_report,
rule.distribution[rule.target],
rule.coverage - rule.distribution[rule.target],
rule.distribution[rule.target]/float(rule.coverage),
rule.score,
rule.pval)
_tex_report += r'%d & \texttt{%s} & %d & %d & %.2f & %.2f & %.3f\\' % stats
_tex_report += '\n'
_tex_report += \
r'\hline' + '\n' \
r'\end{tabular}' + '\n'
return _tex_report
@staticmethod
def _plain_ruleset_report(rules, show_uris=False,
human=lambda label, rule: label):
target, var = rules[0].target, rules[0].head_var
if target:
head = '\'%s\'(%s) <--\n\t' % (target, var)
else:
head = ''
ruleset = []
for rule in sorted(rules, key=lambda r: r.score, reverse=True):
rule = rule._plain_report(show_uris=show_uris, human=human)
ruleset.append(rule)
return head + '\n\t'.join(ruleset)
@staticmethod
def ruleset_examples_json(rules_per_target, show_uris=False):
examples_output = []
for target_class, rules in rules_per_target:
class_examples = []
for _, rule in enumerate(sorted(rules, key=lambda r: r.score,
reverse=True)):
examples = rule.examples()
class_examples.append((rule._plain_conjunctions(),
[ex.label for ex in examples]))
examples_output.append((target_class, class_examples))
return examples_output
@staticmethod
def to_json(rules_per_target, show_uris=False):
results = {}
for target, rules in rules_per_target:
results[target] = [str(rule) for rule in rules]
return json.dumps(results, indent=2)
|
anzev/hedwig
|
hedwig/core/rule.py
|
Python
|
mit
| 14,746
|
import CardosoTech_GPIO.Platform as Platform
OUT = 0
IN = 1
HIGH = True
LOW = False
RISING = 1
FALLING = 2
BOTH = 3
PUD_OFF = 0
PUD_DOWN = 1
PUD_UP = 2
class BaseGPIO(object):
"""Base class for implementing simple digital IO for a platform.
Implementors are expected to subclass from this and provide an implementation
of the setup, output, and input functions."""
def setup(self, pin, mode, pull_up_down=PUD_OFF):
"""Set the input or output mode for a specified pin. Mode should be
either OUT or IN."""
raise NotImplementedError
def output(self, pin, value):
"""Set the specified pin the provided high/low value. Value should be
either HIGH/LOW or a boolean (true = high)."""
raise NotImplementedError
def input(self, pin):
"""Read the specified pin and return HIGH/true if the pin is pulled high,
or LOW/false if pulled low."""
raise NotImplementedError
def set_high(self, pin):
"""Set the specified pin HIGH."""
self.output(pin, HIGH)
def set_low(self, pin):
"""Set the specified pin LOW."""
self.output(pin, LOW)
def is_high(self, pin):
"""Return true if the specified pin is pulled high."""
return self.input(pin) == HIGH
def is_low(self, pin):
"""Return true if the specified pin is pulled low."""
return self.input(pin) == LOW
# Basic implementation of multiple pin methods just loops through pins and
# processes each one individually. This is not optimal, but derived classes can
# provide a more optimal implementation that deals with groups of pins
# simultaneously.
# See MCP230xx or PCF8574 classes for examples of optimized implementations.
def output_pins(self, pins):
"""Set multiple pins high or low at once. Pins should be a dict of pin
name to pin value (HIGH/True for 1, LOW/False for 0). All provided pins
will be set to the given values.
"""
# General implementation just loops through pins and writes them out
# manually. This is not optimized, but subclasses can choose to implement
# a more optimal batch output implementation. See the MCP230xx class for
# example of optimized implementation.
for pin, value in iter(pins.items()):
self.output(pin, value)
def setup_pins(self, pins):
"""Setup multiple pins as inputs or outputs at once. Pins should be a
dict of pin name to pin type (IN or OUT).
"""
# General implementation that can be optimized by derived classes.
for pin, value in iter(pins.items()):
self.setup(pin, value)
def input_pins(self, pins):
"""Read multiple pins specified in the given list and return list of pin values
GPIO.HIGH/True if the pin is pulled high, or GPIO.LOW/False if pulled low.
"""
# General implementation that can be optimized by derived classes.
return [self.input(pin) for pin in pins]
def add_event_detect(self, pin, edge):
"""Enable edge detection events for a particular GPIO channel. Pin
should be type IN. Edge must be RISING, FALLING or BOTH.
"""
raise NotImplementedError
def remove_event_detect(self, pin):
"""Remove edge detection for a particular GPIO channel. Pin should be
type IN.
"""
raise NotImplementedError
def add_event_callback(self, pin, callback):
"""Add a callback for an event already defined using add_event_detect().
Pin should be type IN.
"""
raise NotImplementedError
def event_detected(self, pin):
"""Returns True if an edge has occured on a given GPIO. You need to
enable edge detection using add_event_detect() first. Pin should be
type IN.
"""
raise NotImplementedError
def wait_for_edge(self, pin, edge):
"""Wait for an edge. Pin should be type IN. Edge must be RISING,
FALLING or BOTH."""
raise NotImplementedError
def cleanup(self, pin=None):
"""Clean up GPIO event detection for specific pin, or all pins if none
is specified.
"""
raise NotImplementedError
# helper functions useful to derived classes
def _validate_pin(self, pin):
# Raise an exception if pin is outside the range of allowed values.
if pin < 0 or pin >= self.NUM_GPIO:
raise ValueError('Invalid GPIO value, must be between 0 and {0}.'.format(self.NUM_GPIO))
def _bit2(self, src, bit, val):
bit = 1 << bit
return (src | bit) if val else (src & ~bit)
class RPiGPIOAdapter(BaseGPIO):
"""GPIO implementation for the Raspberry Pi using the RPi.GPIO library."""
def __init__(self, rpi_gpio, mode=None):
self.rpi_gpio = rpi_gpio
# Suppress warnings about GPIO in use.
rpi_gpio.setwarnings(False)
# Setup board pin mode.
if mode == rpi_gpio.BOARD or mode == rpi_gpio.BCM:
rpi_gpio.setmode(mode)
elif mode is not None:
raise ValueError('Unexpected value for mode. Must be BOARD or BCM.')
else:
# Default to BCM numbering if not told otherwise.
rpi_gpio.setmode(rpi_gpio.BCM)
# Define mapping of Adafruit GPIO library constants to RPi.GPIO constants.
self._dir_mapping = { OUT: rpi_gpio.OUT,
IN: rpi_gpio.IN }
self._pud_mapping = { PUD_OFF: rpi_gpio.PUD_OFF,
PUD_DOWN: rpi_gpio.PUD_DOWN,
PUD_UP: rpi_gpio.PUD_UP }
self._edge_mapping = { RISING: rpi_gpio.RISING,
FALLING: rpi_gpio.FALLING,
BOTH: rpi_gpio.BOTH }
def setup(self, pin, mode, pull_up_down=PUD_OFF):
"""Set the input or output mode for a specified pin. Mode should be
        either OUT or IN.
"""
self.rpi_gpio.setup(pin, self._dir_mapping[mode],
pull_up_down=self._pud_mapping[pull_up_down])
def output(self, pin, value):
"""Set the specified pin the provided high/low value. Value should be
either HIGH/LOW or a boolean (true = high).
"""
self.rpi_gpio.output(pin, value)
def input(self, pin):
"""Read the specified pin and return HIGH/true if the pin is pulled high,
or LOW/false if pulled low.
"""
return self.rpi_gpio.input(pin)
def input_pins(self, pins):
"""Read multiple pins specified in the given list and return list of pin values
GPIO.HIGH/True if the pin is pulled high, or GPIO.LOW/False if pulled low.
"""
# maybe rpi has a mass read... it would be more efficient to use it if it exists
return [self.rpi_gpio.input(pin) for pin in pins]
def add_event_detect(self, pin, edge, callback=None, bouncetime=-1):
"""Enable edge detection events for a particular GPIO channel. Pin
should be type IN. Edge must be RISING, FALLING or BOTH. Callback is a
function for the event. Bouncetime is switch bounce timeout in ms for
callback
"""
kwargs = {}
if callback:
kwargs['callback']=callback
if bouncetime > 0:
kwargs['bouncetime']=bouncetime
self.rpi_gpio.add_event_detect(pin, self._edge_mapping[edge], **kwargs)
def remove_event_detect(self, pin):
"""Remove edge detection for a particular GPIO channel. Pin should be
type IN.
"""
self.rpi_gpio.remove_event_detect(pin)
def add_event_callback(self, pin, callback):
"""Add a callback for an event already defined using add_event_detect().
Pin should be type IN.
"""
self.rpi_gpio.add_event_callback(pin, callback)
def event_detected(self, pin):
"""Returns True if an edge has occured on a given GPIO. You need to
enable edge detection using add_event_detect() first. Pin should be
type IN.
"""
return self.rpi_gpio.event_detected(pin)
def wait_for_edge(self, pin, edge):
"""Wait for an edge. Pin should be type IN. Edge must be RISING,
FALLING or BOTH.
"""
self.rpi_gpio.wait_for_edge(pin, self._edge_mapping[edge])
def cleanup(self, pin=None):
"""Clean up GPIO event detection for specific pin, or all pins if none
is specified.
"""
if pin is None:
self.rpi_gpio.cleanup()
else:
self.rpi_gpio.cleanup(pin)
class AdafruitBBIOAdapter(BaseGPIO):
"""GPIO implementation for the Beaglebone Black using the Adafruit_BBIO
library.
"""
def __init__(self, bbio_gpio):
self.bbio_gpio = bbio_gpio
# Define mapping of Adafruit GPIO library constants to RPi.GPIO constants.
self._dir_mapping = { OUT: bbio_gpio.OUT,
IN: bbio_gpio.IN }
self._pud_mapping = { PUD_OFF: bbio_gpio.PUD_OFF,
PUD_DOWN: bbio_gpio.PUD_DOWN,
PUD_UP: bbio_gpio.PUD_UP }
self._edge_mapping = { RISING: bbio_gpio.RISING,
FALLING: bbio_gpio.FALLING,
BOTH: bbio_gpio.BOTH }
def setup(self, pin, mode, pull_up_down=PUD_OFF):
"""Set the input or output mode for a specified pin. Mode should be
        either OUT or IN.
"""
self.bbio_gpio.setup(pin, self._dir_mapping[mode],
pull_up_down=self._pud_mapping[pull_up_down])
def output(self, pin, value):
"""Set the specified pin the provided high/low value. Value should be
either HIGH/LOW or a boolean (true = high).
"""
self.bbio_gpio.output(pin, value)
def input(self, pin):
"""Read the specified pin and return HIGH/true if the pin is pulled high,
or LOW/false if pulled low.
"""
return self.bbio_gpio.input(pin)
def input_pins(self, pins):
"""Read multiple pins specified in the given list and return list of pin values
GPIO.HIGH/True if the pin is pulled high, or GPIO.LOW/False if pulled low.
"""
# maybe bbb has a mass read... it would be more efficient to use it if it exists
return [self.bbio_gpio.input(pin) for pin in pins]
def add_event_detect(self, pin, edge, callback=None, bouncetime=-1):
"""Enable edge detection events for a particular GPIO channel. Pin
should be type IN. Edge must be RISING, FALLING or BOTH. Callback is a
function for the event. Bouncetime is switch bounce timeout in ms for
callback
"""
kwargs = {}
if callback:
kwargs['callback']=callback
if bouncetime > 0:
kwargs['bouncetime']=bouncetime
self.bbio_gpio.add_event_detect(pin, self._edge_mapping[edge], **kwargs)
def remove_event_detect(self, pin):
"""Remove edge detection for a particular GPIO channel. Pin should be
type IN.
"""
self.bbio_gpio.remove_event_detect(pin)
def add_event_callback(self, pin, callback, bouncetime=-1):
"""Add a callback for an event already defined using add_event_detect().
Pin should be type IN. Bouncetime is switch bounce timeout in ms for
callback
"""
kwargs = {}
if bouncetime > 0:
kwargs['bouncetime']=bouncetime
self.bbio_gpio.add_event_callback(pin, callback, **kwargs)
def event_detected(self, pin):
"""Returns True if an edge has occured on a given GPIO. You need to
enable edge detection using add_event_detect() first. Pin should be
type IN.
"""
return self.bbio_gpio.event_detected(pin)
def wait_for_edge(self, pin, edge):
"""Wait for an edge. Pin should be type IN. Edge must be RISING,
FALLING or BOTH.
"""
self.bbio_gpio.wait_for_edge(pin, self._edge_mapping[edge])
def cleanup(self, pin=None):
"""Clean up GPIO event detection for specific pin, or all pins if none
is specified.
"""
if pin is None:
self.bbio_gpio.cleanup()
else:
self.bbio_gpio.cleanup(pin)
class AdafruitMinnowAdapter(BaseGPIO):
"""GPIO implementation for the Minnowboard + MAX using the mraa library"""
def __init__(self,mraa_gpio):
self.mraa_gpio = mraa_gpio
# Define mapping of Adafruit GPIO library constants to mraa constants
self._dir_mapping = { OUT: self.mraa_gpio.DIR_OUT,
IN: self.mraa_gpio.DIR_IN }
self._pud_mapping = { PUD_OFF: self.mraa_gpio.MODE_STRONG,
PUD_UP: self.mraa_gpio.MODE_HIZ,
PUD_DOWN: self.mraa_gpio.MODE_PULLDOWN }
self._edge_mapping = { RISING: self.mraa_gpio.EDGE_RISING,
FALLING: self.mraa_gpio.EDGE_FALLING,
BOTH: self.mraa_gpio.EDGE_BOTH }
def setup(self,pin,mode):
"""Set the input or output mode for a specified pin. Mode should be
        either OUT or IN.
"""
self.mraa_gpio.Gpio.dir(self.mraa_gpio.Gpio(pin),self._dir_mapping[mode])
def output(self,pin,value):
"""Set the specified pin the provided high/low value. Value should be
either 1 (ON or HIGH), or 0 (OFF or LOW) or a boolean.
"""
self.mraa_gpio.Gpio.write(self.mraa_gpio.Gpio(pin), value)
def input(self,pin):
"""Read the specified pin and return HIGH/true if the pin is pulled high,
or LOW/false if pulled low.
"""
return self.mraa_gpio.Gpio.read(self.mraa_gpio.Gpio(pin))
def add_event_detect(self, pin, edge, callback=None, bouncetime=-1):
"""Enable edge detection events for a particular GPIO channel. Pin
should be type IN. Edge must be RISING, FALLING or BOTH. Callback is a
function for the event. Bouncetime is switch bounce timeout in ms for
callback
"""
kwargs = {}
if callback:
kwargs['callback']=callback
if bouncetime > 0:
kwargs['bouncetime']=bouncetime
self.mraa_gpio.Gpio.isr(self.mraa_gpio.Gpio(pin), self._edge_mapping[edge], **kwargs)
def remove_event_detect(self, pin):
"""Remove edge detection for a particular GPIO channel. Pin should be
type IN.
"""
self.mraa_gpio.Gpio.isrExit(self.mraa_gpio.Gpio(pin))
def wait_for_edge(self, pin, edge):
"""Wait for an edge. Pin should be type IN. Edge must be RISING,
FALLING or BOTH.
"""
        # NOTE: self.bbio_gpio is not defined on this adapter (only self.mraa_gpio is),
        # so this call will raise AttributeError; it appears to be a copy/paste slip
        # from the Beaglebone adapter above.
        self.bbio_gpio.wait_for_edge(self.mraa_gpio.Gpio(pin), self._edge_mapping[edge])
def get_platform_gpio(**keywords):
"""Attempt to return a GPIO instance for the platform which the code is being
executed on. Currently supports only the Raspberry Pi using the RPi.GPIO
library and Beaglebone Black using the Adafruit_BBIO library. Will throw an
exception if a GPIO instance can't be created for the current platform. The
returned GPIO object is an instance of BaseGPIO.
"""
plat = Platform.platform_detect()
if plat == Platform.RASPBERRY_PI:
import RPi.GPIO
return RPiGPIOAdapter(RPi.GPIO, **keywords)
elif plat == Platform.BEAGLEBONE_BLACK:
import Adafruit_BBIO.GPIO
return AdafruitBBIOAdapter(Adafruit_BBIO.GPIO, **keywords)
elif plat == Platform.MINNOWBOARD:
import mraa
return AdafruitMinnowAdapter(mraa, **keywords)
elif plat == Platform.UNKNOWN:
raise RuntimeError('Could not determine platform.')
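# Usage example (editor's sketch, not part of the original library). It assumes a
# supported board is detected; the pin number 18 is hypothetical and must match the
# wiring and numbering scheme of the target platform.
#
#   gpio = get_platform_gpio()
#   gpio.setup(18, OUT)   # configure pin 18 as an output
#   gpio.set_high(18)     # drive it high
#   gpio.cleanup(18)      # release the pin when finished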
|
CardosoTech/CardosoTech_Python_GPIO
|
CardosoTech_GPIO/GPIO.py
|
Python
|
mit
| 16,029
|
# License MIT (https://opensource.org/licenses/MIT).
from . import pos_cashbox_model
|
it-projects-llc/pos-addons
|
pos_cashbox/models/__init__.py
|
Python
|
mit
| 85
|
from distutils.core import setup
setup(
name='Django CMS NivoSlider',
version='0.1.2',
author='Ryan Bagwell',
author_email='ryan@ryanbagwell.com',
packages=['nivoslider',],
url='https://github.com/ryanbagwell/django-cms-nivo-slider',
license='LICENSE.txt',
description='A Django CMS carousel version of Nivo Slider',
long_description=open('README.txt').read(),
install_requires=[
"Django >= 1.4",
"django-filer >= 0.9.3",
],
)
|
ryanbagwell/django-cms-nivo-slider
|
setup.py
|
Python
|
mit
| 487
|
"""
Unofficial Python API for Dicio.com.br
@author Felipe Pontes
@email felipemfpontes@gmail.com
"""
import html
from urllib.request import urlopen
from dicio.utils import Utils
BASE_URL = 'http://www.dicio.com.br/{}'
CHARSET = 'utf-8'
TAG_MEANING = ('class="significado', '</p>')
TAG_ETYMOLOGY = ('class="etim', '</span>')
TAG_SYNONYMS = ('class="adicional sinonimos"', '</p>')
TAG_SYNONYMS_DELIMITER = ('<a', '</a>')
TAG_EXTRA = ('class="adicional"', '</p>')
TAG_EXTRA_SEP = 'br'
TAG_EXTRA_DELIMITER = ('<b>', '</b>')
TAG_PHRASE_DELIMITER = ('<div class="frase"', '</div>')
class Word(object):
    def __init__(self, word, meaning=None, etymology=None, synonyms=None, examples=None, extra=None):
        self.word = word.strip().lower()
        self.url = BASE_URL.format(Utils.remove_accents(self.word))
        self.meaning = meaning
        self.etymology = etymology
        # Use None defaults to avoid mutable default arguments being shared between instances.
        self.synonyms = synonyms if synonyms is not None else []
        self.extra = extra if extra is not None else {}
        self.examples = examples if examples is not None else []
def load(self, dicio=None, get=urlopen):
if dicio:
found = dicio.search(self.word)
else:
found = Dicio(get).search(self.word)
if found is not None:
self.word = found.word
self.meaning = found.meaning
self.etymology = found.etymology
self.synonyms = found.synonyms
self.extra = found.extra
self.examples = found.examples
def __repr__(self):
return 'Word({!r})'.format(self.word)
def __str__(self):
if self.meaning:
return self.word + ': ' + self.meaning
return self.word
class Dicio(object):
"""
Dicio API with meaning, synonyms and extra information.
"""
def __init__(self, get=urlopen):
self.get = get
def search(self, word):
"""
Search for word.
"""
if len(word.split()) > 1:
return None
_word = Utils.remove_accents(word).strip().lower()
try:
with self.get(BASE_URL.format(_word)) as request:
page = html.unescape(request.read().decode(CHARSET))
except:
return None
meaning, etymology = self.scrape_meaning(page)
return Word(
Utils.text_between(page, "<h1", "</h1>", force_html=True).lower(),
meaning=meaning,
etymology=etymology,
synonyms=self.scrape_synonyms(page),
examples=self.scrape_examples(page),
extra=self.scrape_extra(page),
)
def scrape_meaning(self, page):
"""
Return meaning and etymology.
"""
html = Utils.text_between(page, *TAG_MEANING, force_html=True)
etymology = Utils.text_between(html, *TAG_ETYMOLOGY, force_html=True)
etymology = Utils.remove_spaces(Utils.remove_tags(etymology))
meanings = Utils.split_html_tag(html, 'br')
meanings = [Utils.remove_spaces(Utils.remove_tags(x))
for x in meanings]
meaning = '; '.join([x for x in meanings if x != etymology])
return meaning, etymology
def scrape_synonyms(self, page):
"""
Return list of synonyms.
"""
synonyms = []
if page.find(TAG_SYNONYMS[0]) > -1:
html = Utils.text_between(page, *TAG_SYNONYMS, force_html=True)
while html.find(TAG_SYNONYMS_DELIMITER[0]) > -1:
synonym, html = self.first_synonym(html)
synonyms.append(synonym)
return synonyms
def first_synonym(self, html):
"""
        Return the first synonym found and the html without its markup.
"""
synonym = Utils.text_between(html, *TAG_SYNONYMS_DELIMITER,
force_html=True)
synonym = Utils.remove_spaces(synonym)
_html = html.replace(TAG_SYNONYMS_DELIMITER[0], "", 1)
_html = _html.replace(TAG_SYNONYMS_DELIMITER[1], "", 1)
return Word(synonym), _html
def scrape_examples(self, page):
"""
Return a list of examples.
"""
examples = []
html = page
index = html.find(TAG_PHRASE_DELIMITER[0])
while index > -1:
example_html = Utils.text_between(
html, *TAG_PHRASE_DELIMITER, force_html=True)
examples += [Utils.remove_spaces(Utils.remove_tags(example_html))]
html = html[index+len(TAG_PHRASE_DELIMITER[0]):]
index = html.find(TAG_PHRASE_DELIMITER[0])
return examples
def scrape_extra(self, page):
"""
Return a dictionary of extra information.
"""
dict_extra = {}
try:
if page.find(TAG_EXTRA[0]) > -1:
html = Utils.text_between(page, *TAG_EXTRA, force_html=True)
extra_rows = Utils.split_html_tag(Utils.remove_spaces(html),
TAG_EXTRA_SEP)
for row in extra_rows:
_row = Utils.remove_tags(row)
key, value = map(Utils.remove_spaces, _row.split(":"))
dict_extra[key] = value
except:
pass
return dict_extra
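# Usage example (editor's sketch, not from the original project). It needs network
# access to dicio.com.br, and the word below is purely illustrative.
#
#   dicio = Dicio()
#   word = dicio.search("doutrina")
#   if word is not None:
#       print(word.meaning)
#       print([synonym.word for synonym in word.synonyms])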
|
felipemfp/dicio
|
dicio/dicio.py
|
Python
|
mit
| 5,198
|
import smtplib
import email.utils
import os
import csv
import email.Message
from email import Charset
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import quopri
import jinja2
class MailSender(object):
def __init__(self, toName, toAddr, ticketID):
# Sender information
self.fromName = 'Example Example'
self.fromAddr = 'example@example.com'
# Receiver/s information
self.toName = toName
self.toAddr = []
self.toAddr.append(toAddr)
# Support for multiple targets from csv file, no comma
#with open('testlist.csv', 'rb') as mysecondfile:
#self.csvdata = csv.reader(mysecondfile, delimiter=' ', quotechar='|')
#for row in csvdata:
#receivers.append(row)
self.ticketID = ticketID
# Subject of mail
self.subject = 'Ticket ' + self.ticketID + ' successfully purchased.'
# Variables to HTML template
self.context = {
'name' : toName,
'qr_code' : ticketID
}
self.msg = MIMEMultipart('mixed')
self.inline = MIMEMultipart ('alternative')
# Local SMTP - server - Requires working one e.g Postfix
self.server = smtplib.SMTP('127.0.0.1', 25)
# Global charset to UTF-8
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
def createHeaders(self):
        # self.toAddr is a list of addresses; formataddr() expects a single address string.
        self.msg['To'] = email.utils.formataddr((self.toName, self.toAddr[0]))
self.msg['From'] = email.utils.formataddr((self.fromName, self.fromAddr))
self.msg['Subject'] = self.subject
self.msg['List-Unsubscribe'] = '<mailto:example@example.com>, <example.com>'
self.msg['List-Unsubscribe-Post'] = 'List-Unsubscribe=One-Click'
def createMessage (self):
#TXT version of mail
self.createHeaders()
with open ('data.txt', 'rb') as mytxt:
self.text = mytxt.read()
# HTML version of mail, generated from template
self.html = self.render('templates/mailtemplate.html', self.context)
# Mime - parts
part1 = MIMEText(self.text, 'plain', 'utf-8')
part2 = MIMEText(self.html, 'html', 'utf-8')
# Attach to the HTML/TXT version of the mail
self.inline.attach(part1)
self.inline.attach(part2)
# Attach to whole message
self.msg.attach(self.inline)
def render(self, tpl_path, context):
path, filename = os.path.split(tpl_path)
return jinja2.Environment(loader=jinja2.FileSystemLoader(path or './')).get_template(filename).render(context)
def sendMail(self):
self.server.set_debuglevel(True)
try:
self.server.sendmail(self.fromAddr, self.toAddr, self.msg.as_string())
finally:
self.server.quit()
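# Usage example (editor's sketch, not part of the original module). The recipient and
# ticket id are hypothetical; a local SMTP server on 127.0.0.1:25 plus the 'data.txt'
# and 'templates/mailtemplate.html' files must exist for this to work.
#
#   sender = MailSender('Jane Doe', 'jane@example.com', 'TCK-0001')
#   sender.createMessage()
#   sender.sendMail()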
|
Nicceboy/python-email-imp
|
src/mailsender.py
|
Python
|
mit
| 2,867
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import mock
from shim import HDDTempMonitor, MonitorCheckFailure
class TestHDDTempMonitor(unittest.TestCase):
def setUp(self):
self.monitor_id = "monitor_id"
self.monitor_options = {
'device': "/dev/null",
'lower_threshold': 1,
'upper_threshold': 100,
}
self.monitor = self._create_hdd_monitor()
def _create_hdd_monitor(self):
return HDDTempMonitor(self.monitor_id, **self.monitor_options)
def test_name_includes_monitors_generic_name(self):
self.assertIn("HDDTempMonitor", self.monitor.name)
def test_name_includes_device_name(self):
device = "/dev/custom"
self.monitor_options.update({"device": device})
self.monitor = self._create_hdd_monitor()
self.assertIn(device, self.monitor.name)
@mock.patch("shim.Monitor._get_raw_value")
def test_get_current_value_returns_the_temperature_if_hddtemp_returned_properly_formatted_data(self, raw_value_patch):
raw_value_patch.return_value = "/dev/sda: WDC WD10ABCD-12FOO6: 37°C"
self.assertEqual(self.monitor.get_current_value(), 37)
@mock.patch("shim.Monitor._get_raw_value")
def test_get_current_value_raises_MonitorCheckFailure_if_hddtemp_returned_malformed_data(self, raw_value_patch):
raw_value_patch.return_value = "WARNING: Drive /dev/sda doesn't seem to have a temperature sensor."
self.assertRaises(MonitorCheckFailure, self.monitor.get_current_value)
@mock.patch("shim.Monitor._get_raw_value")
def test_get_current_value_raises_MonitorCheckFailure_if_hddtemp_returned_malformed_temperature_value(self, raw_value_patch):
raw_value_patch.return_value = "/dev/sda: WDC WD10ABCD-12FOO6: foo°C"
self.assertRaises(MonitorCheckFailure, self.monitor.get_current_value)
|
maszczyn/shim
|
tests/test_hdd_temp_monitor.py
|
Python
|
mit
| 1,889
|
from django.db import models
from rest_framework import serializers
class Question(models.Model):
version = models.CharField(primary_key=True, max_length=8)
title = models.CharField(max_length=255)
text = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
class Choice(models.Model):
text = models.TextField()
question = models.ForeignKey(Question, on_delete=models.CASCADE)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice = models.ForeignKey(Choice, on_delete=models.CASCADE)
user_id = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
class ChoiceSerializer(serializers.ModelSerializer):
version = serializers.SerializerMethodField('get_version_from_question')
class Meta:
model = Choice
fields = ('id', 'text', 'version', 'created_on', 'updated_on',)
def get_version_from_question(self, obj):
return obj.question_id
class QuestionSerializer(serializers.ModelSerializer):
# TODO: create a serializer that returns list of choices for the question
class Meta:
model = Question
fields = ('title', 'text', 'version', 'created_on', 'updated_on',)
class AnswerSerializer(serializers.ModelSerializer):
choice = serializers.PrimaryKeyRelatedField(queryset=Choice.objects.all())
question = serializers.PrimaryKeyRelatedField(queryset=Question.objects.all())
class Meta:
model = Answer
fields = ('question', 'choice', 'user_id')
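# Editor's sketch for the TODO above (not part of the original code): one way to expose
# a question's choices via a nested serializer. The class name is hypothetical;
# 'choice_set' is Django's default reverse accessor for the Choice -> Question FK.
#
#   class QuestionWithChoicesSerializer(serializers.ModelSerializer):
#       choices = ChoiceSerializer(source='choice_set', many=True, read_only=True)
#
#       class Meta:
#           model = Question
#           fields = ('title', 'text', 'version', 'choices', 'created_on', 'updated_on',)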
|
holycattle/pysqueak-api
|
api/models.py
|
Python
|
mit
| 1,714
|
# Exercise 7
# Write a function char_freq() that takes in a string and counts the number of
# times each letter appeared in the string.
# For example, the string "abca" should return a dictionary in the form
# {"a": 2, "b": 1, "c": 1}.
def char_freq(s):
pass
print(char_freq("abca"))
print(char_freq("elephant"))
print(char_freq("alphabet"))
print(char_freq("abcabcabcabcabc"))
|
vinaymayar/python-game-workshop
|
lesson7/char_freq.py
|
Python
|
mit
| 385
|
import pkg_resources
try:
pkg_resources.get_distribution('numpy')
except pkg_resources.DistributionNotFound:
numpyPresent = False
print("Error: Numpy package not available.")
else:
numpyPresent = True
import numpy as np
try:
pkg_resources.get_distribution('pandas')
except pkg_resources.DistributionNotFound:
pandasPresent = False
print("Error: Pandas package not available.")
else:
pandasPresent = True
import pandas as pd
try:
pkg_resources.get_distribution('matplotlib')
except pkg_resources.DistributionNotFound:
matplotlibPresent = False
print("Error: Matplotlib package not available.")
else:
matplotlibPresent = True
import matplotlib.pyplot as plt
try:
pkg_resources.get_distribution('scipy')
except pkg_resources.DistributionNotFound:
scipyPresent = False
print("Error: Scipy package not available.")
else:
scipyPresent = True
from scipy.stats import norm
try:
pkg_resources.get_distribution('statsmodels')
except pkg_resources.DistributionNotFound:
statsmodelsPresent = False
print("Error: Statsmodels package not available.")
else:
statsmodelsPresent = True
import statsmodels.formula.api as smf
import statsmodels.api as sm
import re
import collections
import inspect
# Import minor epydemiology functions from other epydemiology files
# -----------------------------------------------------------------
# In order to use the phjDefineSuffixDict() function from a different .py file in the
# same package, it seems that we need to import that function explicitly. This can be
# done using the same format as in the __init__.py file e.g.:
# from .pythonFileName import functionName
# Where the pythonFileName is a file in the same package.
# For more details, see tutorial at https://www.youtube.com/watch?v=0oTh1CXRaQ0.
from .phjRROR import phjOddsRatio
from .phjRROR import phjRemoveNaNRows
from .phjCalculateProportions import phjDefineSuffixDict
from .phjCalculateProportions import phjGetYErrors
from .phjExtFuncs import getJenksBreaks
from .phjTestFunctionParameters import phjAssert
# ==============
# Main functions
# ==============
#
def phjViewLogOdds(phjDF,
phjBinaryDepVarName = None, # This is a required variable therefore setting default to None
# is not required but all parameters without default values need
# to be defined first which would change the order in which
# parameters are defined; and order is useful to ensure correct
# values are defined.
phjCaseValue = 1,
phjContIndepVarName = None,
phjMissingValue = np.nan,
phjNumberOfCategoriesInt = 5,
phjNewCategoryVarName = None,
phjCategorisationMethod = 'jenks', # Need to be able to pass a list of cut-off values here as well.
phjGroupVarName = None,
phjAlpha = 0.05,
phjPrintResults = False):
# In several functions, it's useful to have access to a dict containing column headings
# and suffixes used in a variety of situations.
phjSuffixDict = phjDefineSuffixDict()
# Create a name for the new categorical variable by replacing all spaces with underscores
# and adding the suffix to indicate that a continuous variable has been converted to a category.
if phjNewCategoryVarName is None:
phjNewCategoryVarName = phjSuffixDict['joinstr'].join([phjContIndepVarName.replace(' ','_'),
phjSuffixDict['categorisedvar']])
try:
phjAssert('phjDF',phjDF,pd.DataFrame)
phjAssert('phjBinaryDepVarName',phjBinaryDepVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
phjAssert('phjCaseValue',phjCaseValue,(str,int,float),phjAllowedOptions = list(phjDF[phjBinaryDepVarName].replace(phjMissingValue,np.nan).dropna().unique()))
phjAssert('phjContIndepVarName',phjContIndepVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
phjAssert('phjMissingValue',phjMissingValue,(str,int,float))
phjAssert('phjNumberOfCategoriesInt',phjNumberOfCategoriesInt,int,phjAllowedOptions = {'min':2,'max':len(phjDF.index)})
phjAssert('phjNewCategoryVarName',phjNewCategoryVarName,str,phjMustBeAbsentColumnList = list(phjDF.columns))
phjAssert('phjCategorisationMethod',phjCategorisationMethod,(str,collections.abc.Mapping))
if phjGroupVarName is not None:
phjAssert('phjGroupVarName',phjGroupVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
phjAssert('phjAlpha',phjAlpha,float,phjAllowedOptions = {'min':0.0001,'max':0.9999})
phjAssert('phjPrintResults',phjPrintResults,bool)
except AssertionError as e:
# Assign value to variable phjOR which will be returned at end of function.
phjOR = None
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
else:
# Retain only those columns that will be analysed (otherwise, it is feasible that
# unrelated columns that contain np.nan values will cause removal of rows in
        # unexpected ways).
phjDF = phjDF[[col for col in [phjBinaryDepVarName,phjContIndepVarName,phjGroupVarName] if col is not None]]
# Data to use - remove rows that have a missing value
phjDF = phjRemoveNaNRows(phjDF = phjDF,
phjCaseVarName = phjBinaryDepVarName,
phjRiskFactorVarName = phjContIndepVarName,
phjMissingValue = phjMissingValue)
# Deep copy phjDF to phjDF to avoid errors relating editing dataframe slice
phjDF = phjDF.copy(deep = True)
# Convert a continuous variable to a categorical variable using a variety of methods.
# If phjReturnBreaks = True then function also returns a list of the break points
# for the continuous variable.
phjDF, phjBreaks = phjCategoriseContinuousVariable(phjDF = phjDF,
phjContinuousVarName = phjContIndepVarName,
phjMissingValue = phjMissingValue,
phjNumberOfCategoriesInt = phjNumberOfCategoriesInt,
phjNewCategoryVarName = phjNewCategoryVarName,
phjCategorisationMethod = phjCategorisationMethod,
phjReturnBreaks = True,
phjPrintResults = phjPrintResults)
print('Third DF')
print(phjBreaks)
print(phjDF)
# If the breaks have been calculated (and the continuous variable categorised successfully)
# then plot the graph of logodds against mid-points
if phjBreaks is not None:
# The following DF contains an index that may be numeric.
phjOR = phjOddsRatio(phjDF = phjDF,
phjCaseVarName = phjBinaryDepVarName,
phjCaseValue = phjCaseValue,
phjRiskFactorVarName = phjNewCategoryVarName,
phjRiskFactorBaseValue = 0, # Use the minimum value as the base value (but it's not important in this context)
phjMissingValue = phjMissingValue,
phjAlpha = phjAlpha,
phjPrintResults = phjPrintResults)
print('Fourth DF')
print(phjOR)
if phjOR is not None:
phjOR[phjSuffixDict['logodds']] = np.log(phjOR[phjSuffixDict['odds']])
                # Calculate log odds using logistic regression and retrieve the standard error (SE) from the statistical model
phjSE = phjCalculateLogOddsSE(phjDF = phjDF,
phjCaseVarName = phjBinaryDepVarName,
phjCaseValue = phjCaseValue,
phjCategoricalVarName = phjNewCategoryVarName,
phjMissingValue = phjMissingValue,
phjAlpha = phjAlpha,
phjPrintResults = phjPrintResults)
# Join to phjOR dataframe
phjOR = phjOR.join(phjSE)
# Calculate lower and upper limits assuming normal distribution
phjRelCoef = norm.ppf(1 - (phjAlpha/2))
phjOR[phjSuffixDict['joinstr'].join([phjSuffixDict['cisuffix'],
phjSuffixDict['cilowlim']])] = phjOR[phjSuffixDict['logodds']] - (phjRelCoef * phjOR[phjSuffixDict['stderr']])
phjOR[phjSuffixDict['joinstr'].join([phjSuffixDict['cisuffix'],
phjSuffixDict['ciupplim']])] = phjOR[phjSuffixDict['logodds']] + (phjRelCoef * phjOR[phjSuffixDict['stderr']])
                # Calculate midpoints of categories
phjOR[phjSuffixDict['catmidpoints']] = [((phjBreaks[i] + phjBreaks[i+1]) / 2) for i in range(len(phjBreaks) - 1)]
# Plot log odds against midpoints of categories
phjYErrors = phjGetYErrors(phjDF = phjOR,
phjCategoriesToPlotList = phjOR.index.tolist(),
phjParameterValue = 'logodds',
phjGroupVarName = None,
phjGroupLevelsList = None,
phjAlpha = phjAlpha,
phjPrintResults = phjPrintResults)
ax = phjOR.plot(x = phjSuffixDict['catmidpoints'],
y = phjSuffixDict['logodds'],
kind = 'line',
yerr = phjYErrors,
capsize = 4,
title = 'Log-odds against mid-points of categories')
ax.set_ylabel("Log odds")
ax.set_xlabel(phjNewCategoryVarName)
ax.set_xlim([phjBreaks[0],phjBreaks[-1]])
# Add vertical lines to indicate boundaries of categories
for xline in phjBreaks:
ax.axvline(x = xline,
linestyle = 'dashed',
color = 'gray')
else:
# Otherwise, attempts to categorise the data failed and phjBreaks returned as None
phjOR = None
if phjPrintResults == True:
print('\nOdds ratio dataframe')
print(phjOR)
return phjOR
# ====================
# Supporting functions
# ====================
def phjCalculateLogOddsSE(phjDF,
phjCaseVarName,
phjCaseValue,
phjCategoricalVarName,
phjMissingValue = np.nan,
phjAlpha = 0.05,
phjPrintResults = False):
# Get a list of the terms used to head columns in summary tables
phjSuffixDict = phjDefineSuffixDict(phjAlpha = phjAlpha)
# statsmodels has some slightly unexpected behaviour if outcome variable contains
# strings (see https://stackoverflow.com/questions/48312979/how-does-statsmodels-encode-endog-variables-entered-as-strings).
# Convert to case variable to 0/1 for logistic regression model.
# The original calculation of odds table in phjOddsRatio() function checks to make
# sure there are only 2 levels present in the case variable and that the case value
# is actually present in the column so no need to check again.
# Get a list of values in the case variable, create a dictionary to convert values
# to binary representation (based on given value of case value) – assuming it's not
# already binary – and use the dictionary to convert case variable to a binary format.
phjCaseLevelsList = phjDF[phjCaseVarName].unique()
if set(phjCaseLevelsList) != set([0,1]):
phjBinaryConvertDict = {c:(1 if c==phjCaseValue else 0) for c in phjCaseLevelsList}
phjDF[phjCaseVarName] = phjDF[phjCaseVarName].replace(phjBinaryConvertDict)
# Run a logistic regression model with no constant term (in patsy package, the -1 removes the constant term)
phjLogisticRegressionResults = smf.glm(formula='{0} ~ C({1}) -1'.format(phjCaseVarName,phjCategoricalVarName),
data=phjDF,
family = sm.families.Binomial(link = sm.genmod.families.links.logit)).fit()
if phjPrintResults == True:
print('\nResults of logistic regression model')
print(phjLogisticRegressionResults.summary())
# Extract group codes from index of logistic regression results table.
# The index values have the structure: varName[level].
# Extract just the bit contained in square brackets:
# i. Define and compile regex
# (Picks out integer or floats from within square brackets)
    phjRegex = re.compile(r'\[(?P<group_index>\d+.?\d*)\]$')
# ii. Extract std err data from model
phjSEResultsDF = pd.DataFrame(phjLogisticRegressionResults.bse)
# iii. Rename column heading and generate a new index and replace the old one.
phjSEResultsDF.columns = [phjSuffixDict['stderr']]
# The following list comprehension steps through each index and extracts the regex
# group (in this case, the bit between the square brackets)
phjNewIndex = [re.search(phjRegex,i).group('group_index') for i in phjSEResultsDF.index]
# ...and the extracted bits are converted to ints if possible
for n,j in enumerate(phjNewIndex):
try:
phjNewIndex[n] = int(float(j)) # Can't convert a string of a float to int using int(); need to use float() as well.
except ValueError:
phjNewIndex[n] = j
phjSEResultsDF.index = phjNewIndex
return phjSEResultsDF
def phjCategoriseContinuousVariable(phjDF,
phjContinuousVarName = None,
phjMissingValue = np.nan,
phjNumberOfCategoriesInt = 5,
phjNewCategoryVarName = None,
phjCategorisationMethod = 'jenks',
phjReturnBreaks = False,
phjPrintResults = False):
phjDF = phjDF.copy(deep = True)
try:
phjAssert('phjDF',phjDF,pd.DataFrame)
phjAssert('phjContinuousVarName',phjContinuousVarName,str,phjMustBePresentColumnList = list(phjDF.columns))
# Deal with missing values in continuous variable.
# The phjMissingValue argument can be str, float or int. If str then check that
# the only string in the column is equal to the phjMissingValue argument. This
# is done by creating a list of items that cannot be converted to a number and
# do not match the phjMissingValue argument. If the list is not empty then it
# indicates that there are strings in the variable.
phjAssert('phjMissingValue',phjMissingValue,(str,int,float))
phjExtraStrs = list(set([s for s in phjDF[phjContinuousVarName] if ((phjCheckIsNumber(s) is False) & (s != phjMissingValue))]))
assert len(phjExtraStrs) == 0, 'The continuous variable contains strings that are not recognised as missing values (namely {}).'.format(phjExtraStrs)
phjAssert('phjNumberOfCategoriesInt',phjNumberOfCategoriesInt,int,phjAllowedOptions = {'min':2,'max':min([100,len(pd.to_numeric(phjDF[phjContinuousVarName],errors = 'coerce').dropna(axis = 0))])})
phjAssert('phjNewCategoryVarName',phjNewCategoryVarName,str,phjMustBeAbsentColumnList = list(phjDF.columns))
# Check phjCategorisationMethod is either a string indicating method to use to
# calculate breaks or a list giving required breaks.
# If a string is entered, check it is one of the recognised options.
# If a list is entered, check it contains only numbers and that each consecutive
# number is greater than the number preceding it.
phjAssert('phjCategorisationMethod',phjCategorisationMethod,(str,list))
if isinstance(phjCategorisationMethod,str):
phjAssert('phjCategorisationMethod', phjCategorisationMethod.lower(), str,
phjBespokeMessage = "The selected method to calculate category boundaries is not recognised or has not yet been implemented. The variable '{}' has not been categorised.".format(phjContinuousVarName),
phjAllowedOptions = ['quantile','jenks'])
        elif isinstance(phjCategorisationMethod,collections.abc.Sequence):
assert phjCheckListOfIncreasingNumbers(phjList = phjCategorisationMethod) == True, "The list entered for phjCategorisationMethod must contain sequentially increasing numbers."
phjAssert('phjReturnBreaks',phjReturnBreaks,bool)
phjAssert('phjPrintResults',phjPrintResults,bool)
except AssertionError as e:
# Define phjBreaks before returning at end of function
if phjReturnBreaks == True:
phjBreaks = None
# If function has been called directly, present message.
if inspect.stack()[1][3] == '<module>':
print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3]))
# If function has been called by another function then modify message and re-raise exception
else:
print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
fname = inspect.stack()[0][3],
callfname = inspect.stack()[1][3]))
raise
else:
# Deal with missing values in continuous variable.
# If a variable contains a missing value string, the dtype is 'object'. If
# the missing value string is replaced by np.nan then the variable is changed
# to 'float' and number strings are converted to actual numbers. However, if
# there is another string in the variable, the dtype remains as 'object'; the
# latter situation should have been identified by the assert statement above.
phjContinuousSer = phjDF[phjContinuousVarName].replace(phjMissingValue,np.nan)
# Check if phjCategorisationMethod is a list.
# If so, the phjNumberOfCategoriesInt is ignored and the number of categories
# is inferred from the break points.
        if isinstance(phjCategorisationMethod,collections.abc.Sequence) and not isinstance(phjCategorisationMethod,str):
phjBreaks = phjCategorisationMethod
phjDF[phjNewCategoryVarName] = pd.cut(phjContinuousSer,
bins = phjBreaks,
right = True,
labels = False)
# If phjCategorisationMethod is a string, use the appropriate method to
# calculate breaks
# N.B. If add additional methods, remember to add to list in phjAssert() function.
elif isinstance(phjCategorisationMethod,str):
if phjCategorisationMethod.lower() == 'jenks':
phjBreaks = phjImplementGetBreaks(phjDF = phjDF,
phjContinuousVarName = phjContinuousVarName,
phjMissingValue = phjMissingValue,
phjNumberOfCategoriesInt = phjNumberOfCategoriesInt,
phjPrintResults = phjPrintResults)
# Cut data series based on Jenks breaks
phjDF[phjNewCategoryVarName] = pd.cut(phjContinuousSer,
bins = phjBreaks,
right = True,
labels = False)
if phjPrintResults == True:
print('Category quantile bins (Jenks) = ',phjBreaks)
print('\n')
elif phjCategorisationMethod.lower() == 'quantile':
# Cut data series based on quantiles / number of required bins
phjDF[phjNewCategoryVarName], phjBreaks = pd.cut(phjContinuousSer,
bins = phjNumberOfCategoriesInt,
right = True,
retbins = True,
labels = False)
if phjPrintResults == True:
print('Category quantile bins (quantile) = ',phjBreaks)
print('\n')
if phjReturnBreaks == True:
return phjDF,phjBreaks
else:
return phjDF
def phjCheckListOfIncreasingNumbers(phjList):
# This function checks that the list contains only numbers and that
# the numbers are consecutively increasing.
    if isinstance(phjList,collections.abc.Sequence) and not isinstance(phjList,str):
# List comprehension steps through each value of list
        # and only retains the numbers. If the length of the original
# list is different from the list with numbers only, then
# some items were not numbers
if len(phjList) == len([i for i in phjList if phjCheckIsNumber(i) == True]):
# If all items are numbers, check that each consecutive number
# is bigger than the preceding one
phjIncrease = True
for j in range(1,len(phjList)):
if (phjIncrease == True) and (phjList[j] - phjList[j-1] > 0):
phjIncrease = True
else:
phjIncrease = False
print('Item at position {0} in list ({1}) is not larger than preceding number ({2}).'.format(j,phjList[j],phjList[j-1]))
break
if phjIncrease == True:
phjListCheck = True
else:
phjListCheck = False
else:
# Here, could identify which list items are not numbers if so inclined...
print('Some items in list are not numbers.')
phjListCheck = False
return phjListCheck
def phjCheckIsNumber(i):
try:
i = float(i)
phjIsNumber = True
except ValueError:
phjIsNumber = False
except TypeError:
phjIsNumber = False
return phjIsNumber
def phjImplementGetBreaks(phjDF,
phjContinuousVarName = None,
phjMissingValue = 'missing',
phjNumberOfCategoriesInt = 5,
phjCategorisationMethod = 'jenks',
phjPrintResults = False):
phjTempSer = phjDF[phjContinuousVarName].replace(phjMissingValue,np.nan).dropna(axis = 0)
if phjCategorisationMethod == 'jenks':
if len(phjTempSer.index) <= 1000:
phjBreaks = getJenksBreaks(np.array(phjTempSer),
phjNumberOfCategoriesInt)
else:
phjBreaks = getJenksBreaks(np.array(phjTempSer.sample(1000)),
phjNumberOfCategoriesInt)
# As the breaks were calculated from a sample, the last value
# may be smaller than the maximum. Hence, when categorising the
# continuous variable in the original dataframe, there would be a
# small number of individuals who wouldn't appear in any category.
# Replace the end values of the break list with values that are
# slightly bigger or smaller (0.1%) than the maximum or minimum.
# This is the same procedure used by pandas.cut() method.
phjBreaks[0] = phjTempSer.min() * 0.999
phjBreaks[-1] = phjTempSer.max() * 1.001
else:
print('The selected method to calculate category boundaries has not yet been implemented.')
phjBreaks = None
return phjBreaks
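# Usage example (editor's sketch, not part of the package). The dataframe and column
# names are hypothetical; it categorises a continuous variable into three bins using
# the 'quantile' option and returns the break points as well.
#
#   df = pd.DataFrame({'weight': [2.1, 3.4, 5.6, 7.2, 8.9, 10.3]})
#   df_cat, breaks = phjCategoriseContinuousVariable(phjDF = df,
#                                                    phjContinuousVarName = 'weight',
#                                                    phjNumberOfCategoriesInt = 3,
#                                                    phjNewCategoryVarName = 'weight_cat',
#                                                    phjCategorisationMethod = 'quantile',
#                                                    phjReturnBreaks = True)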
if __name__ == '__main__':
    main()  # NOTE: no main() function is defined in this module; running it as a script will raise NameError.
|
lvphj/epydemiology
|
epydemiology/phjExploreData.py
|
Python
|
mit
| 26,912
|
"""test_flask_utils: validates expected behavior for prosper.common.flask_utils"""
import atexit
import importlib.util
from os import path, environ, remove
import platform
import pytest
from plumbum import local
import prosper.common.flask_utils as flask_utils
HERE = path.abspath(path.dirname(__file__))
ROOT = path.dirname(HERE)
python = local['python']
if platform.system() == 'Windows':
which = local['where']
else:
which = local['which']
def atexit_remove_file(filepath):
"""atexit handler to remove tempfiles and avoid clutter"""
print('ATEXIT removing: ' + path.abspath(filepath))
remove(filepath)
assert not path.isfile(filepath)
def test_cli():
"""make sure entry_point/console_script does what it says"""
# TODO: local.cwd() swapping to test dirs
gunicorn_conf = local[which('make_gunicorn_config').rstrip()]
if path.isfile('gunicorn.conf'):
remove('gunicorn.conf')
gunicorn_conf()
assert path.isfile('gunicorn.conf')
atexit.register(atexit_remove_file, 'gunicorn.conf')
def test_gunicorn_conf():
"""make sure gunicorn contents works as expected"""
# Prep Test
environ['GUNICORN_TEST1'] = 'hello'
environ['GUNICORN_TEST2'] = 'world'
gunicorn_filename = path.join(HERE, '_gunicorn.py')
if path.isfile(gunicorn_filename):
remove(gunicorn_filename)
# Create gunicorn config file (.py)
flask_utils.make_gunicorn_config(_gunicorn_config_path=gunicorn_filename)
assert path.isfile(gunicorn_filename)
# use importlib to load _gunicorn.py and make sure expected values are there
spec = importlib.util.spec_from_file_location('_gunicorn', gunicorn_filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
assert module.test1 == 'hello'
assert module.test2 == 'world'
# Sanitize after test
del environ['GUNICORN_TEST1']
del environ['GUNICORN_TEST2']
atexit.register(atexit_remove_file, gunicorn_filename)
|
EVEprosper/ProsperCommon
|
tests/test_flask_utils.py
|
Python
|
mit
| 1,995
|
"""
Given a string, find the length of the longest substring without repeating characters.
Examples:
Given "abcabcbb", the answer is "abc", which the length is 3.
Given "bbbbb", the answer is "b", with the length of 1.
Given "pwwkew", the answer is "wke", with the length of 3. Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
"""
class Solution(object):
"""
O(n)
using hashset to record which char is already covered.
"""
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
p1, p2 = 0, 0
longest = p2 - p1
coverset = set()
while p2 < len(s):
c2 = s[p2]
if c2 not in coverset:
coverset.add(c2)
p2 += 1
d = p2 - p1
if d > longest:
longest = d
else:
while c2 in coverset:
coverset.remove(s[p1])
p1 += 1
return longest
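# Quick sanity checks (editor's addition, not part of the original submission); the
# expected values follow the examples in the docstring above.
#
#   solver = Solution()
#   assert solver.lengthOfLongestSubstring("abcabcbb") == 3
#   assert solver.lengthOfLongestSubstring("bbbbb") == 1
#   assert solver.lengthOfLongestSubstring("pwwkew") == 3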
|
weixsong/algorithm
|
leetcode/3.py
|
Python
|
mit
| 1,051
|
#!/usr/bin/env python2
#
# Find a sequence of duk_hobject hash sizes which have a desired 'ratio'
# and are primes. Prime hash sizes ensure that all probe sequence values
# (less than hash size) are relatively prime to hash size, i.e. cover the
# entire hash. Prime data is packed into about 1 byte/prime using a
# prediction-correction model.
#
# Also generates a set of probe steps which are relatively prime to every
# hash size.
#
import sys
import math
def is_prime(n):
if n == 0:
return False
if n == 1 or n == 2:
return True
n_limit = int(math.ceil(float(n) ** 0.5)) + 1
n_limit += 100 # paranoia
if n_limit >= n:
n_limit = n - 1
for i in xrange(2,n_limit + 1):
if (n % i) == 0:
return False
return True
def next_prime(n):
while True:
n += 1
if is_prime(n):
return n
def generate_sizes(min_size, max_size, step_ratio):
"Generate a set of hash sizes following a nice ratio."
sizes = []
ratios = []
curr = next_prime(min_size)
next = curr
sizes.append(curr)
step_ratio = float(step_ratio) / 1024
while True:
if next > max_size:
break
ratio = float(next) / float(curr)
if ratio < step_ratio:
next = next_prime(next)
continue
sys.stdout.write('.'); sys.stdout.flush()
sizes.append(next)
ratios.append(ratio)
curr = next
next = next_prime(int(next * step_ratio))
sys.stdout.write('\n'); sys.stdout.flush()
return sizes, ratios
def generate_corrections(sizes, step_ratio):
"Generate a set of correction from a ratio-based predictor."
# Generate a correction list for size list, assuming steps follow a certain
# ratio; this allows us to pack size list into one byte per size
res = []
res.append(sizes[0]) # first entry is first size
for i in xrange(1, len(sizes)):
prev = sizes[i - 1]
pred = int(prev * step_ratio) >> 10
diff = int(sizes[i] - pred)
res.append(diff)
if diff < 0 or diff > 127:
raise Exception('correction does not fit into 8 bits')
    res.append(-1) # negative value denotes end of list
return res
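# Editor's sketch (not part of the original script): decoding the packed correction
# list back into hash sizes, i.e. the inverse of generate_corrections(). step_ratio is
# the same module-level integer (1177 here, roughly 1.15 * 1024).
#
#   def decode_sizes(corrections, step_ratio):
#       sizes = [corrections[0]]            # first entry is the first size verbatim
#       for diff in corrections[1:]:
#           if diff < 0:                    # negative entry marks the end of the list
#               break
#           pred = int(sizes[-1] * step_ratio) >> 10
#           sizes.append(pred + diff)
#       return sizes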
def generate_probes(count, sizes):
res = []
# Generate probe values which are guaranteed to be relatively prime to
# all generated hash size primes. These don't have to be primes, but
# we currently use smallest non-conflicting primes here.
i = 2
while len(res) < count:
if is_prime(i) and (i not in sizes):
if i > 255:
raise Exception('probe step does not fit into 8 bits')
res.append(i)
i += 1
continue
i += 1
return res
# NB: these must match duk_hobject defines and code
step_ratio = 1177 # approximately (1.15 * (1 << 10))
min_size = 16
max_size = 2**32 - 1
sizes, ratios = generate_sizes(min_size, max_size, step_ratio)
corrections = generate_corrections(sizes, step_ratio)
probes = generate_probes(32, sizes)
print len(sizes)
print 'SIZES: ' + repr(sizes)
print 'RATIOS: ' + repr(ratios)
print 'CORRECTIONS: ' + repr(corrections)
print 'PROBES: ' + repr(probes)
# highest 32-bit prime
i = 2**32
while True:
i -= 1
if is_prime(i):
print 'highest 32-bit prime is: %d (0x%08x)' % (i, i)
break
|
jmptrader/duktape
|
util/genhashsizes.py
|
Python
|
mit
| 3,405
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'django.views.generic.simple.direct_to_template',
{'template': 'index.html'}, name='home'),
url(r'^layout/$', 'django.views.generic.simple.direct_to_template',
{'template': 'guideline.html'}, name='layout'),
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'registration/login.html'}, name='login'),
# url(r'^portal/', include('portal.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^blog/', include('portal.apps.blog.urls')),
url(r'^gallery/', include('portal.apps.gallery.urls')),
url(r'^badges/', include('portal.apps.badges.urls')),
url(r'^home/$', 'portal.views.home', name='home'),
)
if settings.DEBUG:
urlpatterns += patterns('django.views.static',
(r'media/(?P<path>.*)', 'serve', {'document_root': settings.MEDIA_ROOT}),
)
|
pugpe/pugpe_portal
|
portal/urls.py
|
Python
|
mit
| 1,303
|
#
# Copyright (c) Elliot Peele <elliot@bentlogic.net>
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any warrenty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
#
import logging
from zope.interface import implementer
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authentication import CallbackAuthenticationPolicy
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.httpexceptions import HTTPUnauthorized
from .models import Oauth2Token
from .models import DBSession as db
from .errors import InvalidToken
from .errors import InvalidRequest
from .util import getClientCredentials
log = logging.getLogger('pyramid_oauth2_provider.authentication')
@implementer(IAuthenticationPolicy)
class OauthAuthenticationPolicy(CallbackAuthenticationPolicy):
def _isOauth(self, request):
return bool(getClientCredentials(request))
def _get_auth_token(self, request):
try:
token_type, token = getClientCredentials(request)
except TypeError:
raise HTTPBadRequest(InvalidRequest())
if token_type != 'bearer':
return None
auth_token = db.query(Oauth2Token).filter_by(access_token=token).first()
# Bad input, return 400 Invalid Request
if not auth_token:
raise HTTPBadRequest(InvalidRequest())
# Expired or revoked token, return 401 invalid token
if auth_token.isRevoked():
raise HTTPUnauthorized(InvalidToken())
return auth_token
def unauthenticated_userid(self, request):
auth_token = self._get_auth_token(request)
if not auth_token:
return None
return auth_token.user_id
def remember(self, request, principal, **kw):
"""
I don't think there is anything to do for an oauth request here.
"""
def forget(self, request):
auth_token = self._get_auth_token(request)
if not auth_token:
return None
auth_token.revoke()
@implementer(IAuthenticationPolicy)
class OauthTktAuthenticationPolicy(OauthAuthenticationPolicy,
AuthTktAuthenticationPolicy):
def __init__(self, *args, **kwargs):
OauthAuthenticationPolicy.__init__(self)
AuthTktAuthenticationPolicy.__init__(self, *args, **kwargs)
def unauthenticated_userid(self, request):
if self._isOauth(request):
return OauthAuthenticationPolicy.unauthenticated_userid(
self, request)
else:
return AuthTktAuthenticationPolicy.unauthenticated_userid(
self, request)
def remember(self, request, principal, **kw):
if self._isOauth(request):
return OauthAuthenticationPolicy.remember(
self, request, principal, **kw)
else:
return AuthTktAuthenticationPolicy.remember(
self, request, principal, **kw)
def forget(self, request):
if self._isOauth(request):
return OauthAuthenticationPolicy.forget(
self, request)
else:
return AuthTktAuthenticationPolicy.forget(
self, request)
|
elliotpeele/pyramid_oauth2_provider
|
pyramid_oauth2_provider/authentication.py
|
Python
|
mit
| 3,576
|
#!/usr/bin/env python
# __author__ = "Ronie Martinez"
# __copyright__ = "Copyright 2016-2020, Ronie Martinez"
# __credits__ = ["Ronie Martinez"]
# __maintainer__ = "Ronie Martinez"
# __email__ = "ronmarti18@gmail.com"
import pytest
from latex2mathml.symbols_parser import convert_symbol
@pytest.mark.parametrize(
"name, latex, expected",
[("operator plus", "+", "0002B"), ("alias command", r"\to", "02192")],
)
def test_convert_symbol(name: str, latex: str, expected: str):
assert convert_symbol(latex) == expected
|
Code-ReaQtor/latex2mathml
|
tests/test_symbol_parser.py
|
Python
|
mit
| 530
|
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/most-commons
import collections
import io
import sys
import unittest
def mangle(s):
counted = collections.Counter()
for entry in s:
counted[entry] += 1
    # Sort keys by count (descending); ties are broken alphabetically because the
    # low byte of the key holds 256 - ord(key), so smaller characters sort first.
    ordered = sorted(
        counted.keys(),
        key=lambda key: (counted[key] << 8) | (256 - ord(key)),
        reverse=True)
return ordered, counted
def main():
s = input().strip()
ordered, counted = mangle(s)
for entry in ordered[:3]:
print('%s %d' % (entry, counted[entry]))
if __name__ == '__main__': # pragma: no cover
main()
class TestCode(unittest.TestCase):
def generalized_test(self, which):
sys.stdin = open(__file__.replace('.py', f'.{which}.in'), 'r')
sys.stdout = io.StringIO()
expected = open(__file__.replace('.py', f'.{which}.out'), 'r')
main()
self.assertEqual(sys.stdout.getvalue(), expected.read())
for handle in [sys.stdin, sys.stdout, expected]:
handle.close()
def test_0(self):
self.generalized_test('0')
|
altermarkive/Coding-Interviews
|
algorithm-design/hackerrank/most_commons/test_most_commons.py
|
Python
|
mit
| 1,079
|
def read_all(*files):
"""Read file(s) and return the content
Args:
files (str): variable number of file names
Returns:
combined content as a string
"""
output = []
for f in files:
with open(f, 'r') as IN:
output.append(IN.read())
return "\n".join(output)
|
yuhangwang/ninjag-python
|
ninjag/tk/ioTK/read_all.py
|
Python
|
mit
| 320
|
import os
from lxml import etree
from pyqtcli import verbose as v
WARNING_TEMPLATE = "Alias \'{}\' already exists in \'{}\' at prefix \'{}\'."
def write_alias(qrc_files, verbose):
"""Write alias for resources within qrc files.
Alias are base in basename of theses resources. In the case where two
resource files would have the same name, and so, the same alias, the
script warns the user of incriminated files.
Args:
qrc_files (list or tuple): A list containing path to qrc files.
verbose (bool): True if the user pass '-v' or '--verbose' option
to see what's happening.
"""
warnings = [] # List containing all warnings message
# Loop over all provided qrc files
for qrc_file in qrc_files:
tree = etree.parse(qrc_file)
root = tree.getroot()
# Inform which qrc file is processed
v.info("Current file: {}".format(qrc_file), verbose)
# Iterate over each qresource containing file resources
for qresource in root.iter(tag="qresource"):
# Alias are prefixed by qresource prefix so we check only
# duplication within qresource
aliases = []
            # Iterate over each file that doesn't already have an alias
for resource in qresource.iter(tag="file"):
alias = os.path.basename(resource.text)
if alias not in aliases:
if not resource.attrib.get("alias", None):
resource.set("alias", alias)
# Inform which alias is given to the current resource
v.info("resource: '{}' => {}".format(
resource.text, alias), verbose)
else:
# Add same alias warning
warnings.append(WARNING_TEMPLATE.format(
alias, qrc_file, qresource.attrib.get(
"prefix", ""))
)
break
# Append created alias to used aliases in current qresource
aliases.append(alias)
# Rewrite qrc file
tree.write(qrc_file)
    # Warn the user about resources that could not receive an alias
# because of duplication
for message in warnings:
v.warning(message)
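# Usage example (editor's sketch; the qrc path is hypothetical and the file must
# exist on disk):
#
#   write_alias(["resources/app.qrc"], verbose=True)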
|
Kynarth/pyqtcli
|
pyqtcli/makealias.py
|
Python
|
mit
| 2,341
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In England the currency is made up of pound, £, and pence, p, and there are eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
It is possible to make £2 in the following way:
1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
How many different ways can £2 be made using any number of coins?
"""
def lower_bound(sequence, lower, upper, value):
if lower == upper:
return None
middle = (lower + upper) // 2
if value < sequence[middle]:
result = lower_bound(sequence, lower, middle, value)
return middle if result is None else result
elif value > sequence[middle]:
result = lower_bound(sequence, middle + 1, upper, value)
return upper if result is None else result
else:
return middle
def combinations(value, cache):
if value in cache:
return cache[value]
if value == 0:
return [[]]
possibilities = []
for i in [1, 2, 5, 10, 20, 50, 100, 200]:
if i <= value:
for j in combinations(value - i, cache):
if j:
#print('!', value, i, j, possibilities)
index = lower_bound(j, 0, len(j), i)
permutation = j[:]
permutation.insert(index, i)
if permutation not in possibilities:
possibilities.append(permutation)
#print(permutation, index, possibilities)
else:
possibilities.append([i])
cache[value] = possibilities
return possibilities
def find_combinations(value):
cache = {}
result = len(combinations(value, cache))
#print(cache)
return result
def main():
l = [2, 2]
#print(lower_bound(l, 0, len(l), 1))
print(find_combinations(200))
if __name__ == '__main__':
main()
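# Alternative approach (editor's sketch, not part of the original solution): the
# classic coin-change dynamic programme counts the ways directly instead of storing
# every combination, which is much cheaper for this problem.
#
#   def count_ways(target, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
#       ways = [1] + [0] * target           # ways[0] = 1: a single way to make 0p
#       for coin in coins:
#           for amount in range(coin, target + 1):
#               ways[amount] += ways[amount - coin]
#       return ways[target]
#
#   # count_ways(200) == 73682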
|
goldsborough/euler
|
31.py
|
Python
|
mit
| 1,645
|
from unittest import TestCase
import unittest
from equadratures.poly import Poly
from equadratures.sampling_methods.induced import Induced
from equadratures.parameter import Parameter
from equadratures.basis import Basis
import numpy as np
import time
class TestSamplingGeneration(TestCase):
def test_sampling(self):
d = 3
order = 3
param = Parameter(distribution='uniform', order=order, lower=-1.0, upper=1.0)
myparameters = [param for _ in range(d)]
mybasis2 = Basis('total-order')
mypoly2 = Poly(myparameters, mybasis2, method='least-squares', sampling_args={'mesh':'induced', 'subsampling-algorithm':'qr', 'sampling-ratio':1.0})
assert mypoly2._quadrature_points.shape == (mypoly2.basis.cardinality, d)
def test_induced_jacobi_evaluation(self):
dimension = 3
parameters = [Parameter(1, "Uniform", upper=1, lower=-1)]*dimension
basis = Basis("total-order")
induced_sampling = Induced(parameters, basis)
parameter = parameters[0]
parameter.order = 3
cdf_value = induced_sampling.induced_jacobi_evaluation(0, 0, 0, parameter)
np.testing.assert_allclose(cdf_value, 0.5, atol=0.00001)
cdf_value = induced_sampling.induced_jacobi_evaluation(0, 0, 1, parameter)
assert cdf_value == 1
cdf_value = induced_sampling.induced_jacobi_evaluation(0, 0, -1, parameter)
assert cdf_value == 0
cdf_value = induced_sampling.induced_jacobi_evaluation(0, 0, 0.6, parameter)
np.testing.assert_allclose(cdf_value, 0.7462, atol=0.00005)
cdf_value = induced_sampling.induced_jacobi_evaluation(0, 0, 0.999, parameter)
np.testing.assert_allclose(cdf_value, 0.99652, atol=0.000005)
if __name__ == '__main__':
unittest.main()
|
psesh/Effective-Quadratures
|
tests/test_induced.py
|
Python
|
mit
| 1,796
|
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy.orm import attributes
from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import *
from sqlalchemy.test.util import gc_collect
from sqlalchemy.test import testing
from test.orm import _base
from test.orm._fixtures import FixtureTest, User, Address, users, addresses
class TransactionTest(FixtureTest):
run_setup_mappers = 'once'
run_inserts = None
session = sessionmaker()
@classmethod
def setup_mappers(cls):
mapper(User, users, properties={
'addresses':relation(Address, backref='user',
cascade="all, delete-orphan", order_by=addresses.c.id),
})
mapper(Address, addresses)
class FixtureDataTest(TransactionTest):
run_inserts = 'each'
def test_attrs_on_rollback(self):
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = 'ed'
sess.rollback()
eq_(u1.name, 'jack')
def test_commit_persistent(self):
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = 'ed'
sess.flush()
sess.commit()
eq_(u1.name, 'ed')
def test_concurrent_commit_persistent(self):
s1 = self.session()
u1 = s1.query(User).get(7)
u1.name = 'ed'
s1.commit()
s2 = self.session()
u2 = s2.query(User).get(7)
assert u2.name == 'ed'
u2.name = 'will'
s2.commit()
assert u1.name == 'will'
class AutoExpireTest(TransactionTest):
def test_expunge_pending_on_rollback(self):
sess = self.session()
u2= User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
def test_trans_pending_cleared_on_commit(self):
sess = self.session()
u2= User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.commit()
assert u2 in sess
u3 = User(name='anotheruser')
sess.add(u3)
sess.rollback()
assert u3 not in sess
assert u2 in sess
def test_update_deleted_on_rollback(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
# this actually tests that the delete() operation,
# when cascaded to the "addresses" collection, does not
# trigger a flush (via lazyload) before the cascade is complete.
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
def test_gced_delete_on_rollback(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.delete(u1)
u1_state = attributes.instance_state(u1)
assert u1_state in s.identity_map.all_states()
assert u1_state in s._deleted
s.flush()
assert u1_state not in s.identity_map.all_states()
assert u1_state not in s._deleted
del u1
gc_collect()
assert u1_state.obj() is None
s.rollback()
assert u1_state in s.identity_map.all_states()
u1 = s.query(User).filter_by(name='ed').one()
assert u1_state not in s.identity_map.all_states()
assert s.scalar(users.count()) == 1
s.delete(u1)
s.flush()
assert s.scalar(users.count()) == 0
s.commit()
def test_trans_deleted_cleared_on_rollback(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.delete(u1)
s.commit()
assert u1 not in s
s.rollback()
assert u1 not in s
def test_update_deleted_on_rollback_cascade(self):
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
s.delete(u1)
assert u1 in s.deleted
assert u1.addresses[0] in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
assert u1.addresses[0] not in s.deleted
def test_update_deleted_on_rollback_orphan(self):
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
a1 = u1.addresses[0]
u1.addresses.remove(a1)
s.flush()
eq_(s.query(Address).filter(Address.email_address=='foo').all(), [])
s.rollback()
assert a1 not in s.deleted
assert u1.addresses == [a1]
def test_commit_pending(self):
sess = self.session()
u1 = User(name='newuser')
sess.add(u1)
sess.flush()
sess.commit()
eq_(u1.name, 'newuser')
def test_concurrent_commit_pending(self):
s1 = self.session()
u1 = User(name='edward')
s1.add(u1)
s1.commit()
s2 = self.session()
u2 = s2.query(User).filter(User.name=='edward').one()
u2.name = 'will'
s2.commit()
assert u1.name == 'will'
class TwoPhaseTest(TransactionTest):
@testing.requires.two_phase_transactions
def test_rollback_on_prepare(self):
s = self.session(twophase=True)
u = User(name='ed')
s.add(u)
s.prepare()
s.rollback()
assert u not in s
class RollbackRecoverTest(TransactionTest):
def test_pk_violation(self):
s = self.session()
a1 = Address(email_address='foo')
u1 = User(id=1, name='ed', addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address='bar')
u2 = User(id=1, name='jack', addresses=[a2])
u1.name = 'edward'
a1.email_address = 'foober'
s.add(u2)
assert_raises(sa_exc.FlushError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
assert u1.name == 'ed'
assert a1.email_address == 'foo'
u1.name = 'edward'
a1.email_address = 'foober'
s.commit()
eq_(
s.query(User).all(),
[User(id=1, name='edward', addresses=[Address(email_address='foober')])]
)
@testing.requires.savepoints
def test_pk_violation_with_savepoint(self):
s = self.session()
a1 = Address(email_address='foo')
u1 = User(id=1, name='ed', addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address='bar')
u2 = User(id=1, name='jack', addresses=[a2])
u1.name = 'edward'
a1.email_address = 'foober'
s.begin_nested()
s.add(u2)
assert_raises(sa_exc.FlushError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
s.commit()
assert s.query(User).all() == [User(id=1, name='edward', addresses=[Address(email_address='foober')])]
class SavepointTest(TransactionTest):
@testing.requires.savepoints
def test_savepoint_rollback(self):
s = self.session()
u1 = User(name='ed')
u2 = User(name='jack')
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name='wendy')
u4 = User(name='foo')
u1.name = 'edward'
u2.name = 'jackward'
s.add_all([u3, u4])
eq_(s.query(User.name).order_by(User.id).all(), [('edward',), ('jackward',), ('wendy',), ('foo',)])
s.rollback()
assert u1.name == 'ed'
assert u2.name == 'jack'
eq_(s.query(User.name).order_by(User.id).all(), [('ed',), ('jack',)])
s.commit()
assert u1.name == 'ed'
assert u2.name == 'jack'
eq_(s.query(User.name).order_by(User.id).all(), [('ed',), ('jack',)])
@testing.requires.savepoints
def test_savepoint_delete(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
eq_(s.query(User).filter_by(name='ed').count(), 1)
s.begin_nested()
s.delete(u1)
s.commit()
eq_(s.query(User).filter_by(name='ed').count(), 0)
s.commit()
@testing.requires.savepoints
def test_savepoint_commit(self):
s = self.session()
u1 = User(name='ed')
u2 = User(name='jack')
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name='wendy')
u4 = User(name='foo')
u1.name = 'edward'
u2.name = 'jackward'
s.add_all([u3, u4])
eq_(s.query(User.name).order_by(User.id).all(), [('edward',), ('jackward',), ('wendy',), ('foo',)])
s.commit()
def go():
assert u1.name == 'edward'
assert u2.name == 'jackward'
eq_(s.query(User.name).order_by(User.id).all(), [('edward',), ('jackward',), ('wendy',), ('foo',)])
self.assert_sql_count(testing.db, go, 1)
s.commit()
eq_(s.query(User.name).order_by(User.id).all(), [('edward',), ('jackward',), ('wendy',), ('foo',)])
@testing.requires.savepoints
def test_savepoint_rollback_collections(self):
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
u1.name='edward'
u1.addresses.append(Address(email_address='bar'))
s.begin_nested()
u2 = User(name='jack', addresses=[Address(email_address='bat')])
s.add(u2)
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.rollback()
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
]
)
s.commit()
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
]
)
@testing.requires.savepoints
def test_savepoint_commit_collections(self):
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
u1.name='edward'
u1.addresses.append(Address(email_address='bar'))
s.begin_nested()
u2 = User(name='jack', addresses=[Address(email_address='bat')])
s.add(u2)
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.commit()
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.commit()
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
@testing.requires.savepoints
def test_expunge_pending_on_rollback(self):
sess = self.session()
sess.begin_nested()
u2= User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
@testing.requires.savepoints
def test_update_deleted_on_rollback(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.begin_nested()
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
class AccountingFlagsTest(TransactionTest):
def test_no_expire_on_commit(self):
sess = sessionmaker(expire_on_commit=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
testing.db.execute(users.update(users.c.name=='ed').values(name='edward'))
assert u1.name == 'ed'
sess.expire_all()
assert u1.name == 'edward'
def test_rollback_no_accounting(self):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
u1.name = 'edwardo'
sess.rollback()
testing.db.execute(users.update(users.c.name=='ed').values(name='edward'))
assert u1.name == 'edwardo'
sess.expire_all()
assert u1.name == 'edward'
def test_commit_no_accounting(self):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
u1.name = 'edwardo'
sess.rollback()
testing.db.execute(users.update(users.c.name=='ed').values(name='edward'))
assert u1.name == 'edwardo'
sess.commit()
assert testing.db.execute(select([users.c.name])).fetchall() == [('edwardo',)]
assert u1.name == 'edwardo'
sess.delete(u1)
sess.commit()
def test_preflush_no_accounting(self):
sess = sessionmaker(_enable_transaction_accounting=False, autocommit=True)()
u1 = User(name='ed')
sess.add(u1)
sess.flush()
sess.begin()
u1.name = 'edwardo'
u2 = User(name="some other user")
sess.add(u2)
sess.rollback()
sess.begin()
assert testing.db.execute(select([users.c.name])).fetchall() == [('ed',)]
class AutoCommitTest(TransactionTest):
def test_begin_nested_requires_trans(self):
sess = create_session(autocommit=True)
assert_raises(sa_exc.InvalidRequestError, sess.begin_nested)
def test_begin_preflush(self):
sess = create_session(autocommit=True)
u1 = User(name='ed')
sess.add(u1)
sess.begin()
u2 = User(name='some other user')
sess.add(u2)
sess.rollback()
assert u2 not in sess
assert u1 in sess
assert sess.query(User).filter_by(name='ed').one() is u1
class NaturalPKRollbackTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('name', String(50), primary_key=True)
)
@classmethod
def setup_classes(cls):
class User(_base.ComparableEntity):
pass
@testing.resolve_artifact_names
def test_rollback_recover(self):
mapper(User, users)
session = sessionmaker()()
u1, u2, u3= \
User(name='u1'),\
User(name='u2'),\
User(name='u3')
session.add_all([u1, u2, u3])
session.commit()
session.delete(u2)
u4 = User(name='u2')
session.add(u4)
session.flush()
u5 = User(name='u3')
session.add(u5)
assert_raises(orm_exc.FlushError, session.flush)
assert u5 not in session
assert u2 not in session.deleted
session.rollback()
|
obeattie/sqlalchemy
|
test/orm/test_transaction.py
|
Python
|
mit
| 15,716
|
# Newton's Method to calculate square root
# get three inputs from the user (two ints, 1 float)
num_str = input("Find the square root of integer: ")
while not num_str.isdigit():
print("Pay attention")
    num_str = input("Find the square root of integer: ")
number_int = int(num_str)
guess_str = input("Initial guess: ")
while not guess_str.isdigit():
print("Pay attention")
guess_str = input("Initial guess: ")
guess_float = float(guess_str)
originalGuess_float = guess_float # hang onto the original guess
count_int = 0 # count the number of guesses
# get the float tolerance, no checking of input!
tolerance_float = float(input("What tolerance: "))
# do the algorithm steps as described above
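# Newton/Babylonian update: next_guess = (guess + number/guess) / 2, repeated
# until two successive guesses differ by less than the tolerance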
previous_float = 0
count_int = 0
while abs(previous_float - guess_float) > tolerance_float:
previous_float = guess_float
quotient = number_int/guess_float
    guess_float = (quotient + guess_float)/2
count_int = count_int + 1
# output the three original values, the number of
# iterations and the square root
print("Square root of",number_int," is: ",guess_float)
print("Took ",count_int," reps to get it to tolerance: ",tolerance_float)
print("Starting from a guess of: ", originalGuess_float)
|
r-martin-/Code_College
|
PythonProgramming/newtonsqrt.py
|
Python
|
mit
| 1,263
|