repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
dmnfarrell/epitopemap | modules/pepdata/hpv.py | Python | apache-2.0 | 2,977 | 0.003023 | # Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HPV T-cell antigens and MHC ligands from Dana Farber CVC
http://cvc.dfci.harvard.edu/hpv/HTML/help.html
"""
from __future__ import print_function, division, absolute_import
from os.path import join
import pandas as pd
from .common import bad_amino_acids
from .features import make_unlabeled_ngram_dataset_from_args
from .reduced_alphabet import make_alphabet_transformer
from .static_data import DATA_DIR
def _load_dataframe(
path,
epitope_column_name,
mhc_class=None,
hla_type=None,
exclude_hla_type=None, # regex pattern i.e. '(HLA-A2)|(HLA-A\*02)'
peptide_length=None,
reduced_alphabet=None,
nrows=None):
df = pd.read_csv(path, skipinitialspace=True, nrows=nrows)
epitopes = df[epitope_column_name]
hla = df['HLA allele']
mask = ~(epitopes.str.contains(bad_amino_acids, na=False).astype('bool'))
if mhc_class == 1:
a = hla.str.startswith('A')
b = hla.str.startswith('B')
c = hla.str.startswith('C')
mask &= (a | b | c)
elif mhc_class == 2:
mask &= hla.str.startswith('D')
if hla_type:
mask &= hla.str.contains(hla_type, na=False).astype('bool')
if exclude_hla_type:
mask &= ~(hla.str.contains(exclude_hla_type, na=True).astype('bool'))
if peptide_length: |
mask &= epitopes.str.len() == peptide_length
df = df[mask]
if reduced_alphabet:
epitopes = df[epitope_column_name]
df[epitope_column_name] = \
epitopes.map(make_alphabet_transformer(reduced_alphabet))
return df
def load_tcell(*args, **kwargs):
tcell_path = join(DATA_DIR, 'cvc_hpv_tcell.csv')
return _load_d | ataframe(tcell_path, 'Epitope sequence', *args, **kwargs)
def load_tcell_set(*args, **kwargs):
df = load_tcell(*args, **kwargs)
return set(df['Epitope sequence'])
def load_tcell_ngrams(*args, **kwargs):
return make_unlabeled_ngram_dataset_from_args(
load_tcell_set, *args, **kwargs)
def load_mhc(*args, **kwargs):
mhc_path = join(DATA_DIR, 'cvc_hpv_ligand.csv')
return _load_dataframe(mhc_path, 'Ligand sequence', *args, **kwargs)
def load_mhc_set(*args, **kwargs):
df = load_mhc(*args, **kwargs)
return set(df['Ligand sequence'])
def load_mhc_ngrams(*args, **kwargs):
return make_unlabeled_ngram_dataset_from_args(load_mhc_set, *args, **kwargs) |
oihane/odoo-addons | account_invoice_validate_tax/tests/__init__.py | Python | agpl-3.0 | 48 | 0 | from . import test_ac | count_invoice_ | validate_tax
|
onshape-public/onshape-clients | python/onshape_client/oas/models/btpl_value_var_reference252_all_of.py | Python | mit | 4,301 | 0 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.111
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import sys # noqa: F401
import six # noqa: F401
from onshape_client.oas.model_utils import ModelNormal # noqa: F401
try:
from onshape_client.oas.models import btp_identifier8
except ImportError:
btp_identifier8 = sys.modules["onshape_client.oas.models.btp_identifier8"]
class BTPLValueVarReference252AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"name": (btp_identifier8.BTPIdentifier8,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"name": "name", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to | _item=(),
_configuration=None,
**kwargs
): # noqa: E501
""" | btpl_value_var_reference252_all_of.BTPLValueVarReference252AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
name (btp_identifier8.BTPIdentifier8): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
setattr(self, var_name, var_value)
|
kstilwell/tcex | app_init/service_webhook/args.py | Python | apache-2.0 | 186 | 0 | """Playbook Args"""
from argparse import ArgumentParser
class Args:
"""Playbook Args"""
def __init__(self, parser: ArgumentParser):
"""Initial | ize | class properties."""
|
ip-tools/ip-navigator | patzilla/access/sip/client.py | Python | agpl-3.0 | 15,596 | 0.004681 | # -*- coding: utf-8 -*-
# (c) 2014-2018 Andreas Motl <andreas.motl@ip-tools.org>
import timeit
import logging
import requests
from lxml import etree
from beaker.cache import cache_region
from requests.exceptions import ConnectionError, ConnectTimeout
from patzilla.access.generic.exceptions import NoResultsException, GenericAdapterException
from patzilla.access.generic.search import GenericSearchResponse, GenericSearchClient
from patzilla.access.sip import get_sip_client
from patzilla.util.data.container import SmartBunch
"""
elmyra.ip.access.sip: lowlevel adapter to search provider "SIP"
"""
log = logging.getLogger(__name__)
class SipException(GenericAdapterException):
# TODO: Clean up this mess ;]
def __init__(self, *args, **kwargs):
self.sip_info = ''
super(SipException, self).__init__(*args)
if kwargs.has_key('sip_info'):
self.sip_info = kwargs['sip_info']
if kwargs.has_key('sip_response'):
self.sip_info = kwargs['sip_response'].get_childvalue('Info')
if self.sip_info:
self.user_info = self.sip_info
class LoginException(SipException):
pass
class SearchException(SipException):
pass
class SipClient(GenericSearchClient):
def __init__(self, uri, username=None, password=None, sessionid=None):
self.backend_name = 'sip'
self.search_method = sip_published_data_search
self.crawl_max_count = 5000
self.uri = uri
self.username = username
self.password = password
self.sessionid = sessionid
self.pagesize = 250
self.stale = False
def login(self):
starttime = timeit.default_timer()
try:
response = requests.post(self.uri + '/login', data={'Username': self.username, 'Password': self.password}, timeout=(3, 30))
except (ConnectionError, ConnectTimeout) as ex:
log.error('SIP login for user "{username}" at "{uri}" failed. Reason: {0} {1}.'.format(
ex.__class__, ex.message, username=self.username, uri=self.uri))
self.logout()
error = LoginException(ex.message)
error.sip_info = 'Error or timeout while connecting to upstream database. Database might be offline.'
raise error
if response.status_code == 200:
try:
self.sessionid = self._login_parse_xml(response.content)
duration = timeit.default_timer() - starttime
log.info('SIP login succeeded. sessionid={0}, duration={1}s'.format(self.sessionid, round(duration, 1)))
return True
except Exception as ex:
log.error('SIP login for user "{username}" failed. Reason: {0} {1}. status_code={2}, respo | nse={3}'.format(
ex.__class__, ex.message, response.status_code, response.content, username=self.username))
self.logout()
raise
else:
message = 'SIP login failed. status_code={0}, content={1}'.format(response.status_code, response.content)
log.error(message)
error = LoginException(message)
error.sip_info = 'Login to upst | ream database failed.'
self.logout()
raise error
self.sessionid = None
return False
def logout(self):
log.info('Logging out user "{username}"'.format(username=self.username))
self.stale = True
def search(self, expression, options=None):
options = options or SmartBunch()
options.setdefault('offset', 0)
options.setdefault('limit', self.pagesize)
offset = options.offset
limit = options.limit
log.info(u"{backend_name}: searching documents, expression='{0}', offset={1}, limit={2}".format(
expression, offset, limit, **self.__dict__))
if not self.sessionid or self.stale:
self.login()
starttime = timeit.default_timer()
try:
response = requests.post(self.uri + '/search/new', data={'session': self.sessionid, 'searchtree': expression})
except (ConnectionError, ConnectTimeout) as ex:
log.error(u'SIP search for user "{username}" at "{uri}" failed. Reason: {0} {1}.'.format(
ex.__class__, ex.message, username=self.username, uri=self.uri))
self.logout()
raise SearchException(ex.message,
sip_info=u'Error or timeout while connecting to upstream database. Database might be offline.')
# Process search response
if response.status_code == 200:
#print "SIP search response (raw)"; print response.content # debugging
try:
search_response = self._search_parse_xml(response.content)
if search_response['success'] == 'false':
raise SearchException(u'Search failed', sip_response=search_response['response'])
if 'ResultSetId' in search_response['data']:
search_info = search_response['data']
ResultSetId = search_info['ResultSetId']
# Inject offset and limit into metadata, pretend it comes from server
search_info['Offset'] = offset
search_info['Limit'] = limit
# perform second request to actually retrieve the results by ResultSetId
search_results = self.getresults(ResultSetId, options)
#print "SIP search results:", search_results
duration = timeit.default_timer() - starttime
log.info(u'Search succeeded. duration={0}s, search_info={1}'.format(round(duration, 1), search_info))
upstream_response = {
'info': search_info,
'results': search_results or [],
}
# Mogrify search response
# TODO: Generalize between all search backends
sr = SipSearchResponse(upstream_response, options=options)
result = sr.render()
duration = round(duration, 1)
# TODO: Unify between SIP and IFI CLAIMS
log.info(u'{backend_name}: Search succeeded. duration={duration}s, meta=\n{meta}'.format(
duration=duration, meta=result['meta'].prettify(), **self.__dict__))
if not result['numbers']:
log.warn(u'{backend_name} search from "{user}" for "{expression}" had empty results.'.format(
user=self.username, expression=expression, **self.__dict__
))
return result
else:
message = u'Search failed. Reason: Upstream response lacks valid ResultSetId. content={0}'.format(response.text)
raise SearchException(message, sip_info=u'Search failed. Search response could not be parsed.')
except Exception as ex:
log.error(u'Search failed. {name}: {message}. expression={expression}, response={response}'.format(
name=ex.__class__.__name__, message=ex.message, response=response.text, expression=expression))
raise
else:
response_status = str(response.status_code) + ' ' + response.reason
message = u'SIP search failed. Reason: response status != 200. status={0}, content={1}'.format(
response_status,
response.text)
log.error(message)
raise SearchException(message,
sip_info=u'HTTP error "{status}" while searching upstream database'.format(status=response_status))
def getresults(self, resultid, options):
request_xml = '<getresult id="{0}" start="{1}" count="{2}" />'.format(resultid, options.offset, options.limit)
log.info('SIP: Getting results: {}'.format(request_xml))
starttime = timeit.default_timer()
response = requests.post(self.uri + '/search/getresults', data={'session': self.sessionid |
skosukhin/spack | var/spack/repos/builtin.mock/packages/openblas/package.py | Python | lgpl-2.1 | 1,575 | 0.000635 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is pa | rt of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the term | s of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Openblas(Package):
"""OpenBLAS: An optimized BLAS library"""
homepage = "http://www.openblas.net"
url = "http://github.com/xianyi/OpenBLAS/archive/v0.2.15.tar.gz"
version('0.2.15', 'b1190f3d3471685f17cfd1ec1d252ac9')
provides('blas')
def install(self, spec, prefix):
pass
|
oss/shrunk | backend/shrunk/client/aggregations.py | Python | mit | 2,981 | 0.001342 | from typing import Any
from collections import OrderedDict
def match_link_id(link_id: str) -> Any:
return {'$match': {'link_id': link_id}}
# daily visits aggregations phases
group_tracking_ids = {'$group': {
'_id': '$tracking_id',
'visits': {
'$addToSet': '$$ROOT',
},
}}
find_first = {'$project': {
'visits': {
'$reduce': {
# input is with visits[1:] beacuse first starts as visits[0]
# not removing 0 from input would double include
'input': {'$slice': ['$visits', 1, {'$size': '$visits'}]},
'initialValue': {'first': {'$arrayElemAt': ['$visits', 0]}, 'rest': []},
'in': {
'$cond': {
'if': {'$lt': ['$$this.time', '$$value.first.time']},
'then': {
'first': '$$this',
'rest': {'$concatArrays': [['$$value.first'], '$$value.rest']},
},
'else': {
'first': '$$value.first',
'rest': {'$concatArrays': [['$$this'], '$$value.rest']},
},
},
},
},
},
}}
mark_unqiue = {'$project': {
'visits': {
'$let': {
'vars': {
'first': {'$mergeObjects': ['$visits.first',
{'first_time': 1}]},
'rest': {'$map': {
| 'input': '$visits.rest',
'as': 'visit',
'in': {'$mergeObjects': ['$$visit', {'first_time': 0}]},
}},
},
| 'in': {'$concatArrays': [['$$first'], '$$rest']},
},
},
}}
unwind_ips = {'$unwind': '$visits'}
group_days = {'$group': {
'_id': {
'month': {'$month': '$visits.time'},
'year': {'$year': '$visits.time'},
'day': {'$dayOfMonth': '$visits.time'},
},
'first_time_visits': {
'$sum': '$visits.first_time',
},
'all_visits': {
'$sum': 1,
},
}}
# when added to the end of daily_visits_aggregation it will group by month at the end
chunk_months = {'$group': {
'_id': {
'month': '$_id.month',
'year': '$_id.year',
},
'days': {'$push': {
'day': '$_id.day',
'first_time_visits': '$first_time_visits',
'all_visits': '$all_visits',
}},
}}
make_sortable = {'$project': {
'month': '$_id.month',
'year': '$_id.year',
'day': '$_id.day',
'first_time_visits': 1,
'all_visits': 1,
}}
chronological_sort = {'$sort': OrderedDict([
('year', 1),
('month', 1),
('day', 1),
])}
clean_results = {'$project': {
'first_time_visits': 1,
'all_visits': 1,
}}
daily_visits_aggregation = [
# mark the first_time_visits
group_tracking_ids, find_first, mark_unqiue, unwind_ips,
# break into days
group_days,
# sort
make_sortable, chronological_sort, clean_results,
]
|
RensaProject/nodebox_linguistics_extended | nodebox_linguistics_extended/plural.py | Python | gpl-2.0 | 12,919 | 0.021209 | # PLURAL - last updated for NodeBox 1rc7
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html
# Prepositions are used to solve things like
# "mother-in-law" or "man at arms"
plural_prepositions = ["about", "above", "across", "after", "among", "around", "at", "athwart", "before", "behind", "below", "beneath", "beside", "besides", "between", "betwixt", "beyond", "but", "by", "during", "except", "for", "from", "in", "into", "near", "of", "off", "on", "onto", "out", "over", "since", "till", "to", "under", "until", "unto", "upon", "with"]
# Inflection rules that are either general,
# or apply to a certain category of words,
# or apply to a certain category of words only in classical mode,
# or apply only in classical mode.
# Each rule consists of:
# suffix, inflection, category and classic flag.
plural_rules = [
# 0/ Indefinite articles and demonstratives.
[
["^a$|^an$", "some", None, False],
["^this$", "these", None, False],
["^that$", "those", None, False],
["^any$", "all", None, False]
],
# 1/ Possessive adjectives.
# Overlaps with 1/ for "his" and "its".
# Overlaps with 2/ for "her".
[
["^my$", "our", None, False],
["^your$|^thy$", "your", None, False],
["^her$|^his$|^its$|^their$", "their", None, False]
],
# 2/
# Possessive pronouns.
[
["^mine$", "ours", None, False],
["^yours$|^thine$", "yours", None, False],
["^hers$|^his$|^its$|^theirs$", "theirs", None, False]
],
# 3/
# Personal pronouns.
[
["^I$", "we", None, False],
["^me$", "us", None, False],
["^myself$", "ourselves", None, False],
["^you$", "you", None, False],
["^thou$|^thee$", "ye", None, False],
["^yourself$|^thyself$", "yourself", None, False],
["^she$|^he$|^it$|^they$", "th | ey", None, False],
["^her$|^him$|^it$|^them$", "them", None, False],
["^herself$|^himself$|^itself$|^themself$", "themselves", None, False],
["^oneself$", "one | selves", None, False]
],
# 4/
# Words that do not inflect.
[
["$", "", "uninflected", False],
["$", "", "uncountable", False],
["s$", "s", "s-singular", False],
["fish$", "fish", None, False],
["([- ])bass$", "\\1bass", None, False],
["ois$", "ois", None, False],
["sheep$", "sheep", None, False],
["deer$", "deer", None, False],
["pox$", "pox", None, False],
["([A-Z].*)ese$", "\\1ese", None, False],
["itis$", "itis", None, False],
["(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False]
],
# 5/
# Irregular plurals.
# (mongoose, oxen).
[
["atlas$", "atlantes", None, True],
["atlas$", "atlases", None, False],
["beef$", "beeves", None, True],
["brother$", "brethren", None, True],
["child$", "children", None, False],
["corpus$", "corpora", None, True],
["corpus$", "corpuses", None, False],
["^cow$", "kine", None, True],
["ephemeris$", "ephemerides", None, False],
["ganglion$", "ganglia", None, True],
["genie$", "genii", None, True],
["genus$", "genera", None, False],
["graffito$", "graffiti", None, False],
["loaf$", "loaves", None, False],
["money$", "monies", None, True],
["mongoose$", "mongooses", None, False],
["mythos$", "mythoi", None, False],
["octopus$", "octopodes", None, True],
["opus$", "opera", None, True],
["opus$", "opuses", None, False],
["^ox$", "oxen", None, False],
["penis$", "penes", None, True],
["penis$", "penises", None, False],
["soliloquy$", "soliloquies", None, False],
["testis$", "testes", None, False],
["trilby$", "trilbys", None, False],
["turf$", "turves", None, True],
["numen$", "numena", None, False],
["occiput$", "occipita", None, True],
],
# 6/
# Irregular inflections for common suffixes
# (synopses, mice, men).
[
["man$", "men", None, False],
["person$", "people", None, False],
["([lm])ouse$", "\\1ice", None, False],
["tooth$", "teeth", None, False],
["goose$", "geese", None, False],
["foot$", "feet", None, False],
["zoon$", "zoa", None, False],
["([csx])is$", "\\1es", None, False]
],
# 7/
# Fully assimilated classical inflections
# (vertebrae, codices).
[
["ex$", "ices", "ex-ices", False],
["ex$", "ices", "ex-ices-classical", True],
["um$", "a", "um-a", False],
["um$", "a", "um-a-classical", True],
["on$", "a", "on-a", False],
["a$", "ae", "a-ae", False],
["a$", "ae", "a-ae-classical", True]
],
# 8/
# Classical variants of modern inflections
# (stigmata, soprani).
[
["trix$", "trices", None, True],
["eau$", "eaux", None, True],
["ieu$", "ieu", None, True],
["([iay])nx$", "\\1nges", None, True],
["en$", "ina", "en-ina-classical", True],
["a$", "ata", "a-ata-classical", True],
["is$", "ides", "is-ides-classical", True],
["us$", "i", "us-i-classical", True],
["us$", "us", "us-us-classical", True],
["o$", "i", "o-i-classical", True],
["$", "i", "-i-classical", True],
["$", "im", "-im-classical", True]
],
# 9/
# -ch, -sh and -ss take -es in the plural
# (churches, classes).
[
["([cs])h$", "\\1hes", None, False],
["ss$", "sses", None, False],
["x$", "xes", None, False]
],
# 10/
# Certain words ending in -f or -fe take -ves in the plural
# (lives, wolves).
[
["([aeo]l)f$", "\\1ves", None, False],
["([^d]ea)f$", "\\1ves", None, False],
["arf$", "arves", None, False],
["([nlw]i)fe$", "\\1ves", None, False],
],
# 11/
# -y takes -ys if preceded by a vowel,
# or when a proper noun,
# but -ies if preceded by a consonant
# (storeys, Marys, stories).
[
["([aeiou])y$", "\\1ys", None, False],
["([A-Z].*)y$", "\\1ys", None, False],
["y$", "ies", None, False]
],
# 12/
# Some words ending in -o take -os,
# the rest take -oes.
# Words in which the -o is preceded by a vowel always take -os
# (lassos, potatoes, bamboos).
[
["o$", "os", "o-os", False],
["([aeiou])o$", "\\1os", None, False],
["o$", "oes", None, False]
],
# 13/
# Miltary stuff (Major Generals).
[
["l$", "ls", "general-generals", False]
],
# 14/
# Otherwise, assume that the plural just adds -s
# (cats, programmes).
[
["$", "s", None, False]
],
]
# Suffix categories
plural_categories = {
"uninflected" : ["bison", "bream", "breeches", "britches", "carp", "chassis", "clippers", "cod", "contretemps", "corps", "debris", "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "graffiti", "headquarters", "herpes", "high-jinks", "homework", "innings", "jackanapes", "mackerel", "measles", "mews", "mumps", "news", "pincers", "pliers", "proceedings", "rabies", "salmon", "scissors", "series", "shears", "species", "swine", "trout", "tuna", "whiting", "wildebeest"],
"uncountable" : ["advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture", "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage", "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice", "sand", "software", "understanding", "water"],
"s-singular" : ["acropolis", "aegis", "alias", "asbestos", "bathos", "bias", "caddis", "cannabis", "canvas", "chaos", "cosmos", "dais", "digitalis", "epidermis", "ethos", "gas", "glottis", "glottis", "ibis", "lens", "mantis", "marquis", "metropolis", "pathos", "pelvis", "polis", "rhinoceros", "sassafras", "trellis"],
"ex-ices" : ["codex", "murex", "silex"],
"ex-ices-classical" : ["apex", "cortex", "index", "latex", "pontifex", "simplex", "vertex", "vortex"],
"um-a" : ["agendum", "bacterium", "candelabrum", "datum", "desideratum", "erratum", "extremum", "ovum", "stratum" |
hesseltuinhof/mxnet | example/dsd/mlp.py | Python | apache-2.0 | 4,340 | 0.004608 | import mxnet as mx
import os
import logging
import argparse
from math import ceil
import sparse_sgd
# symbol net
def get_symbol():
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(act1, name='fc2', num_hidden=64)
act2 = mx.symbol.Activation(fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)
softmax = mx.symbol.SoftmaxOutput(fc3, name='sm')
return softmax
# download ubyte version of mnist and untar
def download_data():
if not os.path.isdir("data/"):
os.system("mkdir data/")
if (not os.path.exists('data/train-images-idx3-ubyte')) or \
(not os.path.exists('data/train-labels-idx1-ubyte')) or \
(not os.path.exists('data/t10k-images-idx3-ubyte')) or \
(not os.path.exists('data/t10k-labels-idx1-ubyte')):
os.system("wget -q http://data.mxnet.io/mxnet/data/mnist.zip -P data/")
os.chdir("./data")
os.system("unzip -u mnist.zip")
os.chdir("..")
# get data iterators
def get_iters(batch_size):
train = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
data_shape=(784,),
label_name='sm_label',
batch_size=batch_size,
shuffle=True,
flat=True,
silent=False,
seed=10)
val = mx.io.MNISTIter(
image="data/t10k-images-idx3-ubyte",
label="data/t10k-labels-idx1-ubyte",
data_shape=(784,),
label_name='sm_label',
batch_size=batch_size,
shuffle=True,
flat=True,
silent=False)
return (train, val)
def test_mlp(args):
# get parameters
prefix = './mlp'
batch_size = 100
pruning_switch_epoch = [int(i) for i in args.pruning_switch_epoch.split(',')]
num_epoch = pruning_switch_epoch[-1]
batches_per_epoch = ceil(60000.0/batch_size)
weight_sparsity = args.weight_sparsity
bias_sparsity = args.bias_sparsity
weight_threshold = args.weight_threshold
bias_threshold = args.bias_threshold
if args.weight_sparsity:
weight_sparsity = [float(i) for i in args.weight_sparsity.split(',')]
bias_sparsity = [float(i) for i in args.bias_sparsity.split(',')]
else:
weight_threshold = [float(i) for i in args.weight_threshold.split(',')]
bias_threshold = [float(i) for i in args.bias_threshold.split(',')]
# get symbols and iterators
sym = get_symbol()
download_data()
(train, val) = get_iters(batch_size)
# fit model
model = mx.mod.Module(
sym,
context=[mx.cpu(i) for i in range(2)],
data_names=['data'],
label_names=['sm_label'])
optimizer_params = {
'learning_rate' : 0.1,
'wd' : 0.004,
'momentum' : 0.9,
'pruning_switch_epoch' : pruning_switch_epoch,
'batches_per_epoch' : batches_per_epoch,
'weight_sparsity' : weight_sparsity,
'bias_sparsity' : bias_sparsity,
'weight_threshold' : weight_threshold,
'bias_threshold' : bias_threshold}
logging.info('Start training...')
model.fit(train,
eval_data=val,
eval_metric='acc',
epoch_end_callback=mx.callback.do_checkpoint(prefix),
num_epoch=num_epoch,
optimi | zer='sparsesgd',
optimizer_params=optimizer_params)
logging.info('Finish traning...')
# remove files
for i in range(num_epoch):
os.remove('%s-%04d.params' % (prefix, i + 1))
os.remove('%s-symbol.json' % prefix)
if __name__ == "__main__" | :
# print logging by default
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="sparse training")
parser.add_argument('--pruning_switch_epoch', type=str)
parser.add_argument('--weight_sparsity', type=str, default=None)
parser.add_argument('--bias_sparsity', type=str, default=None)
parser.add_argument('--weight_threshold', type=str, default=None)
parser.add_argument('--bias_threshold', type=str, default=None)
args = parser.parse_args()
test_mlp(args)
|
emad8878/is210-week-05-warmup | task_02.py | Python | mpl-2.0 | 160 | 0 | #!/usr/bin/env python
# -*- | coding: utf-8 | -*-
"""This is week5 warmup task_02 Import a Module Namespace"""
import task_01.peanut
TIME = task_01.peanut.BUTTER
|
cms-ttbarAC/CyMiniAna | python/runDeepLearning.py | Python | mit | 4,594 | 0.014367 | """
Created: 12 November 2016
Last Updated: 11 April 2018
Dan Marley
daniel.edison.marley@cernSPAMNOT.ch
Texas A&M University
-----
Script for running the deep learning implementation
To run:
$ python python/runDeepLearning.py config/mlconfig.txt
-- the second argument is the text file with configurations for the NN/setup
"""
import os
import sys
import json
import util
from config import Config
from collections import Counter
from time import strftime,localtime
from deepLearning import DeepLearning
print
print " ------------------------------ "
print " * Deep Learning with Keras * "
print " ------------------------------ "
print
date = strftime("%d%b", localtime())
cmaDir = os.path.expandvars('$CYMINIANADIR')
vb = util.VERBOSE()
## Set configuration options ##
if len(sys.argv)<2:
vb.HELP()
sys.exit(1)
config = Config(sys.argv[1])
vb.level = config.verbose_level
vb.initialize()
if not config.runTraining and not config.runInference:
vb.ERROR("RUN : No configuration set ")
vb.ERROR("RUN : Please set the arguments 'runTraining' or 'runInference' to define workflow ")
vb.ERROR("RUN : Exiting.")
sys.exit(1)
## Setup features
NN_parameters = ['epochs','batch_size','loss','optimizer','metrics','activations',
'nHiddenL | ayers','nNodes','input_dim','kfold_splits']
featureKeys = json.load(open('config/features.json'))
featureKey = -1
for key in featureKeys.keys():
if Counter(featureKeys[key])==Counter(config.features):
featureKey = int(key)
break
if featureKey | <0:
featureKey = max([int(i) for i in featureKeys.keys()])+1
featureKeys[str(featureKey)] = config.features
vb.INFO("RUN : New features for NN ")
with open('config/features.json','w') as outfile:
json.dump(featureKeys,outfile)
## Set output directory
output_dir = "nHiddenLayers{0}_".format(config.nHiddenLayers)
output_dir += "nNodes{0}_".format('-'.join(config.nNodes))
output_dir += "epoch{0}_".format(config.epochs)
output_dir += "batch{0}_".format(config.batch_size)
output_dir += "kfold{0}_".format(config.kfold_splits)
output_dir += "activation-{0}_".format(config.activation.replace(',','-'))
output_dir += "featureKey{0}".format(featureKey)
hep_data_name = config.hep_data.split('/')[-1].split('.')[0]
## Setup Deep Learning class
dnn = DeepLearning()
dnn.target_names = ["top","antitop"]
dnn.target_values = [0,1]
dnn.hep_data = config.hep_data
dnn.model_name = config.dnn_data
dnn.verbose_level = config.verbose_level
dnn.treename = config.treename
dnn.useLWTNN = True
dnn.dnn_name = "dnn"
dnn.output_dim = config.output_dim
dnn.dnn_method = 'binary'
dnn.loss = config.loss
dnn.init = config.init
dnn.nNodes = config.nNodes
dnn.dropout = None
dnn.metrics = config.metrics
dnn.features = config.features
dnn.epochs = config.epochs
dnn.optimizer = config.optimizer
dnn.input_dim = len(config.features)
dnn.batch_size = config.batch_size
dnn.activations = config.activation.split(',')
dnn.kfold_splits = config.kfold_splits
dnn.nHiddenLayers = config.nHiddenLayers
#dnn.earlystopping = {'monitor':'loss','min_delta':0.0001,'patience':5,'mode':'auto'}
## inference/training
output = "{0}/{1}/{2}".format( config.output_path,output_dir,hep_data_name)
if config.runTraining:
output += "/training/"
else:
output += "/inference/"
dnn.output_dir = output
if not os.path.isdir(output):
vb.INFO("RUN : '{0}' does not exist ".format(output))
vb.INFO("RUN : Creating the directory. ")
os.system( 'mkdir -p {0}'.format(output) )
else:
vb.INFO("RUN : Saving output to {0}".format(output))
## load hep data (physics data -- .json file). Always need this for testing/training
dnn.features = config.features
## Setup
dnn.initialize()
if config.runTraining:
vb.INFO("RUN : > Build the NN")
# set properties of the NN
dnn.runTraining(['ljet_BEST_t','ljet_BEST_j']) # add extra attributes to plot
## -- Save information on the NN to a text file to reference later
outputFile = open(dnn.output_dir+'/ABOUT.txt','w')
outputFile.write(" * NN Setup * \n")
outputFile.write("\n > NN parameters: \n")
for NN_parameter in NN_parameters:
outputFile.write( " >> {0}: {1}\n".format(NN_parameter,getattr(dnn,NN_parameter)))
outputFile.write( "\n > NN Features: \n" )
for feature in dnn.features:
outputFile.write(" >> "+feature+"\n" )
outputFile.close()
if config.runInference:
vb.INFO("RUN : > Load NN model from disk")
dnn.runInference()
## END ##
|
AgResearch/prbdf | database_paper/figure_4/fasta_yank.py | Python | gpl-3.0 | 467 | 0.025696 | #!/usr/bin/python2.6
#
# filters a fasta file (from standard input), pulling o | ut sequences by name
#
from Bio import SeqIO
import sys
usage = "usage : cat some_file.fa | fasta_yank.py file_of_names "
if len(sys.argv) != 2:
print usage
sys.exit(1)
SeqIO.write ((r for r in SeqIO.parse(sys.stdin, "fasta") if r.name in [name.strip() for name in open(sys.argv[1],"r")]) , sys.stdout, "fasta")
#for r in SeqIO.parse(sys.stdin, "f | asta"):
# print r.name
|
krisss2121/bpmn-python | bpmn_python/graph/classes/activities/activity_type.py | Python | gpl-3.0 | 1,224 | 0.001634 | # coding=utf-8
"""
Class used for representing | tActivity of BPMN 2.0 graph
"""
import graph.classes.flow_node_type as flow_node
class Activity(flow_node.FlowNode):
"""
Class used for representing tActivity of BPMN 2.0 graph
Fields (except inherited):
- default: ID of default | flow of gateway. Must be either None (default is optional according to BPMN 2.0 XML Schema)
or String.
"""
def __init__(self):
"""
Default constructor, initializes object fields with new instances.
"""
super(Activity, self).__init__()
self.__default = None
def get_default(self):
"""
Getter for 'default' field.
:return:a value of 'default' field.
"""
return self.__default
def set_default(self, value):
"""
Setter for 'default' field.
:param value - a new value of 'default' field. Must be either None (default is optional according to
BPMN 2.0 XML Schema) or String.
"""
if value is None:
self.__default = value
elif not isinstance(value, str):
raise TypeError("Default must be set to a String")
else:
self.__default = value
|
biggapoww/Python-CIS-5 | simple pypy/compute_time_8.py | Python | mit | 375 | 0.010667 | ##Write Python s | tatements to prompt for and collect values
##for the time in hours and minutes (two integer quantities).
def time():
hours = input('What are the hours ')
minutes = input('What are the minutes ')
if (int(hours) >= 1 and i | nt(hours) <= 12 and int(minutes) >= 0 and int(minutes) <= 59):
print('The time is '+ hours + ':' + minutes)
time()
|
daStrauss/subsurface | src/expts/numSensorsCTX.py | Python | apache-2.0 | 1,572 | 0.01464 | '''
Created on Oct 5, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the | Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, |
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import numpy as np
import scipy.io as spio
# F = spio.loadmat('incCondGo.mat')
# numRuns = F['goTo'].shape[0]
D = {'solverType':'contrastX', 'flavor':'TE', 'numRuns':4800, 'expt':'noSense', 'numProcs':16}
def getMyVars(parseNumber, D):
'''routine to return the parameters to test at the current iteration.'''
# noFreqs,noPhis,bkg = np.meshgrid(range(1,7), range(1,7), range(100))
noFreqs,noPhis,bkg = np.mgrid[1:7,0:16,0:50]
noFreqs = noFreqs.flatten()
noPhis = noPhis.flatten()
bkg = bkg.flatten()
allfrq = np.arange(1,80,5)
D['freqs'] = np.round(np.logspace(np.log10(1000), np.log10(50000), noFreqs[parseNumber]))
# D['inc'] = [75.0*np.pi/180.0] -- use defaults!
D['numSensors'] = allfrq[noPhis[parseNumber]]
D['bkgNo'] = bkg[parseNumber]+100;
D['numProcs'] = len(D['freqs'])*4
return D
|
jtara1/SimpleFlaskWebsite | app/views.py | Python | apache-2.0 | 3,783 | 0.001322 | from flask import render_template, flash, redirect, session, url_for, request, g
from flask_login import login_user, logout_user, current_user, login_required
from app import app, db, lm, oid
from .forms import LoginForm, EditForm
from .models import User
from datetime import datetime
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@app.route('/')
@app.route('/index')
@login_required
def index():
user = g.user
posts = [ # fake array of posts
{
'author': {'nickname': 'John'},
'body': 'Beautiful day in Portland!'
},
{
'author': {'nickname': 'Susan'},
'body': 'The Avengers movie was so cool!'
}
]
return render_template('index.html',
title='Home',
user=user,
posts=posts)
@app.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data
return oid.try_login(form.openid.data, ask_for=['nickname', 'email'])
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS'])
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
flash('Invalid login. Please try again.')
return redirect(url_for('login'))
user = User.query.filter_by(email=resp.email).first()
if user is None:
nickname = resp.nickname
| if nickname is None or nickname == "":
nickname = resp.email.split('@')[0]
nickname = User.make_unique_nickname(nickn | ame=nickname)
user = User(nickname=nickname, email=resp.email)
db.session.add(user)
db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember = remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/user/<nickname>')
@login_required
def user(nickname):
user = User.query.filter_by(nickname=nickname).first()
if user is None:
flash('User %s not found.' % nickname)
return redirect(url_for('index'))
posts = [
{'author': user, 'body': 'Test post #1'},
{'author': user, 'body': 'Test post #2'}
]
return render_template('user.html',
user=user,
posts=posts)
@app.route('/edit', methods=['GET', 'POST'])
@login_required
def edit():
form = EditForm(g.user.nickname)
if form.validate_on_submit():
g.user.nickname = form.nickname.data
g.user.about_me = form.about_me.data
db.session.add(g.user)
db.session.commit()
flash('You changes have been saved.')
return redirect(url_for('edit'))
else:
form.nickname.data = g.user.nickname
form.about_me.data = g.user.about_me
return render_template('edit.html', form=form)
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500 |
dougbeal/nosy | nosy/nosy.py | Python | bsd-3-clause | 5,722 | 0 | """Watch for changes in a collection of source files. If changes, run
the specified test runner (nosetests, by default).
"""
from argparse import ArgumentParser
import ConfigParser
import glob
import os
import stat
import subprocess
import sys
import time
class Nosy(object):
"""Watch for changes in all source files. If changes, run the
specified test runner (nosetests, by default).
"""
def __init__(self):
"""Return an instance with the default configuration, and a
command line parser.
"""
self.config = ConfigParser.SafeConfigParser()
self.config.add_section('nosy')
self.config.set('nosy', 'test_runner', 'nosetests')
self.config.set('nosy', 'base_path', '.')
self.config.set('nosy', 'glob_patterns', '')
self.config.set('nosy', 'exclude_patterns', '')
self.config.set('nosy', 'extra_paths', '')
self.config.set('nosy', 'options', '')
self.config.set('nosy', 'tests', '')
# paths config retained for backward compatibility; use
# extra_paths for any files or paths that aren't easily
# included via base_path, glob_patterns, and exclude_patterns
self.config.set('nosy', 'paths', '*.py')
self._build_cmdline_parser()
def _build_cmdline_parser(self):
self.parser = ArgumentParser(
description='Automatically run a command (nosetest, by default) '
'whenever source files change.')
self.parser.add_argument(
'-c', '--config', dest='config_file', default='setup.cfg',
help='configuration file path and name; defaults to %(default)s')
def parse_cmdline(self):
"""Parse the command line and set the config_file attribute.
"""
args = self.parser.parse_args()
self.config_file = args.config_file
def _read_config(self):
try:
self.config.readfp(open(self.config_file, 'rt'))
except IOError, msg:
self.parser.error("can't read config file:\n %s" % msg)
self.test_runner = self.config.get('nosy', 'test_runner')
self.base_path = self.config.get('nosy', 'base_path')
self.glob_patterns = self.config.get(
'nosy', 'glob_patterns').split()
self.exclude_patterns = self.config.get(
'nosy', 'exclude_patterns').split()
self.extra_paths = self.config.get('nosy', 'extra_paths').split()
self.cmd_opts = self.config.get('nosy', 'options')
self.cmd_args = self.config.get('nosy', 'tests')
# paths config retained for backward compatibility; use
# extra_paths for any files or paths that aren't easily
# included via base_path, glob_patterns, and
# exclude_patterns
self.paths = self.config.get('nosy', 'paths').split()
def _calc_extra_paths_checksum(self):
"""Return the checksum for the files given by the extra paths
pattern(s).
self.paths is included for backward compatibility.
"""
checksum = 0
for path in self.extra_paths + self.paths:
for file_path in glob.iglob(path):
stats = os.stat(file_path)
checksum += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]
return checksum
def _calc_exclusions(self, root):
"""Return a set of file paths to be excluded from the checksum
calculation.
"""
exclusions = set()
for pattern in self.exclude_patterns:
for file_path in glob.iglob(os.path.join(root, pattern)):
exclusions.add(file_path)
return exclusions
def _calc_dir_checksum(self, exclusions, root):
"""Return the checksum for the monitored files in the
specified directory tree.
"""
checksum = 0
for pattern in self.glob_patterns:
for file_path in glob.iglob(os.path.join(root, pattern)):
if file_path not in exclusions:
stats = os.stat(file_path)
checksum += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]
return checksum
def _checksum(self):
"""Return a checksum which indicates if any files in the paths
list have changed.
"""
checksum = self._calc_extra_paths_checksum()
for root, dirs, files in os.walk(self.base_path):
exclusions = self._calc_exclusions(root)
checksum += self._calc_dir_checksum(exclusions, root)
return checksum
def run(self):
"""Run specified test runner (default no | setests) whenever the
source files (default ./*.py) change.
Re-read the configuration before each run so that options and
arguments may be changed.
"""
checksum = 0
self._read_config()
while True:
if self._checksum() != checksum:
self._read_config()
checksum = self._checksum()
cmd = (self. | test_runner.split() if ' ' in self.test_runner
else [self.test_runner])
try:
subprocess.call(
cmd
+ self.cmd_opts.replace('\\\n', '').split()
+ self.cmd_args.replace('\\\n', '').split())
except OSError, msg:
sys.stderr.write('Command error: %s: %s\n' % (msg, cmd))
sys.exit(2)
time.sleep(1)
def main():
nosy = Nosy()
nosy.parse_cmdline()
try:
nosy.run()
except KeyboardInterrupt:
sys.exit(130)
except SystemExit:
sys.exit(0)
if __name__ == '__main__':
main()
|
lento/cortex | test/IECoreRI/ParameterisedProcedural.py | Python | bsd-3-clause | 4,780 | 0.038494 | ##########################################################################
#
# Copyright (c) 2009-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os
import IECore
import IECoreRI
class TeapotProcedural( IECore.ParameterisedProcedural ) :
def __init__( self ) :
IECore.ParameterisedProcedural.__init__( self )
self.boundCalled = False
self.renderCalled = False
self.renderState | Called = False
def doBound( self, args ) :
self.boundCalled = True
return IECore.Box3f( IECore.V3f( -1 ), IECore.V3f( 1 ) )
def doRenderState( self, renderer, | args ) :
self.renderStateCalled = True
renderer.setAttribute( "ri:visibility:diffuse", IECore.BoolData( 1 ) )
def doRender( self, renderer, args ) :
self.renderCalled = True
renderer.geometry( "teapot", {}, {} )
class ParameterisedProceduralTest( IECoreRI.TestCase ) :
def checkContents( self, fileName, expectedElements, unexpectedElements ) :
l = file( fileName ).readlines()
lineIndex = 0
for expected in expectedElements :
found = False
for i in range( lineIndex, len( l ) ) :
if expected in l[i] :
lineIndex = i
found = True
break
self.assert_( found )
for e in unexpectedElements :
for ll in l :
self.assert_( e not in ll )
def testNormalCall( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testParameterisedProcedural.rib" )
r.worldBegin()
t = TeapotProcedural()
t.render( r )
r.worldEnd()
self.checkContents(
"test/IECoreRI/output/testParameterisedProcedural.rib",
[
"AttributeBegin",
"Attribute \"visibility\" \"int diffuse\" [ 1 ]",
"Geometry \"teapot\"",
"AttributeEnd",
],
[]
)
self.assertEqual( t.renderStateCalled, True )
self.assertEqual( t.boundCalled, True )
self.assertEqual( t.renderCalled, True )
def testStateOnly( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testParameterisedProcedural.rib" )
r.worldBegin()
t = TeapotProcedural()
t.render( r, inAttributeBlock=False, withState=True, withGeometry=False )
r.worldEnd()
self.checkContents(
"test/IECoreRI/output/testParameterisedProcedural.rib",
[
"Attribute \"visibility\" \"int diffuse\" [ 1 ]",
],
[
"AttributeBegin",
"Geometry \"teapot\"",
"AttributeEnd",
],
)
self.assertEqual( t.renderStateCalled, True )
self.assertEqual( t.boundCalled, False )
self.assertEqual( t.renderCalled, False )
def testImmediateGeometryOnly( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testParameterisedProcedural.rib" )
r.worldBegin()
t = TeapotProcedural()
t.render( r, inAttributeBlock=False, withState=False, withGeometry=True, immediateGeometry=True )
r.worldEnd()
self.checkContents(
"test/IECoreRI/output/testParameterisedProcedural.rib",
[
"Geometry \"teapot\"",
],
[
"AttributeBegin",
"Attribute \"visibility\" \"int diffuse\" [ 1 ]",
"AttributeEnd",
],
)
self.assertEqual( t.renderStateCalled, False )
self.assertEqual( t.boundCalled, False )
self.assertEqual( t.renderCalled, True )
if __name__ == "__main__":
unittest.main()
|
ulikoehler/UliEngineering | tests/SignalProcessing/TestUtils.py | Python | apache-2.0 | 6,677 | 0.002097 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from numpy.testing import assert_approx_equal, assert_allclose, assert_array_equal
from UliEngineering.SignalProcessing.Utils import *
from parameterized import parameterized
import concurrent.futures
import numpy as np
import datetime
import unittest
unstairMethods = [
("left",),
("middle",),
("right",),
("reduce",),
]
class TestRemoveMean(unittest.TestCase):
def testRemoveMean(self):
assert_allclose(remove_mean([]), [])
assert_allclose(remove_mean([1.0, 2.0, 3.0]), [-1.0, 0.0, 1.0])
class TestRMS(unittest.TestCase):
def testRMS(self):
assert_allclose(rms([]), [])
assert_allclose(rms([1.0, 2.0, 3.0]), np.sqrt(np.mean([1*1, 2*2, 3*3])))
class TestPeakToPeak(unittest.TestCase):
def testPeakToPeak(self):
assert_allclose(peak_to_peak(None), 0.0)
assert_allclose(peak_to_peak([]), 0.0)
assert_allclose(peak_to_peak([0.0]), 0.0)
assert_allclose(peak_to_peak([1.0]), 0.0)
assert_allclose(peak_to_peak([1.0, 1.0]), 0.0)
assert_allclose(peak_to_peak([1.0, 2.0]), 1.0)
assert_allclose(peak_to_peak([2.0, 1.0]), 1.0)
assert_allclose(peak_to_peak([0, 1, 3, -3, 0, 5, 0.7, 0.9]), 8)
assert_allclose(peak_to_peak(np.asarray([])), 0.0)
assert_allclose(peak_to_peak(np.asarray([0.0])), 0.0)
assert_allclose(peak_to_peak(np.asarray([1.0])), 0.0)
assert_allclose(peak_to_peak(np.asarray([1.0, 1.0])), 0.0)
assert_allclose(peak_to_peak(np.asarray([1.0, 2.0])), 1.0)
assert_allclose(peak_to_peak(np.asarray([2.0, 1.0])), 1.0)
assert_allclose(peak_to_peak(np.asarray([0, 1, 3, -3, 0, 5, 0.7, 0.9])), 8)
class TestUnstair(unittest.TestCase):
@parameterized.expand(unstairMethods)
def testNoReduction(self, method):
# Test if unstair returns the original array for a non-step function
x = np.arange(10)
y = np.square(x)
xres, yres = unstair(x, y, method=method)
assert_array_equal(xres, x)
assert_array_equal(yres, y)
def testSimpleLeft(self):
y = np.asarray([0, 0, 0, 1, 2, 2, 2, 3, 4, 5, 5])
x = np.arange(y.size)
xexpected = [0, 3, 4, 7, 8, 9, 10]
yexpected = y[xexpected]
xres, yres = unstair(x, y, method="left")
assert_array_equal(xres, xexpected)
assert_array_equal(yres, yexpected)
def testSimpleRight(self):
y = np.asarray([0, 0, 0, 1, 2, 2, 2, 3, 4, 5, 5])
x = np.arange(y.size)
xexpected = [0, 2, 3, 6, 7, 8, 10]
yexpected = y[xexpected]
xres, yres = unstair(x, y, method="right")
assert_array_equal(xres, xexpected)
assert_array_equal(yres, yexpected)
def testSimpleMiddle(self):
y = np.asarray([0, 0, 0, 1, 2, 2, 2, 3, 4, 5, 5])
x = np.arange(y.size)
xexpected = [0, 1, 3, 5, 7, 8, 10]
yexpected = y[xexpected]
xres, yres = unstair(x, y, method="middle")
assert_array_equal(xres, xexpected)
assert_array_equal(yres, yexpected)
def testSimpleReduce(self):
y = np.asarray([0, 0, 0, 1, 2, 2, 2, 3, 4, 5, 5])
x = np.arange(y.size)
xexpected = [0, 2, 3, 4, 6, 7, 8, 9, 10]
yexpected = y[xexpected]
xres, yres = unstair(x, y, method="reduce")
assert_array_equal(xres, xexpected)
assert_array_equal(yres, yexpected)
@parameterized.expand(unstairMethods)
def testSine(self, method):
# Test with a rounded sine function. Data should be reduced
sx = np.arange(1000) * .02
rsine = np.round(np.sin(sx) * 10.) / 10.
rx, ry = unstair(sx, rsine, method=method)
self.assertLess(rx.size, sx.size)
self.assertLess(ry.size, rsine.size)
class TestOptimumPolyfit(unittest.TestCase):
def testBasic(self):
x = np.linspace(-100., 100., 10000)
y = np.square(x)
poly, deg, score = optimum_polyfit(x, y)
self.assertLess(score, 1e-10)
self.assertEqual(np.max(np.abs(y - poly(x))), score)
def testRandom(self):
x = np.linspace(-100., 100., 1000)
y = np.random.random_sample(x.size)
poly, deg, score = optimum_polyfit(x, y)
class TestLinSpace(unittest.TestCase):
@parameterized.expand([
(0.0, 100.0, 101, True),
(0.0, 100.0, 202, True),
(0.0, 100.0, 735, True),
(0.0, 200.0, 101, True),
(12.5, 202.3, 101, True),
(0.0, 100.0, 101, False),
(0.0, 100.0, 202, False),
(0.0, 100.0, 735, False),
(0.0, 200.0, 101, False),
(12.5, 202.3, 101, False),
])
def testBasic(self, start, end, n, endpoint):
params = (start, end, n)
spc = Lin | Range(*params, endpoint=endpoint)
linspc = np.linspace(*params, endpoint=endpoint)
self.assertEqual(len(spc), params[2])
self.assertEqual(len(spc), linspc.size)
self.assertEqual((len(spc),), linspc.shape)
| assert_allclose(spc[:], linspc)
# Test samplerate
assert_approx_equal(spc.samplerate(), (n - 1 if endpoint else n) / (end - start))
# Test some slice
istart, iend = len(spc) // 3, len(spc) // 2
assert_allclose(spc[istart:iend], linspc[istart:iend])
# Test negative indices
assert_allclose(spc[-istart], linspc[-istart])
# Test mid
self.assertEqual(spc.mid, (start + end) / 2.)
# Test view
assert_allclose(spc.view(0, None).size, linspc.size)
assert_allclose(spc.view(0, None)[:], linspc)
def test_equal(self):
l1 = LinRange(0., 100., 100, endpoint=False)
l2 = LinRange(0., 100., 100, endpoint=False)
l3 = LinRange(0., 100., 100, endpoint=True)
self.assertTrue(l1 == l2)
self.assertTrue(l2 == l1)
self.assertFalse(l3 == l1)
self.assertFalse(l3 == l2)
def test_repr(self):
l = LinRange(0., 100., 100, endpoint=False)
self.assertEqual("LinRange(0.0, 100.0, 1.0)", str(l))
l = LinRange(0., 100., 100, endpoint=False, dtype=int)
self.assertEqual("LinRange(0.0, 100.0, 1.0, dtype=int)", str(l))
def testDtype(self):
lin1 = LinRange(0.0, 100.0, 101)
self.assertIsInstance(lin1, LinRange)
self.assertIsInstance(lin1.view(0, 5), LinRange)
class TestAggregate(unittest.TestCase):
def test_aggregate(self):
self.assertEqual([("a", 1), ("b", 1), ("c", 1)], list(aggregate("abc")))
self.assertEqual([], list(aggregate("")))
self.assertEqual([("a", 2), ("b", 1), ("c", 2), ("d", 1)],
list(aggregate("aabccd")))
|
rakeshmi/tempest | tempest/tests/services/compute/test_security_group_rules_client.py | Python | apache-2.0 | 2,571 | 0 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import security_group_rules_client
from tempest.tests im | port fake_auth_provider
from tempest.tests.services.compute import base
class TestS | ecurityGroupRulesClient(base.BaseComputeServiceTest):
FAKE_SECURITY_GROUP_RULE = {
"security_group_rule": {
"id": "2d021cf1-ce4b-4292-994f-7a785d62a144",
"ip_range": {
"cidr": "0.0.0.0/0"
},
"parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb",
"to_port": 443,
"ip_protocol": "tcp",
"group": {},
"from_port": 443
}
}
def setUp(self):
super(TestSecurityGroupRulesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = security_group_rules_client.SecurityGroupRulesClient(
fake_auth, 'compute', 'regionOne')
def _test_create_security_group_rule(self, bytes_body=False):
req_body = {
"from_port": "443",
"ip_protocol": "tcp",
"to_port": "443",
"cidr": "0.0.0.0/0",
"parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb"
}
self.check_service_client_function(
self.client.create_security_group_rule,
'tempest.common.service_client.ServiceClient.post',
self.FAKE_SECURITY_GROUP_RULE,
to_utf=bytes_body, **req_body)
def test_create_security_group_rule_with_str_body(self):
self._test_create_security_group_rule()
def test_create_security_group_rule_with_bytes_body(self):
self._test_create_security_group_rule(bytes_body=True)
def test_delete_security_group_rule(self):
self.check_service_client_function(
self.client.delete_security_group_rule,
'tempest.common.service_client.ServiceClient.delete',
{}, status=202, group_rule_id='group-id')
|
DongElkan/flavonoid | flavonoid_parser.py | Python | gpl-3.0 | 15,159 | 0.000264 | """
Parse flavonoid molecule object read from Indigo toolkit. The molecular
identifiers accepted are canonical SMILES, InchI or file like format
..mol (file stored in ChemSpider database), and ..sdf (in PubChem database).
"""
import collections
from re import findall, sub
from indigo import Indigo, IndigoException
from indigo_inchi import IndigoInchi
from indigo_renderer import IndigoRenderer as renderer
idg = Indigo()
idgin = IndigoInchi(idg)
idgrender = renderer(idg)
Atom = collections.namedtuple("Atom",
["symbol", "charge", "neighbors", "neiset"])
Nei = collections.namedtuple("Nei",
["index", "symbol", "bondorder",
"degree", "bondtopology"])
Ring = collections.namedtuple("Ring",
["obj", "index", "smiles", "label", "supports",
"isringAC", "ringACnns"])
class FlavonoidException(Exception):
""" flavonoid exceptions, to identify whether the input molecule
is valid flavonoid candidate, and other related exceptions."""
def __init__(self, errid):
self.id = errid
def __str__(self):
if self.id == 1:
return " ".join(["Multiple components are unsupported in",
"identifying flavonoids."])
elif self.id == 2:
return " ".join(["Elements except CHNOS are not allowed as a",
"candidate of flavonoid."])
elif self.id == 3:
return "Group matching times out."
elif self.id == 4:
return "This is not a flavonoid."
elif self.id == 5:
return "Can not load molecule from the input identifier by Indigo."
elif self.id == 6:
return " ".join(["Charged atoms except oxygen or charges except",
"+1 are not allowed as a flavonoid."])
elif self.id == 7:
return " ".join(["Unexcepted side groups, elements or radical",
"atoms exist in flavonoids."])
elif self.id == 8:
return " ".join(["Unexpected side group including nitrogens in",
"a flavonoid."])
elif self.id == 9:
return " ".join(["Too condensed distribution of benzene rings",
"which unlikely appears as a flavonoid."])
elif self.id == 10:
return " ".join(["Not sufficient rings are found as a flavonoid"
"candidate."])
elif self.id == 11:
return " ".join(["Multiple oxygens bonding to the same atom"
"in a ring is unacceptable as a flavonoid."])
def loadmol(identifier):
"""
Load molecule using the identifier to construct an Indigo object.
"""
# It's a molecular formatted file, only ..mol and ..sdf are accepted.
if identifier.endswith(".mol") or identifier.endswith(".sdf"):
try:
mol = idg.loadMoleculeFromFile(identifier)
except IndigoException:
raise FlavonoidException(5)
# Otherwise it's an identifier, SMILES or InchI
try:
mol = idg.loadMolecule(identifier)
except IndigoException:
try:
mol = idgin.loadMolecule(identifier)
except IndigoException:
raise FlavonoidException(5)
return mol
class MoleculeParser(object):
"""
Parse molecular object for subsequent analysis
"""
def __init__(self, identifier):
mol = loadmol(identifier)
mol.dearomatize()
self.molecule = mol
self.CHAIN = idg.CHAIN
self.RING = idg.RING
self._atoms()
self._Rings()
self._assignbenzylabel()
self._RingA()
def _atoms(self):
"""
Get information of atoms and corresponding neighbor atoms in
current molecule and set the obtained variables as global.
The information for neighbors (in variable 'neis') include:
.. index of neighbor atom;
.. bond order the this neighbor atom to current atom;
.. atom symbol in lower case;
.. degree of neighbor atom.
The information for atoms (in )
"""
atoms = {}
for atom in self.molecule.iterateAtoms():
k = atom.index()
neis = tuple([Nei(nei.index(),
nei.symbol().lower(),
nei.bond().bondOrder(),
nei.degree(),
nei.bond().topology())
for nei in atom.iterateNeighbors()])
atoms[k] = Atom(atom.symbol().lower(), atom.charge(), neis,
set(nei.index for nei in neis))
self.atoms = atoms
def _Rings(self):
"""
Get ring objects and atom indices in each ring
"""
coset = set("co")
# rings containing atom objects
rings, benzylrings, thps = {}, {}, {}
for i, r in enumerate(self.molecule.iterateRings(3, 8)):
rjx = tuple([atom.index() for atom in r.iterateAtoms()])
s = r.clone().smiles()
setjx = set(rjx)
tempobj = Ring(r, setjx, s, None, None, None, None)
rings[rjx] = tempobj
if 5 <= len(rjx) <= 6:
# to avoid internal ring
if any(len(self.atoms[j].neiset & set(rjx)) >= 3 for j in rjx):
continue
# ring smiles
sc = ''.join(findall("[a-zA-Z]+", s)).lower()
if all(ak == "c" for ak in set(sc)):
# all are C and benzenes
benzylrings[rjx] = tempobj
elif sc.count("o") == 1 and coset.issuperset(set(sc)):
# tetrahydrofuran and tetrahydropyran
thps[rjx] = tempobj
self.rings = rings
self.benzylrings = benzylrings
self.thps = thps
def _assignbenzylabel(self):
"""
Assign names to benzyl rings
"""
for rgix, rgobj in self.benzylrings.items():
rsmile = rgobj.smiles
# For a candidate benzene ring, at least one neighbor not in
# the same ring is C
if not any(any(nei.symbol == "c" and nei.index not in rgix
for nei in self.atoms[j].neighbors)
for j in rgix):
continue
label, supports = None, None
# benzene ring
n, db = len(rgobj.index), rsmile.count("=")
if n == 6 or (db == 3 and n == 6):
label = "b"
# check whether the ring is a benzene if it is surrounded by other
# benzene rings
elif n == 6 and db >= 1:
cbs = []
for rgixk, rgobjk in self.benzylrings.items():
ck = rgobj.index & rgobjk.index
if len(ck) == 2 and rgobjk.smiles.count("=") > 2:
cbs.append(ck)
if (len(cbs) > 0 and db == 2) or (len(cbs) > 1 and db == 1):
label | = "b"
elif db == 2:
# candidate benzene ring
label = "bx"
# other types of aromatic rings
if label is None or label == "bx":
c2o = [ | ]
for j in rgix:
for nei in self.atoms[j].neighbors:
if nei.symbol == "o" and nei.bondorder == 2:
c2o.append(nei.index)
if db >= 1:
if db == 2 and len(c2o) == 2:
# quinone
if n == 6:
label, supports = "q", tuple(c2o)
elif db == 2 and c2o:
# methyldeoxybenzoin if 6 membered ring
# else chalcone cyclopentenedione
label, supports = "m" if n == 6 else "cc", c2o[0]
elif db == 1 and len(c2o) == 2:
label, supports |
lafranceinsoumise/api-django | agir/events/views/jitsi_views.py | Python | agpl-3.0 | 1,950 | 0.002062 | from datetime import timedelta
from django.http import JsonRespo | nse
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_http_methods
from agir.events.models import JitsiMeeting
@csrf_exempt
@require_POST
def jitsi_reservation_view(request):
    """Jitsi reservation hook: mark a meeting as started and describe it.

    Called by the Jitsi server with POST data containing ``name`` (the room
    name). Returns 404 when the room is unknown, 403 when the associated
    event is not currently running; otherwise records the start time and
    returns the meeting id, name, start time and duration.
    """
    try:
        meeting = JitsiMeeting.objects.get(room_name=request.POST.get("name"))
    except JitsiMeeting.DoesNotExist:
        return JsonResponse({"message": "Cette conférence n'existe pas."}, status=404)

    if meeting.event is not None and not meeting.event.is_current():
        return JsonResponse(
            {
                "message": "L'événement n'est pas encore commencé ou est déjà terminé."
            },
            status=403,
        )

    meeting.start_time = timezone.now()
    meeting.save(update_fields=["start_time"])

    if meeting.event is not None:
        start_time = meeting.event.start_time.isoformat(timespec="milliseconds")
        duration = int(
            (meeting.event.end_time - meeting.event.start_time).total_seconds()
        )
    else:
        # Bug fix: this branch previously returned a raw datetime object,
        # inconsistent with the ISO-8601 string returned when an event is
        # attached; serialize it the same way.
        start_time = timezone.now().isoformat(timespec="milliseconds")
        duration = 3600

    return JsonResponse(
        {
            "id": meeting.pk,
            "name": meeting.room_name,
            "start_time": start_time,
            "duration": duration,
        }
    )
@csrf_exempt
@require_http_methods(["DELETE"])
def jitsi_delete_conference_view(request, pk):
    """Jitsi hook: record the end time of a finished conference."""
    try:
        meeting = JitsiMeeting.objects.get(pk=pk)
    except JitsiMeeting.DoesNotExist:
        return JsonResponse({"message": "Cette conférence n'existe pas."}, status=404)
    # Only the lookup can raise DoesNotExist, so the rest lives outside the try.
    meeting.end_time = timezone.now()
    meeting.save(update_fields=["end_time"])
    return JsonResponse({"message": "Conférence terminée."})
|
fedora-conary/rmake-2 | rmake/worker/command.py | Python | apache-2.0 | 19,427 | 0.001596 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import os
import shutil
import socket
import sys
import time
import traceback
from conary import conaryclient
from conary.lib import util
from conary.repository import changeset
from conary.repository.errors import CommitError
from rmake import errors
from rma | ke import failure
from rmake.build import subscriber
from rmake.worker import resolver
from rmake.lib import logfile
from rmake.lib import logger
from rmake.lib import procutil
from rmake.lib import repocache
from rmake.lib import recipeutil
from rmake.lib import server
from rmake.lib.apiutils imp | ort thaw, freeze
class Command(server.Server):
    """
    Superclass for commands. Expects information to run one operation
    which it will perform and report about.
    The command is set up to handle forked operation:
    The main action occurs inside the pipe and any information
    processed is set via pipe. The readPipe, if it exists,
    will parse data sent over that pipe.
    """
    name = 'command' # command name is used for logging purposes.
    def __init__(self, cfg, commandId, jobId):
        server.Server.__init__(self)
        self.cfg = cfg
        self.commandId = commandId
        self.jobId = jobId
        self.logData = None  # arguments for LogFile.logToPort, when set
        self._isErrored = False
        self._errorMessage = ''
        self._output = [] # default location for information read in
                          # from the readPipe.
        self.failureReason = None
        self.readPipe = None
        self.writePipe = None
    def _exit(self, exitRc):
        # Terminate the current process with the given exit code.
        sys.exit(exitRc)
    def isReady(self):
        """Whether the command can be started now."""
        return True
    def shouldFork(self):
        """Whether this command should run in a forked child process."""
        return True
    def setWritePipe(self, writePipe):
        """
        Sets the pipe used to send data out. This should match a readPipe
        set on the other side of a fork.
        @type writePipe: instance of lib.pipereader.PipeWriter
        """
        self.writePipe = writePipe
    def setReadPipe(self, readPipe):
        """
        Sets the pipe to read data in. This should match a writePipe
        set on the other side of a fork.
        @type readPipe: instance of lib.pipereader.PipeReader
        """
        self.readPipe = readPipe
    def fileno(self):
        """
        Enables calling select() on a command.
        """
        return self.readPipe.fileno()
    def handleRead(self):
        """Read available data from the read pipe and dispatch it."""
        # depending on the class of readPipe, this may not return data
        # until full objects have been read in.
        data = self.readPipe.handle_read()
        if data:
            self._handleData(data)
    def flushInputBuffer(self):
        """Drain remaining data from the read pipe (e.g. after the child
        exited) so no buffered messages are lost."""
        if not self.readPipe:
            return
        for data in self.readPipe.readUntilClosed(timeout=20):
            self._handleData(data)
    def handleWrite(self):
        """Flush pending outgoing data on the write pipe."""
        self.writePipe.handle_write()
    def _handleData(self, data):
        """
        Default behavior for handling incoming data on the readPipe.
        """
        self._output.append(data)
    def getCommandId(self):
        return self.commandId
    def getChrootFactory(self):
        # Subclasses that run inside a chroot override this.
        return None
    def getLogPath(self):
        """
        All commands log their activities to a file based on their command
        id. Returns the path for that logFile.
        """
        commandId = (self.getCommandId().replace('/', '_')
                     .replace('~', '')
                     .replace(' ', ''))
        base = self.cfg.getBuildLogDir(self.jobId)
        return '%s/%s.log' % (base, commandId)
    def runCommandAndExit(self):
        """Run the command in this (child) process; always exits."""
        # we actually want to die when the command is killed.
        # We want our entire process group to be killed.
        # Remove signal handlers and set us to be the leader of our
        # process group.
        os.setpgrp()
        self._resetSignalHandlers()
        try:
            self._try('Command', self._runCommand)
            os._exit(0)
        except SystemExit, err:
            os._exit(err.args[0])
        except:
            # error occurred, but we'll let our parent node send out the
            # "errored" message.
            os._exit(1)
    def _serveLoopHook(self):
        """
        Called inside the forked command process until the command is done.
        """
        self.writePipe.handleWriteIfReady()
        self._collectChildren()
    def runCommandNoExit(self):
        """Run the command in-process; returns an exit code instead of exiting."""
        try:
            self._try('Command', self._runCommand)
        except SystemExit, err:
            return err.args[0]
        return 0
    def _runCommand(self):
        """Set up logging, run runCommand(), and report success or failure."""
        self.commandStarted()
        self.logger = logger.Logger(self.name) # output to stdout so logging
                                               # is all covered by logFile
        self.logFile = logfile.LogFile(self.getLogPath())
        if self.logData:
            self.logFile.logToPort(*self.logData)
        else:
            self.logFile.redirectOutput()
        try:
            self.logger.info('Running Command... (pid %s)' % os.getpid())
            try:
                self.runCommand()
            except SystemExit, err:
                raise
            except Exception, err:
                if isinstance(err, SystemExit) and not err.args[0]:
                    self.commandFinished()
                else:
                    self.logger.error(traceback.format_exc())
                    self.commandErrored(err, traceback.format_exc())
                raise
            else:
                self.commandFinished()
        finally:
            self.logFile.restoreOutput()
    def isErrored(self):
        return self._isErrored
    def getFailureReason(self):
        return self.failureReason
    def setError(self, msg, tb=''):
        """Record that the command failed, with an optional traceback string."""
        self._isErrored = True
        self.failureReason = failure.CommandFailed(self.commandId, str(msg), tb)
    def commandDied(self, status):
        """Called in the parent with the child's wait() status; records
        abnormal exits (nonzero exit code or death by signal)."""
        self.flushInputBuffer()
        exitRc = os.WEXITSTATUS(status)
        signalRc = os.WTERMSIG(status)
        commandId = self.getCommandId()
        if exitRc or signalRc:
            if exitRc:
                msg = 'unexpectedly died with exit code %s'
                msg = msg % (exitRc)
            else:
                msg = 'unexpectedly killed with signal %s'
                msg = msg % (signalRc)
            self.setError(msg)
        self.commandFinished()
    def commandStarted(self):
        # Lifecycle hook; subclasses override to publish events.
        pass
    def commandFinished(self):
        # Lifecycle hook; subclasses override to publish events.
        pass
    def commandErrored(self, msg, tb=''):
        self.setError(msg, tb)
class AttachedCommand(Command):
def __init__(self, serverCfg, commandId, jobId, eventHandler,
job=None, trove=None):
assert job or trove
super(AttachedCommand, self).__init__(serverCfg, commandId, jobId)
self.eventHandler = eventHandler
self.job = job
self.trove = trove
if trove:
self.parent = trove
else:
self.parent = job
self.publisher = None
def _handleData(self, (jobId, eventList)):
self.eventHandler._receiveEvents(*thaw('EventList', eventList))
def getTrove(self):
return self.trove
def setFailure(self, failure):
if self.trove:
self.trove.troveFailed(failure)
else:
self.job.jobFailed(failure)
def runCommand(self):
try:
self.parent.getPublisher().reset()
self.publisher = PipePublisher(self.writePipe)
self.publisher.attach(self.parent)
|
DarthMaulware/EquationGroupLeaks | Leak #4 - Don't Forget Your Base/EQGRP-Auction-File/Linux/bin/earlyshovel/tmpnam.py | Python | unlicense | 54 | 0 | #!/usr/bin/python
import utils
print utils.tmpn | am()
| |
mitodl/open-discussions | course_catalog/migrations/0007_alters_created_on_field_20190111_1755.py | Python | bsd-3-clause | 922 | 0.001085 | # Generated by Django 2.0.8 on 2019-01-11 17:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: switch every catalog model's ``created_on`` field to
    ``auto_now_add`` so it is set once at row creation and is not editable."""

    dependencies = [("course_catalog", "0006_adds_timestampedmodel_20190111_0031")]
    operations = [
        migrations.AlterField(
            model_name="course",
            name="created_on",
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name="courseinstructor",
            name="created_on",
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name="courseprice",
            name="created_on",
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name="coursetopic",
            name="created_on",
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
|
digidotcom/python-wvalib | wva/test/test_subscriptions.py | Python | mpl-2.0 | 2,119 | 0.000472 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015 Digi International Inc. All Rights Reserved.
import json
from wva.test.test_utilities import WVATestBase
class TestWVASubscriptions(WVATestBase):
    """HTTP-level tests for the WVA subscription helpers (responses mocked
    by WVATestBase)."""

    def test_get_subscriptions(self):
        # The device returns subscription URIs; short_name is the last
        # path segment of each URI.
        self.prepare_json_response("GET", "/ws/subscriptions", {
            "subscriptions": [
                "subscriptions/a",
                "subscriptions/b",
            ]
        })
        subs = self.wva.get_subscriptions()
        self.assertEqual(len(subs), 2)
        self.assertEqual(subs[0].short_name, "a")
        self.assertEqual(subs[1].short_name, "b")

    def test_get_metadata(self):
        # get_metadata() strips the outer 'subscription' wrapper key.
        self.prepare_json_response("GET", "/ws/subscriptions/speedy", {
            'subscription': {'buffer': 'queue',
                             'interval': 1,
                             'uri': 'vehicle/data/VehicleSpeed'}
        })
        sub = self.wva.get_subscription("speedy")
        self.assertEqual(sub.get_metadata(), {
            'buffer': 'queue',
            'interval': 1,
            'uri': 'vehicle/data/VehicleSpeed',
        })

    def test_delete(self):
        self.prepare_response("DELETE", "/ws/subscriptions/short-name", "")
        sub = self.wva.get_subscription("short-name")
        sub.delete()
        self.assertEqual(self._get_last_request().method, "DELETE")
        self.assertEqual(self._get_last_request().path, "/ws/subscriptions/short-name")

    def test_create(self):
        # create() should PUT a JSON body wrapped in a 'subscription' key.
        self.prepare_response("PUT", "/ws/subscriptions/new-short-name", "")
        sub = self.wva.get_subscription("new-short-name")
        sub.create("vehicle/data/EngineSpeed", buffer="drop", interval=5)
        req = self._get_last_request()
        self.assertDictEqual(json.loads(req.body.decode('utf-8')), {
            'subscription': {'buffer': 'drop',
                             'interval': 5,
                             'uri': 'vehicle/data/EngineSpeed'},
        })
|
JGulic/empathy | tools/glib-gtypes-generator.py | Python | gpl-2.0 | 12,522 | 0.001677 | #!/usr/bin/python
# Generate GLib GInterfaces from the Telepathy specification.
# The master copy of this program is in the telepathy-glib repository -
# please make any changes there.
#
# Copyright (C) 2006, 2007 Collabora Limited
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import xml.dom.minidom
from libglibcodegen import escape_as_identifier, \
get_docstring, \
NS_TP, \
Signature, \
type_to_gtype, \
xml_escape
def types_to_gtypes(types):
    """Return the GType name (second element of type_to_gtype's result)
    for each D-Bus signature in *types*."""
    gtypes = []
    for signature in types:
        gtypes.append(type_to_gtype(signature)[1])
    return gtypes
class GTypesGenerator(object):
    def __init__(self, dom, output, mixed_case_prefix):
        """Open the three output files (<output>.h, -body.h, -gtk-doc.h),
        write their auto-generated banners, and initialize the dicts of
        composite types whose implementations still need emitting."""
        self.dom = dom
        self.Prefix = mixed_case_prefix
        self.PREFIX_ = self.Prefix.upper() + '_'
        self.prefix_ = self.Prefix.lower() + '_'
        self.header = open(output + '.h', 'w')
        self.body = open(output + '-body.h', 'w')
        self.docs = open(output + '-gtk-doc.h', 'w')
        for f in (self.header, self.body, self.docs):
            f.write('/* Auto-generated, do not edit.\n *\n'
                    ' * This file may be distributed under the same terms\n'
                    ' * as the specification from which it was generated.\n'
                    ' */\n\n')
        # keys are e.g. 'sv', values are the key escaped
        self.need_mappings = {}
        # keys are the contents of the struct (e.g. 'sssu'), values are the
        # key escaped
        self.need_structs = {}
        # keys are the contents of the struct array (e.g. 'sssu'), values
        # are the key escaped
        self.need_struct_arrays = {}
        # keys are the contents of the array (unlike need_struct_arrays!),
        # values are the key escaped
        self.need_other_arrays = {}
    def h(self, code):
        """Append *code* (a unicode string) to the generated header file."""
        self.header.write(code.encode("utf-8"))
    def c(self, code):
        """Append *code* (a unicode string) to the generated body file."""
        self.body.write(code.encode("utf-8"))
    def d(self, code):
        """Append *code* (a unicode string) to the gtk-doc output file."""
        self.docs.write(code.encode('utf-8'))
    def do_mapping_header(self, mapping):
        """Emit the GType macro and gtk-doc comment for one tp:mapping
        (a D-Bus dictionary, exactly two tp:member children: key and value),
        and record that its implementation must be generated later."""
        members = mapping.getElementsByTagNameNS(NS_TP, 'member')
        assert len(members) == 2
        impl_sig = ''.join([elt.getAttribute('type')
                            for elt in members])
        esc_impl_sig = escape_as_identifier(impl_sig)
        name = (self.PREFIX_ + 'HASH_TYPE_' +
                mapping.getAttribute('name').upper())
        impl = self.prefix_ + 'type_dbus_hash_' + esc_impl_sig
        docstring = get_docstring(mapping) or '(Undocumented)'
        self.d('/**\n * %s:\n *\n' % name)
        self.d(' * %s\n' % xml_escape(docstring))
        self.d(' *\n')
        self.d(' * This macro expands to a call to a function\n')
        self.d(' * that returns the #GType of a #GHashTable\n')
        self.d(' * appropriate for representing a D-Bus\n')
        self.d(' * dictionary of signature\n')
        self.d(' * <literal>a{%s}</literal>.\n' % impl_sig)
        self.d(' *\n')
        key, value = members
        self.d(' * Keys (D-Bus type <literal>%s</literal>,\n'
               % key.getAttribute('type'))
        tp_type = key.getAttributeNS(NS_TP, 'type')
        if tp_type:
            self.d(' * type <literal>%s</literal>,\n' % tp_type)
        self.d(' * named <literal>%s</literal>):\n'
               % key.getAttribute('name'))
        docstring = get_docstring(key) or '(Undocumented)'
        self.d(' * %s\n' % xml_escape(docstring))
        self.d(' *\n')
        self.d(' * Values (D-Bus type <literal>%s</literal>,\n'
               % value.getAttribute('type'))
        tp_type = value.getAttributeNS(NS_TP, 'type')
        if tp_type:
            self.d(' * type <literal>%s</literal>,\n' % tp_type)
        self.d(' * named <literal>%s</literal>):\n'
               % value.getAttribute('name'))
        docstring = get_docstring(value) or '(Undocumented)'
        self.d(' * %s\n' % xml_escape(docstring))
        self.d(' *\n')
        self.d(' */\n')
        self.h('#define %s (%s ())\n\n' % (name, impl))
        self.need_mappings[impl_sig] = esc_impl_sig
        # An explicit array-name also requests a GPtrArray-of-mapping type.
        array_name = mapping.getAttribute('array-name')
        if array_name:
            gtype_name = self.PREFIX_ + 'ARRAY_TYPE_' + array_name.upper()
            contents_sig = 'a{' + impl_sig + '}'
            esc_contents_sig = escape_as_identifier(contents_sig)
            impl = self.prefix_ + 'type_dbus_array_of_' + esc_contents_sig
            self.d('/**\n * %s:\n\n' % gtype_name)
            self.d(' * Expands to a call to a function\n')
            self.d(' * that returns the #GType of a #GPtrArray\n')
            self.d(' * of #%s.\n' % name)
            self.d(' */\n\n')
            self.h('#define %s (%s ())\n\n' % (gtype_name, impl))
            self.need_other_arrays[contents_sig] = esc_contents_sig
def do_struct_header(self, struct):
members = struct.getElementsByTagNameNS(NS_TP, 'member')
impl_sig = ''.join([elt.getAttribute('type') for elt in members])
esc_impl_sig = escape_as_identifier(impl_sig)
name = (self.PREFIX_ + 'STRUCT_TYPE_' +
struct.getAttribute('name').upper())
impl = self.prefix_ + 'type_dbus_struct_' + esc_impl_sig
docstring = struct.getElementsByTagNameNS(NS_TP, 'docstring')
if docstring:
docstring = docstring[0].toprettyxml()
if docstring.startswith('<tp:docstring>'):
docstring = docstring[14:]
if docstring.endswith('</tp:docstring>\n'):
docstring = docstring[:-16]
if docstring.strip() in ('<tp:docstring/>', ''):
docstring = '(Undocumented)'
else:
docstring = '(Undocumented)'
self.d('/**\n * %s:\n\n' % name)
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * This macro expands to a call to a function\n')
self.d(' * that returns the #GType of a #GValueArray\n')
self.d(' * appropriate for representing a D-Bus struct\n')
self.d(' * with signature <literal>(%s)</literal>.\n'
% impl_sig)
self.d(' *\n')
for i, member in enumerate(members):
self.d(' * Member %d (D-Bus type '
'<literal>%s</literal>,\n'
% (i, member.getAttribute('type')))
tp_type = member.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n | ' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% member.getAttribute('name'))
docstring = get_docstring(member) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (name, impl))
array_name = struct.getAttribute('array-name')
if array_name != '':
array_name = (self.PREFIX_ + 'ARRAY_TYPE_' + array_n | ame.upper())
impl = self.prefix_ + 'type_dbus_array_' + esc_impl_sig
self.d('/**\n * %s:\n\n' % array_name)
self.d(' * Expands to a call to a function\n')
self.d(' * that returns the #GType of a #GPtrArray\n')
self.d(' * of #%s.\n' % name)
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (array |
oldstylejoe/pychess-timed | lib/pychess/widgets/BorderBox.py | Python | gpl-3.0 | 2,441 | 0.006555 | from gi.repository import Gtk
from gi.repository import GObject
class BorderBox (Gtk.Alignment):
    """A Gtk.Alignment that draws a thin dark border on any subset of its
    four edges and pads the child by 1px on each bordered side."""

    def __init__ (self, widget=None, top=False, right=False,
                  bottom=False, left=False):
        GObject.GObject.__init__(self)
        self.connect("draw", self._onExpose)
        if widget:
            self.add(widget)
        self.__top, self.__right = top, right
        self.__bottom, self.__left = bottom, left
        self._updateBorders()

    def _onExpose (self, area, ctx):
        """Draw the enabled edges using the theme's "p_dark_color"."""
        context = self.get_window().cairo_create()
        style = self.get_style_context()
        found, color = style.lookup_color("p_dark_color")
        context.set_source_rgba(color.red, color.green,
                                color.blue, color.alpha)
        alloc = self.get_allocation()
        # Offset by .5 so 1px lines land on pixel centers.
        x0 = alloc.x + .5
        y0 = alloc.y + .5
        x1 = x0 + alloc.width - 1
        y1 = y0 + alloc.height - 1
        edges = ((self.top,    (x0, y0), (x1, y0)),
                 (self.right,  (x1, y0), (x1, y1)),
                 (self.bottom, (x1, y1), (x0, y1)),
                 (self.left,   (x0, y1), (x0, y0)))
        for enabled, start, end in edges:
            if enabled:
                context.move_to(*start)
                context.line_to(*end)
        context.set_line_width(1)
        context.stroke()

    def _updateBorders (self):
        # 1px of padding on every side that has a border.
        self.set_padding(1 if self.top else 0,
                         1 if self.bottom else 0,
                         1 if self.right else 0,
                         1 if self.left else 0)

    def isTop(self):
        return self.__top
    def isRight(self):
        return self.__right
    def isBottom(self):
        return self.__bottom
    def isLeft(self):
        return self.__left

    def setTop(self, value):
        self.__top = value
        self._updateBorders()
    def setRight(self, value):
        self.__right = value
        self._updateBorders()
    def setBottom(self, value):
        self.__bottom = value
        self._updateBorders()
    def setLeft(self, value):
        self.__left = value
        self._updateBorders()

    top = property(fget=isTop, fset=setTop)
    right = property(fget=isRight, fset=setRight)
    bottom = property(fget=isBottom, fset=setBottom)
    left = property(fget=isLeft, fset=setLeft)
|
amaas-fintech/amaas-core-sdk-python | amaascore/corporate_actions/dividend.py | Python | apache-2.0 | 1,565 | 0.005751 | from __future__ import absolute_import, division, print_function, unicode_literals
from decimal import Decimal
from amaascore.corporate_actions.corporate_action import CorporateAction
class Dividend(CorporateAction):
    def __init__(self, asset_manager_id, corporate_action_id, record_date, dividend_rate, dividend_asset_id,
                 corporate_action_status='Open', asset_id=None, party_id=None, declared_date=None, settlement_date=None,
                 elective=False, message=None, description='', references=None, *args, **kwargs):
        """
        A dividend corporate action.

        :param dividend_rate: per-share payout rate (coerced to Decimal by the property setter).
        :param dividend_asset_id: the asset in which the dividend is paid out.
        All remaining parameters are forwarded unchanged to CorporateAction.
        """
        self.dividend_rate = dividend_rate
        self.dividend_asset_id = dividend_asset_id
        super(Dividend, self).__init__(asset_manager_id=asset_manager_id, corporate_action_id=corporate_action_id,
                                       record_date=record_date, corporate_action_status=corporate_action_status,
                                       asset_id=asset_id, party_id=party_id, declared_date=declared_date,
                                       settlement_date=settlement_date, elective=elective, message=message,
                                       description=description, references=references, *args, **kwargs)
@property
def dividend_rate(self):
if hasattr(self, '_dividend_rate'):
return self._dividend_rate
    @dividend_rate.setter
    def dividend_rate(self, value):
        """
        The rate per share to pay out for the dividend (stored as Decimal).

        NOTE(review): falsy values (None, 0, '') are silently ignored and
        leave any previously-set rate unchanged - confirm this is intended.
        :param value:
        :return:
        """
        if value:
            self._dividend_rate = Decimal(value)
|
PavlovVitaly/python__homework_ITMO | task_html.py | Python | gpl-3.0 | 4,695 | 0.000429 | class TagException(Exception):
pass
class ArgumentException(Exception):
pass
class Tag(object):
__slots__ = ['_name', '_attributes', '_parent', '_previous_sibling', '_next_sibling', '_first_child',
'_last_child', '_children']
def __init__(self, name, attr=None):
self._name = name
if attr is None or not isinstance(attr, dict):
self._attributes = {}
else:
self._attributes = attr
self._parent = None
self._previous_sibling = None
self._next_sibling = None
self._first_child = None
self._last_child = None
self._children = list()
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, value):
self._parent = value
@parent.deleter
def parent(self):
del self._parent
@property
def previous_sibling(self):
return self._previous_sibling
@previous_sibling.setter
def previous_sibling(self, value):
self._previous_sibling = value
@previous_sibling.deleter
def previous_sibling(self):
del self._previous_sibling
@property
def next_sibling(self):
return self._next_sibling
@next_sibling.setter
def next_sibling(self, value):
self._next_sibling = value
@next_sibling.deleter
def next_sibling(self):
del self._next_sibling
@property
def first_child(self):
raise TagException('Not a container tag!')
@property
def last_child(self):
raise TagException('Not a container tag!')
def __getattribute__(self, attr):
try:
return super().__getattribute__(attr)
except AttributeError:
return self._attributes.get(attr)
def __setattr__(self, name, val):
try:
super().__setattr__(name, val)
except AttributeError:
self._attributes[name] = val
def __delattr__(self, name):
try:
super().__delattr__(name)
except AttributeError:
if self._attributes.get(name):
self._attributes.pop(name)
def __str__(self):
result = '<' + self._name
for key, value in self._attributes.items():
result += ' ' + key + '="' + value + '"'
result += '>'
return result
class ContainerTag(Tag):
    """A tag that may hold child tags.

    ``children`` is exposed as a read-only property yielding a fresh
    iterator on every access. Previously a single generator was created in
    ``__init__`` and stored in a slot, so after one full iteration (e.g.
    one ``str()`` call) it was exhausted and all later iterations — and any
    children appended afterwards — were silently invisible.
    """
    # No extra slot needed: 'children' is now a property, not stored state.
    __slots__ = []

    @property
    def children(self):
        """Iterate over the current children; fresh iterator each access."""
        return self.generator_of_children()

    @property
    def first_child(self):
        return self._first_child

    @property
    def last_child(self):
        return self._last_child

    def generator_of_children(self):
        """Yield the current children in document order."""
        for child in self._children:
            yield child

    def append_child(self, tag):
        """Append *tag* as the last child and set its parent to self."""
        if not issubclass(type(tag), Tag):
            raise TypeError("Argument isn't subclass of Tag.")
        self._children.append(tag)
        self._last_child = self._children[-1]
        self._last_child.parent = self
        if len(self._children) == 1:
            self._first_child = self.last_child

    def insert_before(self, tag, next_sibling):
        """Insert *tag* immediately before *next_sibling*.

        If *next_sibling* is not currently a child, *tag* is appended.
        """
        if not issubclass(type(tag), Tag):
            raise TypeError("Argument isn't subclass of Tag.")
        if next_sibling not in self._children:
            self.append_child(tag)
            return
        index = self._children.index(next_sibling)
        self._children.insert(index, tag)
        self._last_child = self._children[-1]
        self._children[index].parent = self
        if index == 0:
            self._first_child = self._children[0]

    def __str__(self):
        result = '<' + self._name
        for key, value in self._attributes.items():
            result += ' ' + key + '="' + value + '"'
        result += '>'
        for item in self.children:
            result += str(item)
        result += '</' + self._name + '>'
        return result
if __name__ == '__main__':
    # Demo: build a <div> holding three <img> tags (img_3 inserted before
    # img_1) and print the resulting HTML.
    img_1 = Tag('img')
    img_1.src = '/python-developer.svg'
    img_1.alt = 'Python Разработчик'
    img_2 = Tag('img')
    img_2.src = '/php-developer.svg'
    img_2.alt = 'PHP Разработчик'
    img_3 = Tag('img')
    img_3.src = '/java-developer.svg'
    img_3.alt = 'Java Разработчик'
    div = ContainerTag('div')
    div.append_child(img_1)
    div.append_child(img_2)
    div.insert_before(img_3, img_1)
    print(div)
|
CyrilPeponnet/Archipel | ArchipelAgent/archipel-central-agent-platformrequest/setup.py | Python | agpl-3.0 | 3,265 | 0.006738 | #
# setup.py
#
# Copyright (C) 2010 Antoine Mercadal <antoine.mercadal@inframonde.eu>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
# Packaging metadata for the archipel-central-agent-platformrequest plugin.
VERSION = '0.6.0'
AUTHOR = 'Antoine Mercadal'
MAIL = 'antoine.mercadal@archipelproject.org'
URL = 'http://archipelproject.org'
LICENSE = 'AGPL'
NAME = 'archipel-central-agent-platformrequest'
SHORTDESCRIPTION = "Manage platform wide operation requests"
LONGDESCRIPTION = ""
# setuptools entry points registering the plugin factory and version hook
# with the Archipel central agent.
ENTRY_POINTS = { 'archipel.plugin.centralagent' : [
                    'factory=archipelcentralagentplatformrequest:make_archipel_plugin'],
                 'archipel.plugin' : [
                    'version=archipelcentralagentplatformrequest:version']}
# Extra material injected into the generated RPM spec file (see the
# bdist_rpm hack below).
RPM_REQUIRED_DEPS = "archipel-core"
RPM_POST_INSTALL = "%post\narchipel-initinstall -m {0}\n".format(NAME)
## HACK FOR DEPS IN RPMS
from setuptools.command.bdist_rpm import bdist_rpm
def custom_make_spec_file(self):
    """bdist_rpm hook: inject the runtime dependency line and the %post
    scriptlet into the generated RPM spec file."""
    spec = self._original_make_spec_file()
    marker = spec.index("%description")
    spec.insert(marker - 1, "requires: %s" % RPM_REQUIRED_DEPS)
    spec.append(RPM_POST_INSTALL)
    return spec
# Install the patched spec generator so the dependency and %post scriptlet
# above land in every RPM built from this setup script.
bdist_rpm._original_make_spec_file = bdist_rpm._make_spec_file
bdist_rpm._make_spec_file = custom_make_spec_file
## END OF HACK
setup(name=NAME,
version=VERSION,
description=SHORTDESCRIPTION,
long_description=LONGDESCRIPTION,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intende | d Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Internet',
'Topic :: System | :: Emulators',
'Topic :: System :: Operating System'],
keywords='archipel, virtualization, libvirt, orchestration',
author=AUTHOR,
author_email=MAIL,
url=URL,
license=LICENSE,
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
"archipel-core>=0.6.0beta"
],
entry_points=ENTRY_POINTS
)
|
sandrofolk/girox | girox/blog/migrations/0007_auto_20170620_0011.py | Python | gpl-3.0 | 467 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-20 00:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migrat | ion(migrations.Migration):
dependencies = [
('blog', '0006_auto_20170620_0010'),
]
operations = [
migrations.AlterField(
model_name='post',
nam | e='posted',
field=models.DateField(db_index=True, verbose_name='data'),
),
]
|
Krish95/tango | evaluator.py | Python | mit | 3,730 | 0.00563 | import pdb
from scan_lexer import *
from global_env import *
from sys import exit
macros = {}
scope = []
def evaluate(exp, envs = [global_env]):
    """Evaluate a parsed Scheme expression *exp* against the environment
    chain *envs* (innermost frame first); returns the resulting value.

    NOTE(review): the mutable default argument means every top-level call
    shares (and can mutate) the same [global_env] list - confirm intended.
    """
    #Use the global scope list
    global scope
    # Is exp an atom?
    if atom(exp):
        if type(exp) == Symbol:
            return lookup(exp, envs)
        elif True in [isinstance(exp, x) for x in [int, float, str, bool]]:
            return exp
        else:
            raise TypeError("Unknown type atom", exp)
    # Is exp the null list?
    if exp == []:
        return []
    elif exp[0] == "exit":
        print("Moriturus te saluto.")
        exit(0)
    elif exp[0] == "load":
        # load: tokenize the named .scm file and evaluate each top-level form
        temp,dummy = prstree_balance(tokenize_from_file(str(exp[1]+".scm")))
        for x in temp:
            evaluate(x)
    # Is exp is a special form?
    elif exp[0] == "quote":
        return exp[1]
    elif exp[0] == "if":
        if istrue(evaluate(exp[1], envs)):
            return evaluate(exp[2], envs)
        else:
            return evaluate(exp[3], envs)
    elif exp[0] == "begin":
        return eprogn(exp[1:], envs)
    elif exp[0] == "set!":
        update(exp[1], envs, evaluate(exp[2], envs))
    elif exp[0] == "define":
        # Bind in the innermost frame, shadowing any previous binding.
        envs[0].insert(0, (exp[1], evaluate(exp[2], envs)))
    elif exp[0] == "lambda":
        return make_function(exp[1], exp[2:], envs)
    elif exp[0] == "macro":
        # A macro value is represented as a (parameter-list, body) tuple.
        return (exp[1], exp[2])
    # exp is a macro expansion
    elif type(evaluate(exp[0], envs)) == tuple:
        f = evaluate(exp[0], envs)
        # pdb.set_trace()
        expanded_form = macro_expand(f[0], exp[1:], f[1], envs)
        return evaluate(evaluate(expanded_form, envs), envs)
    # exp is function application
    else:
        return invoke(evaluate(exp[0], envs), evlist(exp[1:], envs))
    #Preserve current scope
    scope = [str(inner[0]) for outer in envs for inner in outer]
def macro_expand(variables, values, body, envs):
    """Expand a macro: replace each parameter symbol in *body* with its
    (quoted, hence unevaluated) argument; returns the expanded body."""
    if len(variables) != len(values):
        raise ValueError("Too few or too many values.")
    def substitute(exp):
        # Recursively swap parameter symbols for their quoted arguments.
        nonlocal variables, values
        if atom(exp):
            if exp in variables:
                return [Symbol("quote"), values[variables.index(exp)]]
            else:
                return exp
        else:
            return [substitute(e) for e in exp]
    result = [substitute(exp) for exp in body]
    return result
def istrue(exp):
    """Scheme truth test: everything except ``False`` is true.

    Note that Python's ``0 == False``, so 0 also counts as false here.
    """
    return not (exp == False)
def eprogn(exps, envs):
    """Evaluate every expression in *exps* in order; return the last value."""
    values = []
    for expression in exps:
        values.append(evaluate(expression, envs))
    return values[-1]
def invoke(fn, arg_list):
    """Apply the callable *fn* to the unpacked argument list."""
    return fn(*arg_list)
def evlist(l, envs):
    """Evaluate each expression in *l* left to right; return their values."""
    return [evaluate(expression, envs) for expression in l]
# update is impure: it rebinds in place.
def update(var, envs, value):
    """Destructively rebind *var* to *value* in the first frame of *envs*
    that defines it; raise if no frame does."""
    for frame in envs:
        for idx, binding in enumerate(frame):
            if binding[0] == var:
                frame[idx] = (var, value)
                return
    raise Exception("No such Symbol found: ", var)
def make_function(variables, body, envs):
    """Build a Python closure implementing a Scheme lambda: calling it
    evaluates the body's first form in an extended environment."""
    def closure(*values):
        return evaluate(body[0], extend(envs, variables, list(values)))
    return closure
def lookup(var, envs):
    """Return the value bound to *var* in the first frame that defines it;
    raise if the symbol is unbound."""
    for frame in envs:
        for name, bound_value in frame:
            if name == var:
                return bound_value
    raise Exception("No such binding: ", var)
def extend(envs, variables, values):
    """Return a new environment chain: a fresh frame binding *variables*
    to *values*, followed by the existing frames of *envs*.

    :raises ValueError: when the numbers of variables and values differ.
    The input *envs* list itself is not mutated (the frames are shared).
    """
    if len(variables) != len(values):
        raise ValueError("Too few or too many values.")
    # Idiomatic concatenation instead of the previous manual append loop.
    return [list(zip(variables, values))] + list(envs)
|
iandees/all-the-places | locations/spiders/ljsilvers.py | Python | mit | 1,524 | 0.007218 | # -*- | coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class LjsilversSpider(scrapy.Spider):
    """Scrape Long John Silver's store locations from the locator page.

    The page embeds the store data as a PHP-style dumped array inside an
    HTML comment, so it is parsed with regexes rather than an HTML parser.
    """
    name = "ljsilvers"
    allowed_domains = ["ljsilvers.com"]
    start_urls = (
        'http://www.ljsilvers.com/locator?postalcode=76010',
    )

    # Maps keys of the dumped PHP array to GeojsonPointItem fields.
    FIELD_MAP = {
        '[storeID]': 'ref',
        '[address]': 'addr_full',
        '[city]': 'city',
        '[state]': 'state',
        '[zip]': 'postcode',
        '[phone_number]': 'phone',
        '[latitude]': 'lat',
        '[longitude]': 'lon',
    }

    def parse(self, response):
        data = response.body_as_unicode()
        # Raw strings: '\s' etc. in a normal string literal is an invalid
        # escape sequence (DeprecationWarning in Python 3.6+); the match
        # behavior is unchanged.
        base_data = re.search(
            r'dataout\s--Array\s\((.*)\)\s\s--><style type="text/css">',
            data, re.DOTALL).group(1)
        detail_matches = re.findall(r'\((.*?)\)', base_data, re.DOTALL)

        for detail_match in detail_matches:
            key_values = re.findall(r'(.*?)\s=>\s(.*)', detail_match)
            props = {}
            for raw_key, raw_value in key_values:
                # The previous 8-branch if-chain is replaced by a lookup;
                # unknown keys are ignored exactly as before.
                field = self.FIELD_MAP.get(raw_key.strip())
                if field is not None:
                    props[field] = raw_value.strip()
            yield GeojsonPointItem(**props)
|
Ginkgo-Biloba/Misc-Python | sklearn/SKLearn3KMOutlier.py | Python | gpl-3.0 | 1,135 | 0.004955 | # coding = utf-8
"""
3.8 将 KMeans 用于离群点检测
http://git.oschina.net/wizardforcel/sklearn-cb/blob/master/3.md
"""
# 生成 100 个点的单个数据块,然后识别 5 个离形心最远的点
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
(x, labels) = make_blobs(100, centers=1)
kms = KMeans(n_clusters=1)
kms.fit(x)
# 识别 5 个最远的点
dist = kms.transform(x)
sortedIdx = np.argsort(dist.ravel())[::-1][:5]
# 移除这些点
nx = np.delete(x, sortedIdx, axis=0)
# 形心位置变化了
nkms = KMeans(n_clusters=1)
nkms.fit(nx)
from matplotlib import pyplot as plt
plt.style.use("ggplot")
(fig, ax) = plt.subplots(figsize=(6, 5))
ax.scatter(x[:, 0], x[:, 1], s=10, label="点")
ax.scatter(kms.cluster | _centers_[:, 0], kms.cluster_centers_[:, 1], label="形心", s=50, alpha=0.7)
ax.scatter(x[sortedIdx][:, 0], x[sortedIdx][:, 1], label="极值", s=100, alpha=0.7)
ax.scatter(nkms.cluster_centers_[:, 0], nkms.cluster_centers_[:, 1], label="新的形心", s=50, alpha=0.7)
ax.set_title("单点簇集")
ax.legend(loc="best")
fig.tight_lay | out()
fig.show()
plt.show()
|
johngumb/danphone | wavetx2.py | Python | gpl-3.0 | 2,677 | 0.065745 | import sys
import os
import time
import serial
#DTR0 - blue DATA
#RTS0 - purple STB RX
#DTR1 - (blue on 232 side) then green CLK
#CTS0 - black LD
#RTS1 - purple STB TX
delay=0.001
def getserials():
s0 = serial.Serial("/dev/ttyUSB0")
s1 = serial.Serial("/dev/ttyUSB1")
return (s0,s1)
def test():
period=0.001
i = 0
s=serial.Serial("/dev/ttyUSB1")
while True:
s.setDTR(True)
#s.setRTS(True)
time.sleep(period)
i = i + 1
if i % 10 == 0:
print s.getCTS()
s.setDTR(False)
#s.setRTS(False)
time.sleep(period)
def outbit(s0,s1,valn):
clk=True
if valn==0:
val=True
else:
val=False
print valn
s0.setDTR(val) # rx strobe
time.sleep(delay/10)
s1.setDTR(clk)
time.sleep(delay)
s1.setDTR(not clk)
time.sleep(delay)
return
def latch(s0):
val=True
s0.setRTS(val)
s1.setRTS(val) # tx strobe
time.sleep(delay)
s0.setRTS(not val)
s1.setRTS(not val) # tx strobe
time.sleep(delay)
s0.setRTS(val)
s1.setRTS(val) # tx strobe
return
def enable_outputs(s0,s1):
d=[1,0,1,1]
for x in d:
| outbit(s0,s1,x)
latch(s0)
return
if __name__=="__main__":
os.system("/usr/bin/chrt -r -p 99 %s"%os.getpid())
| (s0,s1)=getserials()
# set up reference divider
# r=[1,0,0,0,0,0,0,0,0,0,0,1]
r=[1,1,1,1,1,1,1,1,1,1,1,0]
r=[0,0,1,1,1,1,0,0,0,0,1,0] # good
r=[1,1,1,1,1,1,1,0,0,0,1,0] # good 1 jan
r=[1,0,1,1,1,1,0,0,0,0,1,0] # good
r=[0,0,0,0,0,0,1,0,0,0,1,0] # good
r=[0,0,0,0,0,0,0,0,1,1,1,0] # good
# r=[0,0,0,0,0,0,0,0,0,1,0,0]
# r=[0,0,0,0,0,0,0,0,0,0,1,0]
# r=[1,1,1,1,1,1,1,1,1,1,1,1]
# r=[1,1,1,1,1,1,1,1,1,1,1,1]
# TODO figure out what L2 is - depends whether LPF is inverting or non
# inverting
l=[0,0]
c=[0,1]
s0.setRTS(True)
for x in r+l+c:
outbit(s0,s1,x)
print
latch(s0)
print
# enable_outputs(s0,s1)
#d=[1,0,1,1]
#for x in d:
# outbit(s0,s1,x)
#latch(s0)
# set up counter
# a = [1,1,0,0,0,1,1]
# 91.2 MHz minimum.
# 82.8 MHz minimum.
#a_min = [0,0,0,0,0,0,0]
a = [1,1,0,0,0,0,0]
n = [0,1,0,1,1,1,1,1,0,0,0]
n = [1,1,1,1,1,1,1,1,1,0,0]
n = [1,0,0,0,0,0,0,0,0,1,0]
# n = [1,1,1,1,1,1,1,1,1,1,1]
c = [1,0]
for x in a + n + c:
outbit(s0,s1,x)
print
latch(s0)
print
# phase comparator
# d = [0,0]
# c = [0,0]
# for x in d + c:
# outbit(s0,s1,x)
# latch(s0)
#while True:
# print s0.getCTS()
# time.sleep(1)
#test()
|
Weil0ng/gem5 | util/cpt_upgraders/arm-sysreg-mapping-ns.py | Python | bsd-3-clause | 4,062 | 0.00837 | # Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# reflect updated register mappings for ARM ISA
def upgrader(cpt):
if cpt.get('root','isa') == 'arm':
for sec in cpt.sections():
import re
# Search for all ISA sections
if re.search('.*sys.*\.cpu.*\.isa\d*$', sec):
mr = cpt.get(sec, 'miscRegs').split()
if int(mr[0]) & 16 == 0: # CPSR reg width; 0 for AArch64
mr[112] = mr[111] # ACTLR_NS = ACTLR
mr[146] = mr[145] # ADFSR_NS = ADFSR
mr[149] = mr[148] # AIFSR_NS = AIFSR
mr[253] = mr[252] # AMAIR0_NS = AMAIR0
mr[289] = mr[288] # CNTP_CTL_NS = CNTP_CTL
mr[313] = mr[312] # CNTP_CVAL_NS = CNTP_CVAL
mr[286] = mr[285] # CNTP_TVAL_NS = CNTP_TVAL
mr[271] = mr[270] # CONTEXTIDR_NS = CONTEXTIDR
mr[104] = mr[103] # CSSELR_NS = CSSELR
mr[137] = mr[136] # DACR_NS = DACR
mr[155] = mr[154] # DFAR_NS = DFAR
mr[158] = mr[157] # IFAR_NS = IFAR
mr[143] = mr[142] # IFSR_NS = IFSR
mr[247] = mr[246] # NMRR_NS = NMRR
mr[166] = mr[165] # PAR_NS = PAR
mr[241] = mr[240] # PRRR_NS = PRRR
mr[ 4] = mr[424] # SPSR_SVC = SPSR_EL1
mr[ 7] = mr[435] # SPSR_HYP = SPSR_EL2
mr[ 5] = mr[442] # SPSR_MON = SPSR_EL3
mr[277] = mr[276] # TPIDRURO_NS = TPIDRURO
| mr[280] = | mr[279] # TPIDRPRW_NS = TPIDRPRW
mr[274] = mr[273] # TPIDRURW_NS = TPIDRURW
mr[132] = mr[131] # TTBCR_NS = TTBCR
mr[126] = mr[125] # TTBR0_NS = TTBR0
mr[129] = mr[128] # TTBR1_NS = TTBR1
mr[263] = mr[262] # VBAR_NS = VBAR
cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr))
|
memo/tensorflow | tensorflow/contrib/data/python/kernel_tests/zip_dataset_op_test.py | Python | apache-2.0 | 4,387 | 0.004331 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ZipDatasetTest(test.TestCase):
def testZipDataset(self):
component_placeholders = [
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64)
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component_placeholder)
for component_placeholder in component_placeholders
]
zipped = dataset_ops.Dataset.zip(datasets)
iterator = zipped.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.te | st_session() as sess:
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
sess.run(init_op, feed_dict={ph: value for ph, value in zip(
component_placeholders, equal_length_components)})
for i in range(4):
results = sess.run(get_next)
for component, result_comp | onent in zip(
equal_length_components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
variable_length_components = [[1, 2, 3, 4], [1, 2, 3, 4, 5], [1.0, 2.0]]
sess.run(init_op, feed_dict={ph: value for ph, value in zip(
component_placeholders, variable_length_components)})
for i in range(2):
results = sess.run(get_next)
for component, result_component in zip(
variable_length_components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testNestedZipDataset(self):
component_placeholders = [
array_ops.placeholder(dtypes.int64, shape=[4, 20]),
array_ops.placeholder(dtypes.int64, shape=[4, 22]),
array_ops.placeholder(dtypes.float64, shape=[4])
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component_placeholder)
for component_placeholder in component_placeholders
]
zipped = dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
iterator = zipped.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([20], get_next[0].shape)
self.assertEqual([22], get_next[1][0].shape)
self.assertEqual([], get_next[1][1].shape)
with self.test_session() as sess:
equal_length_components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
sess.run(init_op, feed_dict={ph: value for ph, value in zip(
component_placeholders, equal_length_components)})
for i in range(4):
result1, (result2, result3) = sess.run(get_next)
self.assertAllEqual(equal_length_components[0][i], result1)
self.assertAllEqual(equal_length_components[1][i], result2)
self.assertAllEqual(equal_length_components[2][i], result3)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/keras/python/keras/layers/noise_test.py | Python | apache-2.0 | 1,668 | 0.004197 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================== | ==========================================================
"""Tests for noise layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python import keras
from tensorflow.contrib.ke | ras.python.keras import testing_utils
from tensorflow.python.platform import test
class NoiseLayersTest(test.TestCase):
def test_GaussianNoise(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.GaussianNoise,
kwargs={'stddev': 1.},
input_shape=(3, 2, 3))
def test_GaussianDropout(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.GaussianDropout,
kwargs={'rate': 0.5},
input_shape=(3, 2, 3))
def test_AlphaDropout(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.AlphaDropout,
kwargs={'rate': 0.2},
input_shape=(3, 2, 3))
if __name__ == '__main__':
test.main()
|
YuepengGuo/sina_weibo_crawler | crawler/blogcrawler.py | Python | apache-2.0 | 10,877 | 0.001023 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import math
from BeautifulSoup import BeautifulSoup
import config
from toolkit.downloader import Downloader, DownloadError
from toolkit import datetimelib as dt
from parser.commonblogparser import CommonBlogParser
class PreprocessError(Exception):
"""
预处理过程中遇到的异常
"""
def __init__(self, uid=''):
self.error_msg = "Preprocess %s error in blog crawler." % uid
def __str__(self):
return repr(self.error_msg)
class UserNotFoundError(PreprocessError):
"""
用户不存在
"""
def __init__(self, uid=''):
self.error_msg = "User %s not found." % uid
def __str__(self):
return repr(self.error_msg)
class BlogCrawler(object):
def __init__(self):
self.charset = 'utf-8'
self.parser = CommonBlogParser()
self.downloader = Downloader()
# 设置页面url加载的参数
self.http_params = {
'is_search': '0',
'visible': '0',
'is_tag': '0',
'profile_ftype': 1,
'pagebar': '',
'pre_page': '0',
'page': 1
}
self.uid = ''
# ========= 完成解析用户微博数据前的准备工作 ========#
def _init_(self, url):
"""
解析用户微博数据前的准备工作,包括:
1. 获取当前用户的page_id
2. 获取当前用户的微博总页数
"""
http_params = {
'__rnd': '',
'_k': '',
'_t': '0',
'count': '15',
'end_id': '',
'max_id': '',
'page': 1,
'pagebar': '',
'pre_page': '0',
'profile_ftype': '1',
'uid': ''
}
content = self.downloader.download(url)
# 判断用户是否存在
if not self.exist(content):
raise UserNotFoundError(url)
# 获取用户ID
btag = "$CONFIG['oid']='"
etag = "';"
bpos = content.find(btag) + len(btag)
epos = content.find(etag, bpos)
uid = content[bpos:epos]
self.uid = uid
# 获取 page_id
self.page_id = self._parse_pageid(content)
# 获取微博总页数
self.pagenum = self._caculate_pagenum(content)
# 获取pid,抓取微博所需的domain参数
self.pid = self._parse_pid(content)
# 获取用户头像地址和昵称
img_url, nick_name = self._parse_userinfo(content)
self.parser.init_user(self.uid, img_url, nick_name)
self.url = self.get_url()
def exist(self, content):
"""
判断当前用户是否存在
------------------------------
return: 用户存在返回True,否则返回False
"""
if content.find('<title>错误提示') != -1:
return False
return True
def _parse_pageid(self, content):
"""
解析页面的page_id
----------------------------------
content: 待解析的网页内容
----------------------------------
return: page_id, 或空
"""
b | tag = "$CONFIG['page_id']='"
etag = "'"
page_id = '' |
if content:
bpos = content.find(btag)
if bpos:
bpos += len(btag)
epos = content.find(etag, bpos)
page_id = content[bpos:epos]
return page_id
def _caculate_pagenum(self, content):
"""
计算微博的总页数
------------------------------
return: 微博页数
"""
msgcount = self._parse_msgcount(content)
per_pagenum = 45
total_pagenum = msgcount / per_pagenum
if msgcount % per_pagenum:
total_pagenum += 1
return total_pagenum
def _parse_msgcount(self, content):
"""
解析微博条数
---------------------------
content: 网页文本
---------------------------
return: 微博条数
"""
if not content:
raise PreprocessError(self.uid)
etag1 = '>微博<\/span>'
etag2 = '<\/strong>'
btag = '>'
epos = content.find(etag1)
epos = content[:epos].rfind(etag2)
bpos = content[:epos].rfind(btag) + len(btag)
return int(content[bpos:epos])
def _parse_userinfo(self, content):
"""
解析用户的头像地址/用户昵称
-----------------------------
content: 网页文本
------------------------------
return: (img_url, nick_name)
"""
btag = '<div class="pf_photo"'
etag = '<\/div>'
bpos = content.find(btag)
epos = content.find(etag, bpos)
soup = BeautifulSoup(content[bpos:epos].replace('\\/', '/') + '</div>')
img_url = soup.img['src']
nick_name = soup.img['alt']
return img_url, nick_name
#======== 解析用户的微博数据,并保存结果 =======#
def scratch(self, url, start_pageindex=1):
"""
获取给定用户的所有满足条件的微博,并写入文件
----------------------------------------
uid: 待抓取微博数据的用户ID
start_pageindex: 从第几页开始抓取用户的微博数据
"""
self._init_(url)
from controller import Controller
# print Controller._get_filepath(self.uid)
if os.path.isfile(Controller._get_filepath(self.uid)): # 用户微博已下载
print self.uid, u'用户的微博已下载!'
return None
if start_pageindex > self.pagenum:
return []
#return self._binary_scratch(uid, start_pageindex)
return self._sequence_scratch(self.uid, start_pageindex, self.pagenum)
def _binary_scratch(self, uid, start_pageindex):
"""
执行二分式的抓取策略,从页码的中间偏后的位置开始抓取。
现在从总页面数的三分之一处开始抓取数据。
----------------------------------------------
start_pageindex: 起始页码
---------------------------------------------
return: blogs
"""
mid_pageindex = max((start_pageindex + self.pagenum) / 3, 1)
# 从前往后抓取微博
blogs1 = self._sequence_scratch(uid, mid_pageindex, self.pagenum, 1)
# 从后往前抓取微博
if mid_pageindex > start_pageindex:
blogs2 = self._sequence_scratch(uid, mid_pageindex - 1, start_pageindex, -1)
blogs1.extend(blogs2)
return blogs1
def _sequence_scratch(self, uid, start_pageindex, end_pageindex, direction=1):
"""
执行顺序抓取策略,按照顺序从前往后或者从后往前抓取
---------------------------------------------------
uid: 待抓取的用户ID
start_pageindex: 起始页码
end_pageindex: 结束页面
direction: 抓取的方向, 1->从前往后,pageindex递增;-1->从后往前,pageindex递减
---------------------------------------------------
return: blogs
"""
blogs = []
for pageindex in range(start_pageindex, end_pageindex + direction, direction):
temp_blogs = self._parse_blogs(pageindex)
print uid + ':获取第' + str(pageindex) + '页微博成功.'
blogs.extend(temp_blogs)
time.sleep(1)
if not self._continue(temp_blogs, direction):
break
return blogs
def _parse_blogs(self, pageindex):
"""
获取指定微博页面的三个子页的微博内容
-----------------------------------
return: 该页的微博列表
"""
blogs = []
self.http_params['page'] = pageindex
self.http_params['id'] = self.page_id
self.http_params['domain'] = self.pid
# 下载第一页
self.http_params['pre_page'] = self.http_params['page'] - 1
content = self.downloader.download(self.url, self.http_params)
if content:
sub_blogs = self.parser.parse(content)
blogs.extend(sub_blogs)
if not self._continue(blogs):
return blogs
# 下载第二页
self.http_params['count'] = '15'
self.http_params['pagebar'] = '0'
self.http_params['pre_page'] = self.http_params['page']
content = self.downloader.download(self.url, self.http_params)
if content:
sub_blogs = self.parser.parse(content)
blogs.extend(sub_blogs)
if not self._continue(sub_blogs):
return blogs
# 下载第三页
self.http_params['count'] = '15'
self.http_params['pagebar'] = '1'
self.http_params['pre_page'] = self.http_params['page']
content = self.downloader.download(self.url, self.http_params)
if content:
sub_blogs = self.parser.parse(conten |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/troubleshooting_result_py3.py | Python | mit | 1,603 | 0.001871 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated | by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TroubleshootingResult(Model):
"""Troubleshooting information gained from specified resource.
:param start | _time: The start time of the troubleshooting.
:type start_time: datetime
:param end_time: The end time of the troubleshooting.
:type end_time: datetime
:param code: The result code of the troubleshooting.
:type code: str
:param results: Information from troubleshooting.
:type results:
list[~azure.mgmt.network.v2017_08_01.models.TroubleshootingDetails]
"""
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'code': {'key': 'code', 'type': 'str'},
'results': {'key': 'results', 'type': '[TroubleshootingDetails]'},
}
def __init__(self, *, start_time=None, end_time=None, code: str=None, results=None, **kwargs) -> None:
super(TroubleshootingResult, self).__init__(**kwargs)
self.start_time = start_time
self.end_time = end_time
self.code = code
self.results = results
|
skdaccess/skdaccess | skdaccess/engineering/webcam/mit_sailing/stream.py | Python | mit | 2,645 | 0.006049 | # The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Authors: Cody Rude
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPL | IED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Skdaccess imports
from skdaccess.framewor | k.data_class import DataFetcherStream, ImageWrapper
from skdaccess.framework.param_class import *
# Standard library imports
from collections import OrderedDict
class DataFetcher(DataFetcherStream):
"""
Data Fetcher for retrieving webcam images from the MIT Sailing Pavilion
"""
def __init__(self, camera_list = ['E','SE','SW','W']):
"""
@param camera_list: Which camera to retrieve from (List that contains one or more of the following: 'E', 'SE', 'SW', or 'W')
"""
self.camera_list = camera_list
for camera in camera_list:
if camera not in ['E','SE','SW','W']:
raise RuntimeError('Camera: "' + camera + '" not understood')
self._base_url = 'http://sailing.mit.edu/img/'
self._image_name = '/latest.jpg'
super(DataFetcher, self).__init__()
def output(self):
"""
Retrieve data from webcams at the MIT Sailing Pavilion
@return Image Wrapper containing the latest images from the webcams
"""
url_list = []
for camera in self.camera_list:
url_list.append(self._base_url + camera + self._image_name)
metadata, data = self.retrieveOnlineData(url_list)
return ImageWrapper(data, meta_data = metadata)
|
guillaumelenoir/WAVEPAL | carmcmc/carma_pack.py | Python | mit | 72,802 | 0.005096 | __author__ = 'Brandon C. Kelly'
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve
from scipy.optimize import minimize
import samplers
import multiprocessing
import _carmcmc as carmcmcLib
class CarmaModel(object):
"""
Class for performing statistical inference assuming a CARMA(p,q) model.
"""
def __init__(self, time, y, ysig, p=1, q=0):
"""
Constructor for the CarmaModel class.
:param time: The observation times.
| :param y: Th | e measured time series.
:param ysig: The standard deviation in the measurements errors on the time series.
:param p: The order of the autoregressive (AR) polynomial. Default is p = 1.
:param q: The order of the moving average (MA) polynomial. Default is q = 0. Note that p > q.
"""
try:
p > q
except ValueError:
" Order of AR polynomial, p, must be larger than order of MA polynomial, q."
# check that time values are unique and in ascending ordered
s_idx = np.argsort(time)
t_unique, u_idx = np.unique(time[s_idx], return_index=True)
u_idx = s_idx[u_idx]
# convert input to std::vector<double> extension class
self._time = carmcmcLib.vecD()
self._time.extend(time[u_idx])
self._y = carmcmcLib.vecD()
self._y.extend(y[u_idx])
self._ysig = carmcmcLib.vecD()
self._ysig.extend(ysig[u_idx])
# save parameters
self.time = time[u_idx]
self.y = y[u_idx]
self.ysig = ysig[u_idx]
self.p = p
self.q = q
self.mcmc_sample = None
def run_mcmc(self, nsamples, nburnin=None, ntemperatures=None, nthin=1, init=None):
"""
Run the MCMC sampler. This is actually a wrapper that calls the C++ code that runs the MCMC sampler.
:param nsamples: The number of samples from the posterior to generate.
:param ntemperatures: Number of parallel MCMC chains to run in the parallel tempering algorithm. Default is 1
(no tempering) for p = 1 and max(10, p+q) for p > 1.
:param nburnin: Number of burnin iterations to run. The default is nsamples / 2.
:param nthin: Thinning interval for the MCMC sampler. Default is 1 (no thinning).
:return: Either a CarmaSample or Car1Sample object, depending on the values of self.p. The CarmaSample object
will also be stored as a data member of the CarmaModel object.
"""
if ntemperatures is None:
ntemperatures = max(10, self.p + self.q)
if nburnin is None:
nburnin = nsamples / 2
if init is None:
init = carmcmcLib.vecD()
if self.p == 1:
# Treat the CAR(1) case separately
cppSample = carmcmcLib.run_mcmc_car1(nsamples, nburnin, self._time, self._y, self._ysig,
nthin, init)
# run_mcmc_car1 returns a wrapper around the C++ CAR1 class, convert to python object
sample = Car1Sample(self.time, self.y, self.ysig, cppSample)
else:
cppSample = carmcmcLib.run_mcmc_carma(nsamples, nburnin, self._time, self._y, self._ysig,
self.p, self.q, ntemperatures, False, nthin, init)
# run_mcmc_car returns a wrapper around the C++ CARMA class, convert to a python object
sample = CarmaSample(self.time, self.y, self.ysig, cppSample, q=self.q)
self.mcmc_sample = sample
return sample
def get_mle(self, p, q, ntrials=100, njobs=1):
"""
Return the maximum likelihood estimate (MLE) of the CARMA model parameters. This is done by using the
L-BFGS-B algorithm from scipy.optimize on ntrials randomly distributed starting values of the parameters. This
this return NaN for more complex CARMA models, especially if the data are not well-described by a CARMA model.
In addition, the likelihood space can be highly multi-modal, and there is no guarantee that the global MLE will
be found using this procedure.
@param p: The order of the AR polynomial.
@param q: The order of the MA polynomial. Must be q < p.
@param ntrials: The number of random starting values for the optimizer. Default is 100.
@param njobs: The number of processors to use. If njobs = -1, then all of them are used. Default is njobs = 1.
@return: The scipy.optimize.Result object corresponding to the MLE.
"""
if njobs == -1:
njobs = multiprocessing.cpu_count()
args = [(p, q, self.time, self.y, self.ysig)] * ntrials
if njobs == 1:
MLEs = map(_get_mle_single, args)
else:
# use multiple processors
pool = multiprocessing.Pool(njobs)
# warm up the pool
pool.map(int, range(multiprocessing.cpu_count()))
MLEs = pool.map(_get_mle_single, args)
pool.terminate()
best_MLE = MLEs[0]
for MLE in MLEs:
if MLE.fun < best_MLE.fun: # note that MLE.fun is -loglik since we use scipy.optimize.minimize
# new MLE found, save this value
best_MLE = MLE
print best_MLE.message
return best_MLE
def choose_order(self, pmax, qmax=None, pqlist=None, njobs=1, ntrials=100):
"""
Choose the order of the CARMA model by minimizing the AICc(p,q). This first computes the maximum likelihood
estimate on a grid of (p,q) values using self.get_mle, and then choosing the value of (p,q) that minimizes
the AICc. These values of p and q are stored as self.p and self.q.
@param pmax: The maximum order of the AR(p) polynomial to search over.
@param qmax: The maximum order of the MA(q) polynomial to search over. If none, search over all possible values
of q < p.
@param pqlist: A list of (p,q) tuples. If supplied, the (p,q) pairs are used instead of being generated from the
values of pmax and qmax.
@param njobs: The number of processors to use for calculating the MLE. A value of njobs = -1 will use all
available processors.
@param ntrials: The number of random starts to use in the MLE, the default is 100.
@return: A tuple of (MLE, pqlist, AICc). MLE is a scipy.optimize.Result object containing the maximum-likelihood
estimate. pqlist contains the values of (p,q) used in the search, and AICc contains the values of AICc for
each (p,q) pair in pqlist.
"""
try:
pmax > 0
except ValueError:
"Order of AR polynomial must be at least 1."
if qmax is None:
qmax = pmax - 1
try:
pmax > qmax
except ValueError:
" Order of AR polynomial, p, must be larger than order of MA polynimial, q."
if pqlist is None:
pqlist = []
for p in xrange(1, pmax+1):
for q in xrange(p):
pqlist.append((p, q))
MLEs = []
for pq in pqlist:
MLE = self.get_mle(pq[0], pq[1], ntrials=ntrials, njobs=njobs)
MLEs.append(MLE)
best_AICc = 1e300
AICc = []
best_MLE = MLEs[0]
print 'p, q, AICc:'
for MLE, pq in zip(MLEs, pqlist):
nparams = 2 + pq[0] + pq[1]
deviance = 2.0 * MLE.fun
this_AICc = 2.0 * nparams + deviance + 2.0 * nparams * (nparams + 1.0) / (self.time.size - nparams - 1.0)
print pq[0], pq[1], this_AICc
AICc.append(this_AICc)
if this_AICc < best_AICc:
# new optimum found, save values
best_MLE = MLE
best_AICc = this_AICc
self.p = pq[0]
self.q = pq[1]
print 'Model with best AICc has p =', self.p, ' and q = ', self.q
return best_MLE, pqlist, AICc
def _get_mle_single(args):
p, q, time, y, ysig = args
nsamples = 1
nburnin = 25
nwalkers = 10 |
skirsdeda/django-filer | filer/server/backends/xsendfile.py | Python | bsd-3-clause | 647 | 0.001546 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.http import HttpResponse
from .base import ServerBase
class ApacheXSendfileServer(ServerBase):
def serve(self, request, file_obj, **kwargs):
response = HttpResponse()
response['X-Sendfile | '] = file_obj.path
# This is needed for lighttpd | , hopefully this will
# not be needed after this is fixed:
# http://redmine.lighttpd.net/issues/2076
response['Content-Type'] = self.get_mimetype(file_obj.path)
self.default_headers(request=request, response=response, file_obj=file_obj, **kwargs)
return response
|
zhlinh/leetcode | 0204.Count Primes/solution.py | Python | apache-2.0 | 1,253 | 0.002394 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-03-24
Last_modify: 2016-03-24
******************************************
'''
'''
Description:
Count the number of prime numbers less than a non-negative number, n.
Credits:
Special thanks to @mithmatt for adding this problem and creating all test cases.
Hint:
Let's start with a isPrime function. To determine if a number is prime,
we need to check if it is not divisible by any number less than n.
The runtime complexity of isPrime function would be O(n) and hence
counting the total prime numbers up to n would be O(n2). Could we do better?
Show More Hint
'''
import math
class Solution(obje | ct):
def countPrimes(self, n):
"""
:type n: int
:rtype: int
"""
if n < 3:
return 0
res = 1
primes = [True] * n
upper = math.sqrt(n)
for i in range(3, n, 2):
if primes[i]:
res += 1
if i > upper:
contin | ue
for j in range(i*i, n, i):
primes[j] = False
return res
|
sergey-a-berezin/acme-tiny-cron | acme_tiny_cron/protos/__init__.py | Python | apache-2.0 | 575 | 0 | # Copyright 2017 Sergey Berezin
# Licensed under the Apache License | , Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Lic | ense for the specific language governing permissions and
# limitations under the License.
|
iamgp/pyCa | pyCa/Cell.py | Python | gpl-3.0 | 1,577 | 0.000634 | from Stimulant import *
import pandas as pd
class Cell(object):
    """Container for one cell's stimulant responses.

    Points are captured pairwise (addFirstPoint / addSecondPointWithName);
    each completed pair is archived as a named Stimulant in ``self.stimulants``.
    """

    def __init__(self):
        self.stimulants = {}          # name -> completed Stimulant
        self.stimulant = Stimulant()  # stimulant currently being built
        self.p1 = Point()
        self.p2 = Point()
        self._zero_points()
        self.cellname = ''

    def _zero_points(self):
        # Reset both working points to the origin.
        self.p1.x = 0
        self.p1.y = 0
        self.p2.x = 0
        self.p2.y = 0

    def reset(self):
        """Discard the current pair of working points."""
        self.p1 = Point()
        self.p2 = Point()
        self._zero_points()

    def addFirstPoint(self, x, y):
        """Record the first point of the stimulant being built."""
        self.p1.x = x
        self.p1.y = y
        self.stimulant.p1 = self.p1

    def addSecondPointWithName(self, x, y, name):
        """Record the second point, name the stimulant and archive it."""
        self.p2.x = x
        self.p2.y = y
        self.stimulant.p2 = self.p2
        self.stimulant.name = name
        self.stimulants[name] = self.stimulant
        # Start fresh working objects for the next capture (the previous
        # `del` statements were redundant: rebinding is sufficient).
        self.stimulant = Stimulant()
        self.p1 = Point()
        self.p2 = Point()

    def makePandasDF(self):
        """Return {'bp': ..., 'g': ...} DataFrames keyed by this cell's name."""
        bpdict = {}
        gdict = {}
        for s in self.stimulants:
            bpdict[self.stimulants[s].name] = self.stimulants[s].basalToPeak()
            gdict[self.stimulants[s].name] = self.stimulants[s].gradient()
        return {'bp': pd.DataFrame.from_dict({self.cellname: bpdict}),
                'g': pd.DataFrame.from_dict({self.cellname: gdict})}

    def describe(self):
        """Return a multi-line text description of all stored stimulants."""
        description = ''
        for s in self.stimulants:
            description += ' >> ' + self.stimulants[s].description() + '\n'
        return description
|
InQuest/ThreatKB | app/routes/errors.py | Python | gpl-2.0 | 952 | 0.002101 | from flask import abort, jsonify, request, send_file, json, Response
from flask_login import login_required, current_user
from app import app, db, admin_only, auto
from app.models import errors
@app.route('/ThreatKB/errors', methods=['GET'])
@auto.doc()
@login_required
@admin_only()
def get_all_errors():
    """Return the 50 most recent errors in ThreatKB.

    Return: list of error dictionaries"""
    # Newest first; capped at 50 to keep the payload bounded.
    entities = errors.Error.query.order_by(errors.Error.id.desc()).limit(50).all()
    return Response(json.dumps([entity.to_dict() for entity in entities]), mimetype="application/json")
@app.route('/ThreatKB/errors/<int:error_id>', methods=['GET'])
@auto.doc()
@login_required
@admin_only()
def get_error(error_id):
    """Return the error associated with error_id, or 404 if absent.

    Return: error dictionary"""
    error_entity = errors.Error.query.get(error_id)
    if not error_entity:
        abort(404)
    return Response(json.dumps(error_entity.to_dict()),
                    mimetype="application/json")
|
pycroscopy/pycroscopy | pycroscopy/image/image_atoms.py | Python | mit | 7,225 | 0.002215 | """
Voronoi analysis of atom positions
author Gerd and Rama
part of pycrosocpy
"""
import numpy as np
import sys
# from skimage.feature import peak_local_max
from skimage.feature import blob_log
from sklearn.cluster import KMeans
from scipy.spatial import cKDTree
import scipy.optimize as optimization
import pyTEMlib.probe_tools as probe_tools
import pyTEMlib.file_tools as ft
import sidpy
from tqdm import trange
def find_atoms(image, atom_size=0.1, threshold=-1.):
    """Locate atoms in an image via skimage's Laplacian-of-Gaussian blob finder.

    Parameters
    ----------
    image: sidpy.Dataset
        image (sidpy Datatype IMAGE) in which to locate atoms
    atom_size: float
        visible atom blob diameter in nm; sets the minimal separation of blobs
    threshold: float
        blob-finder threshold; if <= 0 the image RMS contrast is used instead

    Returns
    -------
    numpy array (N x 3) of blob positions and radii
    """
    # Validate inputs up front, preserving the original check order/messages.
    if not isinstance(image, sidpy.Dataset):
        raise TypeError('We need a sidpy.Dataset')
    if image.data_type.name != 'IMAGE':
        raise TypeError('We need sidpy.Dataset of sidpy.Datatype: IMAGE')
    if not isinstance(atom_size, (float, int)):
        raise TypeError('atom_size parameter has to be a number')
    if not isinstance(threshold, float):
        raise TypeError('threshold parameter has to be a float number')

    pixel_size = ft.get_slope(image.dim_0)
    # Normalize the image to [0, 1] before blob detection.
    normalized = np.array(image - image.min())
    normalized = normalized / normalized.max()
    if threshold < 0.:
        threshold = np.std(normalized)
    return blob_log(normalized, max_sigma=atom_size / pixel_size,
                    threshold=threshold)
def atoms_clustering(atoms, mid_atoms, number_of_clusters=3, nearest_neighbours=7):
    """Cluster atoms by their nearest-neighbour distance patterns via k-means.

    Parameters
    ----------
    atoms: list or np.array (Nx2)
        list of all atoms
    mid_atoms: list or np.array (Nx2)
        atoms to be evaluated
    number_of_clusters: int
        number of clusters to sort (default 3)
    nearest_neighbours: int
        number of nearest neighbours evaluated

    Returns
    -------
    clusters, distances, indices: numpy arrays
    """
    # Nearest-neighbour distances from each mid_atom to the full atom set.
    all_positions = np.array(atoms)[:, 0:2]
    query_positions = np.array(mid_atoms)[:, 0:2]
    nn_tree = cKDTree(all_positions)
    distances, indices = nn_tree.query(query_positions, nearest_neighbours)

    # k-means on the distance vectors; fixed random_state keeps runs reproducible.
    k_means = KMeans(n_clusters=number_of_clusters, random_state=0)
    k_means.fit(distances)
    clusters = k_means.predict(distances)
    return clusters, distances, indices
def gauss_difference(params, area):
    """
    Difference between part of an image and a Gaussian

    Used as the residual function for scipy leastsq in atom_refine below.

    Parameters
    ----------
    params: list
        list of Gaussian parameters [width, position_x, position_y, intensity]
    area: numpy array
        2D matrix = part of an image

    Returns
    -------
    numpy array: flattened array of difference
    """
    # Render a Gaussian with the candidate parameters and return the residual.
    gauss = probe_tools.make_gauss(area.shape[0], area.shape[1], width=params[0], x0=params[1], y0=params[2],
                                   intensity=params[3])
    return (area - gauss).flatten()
def atom_refine(image, atoms, radius, max_int=0, min_int=0, max_dist=4):
    """Fits a Gaussian in a blob of an image

    Parameters
    ----------
    image: np.array or sidpy Dataset
    atoms: list or np.array
        positions of atoms
    radius: float
        radius of circular mask to define fitting of Gaussian
    max_int: float
        optional - maximum intensity to be considered for fitting (to exclude contaminated areas for example)
    min_int: float
        optional - minimum intensity to be considered for fitting (to exclude contaminated holes for example)
    max_dist: float
        optional - maximum distance of movement of Gaussian during fitting

    Returns
    -------
    sym: dict
        dictionary containing new atom positions and other output such as intensity of the fitted Gaussian;
        all per-atom lists have one entry per input atom.
    """
    rr = int(radius + 0.5)  # atom radius, rounded to whole pixels
    print('using radius ', rr, 'pixels')

    pixels = np.linspace(0, 2 * rr, 2 * rr + 1) - rr
    x, y = np.meshgrid(pixels, pixels)
    mask = (x ** 2 + y ** 2) < rr ** 2  # circular mask centered on the atom
    guess = [rr * 2, 0.0, 0.0, 1]  # initial [width, x0, y0, intensity]

    sym = {'number_of_atoms': len(atoms)}
    volume = []
    position = []
    intensities = []
    maximum_area = []
    new_atoms = []
    gauss_width = []
    gauss_amplitude = []
    gauss_intensity = []
    for i in trange(len(atoms)):
        x, y = atoms[i][0:2]
        x = int(x)
        y = int(y)
        area = image[x - rr:x + rr + 1, y - rr:y + rr + 1]

        append = False
        outside = (x - rr) < 0 or y - rr < 0 or x + rr + 1 > image.shape[0] or y + rr + 1 > image.shape[1]
        if outside:
            # Fitting window would leave the image: flag with sentinel values.
            position.append(-1)
            intensities.append(-1.)
            maximum_area.append(-1.)
        else:
            position.append(1)
            intensities.append((area * mask).sum())
            maximum_area.append((area * mask).max())
            # Only fit blobs whose integrated intensity lies in the accepted
            # range (max_int <= 0 disables the upper bound).
            if max_int > 0:
                append = min_int < area.sum() < max_int
            else:
                append = area.sum() > min_int

        pout = [0, 0, 0, 0]
        if append:
            [pout, _] = optimization.leastsq(gauss_difference, guess, args=area)
            if (abs(pout[1]) > max_dist) or (abs(pout[2]) > max_dist):
                # Fit wandered too far from the blob center: reject it.
                pout = [0, 0, 0, 0]

        volume.append(2 * np.pi * pout[3] * pout[0] * pout[0])
        new_atoms.append([x + pout[1], y + pout[2]])
        if all(v == 0 for v in pout):
            gauss_intensity.append(0.)
        else:
            gauss = probe_tools.make_gauss(area.shape[0], area.shape[1], width=pout[0], x0=pout[1], y0=pout[2],
                                           intensity=pout[3])
            gauss_intensity.append((gauss * mask).sum())
        gauss_width.append(pout[0])
        gauss_amplitude.append(pout[3])

    sym['inside'] = position
    sym['intensity_area'] = intensities
    sym['maximum_area'] = maximum_area
    sym['atoms'] = new_atoms
    sym['gauss_width'] = gauss_width
    sym['gauss_amplitude'] = gauss_amplitude
    sym['gauss_intensity'] = gauss_intensity
    sym['gauss_volume'] = volume
    return sym
def intensity_area(image, atoms, radius):
    """
    Integrated intensity of each atom, using a circular mask of the given
    radius around the atom position. Atoms whose window is clipped by the
    image edge get -1.
    """
    rr = int(radius + 0.5)  # atom radius in whole pixels
    print('using radius ', rr, 'pixels')
    pixels = np.linspace(0, 2 * rr, 2 * rr + 1) - rr
    xx, yy = np.meshgrid(pixels, pixels)
    mask = np.array((xx ** 2 + yy ** 2) < rr ** 2)

    intensities = []
    for atom in atoms:
        # NOTE(review): row/column order is swapped relative to atom_refine
        # above -- presumably intentional (blob_log row/col convention);
        # kept as-is.
        row = int(atom[1])
        col = int(atom[0])
        window = image[row - rr:row + rr + 1, col - rr:col + rr + 1]
        if window.shape == mask.shape:
            intensities.append((window * mask).sum())
        else:
            # Window clipped at the image edge: flag with -1.
            intensities.append(-1)
    return intensities
|
estebanfallasf/python_training | ex2.py | Python | gpl-2.0 | 978 | 0.005112 | # A comment, this is so you can read your program later.
# Anything after the # is ignored by python.
print "I could have code like this." # and the comment after is ignored
# You can also use a comment to "disable" or comment out a piece of code:
# print "This won't run."
print " | This will run."
# Adding another few lines just for fun.
print 'Q: Why does the "#" in "print "Hi # there." not get ignored?'
# print 'The # in that code is inside a string, so it will put into the string until the ending " character is hit. These pound characters are just considered characters and are not considered comments.'
# Another way to put it: (aren't instead of "are not")
print "The # in that code is inside a string, so it will put into the strin | g until the ending \" character is hit. These pound characters are just considered characters and aren't considered comments."
# The backslash will escape the special character, as seen on the code above. Isaac Albeniz - Asturias :-)
|
nwjs/chromium.src | testing/unexpected_passes_common/data_types.py | Python | bsd-3-clause | 22,630 | 0.006805 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Various custom data types for use throughout the unexpected pass finder."""
from __future__ import print_function
import collections
import copy
import fnmatch
import logging
import six
FULL_PASS = 1
NEVER_PASS = 2
PARTIAL_PASS = 3
# Allow different unexpected pass finder implementations to register custom
# data types if necessary. These are set to the base versions at the end of the
# file.
Expectation = None
Result = None
BuildStats = None
TestExpectationMap = None
def SetExpectationImplementation(impl):
  # Install the concrete Expectation subclass used throughout this module.
  global Expectation
  assert issubclass(impl, BaseExpectation)
  Expectation = impl


def SetResultImplementation(impl):
  # Install the concrete Result subclass used throughout this module.
  global Result
  assert issubclass(impl, BaseResult)
  Result = impl


def SetBuildStatsImplementation(impl):
  # Install the concrete BuildStats subclass used throughout this module.
  global BuildStats
  assert issubclass(impl, BaseBuildStats)
  BuildStats = impl


def SetTestExpectationMapImplementation(impl):
  # Install the concrete TestExpectationMap subclass used throughout this
  # module.
  global TestExpectationMap
  assert issubclass(impl, BaseTestExpectationMap)
  TestExpectationMap = impl
class BaseExpectation(object):
  """Container for a test expectation.

  Equivalent to a single line in an expectation file: a test name (possibly
  a wildcard), a frozen set of typ tags, the expected results, and an
  optional bug string. Instances are hashable so they can be used as
  dict/set keys.
  """

  def __init__(self, test, tags, expected_results, bug=None):
    self.test = test
    self.tags = frozenset(tags)
    self.bug = bug or ''
    if isinstance(expected_results, str):
      expected_results = [expected_results]
    self.expected_results = frozenset(expected_results)

    # fnmatch is much slower (~40x) than plain string equality, so only use
    # it for genuine wildcard patterns.
    if '*' in test:
      self._comp = self._CompareWildcard
    else:
      self._comp = self._CompareNonWildcard

  def __eq__(self, other):
    if not isinstance(other, BaseExpectation):
      return False
    return (self.test == other.test and self.tags == other.tags
            and self.expected_results == other.expected_results
            and self.bug == other.bug)

  def __ne__(self, other):
    return not self.__eq__(other)

  def __hash__(self):
    return hash((self.test, self.tags, self.expected_results, self.bug))

  def _CompareWildcard(self, result_test_name):
    return fnmatch.fnmatch(result_test_name, self.test)

  def _CompareNonWildcard(self, result_test_name):
    return result_test_name == self.test

  def AppliesToResult(self, result):
    """Checks whether this expectation should have applied to |result|.

    An expectation applies to a result if the test names match (including
    wildcard expansion) and the expectation's tags are a subset of the
    result's tags.

    Args:
      result: A Result instance to check against.

    Returns:
      True if |self| applies to |result|, otherwise False.
    """
    assert isinstance(result, BaseResult)
    return self._comp(result.test) and self.tags <= result.tags

  def MaybeAppliesToTest(self, test_name):
    """Similar to AppliesToResult, but used to do initial filtering.

    Args:
      test_name: A string containing the name of a test.

    Returns:
      True if |self| could apply to a test named |test_name|, otherwise False.
    """
    return self._comp(test_name)
class BaseResult(object):
  """Container for a test result.

  Contains the minimal amount of data necessary to describe/identify a result
  from ResultDB for the purposes of the unexpected pass finder.
  """

  def __init__(self, test, tags, actual_result, step, build_id):
    """
    Args:
      test: A string containing the name of the test.
      tags: An iterable containing the typ tags for the result.
      actual_result: The actual result of the test as a string.
      step: A string containing the name of the step on the builder.
      build_id: A string containing the Buildbucket ID for the build this result
          came from.
    """
    self.test = test
    self.tags = frozenset(tags)
    self.actual_result = actual_result
    self.step = step
    self.build_id = build_id

  def __eq__(self, other):
    # Field-wise equality over all five stored fields.
    return (isinstance(other, BaseResult) and self.test == other.test
            and self.tags == other.tags
            and self.actual_result == other.actual_result
            and self.step == other.step and self.build_id == other.build_id)

  def __ne__(self, other):
    return not self.__eq__(other)

  def __hash__(self):
    return hash(
        (self.test, self.tags, self.actual_result, self.step, self.build_id))
class BaseBuildStats(object):
  """Tracks a builder's pass/fail tallies and links to failing builds."""

  def __init__(self):
    self.passed_builds = 0
    self.total_builds = 0
    self.failure_links = frozenset()

  @property
  def failed_builds(self):
    return self.total_builds - self.passed_builds

  @property
  def did_fully_pass(self):
    return self.passed_builds == self.total_builds

  @property
  def did_never_pass(self):
    return self.failed_builds == self.total_builds

  def AddPassedBuild(self):
    self.passed_builds += 1
    self.total_builds += 1

  def AddFailedBuild(self, build_id):
    self.total_builds += 1
    # failure_links is an immutable frozenset, so record the new link by
    # taking a union with a singleton set.
    link = BuildLinkFromBuildId(build_id)
    self.failure_links = self.failure_links | frozenset([link])

  def GetStatsAsString(self):
    return '(%d/%d passed)' % (self.passed_builds, self.total_builds)

  def NeverNeededExpectation(self, expectation):  # pylint:disable=unused-argument
    """Returns whether the results tallied in |self| never needed |expectation|.

    Args:
      expectation: An Expectation object that |stats| is located under.

    Returns:
      True if all the results tallied in |self| would have passed without
      |expectation| being present. Otherwise, False.
    """
    return self.did_fully_pass

  def AlwaysNeededExpectation(self, expectation):  # pylint:disable=unused-argument
    """Returns whether the results tallied in |self| always needed |expectation|.

    Args:
      expectation: An Expectation object that |stats| is located under.

    Returns:
      True if all the results tallied in |self| would have failed without
      |expectation| being present. Otherwise, False.
    """
    return self.did_never_pass

  def __eq__(self, other):
    if not isinstance(other, BuildStats):
      return False
    return (self.passed_builds == other.passed_builds
            and self.total_builds == other.total_builds
            and self.failure_links == other.failure_links)

  def __ne__(self, other):
    return not self.__eq__(other)
def BuildLinkFromBuildId(build_id):
  """Returns the CI results page URL for |build_id|."""
  return 'http://ci.chromium.org/b/{}'.format(build_id)
# These explicit overrides could likely be replaced by using regular dicts with
# type hinting in Python 3. Based on https://stackoverflow.com/a/2588648, this
# should cover all cases where the dict can be modified.
class BaseTypedMap(dict):
"""A base class for typed dictionaries.
Any child classes that override __setitem__ will have any modifications to the
dictionary go through the type checking in __setitem__.
"""
def __init__(self, *args, **kwargs): # pylint:disable=super-init-not-called
self.update(*args, **kwargs)
def update(self, *args, **kwargs):
if args:
assert len(args) == 1
other = dict(args[0])
for k, v in other.items():
self[k] = v
for k, v in kwargs.items():
self[k] = v
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def _value_type(self):
raise NotImplementedError()
def IterToValueType(self, value_type):
"""Recursively iterates over contents until |value_type| is found.
Used to get rid of nested loops, instead using a single loop that
automatically iterates through all the contents at a certain depth.
Args:
value_type: The type to recurse to and then iterate over. For example,
"BuilderStepMap" would result in iterating over th |
decvalts/iris | lib/iris/tests/integration/test_pp_constrained_load_cubes.py | Python | gpl-3.0 | 2,825 | 0 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Integration tests for :func:`iris.fileformats.rules.load_cubes`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import iris
from iris.fileformats import pp
from iris.fileformats.pp_rules import convert
from iris.fileformats.rules import load_cubes
class Test(tests.IrisTest):
    """Integration tests for PP loading via STASH attribute constraints."""

    def _load(self, constraints):
        # Shared helper: load the dec_subset.pp test file with |constraints|
        # converted to PP-level constraints.
        filenames = [tests.get_data_path(('PP', 'globClim1', 'dec_subset.pp'))]
        pp_constraints = pp._convert_constraints(constraints)
        pp_loader = iris.fileformats.rules.Loader(pp.load, {},
                                                  convert, pp._load_rules)
        return list(load_cubes(filenames, None, pp_loader, pp_constraints))

    @tests.skip_data
    def test_pp_with_stash_constraint(self):
        stcon = iris.AttributeConstraint(STASH='m01s00i004')
        cubes = self._load(stcon)
        self.assertEqual(len(cubes), 38)

    @tests.skip_data
    def test_pp_with_stash_constraints(self):
        stcon1 = iris.AttributeConstraint(STASH='m01s00i004')
        stcon2 = iris.AttributeConstraint(STASH='m01s00i010')
        cubes = self._load([stcon1, stcon2])
        self.assertEqual(len(cubes), 76)

    @tests.skip_data
    def test_pp_no_constraint(self):
        cubes = self._load(None)
        self.assertEqual(len(cubes), 152)
if __name__ == "__main__":
tests.main()
|
project-chip/connectedhomeip | scripts/idl/generators/types.py | Python | apache-2.0 | 14,504 | 0.002965 | # Copyright (c) 2022 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express | or implied.
# See the License for the specific l | anguage governing permissions and
# limitations under the License.
import logging
import enum
from idl.matter_idl_types import DataType
from idl import matter_idl_types # to explicitly say 'Enum'
from typing import Union, List, Optional
from dataclasses import dataclass
def ToPowerOfTwo(bits: int) -> int:
    """
    Return the smallest power of two that is >= the given value.

    Used to map odd matter bit widths onto storage sizes, e.g. an int24
    fits in an int32, so ToPowerOfTwo(24) == 32.
    """
    size = 1
    while size < bits:
        size <<= 1
    return size
@dataclass
class BasicInteger:
    """
    An integer-backed value: an IDL name plus storage size and signedness.
    """
    idl_name: str
    byte_count: int  # NOTE: may NOT be a power of 2 for odd sized integers
    is_signed: bool

    @property
    def bits(self):
        # 8 bits per byte of storage.
        return 8 * self.byte_count

    @property
    def power_of_two_bits(self):
        # Raw bit count rounded up to a power of two (e.g. int24 -> 32).
        return ToPowerOfTwo(self.bits)
@dataclass
class BasicString:
    """
    Represents either a string or a binary string (blob).
    """
    idl_name: str
    is_binary: bool
    # None means the string has no declared maximum length.
    max_length: Optional[int] = None
class FundamentalType(enum.Enum):
    """
    Native types, generally available across C++/ObjC/Java/python/other.
    """
    BOOL = enum.auto()
    FLOAT = enum.auto()
    DOUBLE = enum.auto()

    @property
    def idl_name(self):
        """Matter IDL spelling of this type."""
        if self == FundamentalType.BOOL:
            return "bool"
        elif self == FundamentalType.FLOAT:
            return "single"
        elif self == FundamentalType.DOUBLE:
            return "double"
        else:
            # Was `raise Error(...)`: `Error` is undefined here, so the
            # original would raise a NameError instead of a useful exception.
            raise ValueError("Type not handled: %r" % self)

    @property
    def byte_count(self):
        """Storage size in bytes."""
        if self == FundamentalType.BOOL:
            return 1
        elif self == FundamentalType.FLOAT:
            return 4
        elif self == FundamentalType.DOUBLE:
            return 8
        else:
            raise ValueError("Type not handled: %r" % self)

    @property
    def bits(self):
        """Storage size in bits."""
        return self.byte_count * 8
@dataclass
class IdlEnumType:
    """
    An enumeration type. Enumerations are constants with an underlying
    base type that is an integer.
    """
    idl_name: str
    # Forward-reference string: BasicInteger is defined earlier in this module.
    base_type: "BasicInteger"

    @property
    def byte_count(self):
        # Was `base_type.byte_count()`: NameError (no such global), and
        # byte_count is an attribute of BasicInteger, not a callable.
        return self.base_type.byte_count

    @property
    def bits(self):
        # Was `base_type.bits()`: same problems as byte_count above.
        return self.base_type.bits
@dataclass
class IdlBitmapType:
    """
    Bitmaps mark that each bit (or a subset of said bits) have a meaning.

    Examples include "feature maps" where bits represent feature available or not.
    """
    idl_name: str
    # Forward-reference string: BasicInteger is defined earlier in this module.
    base_type: "BasicInteger"

    @property
    def byte_count(self):
        # Was `base_type.byte_count()`: NameError (no such global), and
        # byte_count is an attribute of BasicInteger, not a callable.
        return self.base_type.byte_count

    @property
    def bits(self):
        # Was `base_type.bits()`: same problems as byte_count above.
        return self.base_type.bits
class IdlItemType(enum.Enum):
    UNKNOWN = enum.auto()
    STRUCT = enum.auto()


@dataclass
class IdlType:
    """
    A type defined within the IDL — today that means structures, since all
    other types are described elsewhere (enums/bitmaps/basic types).

    Because IDL parsing is purely syntactic, a type may also be marked
    UNKNOWN (referenced but likely never defined).
    """
    idl_name: str
    item_type: IdlItemType

    @property
    def is_struct(self) -> bool:
        # Enum members compare by identity, so `is` is equivalent to `==`.
        return self.item_type is IdlItemType.STRUCT
# Data types, held by ZAP in chip-types.xml
__CHIP_SIZED_TYPES__ = {
"bitmap16": BasicInteger(idl_name="bitmap16", byte_count=2, is_signed=False),
"bitmap24": BasicInteger(idl_name="bitmap24", byte_count=3, is_signed=False),
"bitmap32": BasicInteger(idl_name="bitmap32", byte_count=4, is_signed=False),
"bitmap64": BasicInteger(idl_name="bitmap64", byte_count=8, is_signed=False),
"bitmap8": BasicInteger(idl_name="bitmap8", byte_count=1, is_signed=False),
"enum16": BasicInteger(idl_name="enum16", byte_count=2, is_signed=False),
"enum32": BasicInteger(idl_name="enum32", byte_count=4, is_signed=False),
"enum8": BasicInteger(idl_name="enum8", byte_count=1, is_signed=False),
"int16s": BasicInteger(idl_name="int16s", byte_count=2, is_signed=True),
"int16u": BasicInteger(idl_name="int16u", byte_count=2, is_signed=False),
"int24s": BasicInteger(idl_name="int24s", byte_count=3, is_signed=True),
"int24u": BasicInteger(idl_name="int24u", byte_count=3, is_signed=False),
"int32s": BasicInteger(idl_name="int32s", byte_count=4, is_signed=True),
"int32u": BasicInteger(idl_name="int32u", byte_count=4, is_signed=False),
"int40s": BasicInteger(idl_name="int40s", byte_count=5, is_signed=True),
"int40u": BasicInteger(idl_name="int40u", byte_count=5, is_signed=False),
"int48s": BasicInteger(idl_name="int48s", byte_count=6, is_signed=True),
"int48u": BasicInteger(idl_name="int48u", byte_count=6, is_signed=False),
"int56s": BasicInteger(idl_name="int56s", byte_count=7, is_signed=True),
"int56u": BasicInteger(idl_name="int56u", byte_count=7, is_signed=False),
"int64s": BasicInteger(idl_name="int64s", byte_count=8, is_signed=True),
"int64u": BasicInteger(idl_name="int64u", byte_count=8, is_signed=False),
"int8s": BasicInteger(idl_name="int8s", byte_count=1, is_signed=True),
"int8u": BasicInteger(idl_name="int8u", byte_count=1, is_signed=False),
# Derived types
"action_id": BasicInteger(idl_name="action_id", byte_count=1, is_signed=True),
"attrib_id": BasicInteger(idl_name="attrib_id", byte_count=4, is_signed=True),
"cluster_id": BasicInteger(idl_name="cluster_id", byte_count=4, is_signed=True),
"command_id": BasicInteger(idl_name="command_id", byte_count=4, is_signed=True),
"data_ver": BasicInteger(idl_name="data_ver", byte_count=4, is_signed=True),
"date": BasicInteger(idl_name="date", byte_count=4, is_signed=True),
"devtype_id": BasicInteger(idl_name="devtype_id", byte_count=4, is_signed=True),
"endpoint_no": BasicInteger(idl_name="endpoint_no", byte_count=2, is_signed=True),
"epoch_s": BasicInteger(idl_name="epoch_s", byte_count=4, is_signed=False),
"epoch_us": BasicInteger(idl_name="epoch_us", byte_count=8, is_signed=False),
"event_id": BasicInteger(idl_name="event_id", byte_count=4, is_signed=True),
"event_no": BasicInteger(idl_name="event_no", byte_count=8, is_signed=True),
"fabric_id": BasicInteger(idl_name="fabric_id", byte_count=8, is_signed=True),
"fabric_idx": BasicInteger(idl_name="fabric_idx", byte_count=1, is_signed=True),
"field_id": BasicInteger(idl_name="field_id", byte_count=4, is_signed=True),
"group_id": BasicInteger(idl_name="group_id", byte_count=2, is_signed=True),
"node_id": BasicInteger(idl_name="node_id", byte_count=8, is_signed=True),
"percent": BasicInteger(idl_name="percent", byte_count=1, is_signed=True),
"percent100ths": BasicInteger(idl_name="percent100ths", byte_count=2, is_signed=True),
"status": BasicInteger(idl_name="status", byte_count=2, is_signed=True),
"systime_us": BasicInteger(idl_name="systime_us", byte_count=8, is_signed=True),
"tod": BasicInteger(idl_name="tod", byte_count=4, is_signed=True),
"trans_id": BasicInteger(idl_name="trans_id", byte_count=4, is_signed=True),
"utc": BasicInteger(idl_name="utc", byte_count=4, is_signed=True),
"vendor_id": BasicInteger(idl_name="vendor_id", byte_count=2, is_signed=True),
}
class TypeLookupContext:
"""
Handles type lookups within a scope.
Generally when looking for a stru |
PythonSanSebastian/python-rtmbot | rtmbot/logger.py | Python | mit | 246 | 0.012195 |
import logging
def setup_logging(logfile, level=logging.INFO):
    """Configure the root logger via logging.basicConfig.

    Args:
        logfile: path to a log file; falsy to log to the default stream.
        level: logging level (defaults to logging.INFO).
    """
    log_args = {'level': level,
                'format': '%(asctime)s %(message)s'}
    if logfile:
        log_args['filename'] = logfile
    logging.basicConfig(**log_args)
|
picklecai/OMOOC2py | _src/om2py3w/3wex0/notebooknetc.py | Python | mit | 645 | 0.006656 | # _*_coding:utf-8_*_
# 客户端程序
from socket import *
import time
from sys import exit
import notebooknets
def main():
BUF_SIZE = 65565
ss | _addr = ('127.0.0.1', 8800)
cs = socket(AF_INET, SOCK_DGRAM)
notebooknets.printhistory()
while True:
global data
data = raw_input('今日记录,请输入(输入quit退出程序):')
if data == 'quit':
exit(0)
el | se:
cs.sendto(data, ss_addr)
data, addr = cs.recvfrom(BUF_SIZE)
print "Data: ", data
notebooknets.save(data)
cs.close
if __name__ == '__main__':
main()
|
hycis/TensorGraph | tensorgraph/layers/cast.py | Python | apache-2.0 | 306 | 0 | import tensorflow as tf
from .template import BaseLayer


class ToFloat(BaseLayer):
    """Layer that casts its input tensor to float."""

    def _train_fprop(self, state_below):
        return tf.to_float(state_below, name='ToFloat')


class ToInt32(BaseLayer):
    """Layer that casts its input tensor to int32."""

    def _train_fprop(self, state_below):
        return tf.to_int32(state_below, name='ToInt32')
|
ssut/py-hanspell | tests.py | Python | mit | 2,729 | 0.002597 | # -*- coding: utf-8 -*-
import unittest
from hanspell import spell_checker
from hanspell.constants import CheckResult
from textwrap import dedent as trim
class SpellCheckerTests(unittest.TestCase):
    """End-to-end tests against the online spell-check service (needs network)."""

    def setUp(self):
        pass

    def test_basic_check(self):
        # Spacing and spelling corrections on a short sentence.
        result = spell_checker.check(u'안녕 하세요. 저는 한국인 입니다. 이문장은 한글로 작성됬습니다.')

        assert result.errors == 4
        assert result.checked == u'안녕하세요. 저는 한국인입니다. 이 문장은 한글로 작성됐습니다.'

    def test_words(self):
        # Each corrected word is tagged with the kind of correction applied.
        result = spell_checker.check(u'한아이가 장난깜을 갖고놀고있다. 그만하게 할가?')
        assert result.errors == 4

        items = result.words
        assert items[u'한'] == CheckResult.WRONG_SPACING
        assert items[u'아이가'] == CheckResult.WRONG_SPACING
        assert items[u'장난감을'] == CheckResult.STATISTICAL_CORRECTION
        assert items[u'갖고'] == CheckResult.WRONG_SPACING
        assert items[u'놀고'] == CheckResult.WRONG_SPACING
        assert items[u'있다.'] == CheckResult.WRONG_SPACING
        assert items[u'그만하게'] == CheckResult.PASSED
        assert items[u'할까?'] == CheckResult.WRONG_SPELLING

    def test_list(self):
        # Checking a list of sentences returns one result per sentence.
        results = spell_checker.check([u'안녕 하세요.', u'저는 한국인 입니다.'])
        assert results[0].checked == u'안녕하세요.'
        assert results[1].checked == u'저는 한국인입니다.'

    def test_long_paragraph(self):
        paragraph = trim("""
            ubit.info(유빗인포)는 코나미 리듬게임, 유비트의 플레이 데이터 관리 및 열람 서비스입니다. 등록 후에 자신과 친구의 기록을 p.eagate.573.jp에 접속할 필요 없이 본 웹 사이트에서 바로 확인할 수 있습니다.
            등록 후에는 "https://ubit.info/별칭"으로 자신의 개인 페이지가 생성되며 이 주소(별칭)를 아는 사람만 접속할 수 있습니다. 다른 친구에게 기록을 보여주고 싶다면 본인의 인포 주소를 알려주면 됩니다.
            이 사이트는 최신 브라우저 환경만을 제대로 지원합니다. 만약 크롬, 파이어폭스 등의 최신 브라우저 안정버전(stable)을 사용하고 있는데도 페이지 레이아웃이 깨지는 경우 사이트 관리자에게 문의해주세요.
            등록 과정은 간단합니다. 상단 메뉴에서 등록을 클릭한 후 양식에 맞게 입력하시면 자동으로 공개설정이 완료됨과 동시에 유빗인포 계정이 생성됩니다.
            """)
        # NOTE(review): no assertion here -- this only verifies that a long,
        # multi-line paragraph is accepted without raising. Consider pinning
        # expected output on `result` once known.
        result = spell_checker.check(paragraph)
if __name__ == '__main__':
unittest.main()
|
alxgu/ansible | lib/ansible/modules/network/netvisor/pn_port_cos_bw.py | Python | gpl-3.0 | 4,163 | 0.000961 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_port_cos_bw
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to modify port-cos-bw
description:
- This module can be used to update bw settings for CoS queues.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(update) to modify the port-cos-bw.
required: True
type: str
choices: ['update']
pn_max_bw_limit:
description:
- Maximum b/w in percentage.
required: False
type: str
pn_cos:
description:
- CoS priority.
required: False
type: str
pn_port:
description:
- physical port number.
required: False
type: str
pn_weight:
description:
- Scheduling weight (1 to 127) after b/w guarantee met.
required: False
type: str
choices: ['priority', 'no-priority']
pn_min_bw_guarantee:
description:
- Minimum b/w in precentage.
required: False
type: str
"""
EXAMPLES = """
- name: port cos bw modify
pn_port_cos_bw:
pn_cliswitch: "sw01"
state: "update"
pn_port: "1"
pn_cos: "0"
pn_min_bw_guarantee: "60"
- name: port cos bw modify
pn_port_cos_bw:
pn_cliswitch: "sw01"
state: "update"
pn_port: "all"
pn_cos: "0"
pn_weight: "priority"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the port-cos-bw command.
returned: always
type: list
stderr:
description: set of error responses from the port-cos-bw command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
def main():
    """Module entry point: parse arguments, build the nvOS CLI string and run it."""
    # Maps the module's 'state' choice onto the nvOS CLI command it executes.
    state_map = dict(
        update='port-cos-bw-modify'
    )

    module = AnsibleModule(
        argument_spec=dict(
            pn_cliswitch=dict(required=False, type='str'),
            state=dict(required=True, type='str',
                       choices=state_map.keys()),
            pn_max_bw_limit=dict(required=False, type='str'),
            pn_cos=dict(required=False, type='str'),
            pn_port=dict(required=False, type='str'),
            pn_weight=dict(required=False, type='str',
                           choices=['priority', 'no-priority']),
            pn_min_bw_guarantee=dict(required=False, type='str'),
        ),
        # 'update' needs both a CoS priority and a port to act on.
        required_if=(
            ['state', 'update', ['pn_cos', 'pn_port']],
        ),
        # At least one bandwidth setting must be given, otherwise the
        # modify command would be a no-op.
        required_one_of=[['pn_max_bw_limit', 'pn_min_bw_guarantee', 'pn_weight']],
    )

    # Accessing the arguments
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    max_bw_limit = module.params['pn_max_bw_limit']
    cos = module.params['pn_cos']
    port = module.params['pn_port']
    weight = module.params['pn_weight']
    min_bw_guarantee = module.params['pn_min_bw_guarantee']

    command = state_map[state]

    # Building the CLI command string: only options the user supplied are
    # appended, so the switch applies exactly what was requested.
    cli = pn_cli(module, cliswitch)
    if command == 'port-cos-bw-modify':
        cli += ' %s ' % command
        if max_bw_limit:
            cli += ' max-bw-limit ' + max_bw_limit
        if cos:
            cli += ' cos ' + cos
        if port:
            cli += ' port ' + port
        if weight:
            cli += ' weight ' + weight
        if min_bw_guarantee:
            cli += ' min-bw-guarantee ' + min_bw_guarantee

    run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
|
plotly/plotly.py | packages/python/plotly/plotly/validators/waterfall/_constraintext.py | Python | mit | 500 | 0.002 | import _plotly_utils.basevalidators
class ConstraintextValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the waterfall trace's `constraintext` enumerated property."""

    def __init__(self, plotly_name="constraintext", parent_name="waterfall", **kwargs):
        super(ConstraintextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override edit_type/values via kwargs; these are the
            # schema defaults for waterfall.constraintext.
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", ["inside", "outside", "both", "none"]),
            **kwargs
        )
|
ssarangi/numba | numba/tests/test_utils.py | Python | bsd-2-clause | 3,857 | 0.000519 | """
Tests for numba.utils.
"""
from __future__ import print_function, absolute_import
import threading
import time
from numba import utils
from numba import unittest_support as unittest
class C(object):
    """Comparable wrapper around a single value.

    Defines only ``==``, ``!=`` and ``>`` so that total_ordering has
    something to fill in.
    """

    def __init__(self, value):
        # Payload consulted by every comparison below.
        self.value = value

    def __eq__(self, other):
        return self.value == other.value

    def __ne__(self, other):
        return self.value != other.value

    def __gt__(self, other):
        return self.value > other.value
class D(C):
    # Empty subclass: used to verify that total_ordering also works on
    # classes that merely inherit their comparison methods.
    pass
class TestTotalOrdering(unittest.TestCase):
    """Tests for numba.utils.total_ordering and its helper predicate."""

    def test_is_inherited(self):
        # C and D define __eq__/__ne__/__gt__ themselves (directly or via a
        # base); __ge__/__le__/__lt__ come straight from object.
        f = utils._is_inherited_from_object
        for cls in (C, D):
            self.assertFalse(f(cls, '__eq__'))
            self.assertFalse(f(cls, '__gt__'))
            self.assertFalse(f(cls, '__ne__'))
            self.assertTrue(f(cls, '__ge__'))
            self.assertTrue(f(cls, '__le__'))
            self.assertTrue(f(cls, '__lt__'))

    def check_total_ordering(self, cls):
        # Duplicate the class-under-test, to avoid mutating the original
        cls = type(cls.__name__, cls.__bases__, dict(cls.__dict__))
        cls = utils.total_ordering(cls)
        # a and d hold equal values so ties are exercised as well.
        a, b, c, d = cls(10), cls(5), cls(15), cls(10)
        self.assertFalse(a < b)
        self.assertTrue(a < c)
        self.assertFalse(a < d)
        self.assertTrue(b < c)
        self.assertTrue(b < d)
        self.assertFalse(c < d)
        self.assertFalse(a <= b)
        self.assertTrue(a <= c)
        self.assertTrue(a <= d)
        self.assertTrue(b <= c)
        self.assertTrue(b <= d)
        self.assertFalse(c <= d)
        self.assertTrue(a > b)
        self.assertFalse(a > c)
        self.assertFalse(a > d)
        self.assertFalse(b > c)
        self.assertFalse(b > d)
        self.assertTrue(c > d)
        self.assertTrue(a >= b)
        self.assertFalse(a >= c)
        self.assertTrue(a >= d)
        self.assertFalse(b >= c)
        self.assertFalse(b >= d)
        self.assertTrue(c >= d)

    def test_total_ordering(self):
        self.check_total_ordering(C)

    def test_total_ordering_derived(self):
        # Same checks, but on a class that inherits its comparisons.
        self.check_total_ordering(D)
class TestNonReentrantLock(unittest.TestCase):
    """Tests for numba.utils.NonReentrantLock: a lock that raises on
    re-acquisition by the owner and on release by a non-owner."""

    def _lock(self):
        return utils.NonReentrantLock()

    def test_acquire_release(self):
        lock = self._lock()
        self.assertFalse(lock.is_owned())
        lock.acquire()
        self.assertTrue(lock.is_owned())
        lock.release()
        self.assertFalse(lock.is_owned())
        # Releasing an unheld lock must raise.
        with self.assertRaises(RuntimeError):
            lock.release()
        lock.acquire()
        self.assertTrue(lock.is_owned())
        # Re-acquiring while held must raise (non-reentrant) and must not
        # disturb ownership.
        with self.assertRaises(RuntimeError):
            lock.acquire()
        self.assertTrue(lock.is_owned())
        lock.release()
        # The lock stays usable after the error cases above.
        lock.acquire()
        lock.release()
        self.assertFalse(lock.is_owned())

    def test_multithreaded(self):
        lock = self._lock()
        errors = []

        def do_things():
            for i in range(5):
                # is_owned() is per-thread: another thread holding the lock
                # must not make it appear owned here.
                self.assertFalse(lock.is_owned())
                lock.acquire()
                time.sleep(1e-4)
                self.assertTrue(lock.is_owned())
                lock.release()

        def wrapper():
            # Collect assertion failures; they can't propagate across threads.
            try:
                do_things()
            except BaseException as e:
                errors.append(e)

        threads = [threading.Thread(target=wrapper) for i in range(40)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        self.assertFalse(errors)
        self.assertFalse(lock.is_owned())

    def test_with(self):
        # Context-manager protocol: owned inside the block, released after.
        lock = self._lock()
        with lock:
            self.assertTrue(lock.is_owned())
            self.assertRaises(RuntimeError, lock.acquire)
        self.assertFalse(lock.is_owned())
        self.assertRaises(RuntimeError, lock.release)
|
ProfessorKazarinoff/staticsite | content/code/statistics/box_plot.py | Python | gpl-3.0 | 931 | 0.120301 | from statistics import mean, median, mode, stdev
import matplotlib.pyplot as plt
from statistics import StatisticsError

plt.style.use('seaborn')

# Exam scores for four sections of ENGR101.
data1 = [83,83,93,91,85,64,79,97,95,67,83,90,92,73,82,85,96,84,82,71,86,68,66,95,87,81,77,81,97]
data2 = [60,79,89,97,68,82,67,59,77,87,99,102,73,78,91,89,84,81,78,90,92,97,82]
data3 = [85,94,100,100,47,78,100,92,49,86,90,100,84,89,82,100,100,100,96,82,65,92,96,85,76,100,90,100]
data4 = [79,100,90,82,76,90,86,88,86,93,99,92,84,77,100,100,96,93,91,86,74,74,100,93,69,89,93,100]
data = data1 + data2 + data3 + data4

data_mean = mean(data)
data_median = median(data)
try:
    data_mode = mode(data)
except StatisticsError:
    # Before Python 3.8, mode() raises when there is no single most
    # common value; report that case as 'None' instead of crashing.
    data_mode = 'None'
data_stdev = stdev(data)

print('mean: %4.1f' %data_mean)
print('median: %4.1f' %data_median)
print('mode: %s' %data_mode)
print('std dev: %4.1f ' %data_stdev)

plt.hist(data)
plt.xlabel('Grade Range')
plt.ylabel('Number of Students')
plt.title('Histogram of ENGR101 Exam Grades')  # typo "Historgram" fixed
plt.show()
|
ilastik/ilastik-0.5 | ilastik/core/testThread.py | Python | bsd-2-clause | 6,464 | 0.01052 | from PyQt4 import QtCore
import numpy
from ilastik.core import dataImpex
import shlex
from ilastik.core.listOfNDArraysAsNDArray import ListOfNDArraysAsNDArray
from ilastik.core.overlays.selectionOverlay import SelectionAccessor
| from subprocess import Popen, PIPE
import h5py
# this is the core replacement of the guiThread used to test module functionality
#*******************************************************************************
# T e s t T h r e a d *
#*******************************************************************************
import ilastik.core.jobM | achine
def setUp():
    # Module-level fixture (not a TestCase method): lazily create the global
    # worker-manager singleton used by the ilastik job machine.
    if not ilastik.core.jobMachine.GLOBAL_WM:
        ilastik.core.jobMachine.GLOBAL_WM = ilastik.core.jobMachine.WorkerManager()
def tearDown():
    # Stop all workers and drop the singleton so the next test module
    # starts with a fresh worker manager.
    ilastik.core.jobMachine.GLOBAL_WM.stopWorkers()
    del ilastik.core.jobMachine.GLOBAL_WM
    ilastik.core.jobMachine.GLOBAL_WM = None
class TestThread(QtCore.QObject):#QtCore.QThread):
    # Headless replacement for the GUI thread: runs a module's computation,
    # polls it with a QTimer, then compares the result overlays against
    # ground-truth files and emits "done()".

    def __init__(self, baseMgr, listOfResultOverlays, listOfFilenames, tolerance = 0):
        # NOTE(review): __pyqtSignals__ is bound as a *local* here, so it never
        # becomes a class attribute -- probably intended at class scope.
        __pyqtSignals__ = ( "done()")
        #QtCore.QThread.__init__(self, parent)
        QtCore.QObject.__init__(self)
        self.baseMgr = baseMgr
        self.listOfResultOverlays = listOfResultOverlays
        self.listOfFilenames = listOfFilenames
        self.tolerance = tolerance
        # Set by finalize(); True when all overlays matched the ground truth.
        self.passedTest = False

    def start(self, input):
        # Kick off the computation and poll it every 200 ms.
        self.timer = QtCore.QTimer()
        QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout()"), self.updateProgress)
        # call core function
        self.myTestThread = self.baseMgr.computeResults(input)
        self.timer.start(200)

    def updateProgress(self):
        # Once the worker thread finishes, stop polling and finalize.
        if not self.myTestThread.isRunning():
            self.timer.stop()
            self.myTestThread.wait()
            self.finalize()

    def finalize(self):
        # call core function
        self.baseMgr.finalizeResults()
        # compare obtained results with ground truth results
        self.passedTest = TestHelperFunctions.compareResultsWithFile(self.baseMgr, self.listOfResultOverlays, self.listOfFilenames, self.tolerance)
        # announce that we are done
        self.emit(QtCore.SIGNAL("done()"))
'''
# in case you want to create ground truth overlays, use the following code instead of the above
for i in range(len(self.listOfResultOverlays)):
obtained = self.baseMgr.dataMgr[self.baseMgr.dataMgr._activeImageNumber].overlayMgr["Unsupervised/pLSA component %d" % (i+1)]
dataImpex.DataImpex.exportOverlay(self.listOfFilenames[i], "h5", obtained)
'''
#*******************************************************************************
# T e s t H e l p e r F u n c t i o n s *
#*******************************************************************************
class TestHelperFunctions():
    """Static helpers for comparing computed overlays/HDF5 files against
    ground-truth data (Python 2 code)."""

    @staticmethod
    def compareResultsWithFile(baseMgr, listOfResultOverlays, listOfFilenames, tolerance = 0):
        # Import each ground-truth file under a "Ground_Truth/" prefix and
        # compare it element-wise against the matching computed overlay.
        equalOverlays = True
        for i in range(len(listOfResultOverlays)):
            obtained = baseMgr.dataMgr[baseMgr.dataMgr._activeImageNumber].overlayMgr[listOfResultOverlays[i]]
            prefix = "Ground_Truth/"
            dataImpex.DataImpex.importOverlay(baseMgr.dataMgr[baseMgr.dataMgr._activeImageNumber], listOfFilenames[i], prefix)
            groundTruth = baseMgr.dataMgr[baseMgr.dataMgr._activeImageNumber].overlayMgr[prefix + listOfResultOverlays[i]]
            equalOverlays = equalOverlays & TestHelperFunctions.compareOverlayData(obtained, groundTruth, tolerance)
        print "all ", str(len(listOfResultOverlays)), " compared overlays are equal: ", equalOverlays
        return equalOverlays

    @staticmethod
    # we only compare the data of the overlay, since we want to avoid dependence on color tables etc.
    def compareOverlayData(overlay1, overlay2, tolerance = 0):
        # overlay1._data._data can be a listOfNDArraysAsNDArray instance, overlay2._data._data is loaded from file, so it should be an NDArray
        if isinstance(overlay1._data._data, ListOfNDArraysAsNDArray):
            datatemp1 = overlay1._data._data.ndarrays
        elif isinstance(overlay1._data._data, SelectionAccessor):
            datatemp1 = overlay1._data._data[:]
        else:
            datatemp1 = overlay1._data._data
        datatemp2 = overlay2._data._data
        # Element-wise comparison with an absolute tolerance.
        if numpy.all(numpy.abs(datatemp1 - datatemp2) <= tolerance):
            return True
        else:
            return False

    @staticmethod
    def arrayEqual(a,b):
        # Strict equality check that, on mismatch, prints every differing
        # voxel of a 3D volume before returning False.
        assert a.shape == b.shape
        assert a.dtype == b.dtype
        if not numpy.array_equal(a,b):
            assert len(a.shape) == 3
            for x in range(a.shape[0]):
                for y in range(a.shape[1]):
                    for z in range(a.shape[2]):
                        if a[x,y,z] != b[x,y,z]:
                            print x,y,z, "a=", a[x,y,z], "b=", b[x,y,z]
            return False
        return True

    @staticmethod
    def compareH5Files(file1, file2):
        # Compare two HDF5 files by delegating to the external h5diff tool;
        # returns True when h5diff exits with status 0.
        print "files to compare: ", file1, file2
        #have to spawn a subprocess, because h5diff has no wrapper in python
        cl = "h5diff -cv '" + file1 + "' '" + file2 + "'"
        args = shlex.split(cl)
        print args
        '''
        cl_header1 = "h5dump --header " + file1
        args_header1 = shlex.split(cl_header1)
        cl_header2 = "h5dump --header " + file2
        args_header2 = shlex.split(cl_header2)
        try:
            p1 = Popen(args_header1, stdout=PIPE, stderr=PIPE)
            out1, err1 = p1.communicate()
            p2 = Popen(args_header2, stdout=PIPE, stderr=PIPE)
            out2, err2 = p2.communicate()
            if out1 != out2:
                print "different header dumps"
                print out1
                print ""
                print out2
        except Exception, e:
            print e
            return False
        #print args
        '''
        try:
            p = Popen(args, stdout=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate()
            if p.returncode >0:
                print stdout
                print stderr
                return False
            else :
                return True
        except Exception, e:
            print e
            return False
        return True
|
RagingRoosevelt/BackupMediaSyncer | _sync.py | Python | mit | 3,135 | 0.010526 | from os import remove, mkdir, listdir, rmdir
from os.path import join, expanduser, isdir
from os.path import split as splitdir
import codecs
from shutil import copy2
# Work from a "_diff.txt" manifest on the user's Desktop; its first two lines
# name the original and backup directories, the rest are [ADD]/[DEL] entries.
indir = join(expanduser("~"),"Desktop")
orgdir = ""
bakdir = ""

with codecs.open(join(indir,"_diff.txt"), 'r', encoding='utf8') as diff:
    # Read first line. Should contain original directory
    line = diff.readline()
    try:
        line = line.replace("\n","").split("**")
        if line[0] == "[ORG_DIR]":
            orgdir = line[1]
    except IndexError:
        # Line did not have the expected "[TAG]**path" shape.
        print("error: Bad logfile")
        quit()

    # Read second line. Should contain backup directory
    line = diff.readline()
    try:
        line = line.replace("\n","").split("**")
        if line[0] == "[BAK_DIR]":
            bakdir = line[1]
    except IndexError:
        print("error: Bad logfile")
        quit()

    # If either of the directories weren't read in, then quit
    print("orig: %s, bak: %s" % (orgdir, bakdir))
    if orgdir == "" or bakdir == "":
        print("error: Bad logfile")
        quit()

    with codecs.open(join(indir,"_log.txt"), 'w', encoding='utf8') as log:
        log.write("Original directory: " + orgdir + "\n")
        log.write("Backup directory : " + bakdir + "\n\n")
        for line in diff:
            if line.startswith("[ADD]"):
                # Copy a new/changed file from original into the backup tree.
                line = line.replace("\n","").split("**")
                src = join(orgdir,line[1])
                dst = join(bakdir,line[1])
                if not isdir(splitdir(dst)[0]):
                    print("Directory \'" + splitdir(dst)[0] + "\' does not exist. Creating directory.")
                    log.write("Directory \'" + splitdir(dst)[0] + "\' does not exist. Creating directory.\n")
                    mkdir(splitdir(dst)[0])
                try:
                    print("Copying " + src + " to " + dst + "")
                    log.write("Copying " + src + " to " + dst + "\n")
                    copy2(src, dst)
                except OSError:
                    # File-system errors only; log and keep syncing.
                    print("error: %s not copied" % join(orgdir,line[1]))
                    log.write("error: " + join(orgdir,line[1]) + " not copied\n")
            elif line.startswith("[DEL]"):
                # Delete a removed file from the backup tree, pruning the
                # containing directory if it becomes empty.
                line = line.replace("\n","").split("**")
                dst = join(bakdir,line[1])
                try:
                    print("Deleting " + dst + "")
                    log.write("Deleting " + dst + "\n")
                    remove(dst)
                    if listdir(splitdir(dst)[0]) == []:
                        print("Directory " + splitdir(dst)[0] + "is empty, removing")
                        log.write("Directory " + splitdir(dst)[0] + "is empty, removing\n")
                        rmdir(splitdir(dst)[0])
                except OSError:
                    print("error: %s not removed" % join(orgdir,line[1]))
                    log.write("error: " + join(orgdir,line[1]) + " not removed\n")
            elif line.startswith("====Removed files===="):
                print("\n\n")
                log.write("\n\n")
|
rackerlabs/deuce | deuce/tests/db_mocking/__init__.py | Python | apache-2.0 | 46 | 0 | import deuce.tests.db_mocking | .mongodb_mockin | g
|
IvayloTsankov/carty | src/carty.py | Python | gpl-2.0 | 649 | 0 | import remote_controller
import blu | etooth
class Carty:
    """Main application object: wires the remote controller to the
    bluetooth link and runs the receive loop."""

    def __init__(self):
        # The controller supplies the event callbacks; bluetooth drives the
        # link and forwards open/message/close/error events to it.
        self.controller = remote_controller.RemoteController()
        self.bluetooth = bluetooth.Bluetooth(self.controller.on_open,
                                             self.controller.on_message,
                                             self.controller.on_close,
                                             self.controller.on_error)

    def start(self):
        # Connect, then block polling for incoming data.
        # (recieve_blocking is the Bluetooth API's spelling.)
        self.bluetooth.connect()
        self.bluetooth.recieve_blocking(1)
if __name__ == "__main__":
carty = Carty()
carty.start()
|
jeffre/docker-rdr | bootstrap/env_to_xml.py | Python | gpl-2.0 | 5,303 | 0.004903 | #!/usr/bin/python
import os
from AhsayTools import XmlConf, env_true, env_false
# Map Enviroment Variables to server.xml > Xpath > attibute
server_conf = XmlConf("/rdr/conf/server.xml")
server_conf.set_attrib("Service/Connector", "port", "RDR_HTTP_PORT")
server_conf.set_attrib("Service/Connector[@scheme='https']", "port",
"RDR_HTTPS_PORT")
server_conf.set_attrib("Service/Connector[@scheme='https']", "keystoreFile",
"RDR_KEYSTORE_FILE")
server_conf.set_attrib("Service/Connector[@scheme='https']", "keystorePass",
"RDR_KEYSTORE_PASSWORD")
server_conf.set_attrib("Service/Connector[@scheme='https']",
"sslEnabledProtocols", "RDR_PROTOCOLS")
# If true, tomcat will trust the X-Forwarded-For header from IPs that match
# proxies
if "RDR_PROXIED" in os.environ:
parent_xpath = "Service/Engine/Host"
elem = "Valve"
attr_key = "className"
attr_val = "org.apache.catalina.valves.RemoteIpValve"
xpath = '%s/%s[@%s="%s"]' % (parent_xpath, elem, attr_key, attr_val)
internal_proxies = ("172.1[6-9]\.\d+\.\d+, "
"172.2[0-9]\.\d+\.\d+, "
"172.3[0-1]\.\d+\.\d+")
if env_true("RDR_PROXIED"):
if server_conf.find(xpath) is None:
server_conf.subelement(parent_xpath, elem, {attr_key: attr_val,
"internalProxies": internal_proxies})
if env_false("RDR_PROXIED"):
if server_conf.find(xpath) is not None:
server_conf.remove(parent_xpath, elem, {attr_key: attr_val})
server_conf.write()
# Map Enviroment Variables to obs.xml > Xpath > attibute
rdr_conf = XmlConf("/rdr/conf/rdr.xml")
rdr_conf.set_attrib(
"Key[@name='com.ahsay.rdr.core.key.rdr.Rdr']/Value[@name='rsv-login-name']",
"data", "RDR_USERNAME")
rdr_conf.set_attrib(
"Key[@name='com.ahsay.rdr.core.key.rdr.Rdr']/Value[@name='rsv-password']",
"data", "RDR_PASSWORD")
rdr_conf.set_attrib(
"Key[@name='com.ahsay.rdr.core.key.rdr.Rdr']/Value[@name='rsv-hashed-password']",
"data", "RDR_PASSWORD_HASHED")
rdr_conf.set_attrib(
"Key[@name='com.ahsay.rdr.core.key.rdr.Rdr']/Key[@name='com.ahsay.rdr.core.key.rdr.License']/Value[@name='rsv-licensee-name']", "data", "RDR_LICENSE_NAME")
rdr_conf.set_attrib(
"Key[@name='com.ahsay.rdr.core.key.rdr.Rdr']/Key[@name='com.ahsay.rdr.core.key.rdr.License']/Value[@name='rsv-license-key']",
"data", "RDR_LICENSE_KEY")
rdr_conf.write()
# Enforce SSL on /obs/jsp/user/* and /obs/jsp/system/*
if "RDR_ENFORCE_SSL" in os.environ:
namespace = "http://java.sun.com/xml/ns/j2ee"
elem = "security-constraint"
xpath = '{%s}%s' % (namespace, elem)
rdr_web = XmlConf("/rdr/webapps/rdr/WEB-INF/web.xml", {'': namespace})
if env_true("RDR_ENFORCE_SSL"):
if rdr_web.find(xpath) is None:
rdr_web.subelement('.', elem)
rdr_web.subelement(elem, 'web-resource-collection')
web_resource_name = rdr_web.subelement(
'%s/web-resource-collection' % elem,
'web-resource-name')
web_resource_name.text = 'Automatic SSL Forwarding'
url_pattern = rdr_web.subelement(
'%s/web-resource-collection' % elem,
'url-pattern')
url_pattern.text = '/jsp/user/*'
url_pattern2 = rdr_web.subelement(
'%s/web-resource-collection' % elem,
'url-pattern')
url_pattern2.text = '/jsp/system/*'
user_data_constraint = rdr_web.subelement(
'%s' % elem,
'user-data-constraint')
transport_guarantee = rdr_web.subelement(
'%s/user-data-constraint' % elem,
'transport-guarantee')
transport_guarantee.text = 'CONFIDENTIAL | '
if env_false("RDR_ENFORCE_SSL"):
if rdr_web.find(xpath) is not None:
rdr_web.remove('.', xpath)
rdr_web.write()
# Enforce SSL on /obs/jsp/user/* and /obs/jsp/system/*
if "ROOT_ENF | ORCE_SSL" in os.environ:
namespace = "http://java.sun.com/xml/ns/j2ee"
elem = "security-constraint"
xpath = '%s' % (elem)
root_web = XmlConf("/rdr/webapps/ROOT/WEB-INF/web.xml")
if env_true("ROOT_ENFORCE_SSL"):
if root_web.find(xpath) is None:
root_web.subelement('.', elem)
root_web.subelement(elem, 'web-resource-collection')
web_resource_name = root_web.subelement(
'%s/web-resource-collection' % elem,
'web-resource-name')
web_resource_name.text = 'Automatic SSL Forwarding'
url_pattern = root_web.subelement(
'%s/web-resource-collection' % elem,
'url-pattern')
url_pattern.text = '/*'
user_data_constraint = root_web.subelement(
'%s' % elem,
'user-data-constraint')
transport_guarantee = root_web.subelement(
'%s/user-data-constraint' % elem,
'transport-guarantee')
transport_guarantee.text = 'CONFIDENTIAL'
if env_false("ROOT_ENFORCE_SSL"):
if root_web.find(xpath) is not None:
root_web.remove('.', xpath)
root_web.write()
|
walteryang47/ovirt-engine | packaging/setup/plugins/ovirt-engine-common/vmconsole_proxy_helper/__init__.py | Python | apache-2.0 | 841 | 0 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in complianc | e with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express | or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""ovirt-host-setup vmconsole_proxy plugin."""
from otopi import util
from . import core
@util.export
def createPlugins(context):
    """otopi plugin-loading entry point for this package."""
    # Instantiating the plugin registers it with the otopi context.
    core.Plugin(context=context)
# vim: expandtab tabstop=4 shiftwidth=4
|
plipp/Python-Coding-Dojos | katas/2X-Racing-Car-Katas/TurnTicketDispenser/test_turn_ticket.py | Python | mit | 264 | 0.015152 | import unittest
| from turn_ticket import TicketDispenser
class TicketDispenserTest(unittest.TestCase):
    """Kata scaffold for TicketDispenser."""

    def test_do_something(self):
        # NOTE(review): smoke test only -- it calls getTurnTicket() but
        # asserts nothing about the returned ticket yet.
        dispenser = TicketDispenser()
        ticket = dispenser.getTurnTicket()
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
ikoz/mitmproxy | mitmproxy/models/http.py | Python | mit | 14,861 | 0.001144 | from __future__ import (absolute_import, print_function, division)
import Cookie
import copy
import warnings
from email.utils import parsedate_tz, formatdate, mktime_tz
import time
from netlib import encoding
from netlib.http import status_codes, Headers, Request, Response, decoded
from netlib.tcp import Address
from .. import utils
from .. import version
from .flow import Flow
class MessageMixin(object):
    # Shared behaviour for HTTP request/response objects: content decoding,
    # copying, and regex search/replace over headers and body.

    def get_decoded_content(self):
        """
        Returns the decoded content based on the current Content-Encoding
        header.
        Doesn't change the message itself or its headers.
        """
        ce = self.headers.get("content-encoding")
        # Empty bodies and unknown/missing encodings are returned untouched.
        if not self.content or ce not in encoding.ENCODINGS:
            return self.content
        return encoding.decode(ce, self.content)

    def copy(self):
        # Shallow-copy the message, but give the copy its own data/headers
        # so later header edits don't leak back into the original.
        c = copy.copy(self)
        if hasattr(self, "data"):  # FIXME remove condition
            c.data = copy.copy(self.data)
        c.headers = self.headers.copy()
        return c

    def replace(self, pattern, repl, *args, **kwargs):
        """
        Replaces a regular expression pattern with repl in both the headers
        and the body of the message. Encoded body will be decoded
        before replacement, and re-encoded afterwards.
        Returns the number of replacements made.
        """
        count = 0
        if self.content:
            # decoded() un-encodes the body for the substitution and
            # re-encodes it on exit.
            with decoded(self):
                self.content, count = utils.safe_subn(
                    pattern, repl, self.content, *args, **kwargs
                )
        fields = []
        for name, value in self.headers.fields:
            # Substitute in header names as well as values, counting both.
            name, c = utils.safe_subn(pattern, repl, name, *args, **kwargs)
            count += c
            value, c = utils.safe_subn(pattern, repl, value, *args, **kwargs)
            count += c
            fields.append([name, value])
        self.headers.fields = fields
        return count
class HTTPRequest(MessageMixin, Request):
    """
    An HTTP request.
    Exposes the following attributes:
        method: HTTP method
        scheme: URL scheme (http/https)
        host: Target hostname of the request. This is not necessarily the
        direct upstream server (which could be another proxy), but it's always
        the target server we want to reach at the end. This attribute is either
        inferred from the request itself (absolute-form, authority-form) or from
        the connection metadata (e.g. the host in reverse proxy mode).
        port: Destination port
        path: Path portion of the URL (not present in authority-form)
        http_version: HTTP version, e.g. "HTTP/1.1"
        headers: Headers object
        content: Content of the request, None, or CONTENT_MISSING if there
        is content associated, but not present. CONTENT_MISSING evaluates
        to False to make checking for the presence of content natural.
        form_in: The request form which mitmproxy has received. The following
        values are possible:
             - relative (GET /index.html, OPTIONS *) (covers origin form and
               asterisk form)
             - absolute (GET http://example.com:80/index.html)
             - authority-form (CONNECT example.com:443)
             Details: http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-25#section-5.3
        form_out: The request form which mitmproxy will send out to the
        destination
        timestamp_start: Timestamp indicating when request transmission started
        timestamp_end: Timestamp indicating when request transmission ended
    """

    def __init__(
            self,
            first_line_format,
            method,
            scheme,
            host,
            port,
            path,
            http_version,
            headers,
            content,
            timestamp_start=None,
            timestamp_end=None,
            form_out=None,
            is_replay=False,
            stickycookie=False,
            stickyauth=False,
    ):
        Request.__init__(
            self,
            first_line_format,
            method,
            scheme,
            host,
            port,
            path,
            http_version,
            headers,
            content,
            timestamp_start,
            timestamp_end,
        )
        self.form_out = form_out or first_line_format  # FIXME remove
        # Have this request's cookies been modified by sticky cookies or auth?
        self.stickycookie = stickycookie
        self.stickyauth = stickyauth
        # Is this request replayed?
        self.is_replay = is_replay

    def get_state(self):
        # Extend the base serialized state with mitmproxy-specific flags.
        state = super(HTTPRequest, self).get_state()
        state.update(
            stickycookie = self.stickycookie,
            stickyauth = self.stickyauth,
            is_replay = self.is_replay,
        )
        return state

    def set_state(self, state):
        # Pop our flags before handing the remaining state to the base class.
        self.stickycookie = state.pop("stickycookie")
        self.stickyauth = state.pop("stickyauth")
        self.is_replay = state.pop("is_replay")
        super(HTTPRequest, self).set_state(state)

    @classmethod
    def wrap(self, request):
        """Wrap a netlib Request in an HTTPRequest, copying its fields."""
        req = HTTPRequest(
            first_line_format=request.form_in,
            method=request.method,
            scheme=request.scheme,
            host=request.host,
            port=request.port,
            path=request.path,
            http_version=request.http_version,
            headers=request.headers,
            content=request.content,
            timestamp_start=request.timestamp_start,
            timestamp_end=request.timestamp_end,
            form_out=(request.form_out if hasattr(request, 'form_out') else None),
        )
        return req

    @property
    def form_out(self):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn(".form_out is deprecated, use .first_line_format instead.", DeprecationWarning)
        return self.first_line_format

    @form_out.setter
    def form_out(self, value):
        warnings.warn(".form_out is deprecated, use .first_line_format instead.", DeprecationWarning)
        self.first_line_format = value

    def __hash__(self):
        # Identity hash: each request object is its own dict/set key.
        return id(self)

    def set_auth(self, auth):
        # Replace any existing Proxy-Authorization header with `auth`.
        self.data.headers.set_all("Proxy-Authorization", (auth,))

    def replace(self, pattern, repl, *args, **kwargs):
        """
        Replaces a regular expression pattern with repl in the headers, the
        request path and the body of the request. Encoded content will be
        decoded before replacement, and re-encoded afterwards.
        Returns the number of replacements made.
        """
        c = MessageMixin.replace(self, pattern, repl, *args, **kwargs)
        self.path, pc = utils.safe_subn(
            pattern, repl, self.path, *args, **kwargs
        )
        c += pc
        return c
class HTTPResponse(MessageMixin, Response):
"""
An HTTP response.
Exposes the following attributes:
http_version: HTTP version, e.g. "HTTP/1.1"
status_code: HTTP response status code
msg: HTTP response message
headers: Headers object
content: Content of the request, None, or CONTENT_MISSING if there
is content associated, but not present. CONTENT_MISSING evaluates
to False to make checking for the presence of content natural.
timestamp_start: Timestamp indicating when request transmission started
timestamp_end: Timestamp indicating when request transmission ended
"""
def __init__(
self,
http_version,
status_code,
reason,
headers,
content,
timestamp_start=None,
timestamp_end=None,
is_replay = False
):
Response.__init__(
self,
http_version,
status_code,
reason,
headers,
content,
timestamp_start=timestamp_start,
timestamp_end=timestamp_end,
)
# Is this request replayed?
self.is_replay = is_replay
self.stream = False
@classmethod
def wrap(self, response): |
south-coast-science/scs_mfr | src/scs_mfr/eeprom_read.py | Python | mit | 2,020 | 0.002475 | #!/usr/bin/env python3
"""
Created on 26 Sep 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
DESCRIPTION
The eeprom_read utility presents a formatted copy of the South Coast Science digital front-end (DFE) board's EEPROM
contents to stdout.
The EEPROM contains information on vendor, product ID and a universally unique ID (UUID) code, as specified by either
the Raspberry Pi HAT or BeagleBone cape standards.
SYNOPSIS
eeprom_read.py
EXAMPLES
./eeprom_read.py
SEE ALSO
scs_mfr/dfe_id
scs_mfr/eeprom_write
RESOURCES
https://github.com/raspberrypi/hats
https://github.com/picoflamingo/BBCape_EEPROM
https://lb.raspberrypi.org/forums/viewtopic.php?t=108134
https://github.com/raspberrypi/hats/tree/master/eepromutils
https://www.raspberrypi.org/documentation/configuration/device-tree.md
https://github.com/jbdatko/eeprom_tutorial
http://azkeller.com/blog/?p=62
http://papermint-designs.com/community/node/331
https://learn.adafruit.com/introduction-to-the-beaglebone-black-device-tree/compiling-an-overlay
"""
from scs_dfe.interface.component.cat24c32 import CAT24C32
from scs_host.bus.i2c import I2C
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
# Executed at import time: make the EEPROM reachable over I2C on this host.
Host.enable_eeprom_access()


# --------------------------------------------------------------------------------------------------------------------

if __name__ == '__main__':

    try:
        I2C.EEPROM.open()

        # ------------------------------------------------------------------------------------------------------------
        # resources...

        eeprom = CAT24C32()

        # ------------------------------------------------------------------------------------------------------------
        # run...

        # Dump the EEPROM image to stdout, 32 bytes per row.
        eeprom.image.formatted(32)

    # ----------------------------------------------------------------------------------------------------------------
    # end...

    finally:
        # Always release the I2C bus, even if the dump fails.
        I2C.EEPROM.close()
|
barentsen/reproject | reproject/spherical_intersect/tests/test_high_level.py | Python | bsd-3-clause | 1,908 | 0.003145 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs import WCS
from numpy.testing import assert_allclose
from ..high_level import reproject_exact
class TestReprojectExact(object):
    """Smoke tests for reproject_exact with header/WCS input combinations."""

    def setup_class(self):
        # Galactic input header, equatorial output header from test data.
        self.header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/gc_ga.hdr'))
        self.header_out = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/gc_eq.hdr'))
        # Give the output header an explicit 600x550 pixel grid.
        self.header_out['NAXIS'] = 2
        self.header_out['NAXIS1'] = 600
        self.header_out['NAXIS2'] = 550
        self.array_in = np.ones((100, 100))
        self.wcs_in = WCS(self.header_in)
        self.wcs_out = WCS(self.header_out)

    def test_array_wcs(self):
        reproject_exact((self.array_in, self.wcs_in), self.wcs_out, shape_out=(200, 200))

    def test_array_header(self):
        reproject_exact((self.array_in, self.header_in), self.header_out)

    def test_parallel_option(self):
        reproject_exact((self.array_in, self.header_in), self.header_out, parallel=1)
        # A non-positive worker count must be rejected.
        with pytest.raises(ValueError) as exc:
            reproject_exact((self.array_in, self.header_in), self.header_out, parallel=-1)
        assert exc.value.args[0] == "The number of processors to use must be strictly positive"
def test_identity():
    # Reproject an array and WCS to itself
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = 'RA---TAN', 'DEC--TAN'
    wcs.wcs.crpix = 322, 151
    wcs.wcs.crval = 43, 23
    wcs.wcs.cdelt = -0.1, 0.1
    wcs.wcs.equinox = 2000.
    # Fixed seed keeps the test deterministic.
    np.random.seed(1249)
    array_in = np.random.random((423, 344))
    array_out, footprint = reproject_exact((array_in, wcs), wcs,
                                           shape_out=array_in.shape)
    # An identity reprojection must return the input up to numerical noise.
    assert_allclose(array_out, array_in, atol=1e-10)
|
SimonWang2014/DockerConsoleApp | libs/stormed-amqp/examples/tutorial6/rpc_server.py | Python | apache-2.0 | 972 | 0.004115 | #!/usr/bin/env python
import logging
from tornado.ioloop import IOLoop
from stormed import Connection, Message
def fib(n):
    """Return the n-th Fibonacci number using the naive recursive definition."""
    if n in (0, 1):
        # Base cases: fib(0) == 0 and fib(1) == 1, i.e. the value is n itself.
        return n
    return fib(n - 1) + fib(n - 2)
def on_connect():
    """Set up the channel once the AMQP connection is established."""
    global ch
    ch = conn.channel()
    # Durable queue survives broker restarts; prefetch 1 gives fair dispatch
    # so a busy worker doesn't accumulate requests.
    ch.queue_declare(queue='rpc_queue', durable=True)
    ch.qos(prefetch_count=1)
    ch.consume('rpc_queue', on_request)
def on_request(msg):
n = int(msg.body)
print " [.] fib(%s)" % n
response = str(fib(n))
response_msg = Message(response, delivery_mode=2,
correlation_id=msg.correlation_id)
ch.publish(response_msg, ex | change='', routing_key=msg.reply_to)
msg.ack()
logging.basicConfig()

# Channel is assigned by on_connect once the broker connection is up.
ch = None
conn = Connection(host='localhost')
conn.connect(on_connect)
io_loop = IOLoop.instance()
print ' [*] Waiting for messages. To exit press CTRL+C'
try:
    io_loop.start()
except KeyboardInterrupt:
    # Close the AMQP connection cleanly, then stop the IO loop.
    conn.close(io_loop.stop)
|
Opentrons/labware | shared-data/python/setup.py | Python | apache-2.0 | 5,215 | 0.000384 | import os
import sys
from setuptools.command import build_py, sdist
from setuptools import setup, find_packages
HERE = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(HERE, '..', '..', 'scripts'))
from python_build_utils import normalize_version
# make stdout blocking since Travis sets it to nonblocking
if os.name == 'posix':
    import fcntl
    flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL)
    fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)

# Source root of the shared-data trees, relative to this setup.py.
DATA_ROOT = '..'
# Subdirectories of DATA_ROOT whose files are bundled into the package.
DATA_SUBDIRS = ['deck',
                'labware',
                'module',
                'pipette',
                'protocol']
# Destination directory (inside the built package) for the data files.
DEST_BASE_PATH = 'data'
def get_shared_data_files():
    """Collect the path, relative to DATA_ROOT, of every file found under
    the shared-data subdirectories listed in DATA_SUBDIRS."""
    collected = []
    for data_dir in DATA_SUBDIRS:
        walk_root = os.path.join(DATA_ROOT, data_dir)
        for current_dir, _subdirs, file_names in os.walk(walk_root):
            rel_dir = os.path.relpath(current_dir, DATA_ROOT)
            for file_name in file_names:
                collected.append(os.path.join(rel_dir, file_name))
    return collected
class SDistWithData(sdist.sdist):
    """sdist command that also bundles the out-of-tree shared data files."""

    description = sdist.sdist.description + " Also, include data files."

    def make_release_tree(self, base_dir, files):
        """Copy shared-data files and the project package.json into the sdist tree."""
        self.announce("adding data files to base dir {}".format(base_dir))
        # Destination root inside the sdist; constant across the loop.
        sdist_dest = os.path.join(base_dir, DEST_BASE_PATH)
        for rel_path in get_shared_data_files():
            self.mkpath(os.path.join(sdist_dest, 'opentrons_shared_data',
                                     os.path.dirname(rel_path)))
            self.copy_file(os.path.join(DATA_ROOT, rel_path),
                           os.path.join(sdist_dest, rel_path))
        # The repo-level package.json rides along at the sdist root.
        self.copy_file(os.path.join(HERE, '..', 'package.json'),
                       os.path.join(base_dir, 'package.json'))
        super().make_release_tree(base_dir, files)
class BuildWithData(build_py.build_py):
    description = build_py.build_py.description\
                  + " Also, include opentrons data files"

    def _get_data_files(self):
        """
        Override of build_py.get_data_files that includes out of tree configs.
        These are currently hardcoded to include selected folders in
        ../shared-data/, which will move to opentrons/config/shared-data
        """
        files = super()._get_data_files()
        # We don’t really want to duplicate logic used in the original
        # implementation, but we can back out what it did with commonpath -
        # should be something ending in opentrons_shared_data
        # NOTE(review): assumes every entry from the parent implementation
        # shares the package build dir — confirm if the package layout changes.
        build_base = os.path.commonpath([f[2] for f in files])
        # We want a list of paths to only files relative to ../shared-data
        to_include = get_shared_data_files()
        destination = os.path.join(build_base, DEST_BASE_PATH)
        # And finally, tell the system about our files, including package.json
        # Each tuple is (package, src_dir, build_dir, filenames), the shape
        # consumed by build_py's data-file machinery.
        files.extend([('opentrons_shared_data', DATA_ROOT,
                       destination, to_include),
                      ('opentrons_shared_data', '..',
                       build_base, ['package.json'])])
        return files
def get_version():
    """Compute the package version, tagging it with $BUILD_NUMBER when set."""
    build_number = os.getenv('BUILD_NUMBER')
    opts = {'extra_tag': build_number} if build_number else {}
    return normalize_version('shared-data', **opts)
# Packaging metadata for the opentrons_shared_data distribution.
VERSION = get_version()
DISTNAME = 'opentrons_shared_data'
LICENSE = 'Apache 2.0'
AUTHOR = "Opentrons"
EMAIL = "engineering@opentrons.com"
URL = "https://github.com/Opentrons/opentrons"
DOWNLOAD_URL = ''
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Console',
    'Operating System :: OS Independent',
    'Intended Audience :: Science/Research',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.7',
    'Topic :: Scientific/Engineering',
]
KEYWORDS = ["robots", "protocols", "synbio", "pcr", "automation", "lab"]
DESCRIPTION = (
    "A bundle of data and python binding that supports the Opentrons API. "
    "Does not need to be installed manually; only a dependency of the "
    "opentrons package")
PACKAGES = find_packages(where='.', exclude=['tests'])
INSTALL_REQUIRES = [
    'jsonschema>=3.0.2,<4',
]
if __name__ == "__main__":
    setup(
        python_requires='>=3.7',
        name=DISTNAME,
        description=DESCRIPTION,
        license=LICENSE,
        version=VERSION,
        author=AUTHOR,
        author_email=EMAIL,
        maintainer=AUTHOR,
        maintainer_email=EMAIL,
        keywords=KEYWORDS,
        packages=PACKAGES,
        zip_safe=False,
        classifiers=CLASSIFIERS,
        install_requires=INSTALL_REQUIRES,
        include_package_data=True,
        package_data={'opentrons_shared_data': ['py.typed']},
        # Custom commands bundle the shared-data files into builds and sdists.
        cmdclass={
            'build_py': BuildWithData,
            'sdist': SDistWithData
        },
        project_urls={
            'opentrons.com': "https://www.opentrons.com",
            'Source Code On Github':
            "https://github.com/Opentrons/opentrons/tree/edge/shared-data",
            'Documentation': "https://docs.opentrons.com"
        }
    )
|
vug/personalwebapp | views/login.py | Python | mit | 1,797 | 0.002226 | """
This Blueprint gives the ability of user logins, and login_required functionality using Flask-Login extension.
"""
from flask import Blueprint, render_template, request, flash, redirect, url_for
from flask_login import login_required, logout_user, login_user
from extensions import login_manager
from models import User
# Blueprint grouping the authentication views (login/logout).
login = Blueprint('login', __name__)
login_manager.login_view = 'login.login_page'  # redirect target for @login_required views hit while logged out
@login_manager.user_loader
def load_user(user_id):
    """Reload the User object for the id stored in the session.

    Flask-Login callback; returns the User with primary key *user_id*,
    or None when no such user exists.

    :rtype: User
    """
    primary_key = int(user_id)
    return User.query.get(primary_key)
@login.route('/login', methods=['GET', 'POST'])
def login_page():
    """Render the login form (GET) or authenticate the posted credentials (POST).

    On successful login, returns a small HTML snippet with a home link and,
    when a ``next`` query parameter is present, a redirect link. On failure,
    flashes an error and redirects back to the login form.
    """
    if request.method == 'GET':
        return render_template('login.html')
    elif request.method == 'POST':
        # Stdlib escaper; imported locally because 'html' is a local variable below.
        from html import escape
        email = request.form['email']
        password = request.form['password']
        user = User.query.filter_by(email=email).first()
        # NOTE(review): this compares a plaintext password attribute; it should
        # use a salted hash (e.g. werkzeug.security.check_password_hash).
        if user is not None and user.password == password:
            login_user(user, remember=True)
            fullname = user.fullname
            redirect_url = request.args.get('next')
            # Escape user-controlled values before embedding them in HTML;
            # the original interpolated them raw, allowing XSS via the form
            # fields and the ?next= query parameter.
            html = 'Logged in as email: {}, fullname: {}<br><a href="/">Home</a> '.format(
                escape(email), escape(fullname))
            if redirect_url:
                # NOTE(review): 'next' is not validated; consider requiring a
                # relative URL to avoid open-redirect style link injection.
                html += '<a href="{}">Redirect</a>'.format(escape(redirect_url))
            return html
        else:
            flash('Username or Password is invalid', 'error')
            return redirect(url_for('login.login_page'))
@login.route("/logout")
def logout_page():
    """Log the current user out (clears the session) and render the logout page."""
    logout_user()
    return render_template('logout.html')
|
willcampbel/Character-Distribution | distribution.py | Python | mit | 1,888 | 0.03125 | """
distribution.py
Author: Will Campbell
Credit: Dave Wilson, Ethan Adner, http://stackoverflow.com/questions/3121979/how-to-sort-list-tuple-of-lists-tuples
Assignment:
Write and submit a Python program (distribution.py) that computes and displays
the distribution of characters in a given sample of text.
Output of your program should look like this:
Please enter a string of text (the bigger the better): The rain in Spain stays mainly in the plain.
The distribution of characters in "The rain in Spain stays mainly in the plain." is:
iiiiii
nnnnnn
aaaaa
sss
ttt
ee
hh
ll
pp
yy
m
r
Notice about this example:
* The text: 'The rain ... plain' is provided by the user as input to your program.
* Uppercase characters are converted to lowercase
* Spaces and punctuation marks are ignored completely.
* Character | s that are more common appear first in the list.
* Where the same number of characters occur, the lines are ordered alphabetically.
For example, in the printout above, the letters e, h, l, p and y both occur twice
in the text and they are listed in the output in alphabetical order.
* Letters that do not occur in the text are not listed in the output at all.
"""
# Letters counted; everything else (spaces, punctuation, digits) is ignored.
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'


def distribution_lines(text):
    """Return the character-distribution lines for *text*.

    Each line is a letter repeated once per occurrence (case-insensitive),
    ordered by descending count and alphabetically on ties. Letters that do
    not occur are omitted entirely (the original printed a blank line for
    each absent letter, contradicting the spec above).
    """
    counts = {letter: 0 for letter in ALPHABET}
    for ch in text.lower():
        if ch in counts:
            counts[ch] += 1
    ordered = sorted(counts.items(), key=lambda item: (-item[1], item[0]))
    return [letter * count for letter, count in ordered if count > 0]


if __name__ == "__main__":
    # Guarding the interactive part lets the helper be imported and tested.
    string1 = input("Please enter a string of text (the bigger the better): ")
    print('The distribution of characters in "' + string1 + '" is:')
    for line in distribution_lines(string1):
        print(line)
|
tlangerak/Multi-Agent-Systems | spade/odict.py | Python | lgpl-2.1 | 10,695 | 0 | # -*- coding: utf-8 -*-
"""
odict
~~~~~
This module is an example implementation of an ordered dict for the
collections module. It's not written for performance (it actually
performs pretty bad) but to show how the API works.
Questions and Answers
=====================
Why would anyone need ordered dicts?
Dicts in python are unordered which means that the order of items when
iterating over dicts is undefined. As a matter of fact it is most of
the time useless and differs from implementation to implementation.
Many developers stumble upon that problem sooner or later when
comparing the output of doctests which often does not match the order
the developer thought it would.
Also XML systems such as Genshi have their problems with unordered
dicts as the input and output ordering of tag attributes is often
mixed up because the ordering is lost when converting the data into
a dict. Switching to lists is often not possible because the
complexity of a lookup is too high.
Another very common case is metaprogramming. The default namespace
of a class in python is a dict. With Python 3 it becomes possible
to replace it with a different object which could be an ordered dict.
Django is already doing something similar with a hack that assigns
numbers to some descriptors initialized in the class body of a
specific subclass to restore the ordering after class creation.
When porting code from programming languages such as PHP and Ruby
where the item-or | der in a dict is guaranteed it's also a great help
to have an equivalent data structure in Python to ease the transition.
Where are new keys added?
At the end. This behavior is consistent with Ruby 1.9 Hashmaps
and PHP Arrays. It also matches what common ordered dict
implementations do currently.
What happens if an existing key is reassigned?
The key is *not* moved. This is consitent with existing
implementations and can be changed b | y a subclass very easily::
class movingodict(odict):
def __setitem__(self, key, value):
self.pop(key, None)
odict.__setitem__(self, key, value)
Moving keys to the end of a ordered dict on reassignment is not
very useful for most applications.
Does it mean the dict keys are sorted by a sort expression?
That's not the case. The odict only guarantees that there is an order
and that newly inserted keys are inserted at the end of the dict. If
you want to sort it you can do so, but newly added keys are again added
at the end of the dict.
    I initialized the odict with a dict literal, but the keys are not
    ordered like they should be!
Dict literals in Python generate dict objects and as such the order of
their items is not guaranteed. Before they are passed to the odict
constructor they are already unordered.
What happens if keys appear multiple times in the list passed to the
constructor?
The same as for the dict. The latter item overrides the former. This
has the side-effect that the position of the first key is used because
the key is actually overwritten:
>>> odict([('a', 1), ('b', 2), ('a', 3)])
odict.odict([('a', 3), ('b', 2)])
        This behavior is consistent with existing implementations in Python
and the PHP array and the hashmap in Ruby 1.9.
This odict doesn't scale!
        Yes, it doesn't. The delitem operation is O(n). This file is a
mockup of a real odict that could be implemented for collections
based on an linked list.
Why is there no .insert()?
There are few situations where you really want to insert a key at
        a specified index. To not make the API too complex, the proposed
solution for this situation is creating a list of items, manipulating
that and converting it back into an odict:
>>> d = odict([('a', 42), ('b', 23), ('c', 19)])
>>> l = d.items()
>>> l.insert(1, ('x', 0))
>>> odict(l)
odict.odict([('a', 42), ('x', 0), ('b', 23), ('c', 19)])
:copyright: (c) 2008 by Armin Ronacher and PEP 273 authors.
:license: modified BSD license.
"""
from itertools import izip, imap
from copy import deepcopy
missing = object()
class odict(dict):
"""
Ordered dict example implementation.
This is the proposed interface for a an ordered dict as proposed on the
Python mailinglist (proposal_).
It's a dict subclass and provides some list functions. The implementation
of this class is inspired by the implementation of Babel but incorporates
some ideas from the `ordereddict`_ and Django's ordered dict.
The constructor and `update()` both accept iterables of tuples as well as
mappings:
>>> d = odict([('a', 'b'), ('c', 'd')])
>>> d.update({'foo': 'bar'})
>>> d
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar')])
Keep in mind that when updating from dict-literals the order is not
preserved as these dicts are unsorted!
You can copy an odict like a dict by using the constructor, `copy.copy`
or the `copy` method and make deep copies with `copy.deepcopy`:
>>> from copy import copy, deepcopy
>>> copy(d)
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar')])
>>> d.copy()
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar')])
>>> odict(d)
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar')])
>>> d['spam'] = []
>>> d2 = deepcopy(d)
>>> d2['spam'].append('eggs')
>>> d
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', [])])
>>> d2
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', ['eggs'])])
All iteration methods as well as `keys`, `values` and `items` return
the values ordered by the the time the key-value pair is inserted:
>>> d.keys()
['a', 'c', 'foo', 'spam']
>>> d.values()
['b', 'd', 'bar', []]
>>> d.items()
[('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', [])]
>>> list(d.iterkeys())
['a', 'c', 'foo', 'spam']
>>> list(d.itervalues())
['b', 'd', 'bar', []]
>>> list(d.iteritems())
[('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', [])]
Index based lookup is supported too by `byindex` which returns the
key/value pair for an index:
>>> d.byindex(2)
('foo', 'bar')
You can reverse the odict as well:
>>> d.reverse()
>>> d
odict.odict([('spam', []), ('foo', 'bar'), ('c', 'd'), ('a', 'b')])
And sort it like a list:
>>> d.sort(key=lambda x: x[0].lower())
>>> d
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', [])])
.. _proposal: http://thread.gmane.org/gmane.comp.python.devel/95316
.. _ordereddict: http://www.xs4all.nl/~anthon/Python/ordereddict/
"""
def __init__(self, *args, **kwargs):
dict.__init__(self)
self._keys = []
self.update(*args, **kwargs)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
if key not in self:
self._keys.append(key)
dict.__setitem__(self, key, item)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
d = memo.get(id(self), missing)
if d is not missing:
return d
memo[id(self)] = d = self.__class__()
dict.__init__(d, deepcopy(self.items(), memo))
d._keys = self._keys[:]
return d
def __getstate__(self):
return {'items': dict(self), 'keys': self._keys}
def __setstate__(self, d):
self._keys = d['keys']
dict.update(d['items'])
def __reversed__(self):
return reversed(self._keys)
def __eq__(self, other):
if isinstance(other, odict):
if not dict.__eq__(self, other):
return False
|
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/key_vault_secret_reference_py3.py | Python | mit | 1,467 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyVaultSecretReference(Model):
    """Describes a reference to Key Vault Secret.

    All required parameters must be populated in order to send to Azure.

    :param secret_url: Required. The URL referencing a secret in a Key Vault.
    :type secret_url: str
    :param source_vault: Required. The relative URL of the Key Vault
     containing the secret.
    :type source_vault:
     ~azure.mgmt.compute.v2016_04_30_preview.models.SubResource
    """

    # msrest uses _validation to reject payloads missing required fields.
    _validation = {
        'secret_url': {'required': True},
        'source_vault': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type names.
    _attribute_map = {
        'secret_url': {'key': 'secretUrl', 'type': 'str'},
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
    }

    def __init__(self, *, secret_url: str, source_vault, **kwargs) -> None:
        super(KeyVaultSecretReference, self).__init__(**kwargs)
        self.secret_url = secret_url
        self.source_vault = source_vault
|
brian-team/brian2genn | examples/simple_example_synapses.py | Python | gpl-2.0 | 971 | 0.00103 | from brian2 import *
import brian2genn
# Use the Brian2GeNN backend; generated GeNN code goes into this directory.
set_device('genn', directory='simple_example_synapses')
#set_device('cpp_standalone')
# Two populations of N units with threshold/reset spiking dynamics,
# driven by the synaptic input variable Iin.
N = 100
tau = 10*ms
eqs = '''
dV/dt = -V/tau + Iin/tau : 1
Iin : 1
'''
G = NeuronGroup(N, eqs, threshold='V>1', reset='V=0', name='PN')
G.V = rand()
G2 = NeuronGroup(N, eqs, threshold='V>1', reset='V=0', name='LN')
G2.V = 2 * rand()
# PN -> LN synapses with a first-order gating variable s and weight g;
# a presynaptic spike adds g to the target's Iin.
alpha = 20/ms
beta = 30/ms
S = Synapses(G, G2,
             model='''
             ds/dt= alpha*(1-s) - beta*s: 1
             g: 1
             ''',
             pre='Iin_post+= g',
             name='ex_syns')
# LN -> PN synapses with both pre- and post-synaptic event code; the
# postsynaptic code rescales the weight g.
alpha2 = 40/ms
beta2 = 60/ms
p_post = 1
p_pre = 30
S2 = Synapses(G2, G,
              model='''
              ds/dt= alpha2*(1-s) - beta2*s: 1
              g: 1
              ''',
              pre='Iin_post+= g*p_pre',
              post='''
              g*= p_post-0.9;
              ''',
              name='inh_syns')
# Sparse test connectivity, random excitatory weights, then run 100 ms.
S.connect(i=1, j=5)
S2.connect(i=[1, 2], j=[1, 2])
S.g = 'rand()'
run(100*ms)
|
snogaraleal/adjax | adjax/serializer.py | Python | mit | 2,855 | 0 | from json import loads, dumps
class ObjectType(object):
    """Base class for a pluggable serializer type.

    Subclasses set ``name`` (wire identifier) and ``cls`` (the Python type
    handled) and may override encode/decode. ``TYPE`` is the dictionary key
    used to tag encoded values with their type name.
    """

    TYPE = '__type__'
    name = None
    cls = None

    @classmethod
    def encode(cls, value):
        """Wrap *value* in a plain dictionary."""
        return {'value': value}

    @classmethod
    def decode(cls, value):
        """Unwrap a dictionary produced by :meth:`encode`."""
        return value['value']
class Serializer(object):
    """ Class for all serialization operations.

    Keeps a registry of ObjectType subclasses and uses it to round-trip
    instances of their classes through JSON.
    """

    def __init__(self, object_types=None):
        """ Initialize serializer with provided object types list. """
        self.object_types = []
        self.object_types_by_cls = {}
        self.object_types_by_name = {}
        # enable() does all registry bookkeeping, including appending to
        # self.object_types. The original pre-filled the list and then
        # appended again inside enable(), registering every type twice
        # (which also broke disable(): it removed only one copy).
        for object_type in (object_types or []):
            self.enable(object_type)

    def enable(self, object_type):
        """ Enable the specified object type derived class. """
        if object_type.cls is None:
            raise ValueError('Custom type cls must be set')
        if object_type.name is None:
            raise ValueError('Custom type name must be set')
        self.object_types.append(object_type)
        self.object_types_by_cls[object_type.cls] = object_type
        self.object_types_by_name[object_type.name] = object_type
        return object_type

    def disable(self, object_type):
        """ Disable the specified object type derived class. """
        self.object_types.remove(object_type)
        self.object_types_by_cls.pop(object_type.cls)
        self.object_types_by_name.pop(object_type.name)
        return object_type

    def encode(self, data):
        """ Serialize data to a JSON string. """
        def default(value):
            """ Fallback for values json cannot natively serialize: use the
            registered object type for the value's exact class, tagging the
            result dict with the type name. """
            object_type = self.object_types_by_cls.get(type(value))
            if object_type:
                value = object_type.encode(value)
                if not isinstance(value, dict):
                    raise TypeError('Object type encode must return dict')
                value[ObjectType.TYPE] = object_type.name
            return value
        return dumps(data, default=default)

    def decode(self, data):
        """ Deserialize a JSON string to an object. """
        def object_hook(dct):
            """ Use the registered object type's decode when the dictionary
            carries a known type tag; otherwise return it unchanged. """
            if ObjectType.TYPE in dct:
                name = dct[ObjectType.TYPE]
                object_type = self.object_types_by_name.get(name)
                if object_type:
                    return object_type.decode(dct)
            return dct
        return loads(data, object_hook=object_hook)
# Shared module-level serializer instance (no custom object types registered).
serializer = Serializer()
|
pombredanne/TSP-CloudStack | src/it/tsp/cloudstack/pimf/extract_cr.py | Python | gpl-3.0 | 2,778 | 0.007559 | # -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# Copyright (C) 2011 Khaled Ben Bahri - Institut Telecom
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Feb 25, 2011
@author: Khaled Ben Bahri
@contact: khaled.ben_bahri@it-sudparis.eu
@organization: Institut Telecom - Telecom SudParis
@version: 0.1
@license: LGPL - Lesser General Public License
'''
import logging
# The original passed 'Configformat=', which is not a valid basicConfig
# keyword (the intended keyword is 'format'), so the asctime format was
# never applied (and modern Python rejects unknown basicConfig kwargs).
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
class Indicator:
    """Value holder for one monitoring indicator parsed from the XML config.

    The class-level defaults below are overwritten per instance by the XML
    reader: the indicator's name plus its physical/application thresholds.
    """

    # Defaults; real values are assigned on each instance after parsing.
    name = None
    maxApp = None
    maxPhy = None
    minApp = None

    def __init__(self):
        # Attributes are populated externally after construction.
        pass
class TransformXmlToCr:
    """Parse an XML description of 'cr' indicators into Indicator objects."""

    # Cached DOM root element / parsed Indicator list (computed lazily).
    __currentNode__ = None
    __crList__ = None

    def __init__(self):
        pass

    def readXml(self, file):
        """Parse *file* (a path or file object) into a DOM document."""
        from xml.dom.minidom import parse
        self.doc = parse(file)

    def getRootElement(self):
        """Return (and cache) the parsed document's root element."""
        if self.__currentNode__ is None:
            self.__currentNode__ = self.doc.documentElement
        return self.__currentNode__

    def getcr(self):
        """Return the list of Indicator objects, one per <cr> element.

        The list is built once and cached; the original returned a bare
        None on every call after the first, which this fixes by returning
        the cached list instead.
        """
        if self.__crList__ is not None:
            return self.__crList__
        self.__crList__ = []
        for crs in self.getRootElement().getElementsByTagName("cr"):
            if crs.nodeType == crs.ELEMENT_NODE:
                obj = Indicator()
                try:
                    # extraction of value for each indicator
                    obj.name = self.getText(crs.getElementsByTagName("name")[0])
                    obj.maxPhy = self.getText(crs.getElementsByTagName("maxPhysicalThreshold")[0])
                    obj.maxApp = self.getText(crs.getElementsByTagName("maxApplicationThreshold")[0])
                    obj.minApp = self.getText(crs.getElementsByTagName("minApplicationThreshold")[0])
                except Exception:
                    # Best effort: a <cr> with missing tags is kept with
                    # whatever fields were parsed (original behavior), but
                    # we no longer use a bare except that would swallow
                    # KeyboardInterrupt/SystemExit as well.
                    logging.error('ERROR IN USED TAGS.........')
                self.__crList__.append(obj)
        return self.__crList__

    def getText(self, node):
        """Return the text content of *node*'s first child."""
        return node.childNodes[0].nodeValue
|
metulburr/puzzle | data/levels/level3.py | Python | gpl-3.0 | 5,988 | 0.01169 | import pygame as pg
from .. import prepare, tools
from ..components import drop_box, var_display
from ..states import game
class Level3(game.Game):
    """Third puzzle level: the player fills three drop boxes with arrow
    tiles so that the simulated control flow reaches the 'check' step with
    a non-zero var (index 5 marks level completion)."""
    def __init__(self):
        game.Game.__init__(self)
        self.next = 'MENU'  # state to switch to when this level ends
        # Boxes the player fills with arrow tiles; positions are offsets
        # from the screen center.
        self.drop_boxes = {
            'move_down':drop_box.DropBox(self.tile_rect.size, tools.from_center(self.screen_rect, (100,-120))),
            'move_right':drop_box.DropBox(self.tile_rect.size, tools.from_center(self.screen_rect, (-400,-120))),
            'move_left':drop_box.DropBox(self.tile_rect.size, tools.from_center(self.screen_rect, (100,75))),
        }
        self.setup_end_text(pos=(300,700))
        self.setup_text_flow()
        self.fill_tile_queue_order()
        self.tile_queue_layout()
        self.fill_drop_box_layout()
        self.control_flow_order()
        # Start the control arrow at the first element of the flow.
        self.control_arrow_rect.centery = self.control_flow[0].centery
        self.var = var_display.VarDisplay((0,255,0),(255,0,0),(1200,1000), 75, prepare.FONTS['impact'])
    def setup_text_flow(self):
        '''setup arbitrary texts for control flow'''
        self.text_flow = {
            'reset_var':self.make_text('var = 0;', (255,255,255),(300,500), 75, prepare.FONTS['impact']),
            'check':self.make_text('var > 0;', (255,255,255), (300,600), 75, prepare.FONTS['impact']),
            'var':self.make_text('var = 100;', (255,255,255), (300,300), 75, prepare.FONTS['impact']),
            'while':self.make_text('while(true)', (255,255,255), (800,500), 75, prepare.FONTS['impact']),
            'continue':self.make_text('continue;', (255,255,255), (800,700), 75, prepare.FONTS['impact']),
        }
    def control_flow_order(self):
        '''order of control flow to complete level'''
        # Index positions matter: update_control_arrow's magic numbers
        # (2, 3, 4, 5, 6, 7, 9) refer to entries of this list.
        self.control_flow = [
            self.start_text_rect,
            self.text_flow['var'][1],
            self.drop_boxes['move_right'].rect,
            self.text_flow['reset_var'][1],
            self.text_flow['check'][1],
            self.end_text_rect,
            self.drop_boxes['move_down'].rect,
            self.text_flow['while'][1],
            self.drop_boxes['move_left'].rect,
            self.text_flow['continue'][1],
        ]
    def fill_tile_queue_order(self):
        '''fill tile queue to specific level order '''
        # This level starts with an empty queue: all tiles are pre-placed
        # in the drop boxes (see fill_drop_box_layout).
        self.tile_queue = [
        ]
    def fill_drop_box_layout(self):
        '''drop boxes that have tiles in them from start'''
        self.drop_box_queue = [
            self.btn_dict['down_arrow'],
            self.btn_dict['left_arrow'],
            self.btn_dict['right_arrow'],
        ]
        self.drop_boxes['move_right'].set_occupant(self.drop_box_queue[0])
        self.drop_boxes['move_down'].set_occupant(self.drop_box_queue[1])
        self.drop_boxes['move_left'].set_occupant(self.drop_box_queue[2])
    def update_control_arrow(self, now):
        '''pause/start control flow, change pause/start arrow color, and move arrow'''
        if self.all_boxes_full():
            self.control_paused = False
            # Advance the flow at most once per second.
            if now-self.timer > 1000:
                self.timer = now
                self.control_flow_index_next = self.control_flow_index
                self.control_flow_index_next += 1
                # Wrap around when the arrow runs off the end of the flow.
                if self.control_flow_index > len(self.control_flow)-1:
                    self.control_flow_index = 0
                    self.set_control('left')
                #+1 to index to hold arrow at position
                # First chain: variable effects of the step being entered.
                if self.control_flow_index_next == 2:
                    self.var.value = 100
                elif self.control_flow_index_next == 4:
                    self.var.value = 0
                elif self.control_flow_index_next == 6:
                    # 'var > 0;' check: fail back to start when var is 0.
                    if not self.var.value:
                        self.control_flow_index = 0
                        self.fail_sound.play()
                    else:
                        self.control_flow_index = self.control_flow_index_next
                # Second chain: jumps decided by which arrow tile occupies
                # the drop box at the step being entered.
                if self.control_flow_index_next == 3:
                    if self.drop_boxes['move_right'].occupant.value == 'right_arrow':
                        self.set_control('middle')
                        self.control_flow_index = 6
                elif self.control_flow_index_next == 9:
                    if self.drop_boxes['move_left'].occupant.value == 'left_arrow':
                        self.control_flow_index = 4
                        self.set_control('left')
                elif self.control_flow_index_next == 7:
                    if self.drop_boxes['move_down'].occupant.value == 'left_arrow':
                        self.set_control('left')
                        self.control_flow_index = 2
                # NOTE(review): this second '== 9' branch appears unreachable —
                # an earlier branch in the same chain already handles 9.
                # Confirm the intended condition before changing it.
                elif self.control_flow_index_next == 9:
                    self.set_control('left')
                    self.control_flow_index = 0
                elif self.control_flow_index_next == 5:
                    if not self.var.value:
                        self.control_flow_index = 0
                        self.fail_sound.play()
                    else:
                        self.control_flow_index = self.control_flow_index_next
                self.control_arrow_rect.centery = self.control_flow[self.control_flow_index].centery
        else:
            # Not all boxes are filled yet: hold the flow paused.
            self.control_paused = True
            #print(self.control_flow_index)
        # Reaching index 5 (the end marker) completes the level.
        if self.control_flow_index == 5:
            self.level_complete_sound.play()
            self.done = True
    def additional_update(self, now, keys, scale):
        # Per-frame hook from game.Game: drive the flow and the var display.
        self.update_control_arrow(now)
        self.var.update(now)
    def additional_render(self, surface):
        # Per-frame render hook from game.Game.
        self.var.render(surface)
    def reset(self):
        # Restore the level to its initial layout and state.
        self.control_flow_index = 0
        self.tile_queue_layout()
        self.fill_drop_box_layout()
        self.var.value = 0
    def cleanup(self):
        self.reset()
    def entry(self):
        pass
|
VerifiableRobotics/ReSpeC | src/respec/formula/gr1_formulas.py | Python | bsd-3-clause | 8,371 | 0.030701 | #!/usr/bin/env python
from ..ltl import ltl as LTL
"""
Formulas for the GR(1) fragment of Linear Temporal Logic
This module contains two main classes:
* GR1Formula for vanilla GR(1) formulas
* FastSlowFormula for extended GR(1) formulas following the paradigm
in Vasumathi Raman and Hadas Kress-Gazit (ICRA 2013)
The LTL syntax used is that of .structuredslugs,
which is meant for use with the SLUGS synthesis tool.
"""
class GR1Formula(object):
"""
Arguments:
env_props (list of str) Environment propositions (strings)
sys_props (list of str) System propositions (strings)
ts (dict of str) Transition system, TS (e.g. workspace topology)
Implicitly contains propositions in the keys.
Attributes:
formula (list of str) Formulas whose conjunction makes up a | type
of GR(1) subformulas (e.g. fairness conditions)
type (str) GR(1) subformu | la type (sys_init, env_init,
sys_trans, env_trans, sys_liveness,
env_liveness)
Raises:
ValueError: When a proposition is neither a system nor
an environment proposition
"""
def __init__(self, env_props = [], sys_props = [], ts = {}):
#FIX: TS should be an argument of a subclass, not base class
self.sys_props = sys_props
self.env_props = env_props
self.ts = ts
self._add_props_from_ts()
# The formulas and their subfomrula type is set for
# classes (subformulas) that inherit from GR1Formula
self.formulas = list()
self.type = str()
#TODO: Make formulas a property. Setter would work with
# either a single formula or a list of formulas.
# =====================================================
# System and environment initial conditions
# =====================================================
def gen_sys_init_from_prop_assignment(self, prop_assignment, props = "sys_props"):
"""
Set the given propositions to the desired truth value in the
system initial conditions formula. Set all the others to False.
"""
sys_init = list()
props_of_type = getattr(self, props)
for prop in props_of_type:
if prop in prop_assignment.keys():
if prop_assignment[prop] is True:
sys_init.append(prop)
else:
sys_init.append(LTL.neg(prop))
else:
sys_init.append(LTL.neg(prop))
return sys_init
def gen_env_init_from_prop_assignment(self, prop_assignment, props = "env_props"):
"""
Set the given propositions to the desired truth value in the
environment initial conditions formula. Set all the others to False.
"""
env_init = list()
props_of_type = getattr(self, props)
for prop in props_of_type:
if prop in prop_assignment.keys():
if prop_assignment[prop]:
env_init.append(prop)
else:
env_init.append(LTL.neg(prop))
else:
env_init.append(LTL.neg(prop))
return env_init
# =====================================================
# System transition formulas (e.g., workspace topology)
# =====================================================
def gen_sys_trans_formulas(self, future = True):
"""
Generate system requirement formulas that
encode the transition system (e.g. workspace topology).
The transition system TS, is provided in the form of a dictionary.
"""
sys_trans_formulas = list()
for prop in self.ts.keys():
left_hand_side = prop
right_hand_side = list()
for adj_prop in self.ts[prop]:
adj_phi_prop = self._gen_phi_prop(adj_prop)
disjunct = LTL.next(adj_phi_prop) if future else adj_phi_prop
right_hand_side.append(disjunct)
right_hand_side = LTL.disj(right_hand_side)
sys_trans_formulas.append(LTL.implication(left_hand_side,
right_hand_side))
return sys_trans_formulas
# =====================================================
# Various formulas
# =====================================================
def gen_mutex_formulas(self, mutex_props, future):
"""
Create a set of formulas that enforce mutual exclusion
between the given propositions, see Eq. (1).
The argument 'future' dictates whether the propositions will be
primed (T) or not (F). Should be set to True in fast-slow formulas.
"""
mutex_formulas = list()
for prop in mutex_props:
other_props = [p for p in mutex_props if p != prop]
negated_props = list()
for prop_prime in other_props:
if future:
left_hand_side = LTL.next(prop)
neg_prop = LTL.neg(LTL.next(prop_prime))
else:
left_hand_side = prop
neg_prop = LTL.neg(prop_prime)
negated_props.append(neg_prop)
right_hand_side = LTL.conj(negated_props)
mutex_formulas.append(LTL.iff(left_hand_side, right_hand_side))
return mutex_formulas
def gen_precondition_formula(self, action, preconditions):
'''Conditions that have to hold for an action (prop) to be allowed.'''
neg_preconditions = map(LTL.neg, preconditions)
left_hand_side = LTL.disj(neg_preconditions)
right_hand_side = LTL.neg(action)
precondition_formula = LTL.implication(left_hand_side, right_hand_side)
return precondition_formula
def gen_success_condition(self, mem_props, success = 'finished'):
'''
Creates a formula that turns 'finshed' to True
when all memory propositions corresponding to success have been set.'''
conjunct = LTL.conj(mem_props)
success_condition = LTL.iff(success, conjunct)
return success_condition
def gen_goal_memory_formula(self, goal):
'''
For a proposition corresponding to a desired objective, creates a memory
proposition and formulas for remembering achievement of that objective.'''
mem_prop = self.gen_memory_prop(goal)
set_mem_formula = LTL.implication(goal, LTL.next(mem_prop))
remembrance_formula = LTL.implication(mem_prop, LTL.next(mem_prop))
precondition = LTL.conj([LTL.neg(mem_prop), LTL.neg(goal)])
guard_formula = LTL.implication(precondition, LTL.next(LTL.neg(mem_prop)))
goal_memory_formula = list([set_mem_formula, remembrance_formula, guard_formula])
return mem_prop, goal_memory_formula
def gen_memory_prop(self, prop):
    '''
    Derive the memory proposition ("<prop>_m") for the given proposition
    and register it with the system propositions.
    '''
    memory_prop = prop + '_m'
    self.sys_props.append(memory_prop)
    return memory_prop
# =====================================================
# Various helper methods
# =====================================================
def _add_props_from_ts(self):
"""Reads the items in the TS dictionary and adds them to the system propositions, if they are not already there."""
props_to_add = self.ts.keys()
for v in self.ts.values():
for prop in v:
props_to_add.append(prop)
self.sys_props = list(set(self.sys_props + props_to_add))
def _gen_phi_prop(self, prop):
    r"""
    Generate the (non-atomic) proposition \phi_r, i.e. the mutex version
    of \pi_r (where prop = \pi_r): \pi_r conjoined with the negation of
    every other transition proposition.
    """
    conjuncts = [prop]
    conjuncts.extend(LTL.neg(other) for other in self._get_other_trans_props(prop))
    return LTL.conj(conjuncts)
def _get_other_trans_props(self, prop):
"""For some proposition \pi_r, get all propositions \pi_r' such that r' =/= r."""
if prop in self.sys_props:
return [p for p in self.sys_props if p != prop]
elif prop in self.env_props:
return [p for p in self.env_props if p != prop]
else:
raise ValueError("Unknown type for proposition: %s" % prop)
class SimpleLivenessRequirementFormula(GR1Formula):
    """
    A single liveness requirement over the given goals: a conjunction by
    default, or a disjunction when the flag is set.
    """

    def __init__(self, goals, disjunction = False):
        super(SimpleLivenessRequirementFormula, self).__init__(
            sys_props = goals)

        self.formulas = self._gen_liveness_formula(goals, disjunction)
        self.type = 'sys_liveness'

    def _gen_liveness_formula(self, goals, disjunction):
        # Pick the combinator once, then wrap the result as a one-element list.
        combine = LTL.disj if disjunction else LTL.conj
        return [combine(goals)]
# =========================================================
# |
cloudify-cosmo/yo-ci | yoci/tests/__init__.py | Python | apache-2.0 | 21 | 0 | __autho | r__ = | 'nir0s'
|
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py | Python | gpl-3.0 | 5,323 | 0.000188 | # mysql/mysqlconnector.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+mysqlconnector
:name: MySQL Connector/Python
:dbapi: myconnpy
:connectstring: mysql+mysqlconnector://<user>:<password>@\
<host>[:<port>]/<dbname>
:url: http://dev.mysql.com/downloads/connector/python/
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
"""
from .base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer,
BIT)
from ... import util
import re
class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
    # Execution context for the mysql-connector DBAPI.

    def get_lastrowid(self):
        # mysql-connector exposes the last inserted id directly on the cursor.
        return self.cursor.lastrowid
class MySQLCompiler_mysqlconnector(MySQLCompiler):
    """Statement compiler that doubles '%' signs for older (pre-2.0,
    Python 2) Connector/Python drivers, which treat '%' as a format marker."""

    def visit_mod_binary(self, binary, operator, **kw):
        mod = " %% " if self.dialect._mysqlconnector_double_percents else " % "
        return self.process(binary.left, **kw) + mod + \
            self.process(binary.right, **kw)

    def post_process_text(self, text):
        if self.dialect._mysqlconnector_double_percents:
            text = text.replace('%', '%%')
        return text

    def escape_literal_column(self, text):
        if self.dialect._mysqlconnector_double_percents:
            text = text.replace('%', '%%')
        return text
class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer):
    """Identifier preparer that additionally doubles '%' signs for older
    Connector/Python drivers (see the compiler above)."""

    def _escape_identifier(self, value):
        escaped = value.replace(self.escape_quote, self.escape_to_quote)
        if self.dialect._mysqlconnector_double_percents:
            escaped = escaped.replace("%", "%%")
        return escaped
class _myconnpyBIT(BIT):
    def result_processor(self, dialect, coltype):
        """MySQL-connector already converts mysql bits, so."""
        # Returning None tells SQLAlchemy no post-processing is needed.
        return None
class MySQLDialect_mysqlconnector(MySQLDialect):
    """MySQL dialect for the MySQL Connector/Python (myconnpy) DBAPI.

    NOTE(review): two statements in this block were corrupted by
    extraction artifacts ('cl | ient_flags', '| def'); they are restored
    here to match the upstream implementation.
    """
    driver = 'mysqlconnector'

    supports_unicode_binds = True

    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True

    supports_native_decimal = True

    default_paramstyle = 'format'
    execution_ctx_cls = MySQLExecutionContext_mysqlconnector
    statement_compiler = MySQLCompiler_mysqlconnector

    preparer = MySQLIdentifierPreparer_mysqlconnector

    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            BIT: _myconnpyBIT,
        }
    )

    @util.memoized_property
    def supports_unicode_statements(self):
        # Python 3, or Connector/Python >= 2.0, handles unicode natively.
        return util.py3k or self._mysqlconnector_version_info > (2, 0)

    @classmethod
    def dbapi(cls):
        from mysql import connector
        return connector

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user')

        opts.update(url.query)

        util.coerce_kw_type(opts, 'buffered', bool)
        util.coerce_kw_type(opts, 'raise_on_warnings', bool)

        # unfortunately, MySQL/connector python refuses to release a
        # cursor without reading fully, so non-buffered isn't an option
        opts.setdefault('buffered', True)

        # FOUND_ROWS must be set in ClientFlag to enable
        # supports_sane_rowcount.
        if self.dbapi is not None:
            try:
                from mysql.connector.constants import ClientFlag
                client_flags = opts.get(
                    'client_flags', ClientFlag.get_default())
                client_flags |= ClientFlag.FOUND_ROWS
                opts['client_flags'] = client_flags
            except Exception:
                pass
        return [[], opts]

    @util.memoized_property
    def _mysqlconnector_version_info(self):
        # Parse "X.Y[.Z]" out of the driver's __version__ string.
        if self.dbapi and hasattr(self.dbapi, '__version__'):
            m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
                         self.dbapi.__version__)
            if m:
                return tuple(
                    int(x)
                    for x in m.group(1, 2, 3)
                    if x is not None)

    @util.memoized_property
    def _mysqlconnector_double_percents(self):
        # Only py2 drivers older than 2.0 need '%' escaping.
        return not util.py3k and self._mysqlconnector_version_info < (2, 0)

    def _get_server_version_info(self, connection):
        dbapi_con = connection.connection
        version = dbapi_con.get_server_version()
        return tuple(version)

    def _detect_charset(self, connection):
        return connection.connection.charset

    def _extract_error_code(self, exception):
        return exception.errno

    def is_disconnect(self, e, connection, cursor):
        # Known "server went away" error codes, plus the driver's own
        # connection-lost message.
        errnos = (2006, 2013, 2014, 2045, 2055, 2048)
        exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
        if isinstance(e, exceptions):
            return e.errno in errnos or \
                "MySQL Connection not available." in str(e)
        else:
            return False

    def _compat_fetchall(self, rp, charset=None):
        return rp.fetchall()

    def _compat_fetchone(self, rp, charset=None):
        return rp.fetchone()
# Entry point looked up by SQLAlchemy's dialect-loading machinery.
dialect = MySQLDialect_mysqlconnector
|
LibCrowds/libcrowds-statistics | tests/test_event_listeners.py | Python | bsd-3-clause | 1,768 | 0 | # -*- coding: utf8 -*-
from mock import patch, MagicMock
from default import with_context, Test
from pybossa.model.task_run import TaskRun
from factories import TaskRunFactory
from sqlalchemy import event
from libcrowds_statistics import event_listeners
class TestEventListener(Test):
    """Verify that the before_insert listener stamps the client IP onto
    new TaskRuns, with and without an X-Forwarded-For style proxy header.

    Fixes: extraction artifacts had corrupted the patch target string and
    the 'remote_addr' attribute name; tearDown also bound an unused local.
    """

    def setUp(self):
        self.ip_listener = event_listeners.record_new_task_run_ip_event
        event.listen(TaskRun, 'before_insert', self.ip_listener)

    def tearDown(self):
        event.remove(TaskRun, 'before_insert', self.ip_listener)

    @with_context
    @patch('libcrowds_statistics.event_listeners.request')
    def test_ip_address_set_for_new_task_run(self, mock_request):
        mock_target = MagicMock()
        mock_conn = MagicMock()
        mock_request.remote_addr = '1.2.3.4'
        # No proxy headers present.
        mock_request.headers.getlist.return_value = False
        event_listeners.record_new_task_run_ip_event(None, mock_conn,
                                                     mock_target)
        tr_info_args = mock_target.info.__setitem__.call_args_list
        assert tr_info_args[0][0] == ('ip_address', '1.2.3.4')

    @with_context
    @patch('libcrowds_statistics.event_listeners.request')
    def test_ip_address_still_set_when_behind_proxy(self, mock_request):
        mock_target = MagicMock()
        mock_conn = MagicMock()
        mock_request.remote_addr = '1.2.3.4'
        # Simulate a forwarded-for header from a proxy.
        mock_request.headers.getlist.return_value = ['1.2.3.4']
        event_listeners.record_new_task_run_ip_event(None, mock_conn,
                                                     mock_target)
        tr_info_args = mock_target.info.__setitem__.call_args_list
        assert tr_info_args[0][0] == ('ip_address', '1.2.3.4')
|
mcallaghan/tmv | BasicBrowser/scoping/migrations/0099_auto_20170512_1404.py | Python | gpl-3.0 | 444 | 0 | # | -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-05-12 14:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes DocRel.url nullable.

    dependencies = [
        ('scoping', '0098_auto_20170511_0955'),
    ]

    operations = [
        migrations.AlterField(
            model_name='docrel',
            name='url',
            field=models.TextField(null=True),
        ),
    ]
| |
jonahbron/clue | setup.py | Python | gpl-3.0 | 3,316 | 0.000905 | import card
from card import Card
from player import Player
from hand import Hand
from prompt import Prompt, IntegerPrompt, SetPrompt
import pprint
class Setup:
    """Interactive game setup: builds the deck, the hidden solution
    ("conviction") hand, the local player and the opponents.

    Fixes: two statements in initialize_cards were corrupted by
    extraction artifacts and are restored here.
    """

    def run(self, game):
        """Run the complete setup sequence for the given game."""
        self.game = game
        self.cards_accounted_for = 0
        self.setup_conviction()
        self.initialize_cards()
        self.setup_me()
        self.setup_opponents()
        self.setup_my_cards()

    def setup_conviction(self):
        # The hidden solution holds exactly one card of each type.
        self.game.conviction = Hand(card.COUNT_TYPES, game=self.game)
        self.game.hands.add(self.game.conviction)
        self.cards_accounted_for += card.COUNT_TYPES

    def initialize_cards(self):
        """Populate the deck with the standard rooms, suspects and weapons."""
        self.game.cards.add(Card(card.TYPE_ROOM, 'Lounge'))
        self.game.cards.add(Card(card.TYPE_ROOM, 'Dining Room'))
        self.game.cards.add(Card(card.TYPE_ROOM, 'Kitchen'))
        self.game.cards.add(Card(card.TYPE_ROOM, 'Ballroom'))
        self.game.cards.add(Card(card.TYPE_ROOM, 'Conservatory'))
        self.game.cards.add(Card(card.TYPE_ROOM, 'Billiard Room'))
        self.game.cards.add(Card(card.TYPE_ROOM, 'Library'))
        self.game.cards.add(Card(card.TYPE_ROOM, 'Study'))
        self.game.cards.add(Card(card.TYPE_ROOM, 'Hall'))
        self.game.cards.add(Card(card.TYPE_PERSON, 'Miss Scarlett'))
        self.game.cards.add(Card(card.TYPE_PERSON, 'Coloniel Mustard'))
        self.game.cards.add(Card(card.TYPE_PERSON, 'Misses White'))
        self.game.cards.add(Card(card.TYPE_PERSON, 'Mister Green'))
        self.game.cards.add(Card(card.TYPE_PERSON, 'Misses Peacock'))
        self.game.cards.add(Card(card.TYPE_PERSON, 'Professor Plumb'))
        self.game.cards.add(Card(card.TYPE_WEAPON, 'Lead Pipe'))
        self.game.cards.add(Card(card.TYPE_WEAPON, 'Wrench'))
        self.game.cards.add(Card(card.TYPE_WEAPON, 'Knife'))
        self.game.cards.add(Card(card.TYPE_WEAPON, 'Revolver'))
        self.game.cards.add(Card(card.TYPE_WEAPON, 'Candlestick'))
        self.game.cards.add(Card(card.TYPE_WEAPON, 'Rope'))

    def setup_me(self):
        """Prompt for the local player's name and hand size."""
        name = self.game.prompt(Prompt('Your name:'))
        card_count = self.game.prompt(IntegerPrompt('Count your cards:', len(self.game.cards) - self.cards_accounted_for))
        player = Player(name, Hand(card_count, game=self.game))
        self.game.hands.add(player.hand)
        self.game.me = player
        self.game.players.add(player)
        self.cards_accounted_for += card_count

    def setup_opponents(self):
        """Prompt for opponents until every card is accounted for."""
        while self.cards_accounted_for < len(self.game.cards):
            cards_left = len(self.game.cards) - self.cards_accounted_for
            name = self.game.prompt(Prompt('Opponent name:'))
            card_count = self.game.prompt(IntegerPrompt(
                'Cards held by {} ({} left):'.format(
                    name,
                    cards_left
                ),
                cards_left
            ))
            player = Player(name, Hand(card_count, game=self.game))
            self.game.hands.add(player.hand)
            self.game.players.add(player)
            self.cards_accounted_for += card_count

    def setup_my_cards(self):
        """Record which cards the local player holds."""
        while len(self.game.me.hand.has_set) < self.game.me.hand.count:
            self.game.me.hand.has(self.game.prompt(SetPrompt('Your card:', self.game.cards, exclude=self.game.me.hand.has_set)))
|
cladmi/RIOT | tests/nordic_softdevice/tests/01-run.py | Python | lgpl-2.1 | 291 | 0 | #!/usr/bin/env python3
import sys
from testrunner import run
def testfunc(child):
child.expect("All up, running the shell now")
child.sendline("ifconfig")
child.exp | ect(r"Iface\s+(\d+)\s+HWaddr:")
if __name__ == "__main__":
    # Run the expect-script with a short timeout; the board's own output
    # is not echoed. (Fixes an extraction artifact that split `sys.exit`.)
    sys.exit(run(testfunc, timeout=1, echo=False))
|
vikingco/django-advanced-reports | advanced_reports/backoffice/decorators.py | Python | bsd-3-clause | 1,384 | 0.002168 | from functools import wraps
from django.utils.translation import ugettext as _
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.views import login
from django.contrib.auth import REDIRECT_FIELD_NAME
def staff_member_required(backoffice):
    """Decorator factory: require an active staff user for a backoffice
    view, otherwise render the backoffice login page with the current
    path as the post-login redirect target."""
    def decorate(view_func):
        """
        Decorator for views that checks that the user is logged in and is a staff
        member, displaying the login page if necessary.
        """
        @wraps(view_func)
        def _checklogin(request, *args, **kwargs):
            if request.user.is_active and request.user.is_staff:
                # The user is valid. Continue to the admin page.
                return view_func(request, *args, **kwargs)

            assert hasattr(request, 'session'), "Advanced Reports Backoffice requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
            defaults = {
                'template_name': backoffice.login_template,
                'authentication_form': AdminAuthenticationForm,
                'extra_context': {
                    'backoffice': backoffice,
                    REDIRECT_FIELD_NAME: request.get_full_path(),
                },
            }
            return login(request, **defaults)
        return _checklogin
    return decorate
|
almarklein/flexx | flexx/ui/__init__.py | Python | bsd-2-clause | 725 | 0 | """ flexx.ui
GUI toolkit based on web technology with a Pythonic API.
"""
# We follow the convention of having one module per widget class (or a
# small set of closely related classes). In order not to pollute this
# namespace, we prefix the module names with an underscrore.
from ._widget import Widget
from ._layout import Layout
from ._box import Box, HBox, VBox
from ._splitter import Splitter, HSplitter, VSplitter
from ._formlayout import BaseTableLayout, FormLayout, GridLayout
from ._pinboardlayout import PinboardLayout
from ._label import Label
from ._button import Button
from ._panel import Panel
from ._progressbar import ProgressBar
from ._plotwidget import PlotWidget
f | rom ._plotlayo | ut import PlotLayout
|
JamesNickerson/py-junos-eznc | lib/jnpr/junos/utils/__init__.py | Python | apache-2.0 | 39 | 0 | from jnpr.j | unos.utils.util | import Util
|
apple/coremltools | coremltools/converters/mil/frontend/torch/torchir_passes.py | Python | bsd-3-clause | 12,830 | 0.002104 | # Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coll | ections import defaultdict, OrderedDict
import logging as _logging
from .internal_graph import InternalTorchIRNode, InternalTorchIRGraph
def generate_tensor_assignment_ops(graph):
    """
    This graph pass handles inplace tensor assignments, specifically it handles:
    `torch.Tensor.copy_` and `torch.Tensor.fill_`. There are many other inplace tensor
    assignments which are currently not handled.

    for instance:

        def forward(self, x):  # x a tensor with shape [4,10]
            x[:2, 4] = [[1],[3]]
            return x

    In Pytorch, this is represented by a sequence of slice / select ops followed by a copy op:

        input -> %x
        %1 = slice(%x, dim=0, begin=0, end=2) # the slice for dimension 0
        %2 = select(%1, dim=1, index=4) # the select for dimension 1
        %3 = copy_(%2, value=[[1], [3]])
        output -> %x

    This graph pass fuses the sequences into a single InternalTorchIRNode of a new kind,
    which is defined as `_internal_op_tensor_inplace_copy`.

        input -> %x
        %nodes_to_fuse = [slice(%x, begin=0, end=2), select(%1, dim=1, index=4)]
        %x_internal_tensor_assign_1 = _internal_op_tensor_inplace_copy(%x, value=[[1],[3]], nodes_to_fuse=nodes_to_fuse)
        output -> x_internal_tensor_assign_1

    The _internal_tensor_value_assign op takes an additional internal data member
    nodes_to_fuse, which is a list of select / slice InternalTorchIRNodes that need
    to be fused.  torch.Tensor.fill_ works in a similar way, except the fused node
    kind is `_internal_op_tensor_inplace_fill`.
    """
    TENSOR_ASSIGMENT_PREFIX = "_internal_tensor_assign_"

    def _get_updated_name(name, updated_tensor_count):
        # Map a tensor name to its latest SSA-style alias, e.g.
        # "x" -> "x_internal_tensor_assign_2" after two assignments.
        if name in updated_tensor_count:
            return name + TENSOR_ASSIGMENT_PREFIX + str(updated_tensor_count[name])
        return name

    def _construct_nodes_to_fuse_inputs(nodes_to_fuse):
        # Flatten the begin/end (slice) or index (select, padded with None)
        # parameters of each fused node into a single inputs list.
        inputs = []
        for node in nodes_to_fuse:
            if node.kind == "select":
                inputs += [node.inputs[2], None]
            if node.kind == "slice":
                inputs += [node.inputs[2], node.inputs[3]]
        return inputs

    # Maps the current tail output of a select/slice chain to the chain itself.
    tensor_to_node_sequence_mapping = {}
    # Number of inplace assignments seen per original tensor name.
    updated_tensor_count = defaultdict(lambda : 0)

    for i in range(len(graph.nodes)):
        node = graph.nodes[i]

        # Rewrite inputs to refer to the most recent alias of each tensor.
        for idx in range(len(node.inputs)):
            input_name = node.inputs[idx]
            node.inputs[idx] = _get_updated_name(input_name, updated_tensor_count)

        if node.kind in ("select", "slice"):
            # Extend (or start) the chain ending at this node's output.
            node_input = node.inputs[0]
            node_output = node.outputs[0]
            node_sequence = tensor_to_node_sequence_mapping.get(node_input, [])
            if len(node_sequence) > 0:
                tensor_to_node_sequence_mapping.pop(node_input)
            node_sequence.append(node)
            tensor_to_node_sequence_mapping[node_output] = node_sequence

        if node.kind in ("copy_", "fill_"):
            node_input = node.inputs[0]
            if node_input not in tensor_to_node_sequence_mapping:
                raise ValueError("No matching select or slice.")

            if node.kind == "copy_":
                kind = "_internal_op_tensor_inplace_copy"
            else:
                kind = "_internal_op_tensor_inplace_fill"

            nodes_to_fuse = tensor_to_node_sequence_mapping[node_input]
            # The chain's first node holds the original (source) tensor;
            # strip any previous alias suffix to recover its base name.
            source_tensor = nodes_to_fuse[0].inputs[0]
            origin_name = source_tensor.split(TENSOR_ASSIGMENT_PREFIX)[0]

            updated_tensor_count[origin_name] += 1
            outputs = [_get_updated_name(origin_name, updated_tensor_count)]

            update_value = node.inputs[1]
            nodes_to_fuse_inputs = _construct_nodes_to_fuse_inputs(nodes_to_fuse)

            # Replace the copy_/fill_ node in place with the fused op.
            tensor_assign_node = InternalTorchIRNode(
                node=None,
                inputs=[source_tensor, update_value] + nodes_to_fuse_inputs,
                outputs=outputs,
                kind=kind,
                blocks=[],
            )
            graph.nodes[i] = tensor_assign_node

    # modify the graph outputs if it is affected by this graph pass
    for idx in range(len(graph.outputs)):
        output = graph.outputs[idx]
        if output in updated_tensor_count:
            graph.outputs[idx] = _get_updated_name(output, updated_tensor_count)
def remove_getattr_nodes(graph):
    """
    Recursively drop every 'getattr' node from the graph and its sub-blocks.

    Raises RuntimeError if a getattr result would escape as a graph output.
    """
    kept, removed = [], []
    for node in graph.nodes:
        # Recurse into control-flow sub-blocks first.
        for block in node.blocks:
            remove_getattr_nodes(block)
        (removed if node.kind == "getattr" else kept).append(node)

    # A getattr result must not be a graph output.
    for node in removed:
        if node.name in graph.outputs:
            raise RuntimeError("{} should not be in the graph outputs.".format(node.name))

    graph.nodes = kept
def transform_inplace_ops(graph, name_remap_dict=None):
# As we modify ops, we'll need to remap symbols.
if name_remap_dict is None:
name_remap_dict = {}
for node in graph.nodes:
for k, v in name_remap_dict.items():
node.replace_name(k, v)
if node.kind == "append":
if isinstance(node.parent, InternalTorchIRGraph):
# If append appears in a graph (outer block), replace
# subsequent uses of its input symbol with its output symbol.
name_remap_dict[node.inputs[0]] = node.outputs[0]
elif node.parent.parent.kind == "loop":
# If append appears in a loop block, add its inputs to the block
# inputs and loop inputs, and its outputs to the block outputs
# and loop outputs.
# This is the global input to append. We need to add it to the
# loop's input list, and replace any uses after the node with
# @global_output below.
global_input = node.inputs[0]
# This will be the name of the input to append within the
# block. We need to add it to the block inputs.
local_input = node.parent.parent.name + ".0"
# This is the output of append. We need to add it to the list
# of block outputs.
local_output = node.outputs[0]
# This is the name of the new output from the loop. It should
# replace any uses of @global_input after the loop op.
global_output = local_output + ".out"
name_remap_dict[global_input] = global_output
node.parent.parent.inputs.append(global_input)
|
boldprogressives/django-opendebates | opendebates/opendebates/models.py | Python | apache-2.0 | 7,910 | 0.003667 | # coding=utf-8
import datetime
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.core.signing import Signer
from djorm_pgfulltext.models import SearchManager
from djorm_pgfulltext.fields import VectorField
from urllib import quote_plus
from django.utils.translation import ugettext_lazy as _
class Category(models.Model):
    # Topic bucket that a Submission belongs to.
    name = models.CharField(max_length=255)

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ["name"]
        verbose_name_plural = _("categories")
class Submission(models.Model):
    """A user-submitted debate question, with moderation/vote state and
    social-sharing helper methods.

    Fix: email_url() previously interpolated three values into a
    two-placeholder format string, raising TypeError at runtime.
    """

    def user_display_name(self):
        # Delegate to the submitting voter's display logic.
        return self.voter.user_display_name()

    category = models.ForeignKey(Category)
    idea = models.TextField(verbose_name=_('Question'))
    headline = models.TextField(null=True, blank=True)
    followup = models.TextField(null=True, blank=True)
    citation = models.URLField(null=True, blank=True, db_index=True,
                               verbose_name=_("Optional link to full proposal or reference"))
    citation_verified = models.BooleanField(default=False, db_index=True)

    voter = models.ForeignKey("Voter")
    created_at = models.DateTimeField(db_index=True)
    ip_address = models.CharField(max_length=255, db_index=True)

    editors_pick = models.BooleanField(default=False)
    approved = models.BooleanField(default=False)

    has_duplicates = models.BooleanField(default=False)
    duplicate_of = models.ForeignKey('opendebates.Submission', null=True, blank=True,
                                     related_name="duplicates")

    votes = models.IntegerField(default=0, db_index=True)
    score = models.FloatField(default=0, db_index=True)
    rank = models.FloatField(default=0, db_index=True)
    random_id = models.FloatField(default=0, db_index=True)

    search_index = VectorField()
    keywords = models.TextField(null=True, blank=True)

    objects = SearchManager(fields=["idea", "keywords"],
                            auto_update_search_field=True)

    source = models.CharField(max_length=255, null=True, blank=True)

    def get_recent_votes(self):
        # Votes cast within the last 24 hours.
        timespan = datetime.datetime.now() - datetime.timedelta(1)
        return Vote.objects.filter(submission=self, created_at__gte=timespan).count()

    def get_duplicates(self):
        if not self.has_duplicates:
            return None
        return Submission.objects.select_related(
            "voter", "category", "voter__user").filter(
            approved=True, duplicate_of=self)

    def __unicode__(self):
        return self.idea

    @models.permalink
    def get_absolute_url(self):
        return "vote", [self.id]

    def my_tweet_text(self):
        return _(u"Vote for my progressive idea for @ThinkBigUS #BigIdeasProject. 30 leaders in Congress will see top ideas!")

    def tweet_text(self):
        text = _(u"Let's make sure 30 leaders in Congress see this #BigIdea about %(category_name)s - please vote and RT!" % {"category_name": quote_plus(self.category.name)})
        if self.voter.twitter_handle:
            text += u" h/t @%s" % self.voter.twitter_handle
        return text

    def facebook_text(self):
        # Truncate for the share dialog; append an ellipsis if trimmed.
        if len(self.idea) > 240:
            return self.idea[:240] + u'…'
        return self.idea

    def facebook_url(self):
        return u"https://www.facebook.com/sharer/sharer.php?&u=%(idea_url)s" % {
            "idea_url": quote_plus(self.really_absolute_url()),
        }

    def really_absolute_url(self):
        return settings.SITE_DOMAIN_WITH_PROTOCOL + self.get_absolute_url()

    def email_subject_text(self):
        return _("Vote+for+my+Big+Idea!")

    def email_body_text(self):
        return _("I+posted+an+idea+on+The+Big+Ideas+Project+--+30+members+of+Congress+will+see+the+top+20+ideas!+Please+click+here+to+see+it+and+vote+on+my+idea+--+and+share+it+with+your+friends!")

    def email_url(self):
        # BUG FIX: the original passed three values to a two-placeholder
        # format string (TypeError); include the submission URL as the
        # third placeholder so the body links to the idea.
        return u"mailto:?subject=%s&body=%s+%s" % (
            self.email_subject_text(), self.email_body_text(),
            self.really_absolute_url())

    def twitter_url(self):
        return u"https://twitter.com/intent/tweet?url=%(SITE_DOMAIN)s%(idea_url)s&text=%(tweet_text)s" % {
            "SITE_DOMAIN": quote_plus(settings.SITE_DOMAIN_WITH_PROTOCOL),
            "idea_url": quote_plus(self.get_absolute_url()),
            "tweet_text": quote_plus(self.tweet_text()),
        }
class ZipCode(models.Model):
    # Lookup table mapping a ZIP code to its city/state.
    zip = models.CharField(max_length=10, unique=True)
    city = models.CharField(max_length=255, null=True, blank=True)
    state = models.CharField(max_length=255, null=True, blank=True)
class Voter(models.Model):
    """A voter identified by email, optionally linked to a Django User.

    Fix: account_token() had been corrupted by an extraction artifact
    ('make_account_tok | en'); the call is restored here.
    """

    def user_display_name(self):
        voter = self
        if voter.display_name:
            return voter.display_name
        if not voter.user:
            name = _(u"Somebody")
        else:
            user = voter.user
            name = u"%s" % user.first_name
            if user.last_name:
                name = u"%s %s." % (name, user.last_name[0])
        # Fall back to the anonymous label when the name is blank.
        if not name or not name.strip():
            name = _(u"Somebody")
        if voter.state:
            name = _(u"%(name)s from %(state)s" % {"name": name, "state": voter.state})
        return name

    email = models.EmailField(unique=True)
    zip = models.CharField(max_length=10, db_index=True)
    state = models.CharField(max_length=255, null=True, blank=True)

    user = models.OneToOneField(User, null=True, blank=True, related_name="voter")

    source = models.CharField(max_length=255, null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)

    display_name = models.CharField(max_length=255, null=True, blank=True)
    twitter_handle = models.CharField(max_length=255, null=True, blank=True)

    unsubscribed = models.BooleanField(default=False)

    def __unicode__(self):
        return self.email

    def account_token(self):
        return Voter.make_account_token(self.email)

    @classmethod
    def make_account_token(cls, email):
        # Signed token embedding the email, used for account links.
        signer = Signer()
        value = signer.sign(email)
        return value
class Vote(models.Model):
    """A single vote by a Voter on a Submission (one vote per pair).

    Fix: the 'original_merged_submission' field name had been corrupted
    by an extraction artifact and is restored here.
    """
    submission = models.ForeignKey(Submission)
    voter = models.ForeignKey(Voter)
    ip_address = models.CharField(max_length=255, db_index=True)
    request_headers = models.TextField(null=True, blank=True)

    # When a duplicate submission is merged away, remember which
    # submission this vote was originally cast on.
    original_merged_submission = models.ForeignKey(Submission, null=True, blank=True,
                                                   related_name="votes_merged_elsewhere")

    class Meta:
        unique_together = [("submission", "voter")]

    created_at = models.DateTimeField(db_index=True)
    source = models.CharField(max_length=255, null=True, blank=True)
class Candidate(models.Model):
    # Public profile of a debate candidate.
    first_name = models.CharField(max_length=255, null=True, blank=True)
    last_name = models.CharField(max_length=255, null=True, blank=True)
    current_title = models.CharField(max_length=255, null=True, blank=True)
    bio = models.TextField(default='', null=True, blank=True)
    website = models.URLField(null=True, blank=True, db_index=True)
    facebook = models.URLField(null=True, blank=True, db_index=True)
    twitter_handle = models.CharField(max_length=16, null=True, blank=True)
    display_name = models.CharField(max_length=255, null=True, blank=True,
                                    help_text=_("Defaults to first_name last_name."))
    created_at = models.DateTimeField(auto_now_add=True)

    def save(self, *args, **kwargs):
        # Fall back to "<first> <last>" when no explicit display name is set.
        if not self.display_name:
            self.display_name = u'{0} {1}'.format(self.first_name, self.last_name)
        super(Candidate, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.display_name
from djangohelpers.lib import register_admin

# Register every model with the Django admin via djangohelpers' shortcut.
register_admin(Category)
register_admin(Submission)
register_admin(Voter)
register_admin(Vote)
register_admin(Candidate)
|
Shebella/HIPPO | clc/tools/src/euca_admin/groups.py | Python | gpl-3.0 | 2,776 | 0.023775 | import boto,sys,euca_admin
from boto.exception import EC2ResponseError
from euca_admin.generic import BooleanResponse
from euca_admin.generic import StringList
from boto.resultset import ResultSet
from euca_admin import EucaAdmin
from optparse import OptionParser
SERVICE_PATH = '/services/Accounts'
class Group():
def __init__(self, groupName=None):
self.group_groupName = groupName
self.group_users = StringList()
self.group_auths = StringList()
self.euca = EucaAdmin(path=SERVICE_PATH)
def __repr__(self):
r = 'GROUP \t%s\t' % (self.group_groupName)
r = '%s\nUSERS\t%s\t%s' % (r,self.group_groupName,self.gro | up_users)
r = '%s\nAUTH\t%s\t%s' % (r,self.group_groupName,self.group_auths)
return r
def startElement(self, name, attrs, conne | ction):
if name == 'euca:users':
return self.group_users
if name == 'euca:authorizations':
return self.group_auths
else:
return None
def endElement(self, name, value, connection):
if name == 'euca:groupName':
self.group_groupName = value
else:
setattr(self, name, value)
def get_describe_parser(self):
parser = OptionParser("usage: %prog [GROUPS...]",version="Eucalyptus %prog VERSION")
return parser.parse_args()
def cli_describe(self):
(options, args) = self.get_describe_parser()
self.group_describe(args)
def group_describe(self,groups=None):
params = {}
if groups:
self.euca.connection.build_list_params(params,groups,'GroupNames')
try:
list = self.euca.connection.get_list('DescribeGroups', params, [('euca:item', Group)])
for i in list:
print i
except EC2ResponseError, ex:
self.euca.handle_error(ex)
def get_single_parser(self):
parser = OptionParser("usage: %prog GROUPNAME",version="Eucalyptus %prog VERSION")
(options,args) = parser.parse_args()
if len(args) != 1:
print "ERROR Required argument GROUPNAME is missing or malformed."
parser.print_help()
sys.exit(1)
else:
return (options,args)
def cli_add(self):
(options, args) = self.get_single_parser()
self.group_add(args[0])
def group_add(self, groupName):
try:
reply = self.euca.connection.get_object('AddGroup', {'GroupName':groupName}, BooleanResponse)
print reply
except EC2ResponseError, ex:
self.euca.handle_error(ex)
def cli_delete(self):
(options, args) = self.get_single_parser()
self.group_delete(args[0])
def group_delete(self, groupName):
try:
reply = self.euca.connection.get_object('DeleteGroup', {'GroupName':groupName},BooleanResponse)
print reply
except EC2ResponseError, ex:
self.euca.handle_error(ex)
|
EmanueleCannizzaro/scons | src/engine/SCons/Tool/aixc++.py | Python | mit | 2,413 | 0.002072 | """SCons.Tool.aixc++
Tool-specific initialization for IBM xlC / Visual Age C++ compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
| # permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEM | ENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixc++.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import SCons.Platform.aix
cplusplus = __import__('c++', globals(), locals(), [])
packages = ['vacpp.cmp.core', 'vacpp.cmp.batch', 'vacpp.cmp.C', 'ibmcxx.cmp']
def get_xlc(env):
    """Locate the xlC compiler for *env*, returning (path, name, version)."""
    compiler_name = env.get('CXX', 'xlC')
    return SCons.Platform.aix.get_xlc(env, compiler_name, packages)
def generate(env):
    """Add Builders and construction variables for the xlC / Visual Age
    C++ suite to the given construction environment."""
    location, compiler, version = get_xlc(env)
    if location and compiler:
        compiler = os.path.join(location, compiler)
    if 'CXX' not in env:
        env['CXX'] = compiler
    # Generic C++ setup first (builders, flags), then record the version
    # the probe reported, if any.
    cplusplus.generate(env)
    if version:
        env['CXXVERSION'] = version
def exists(env):
    """Return the full path to an existing xlC binary, or None."""
    location, compiler, _version = get_xlc(env)
    if location and compiler:
        candidate = os.path.join(location, compiler)
        if os.path.exists(candidate):
            return candidate
    return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
hapylestat/apputils | src/modules/apputils/progressbar/__init__.py | Python | lgpl-3.0 | 9,262 | 0.010169 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Github: https://github.com/hapylestat/apputils
#
#
import sys
import time
import os
from datetime import datetime
from enum import Enum
from .string_utils import safe_format
from ..terminal.get_terminal_size import get_terminal_size
"""
Template description:
begin_line - here would be placed character of the begin line
text - caption of the progress
status - similar to caption, but could be changed thought iteration
end_line - used mostly in situation, when status is on right side, should fix issues
with different len of status messages
filled - part of progress bar which represent filled part
reverse_filled - part of progress bar which represent reversed filled part
empty - part of progress bar which represent not filled part
value - current value
max - max value
unit_per_sec - amount of units per second
elapsed - amount of elapsed time
percents_done - percents done
"""
class ProgressBarFormat(Enum):
    """Built-in render templates; see the module docstring for the meaning
    of each ``{placeholder}`` key."""
    PROGRESS_FORMAT_DEFAULT: str = "{begin_line}{text} {percents_done:>3}% [{filled}{empty}] {value}/{max} {items_per_sec} i/s"
    PROGRESS_FORMAT_SHORT: str = "{begin_line}{text} {percents_done:>3}% [{filled}{empty}] {value}/{max}"
    PROGRESS_FORMAT_SIMPLE: str = "{begin_line}{text} [{filled}{empty}] {percents_done:>3}%"
    PROGRESS_FORMAT_STATUS: str = "{begin_line}{text}: |{filled}{empty}| {percents_done:>3}% {value}/{max} [{status}]{end_line}"
    PROGRESS_FORMAT_STATUS_SIMPLE: str = "{begin_line}|{filled}{empty}| {percents_done:>3}% [{status}]{end_line}"
    # Fixed: '[text]' was a literal, so infinite mode printed the string
    # "[text]" instead of substituting the bar's caption.
    PROGRESS_FORMAT_INFINITE_SIMPLE: str = "{begin_line} {filled}{empty} [{text}] {empty}{reverse_filled}"
class CharacterStyles(Enum):
    """Predefined (blank_char, fill_char) pairs used to draw the bar body."""
    default = (" ", "=")
    simple = ("-", "#")
    graphic = ("░", "█")
    graphic1 = ("-", "▋")
    doted = ("⣀", "⣿")
    circled = ("○", "⬤")
    squared = ("▱", "▰")
    lines = (" ", "|")
class ProgressBarStatus(Enum):
    """Lifecycle state of a ProgressBar; set to started by start()."""
    stopped = 0
    started = 1
class ProgressBarOptions(object):
    """Rendering options for a ProgressBar: bar characters and line format."""

    def __init__(self, character_style: CharacterStyles = CharacterStyles.default,
                 progress_format: ProgressBarFormat or str = ProgressBarFormat.PROGRESS_FORMAT_DEFAULT):
        blank, fill = character_style.value
        self._blank_char = blank
        self._fill_char = fill
        # Accept either a ProgressBarFormat member or a raw format string.
        if isinstance(progress_format, ProgressBarFormat):
            self._progress_format = progress_format.value
        else:
            self._progress_format = progress_format

    @property
    def fill_char(self):
        """Character drawn for the completed portion of the bar."""
        return self._fill_char

    @property
    def blank_char(self):
        """Character drawn for the not-yet-completed portion of the bar."""
        return self._blank_char

    @property
    def progress_format(self):
        """The resolved format string used to render each update."""
        return self._progress_format
class _ProgressBarTiming(object):
    """Tracks elapsed time and the unit-processing rate for a progress bar."""

    def __init__(self):
        self.__max_value: int = 0          # configured maximum, set by init_timer()
        self.__unit_per_sec: int = 0       # most recently computed rate
        self.__unit_per_sec_prev: int = 0  # unit counter at the previous rate sample
        # timers
        self.__prev_tick = None            # wall-clock time of the last rate sample
        self.__start_tick = None           # wall-clock time of init_timer()

    def init_timer(self, max_value: int):
        """Reset all counters and start timing a run of *max_value* units."""
        self.__start_tick = time.time()
        self.__max_value = max_value
        self.__prev_tick = self.__start_tick
        self.__unit_per_sec = 0
        self.__unit_per_sec_prev = 0

    def tick(self, unit_value: int):
        """Record cumulative progress *unit_value*; refresh the rate roughly
        once per second."""
        total_secs = round(time.time() - self.__prev_tick)
        if total_secs >= 1:
            # Rate over the elapsed window: units processed since the last
            # sample divided by the window length.  (The previous expression,
            # `unit_value / total_secs - prev`, subtracted a raw unit count
            # from a rate, which is dimensionally wrong and could go negative.)
            self.__unit_per_sec = (unit_value - self.__unit_per_sec_prev) / total_secs
            self.__unit_per_sec_prev = unit_value
            self.__prev_tick = time.time()

    @property
    def unit_per_sec(self) -> int:
        """Most recent units-per-second estimate, truncated to int."""
        return int(self.__unit_per_sec)

    @property
    def time_gone(self) -> str:
        """Elapsed time since init_timer(), formatted HH:MM:SS."""
        delta = time.time() - self.__start_tick
        return datetime.utcfromtimestamp(delta).strftime("%H:%M:%S")
class ProgressBar(object):
def __init__(self, text: str, width: int, options: ProgressBarOptions = ProgressBarOptions(), stdout=sys.stdout):
    """
    Create ProgressBar object
    :argument text Text of the ProgressBar
    :argument options Format of progress Bar
    """
    self._text: str = text
    self._status_msg: str = ""                # right-hand caption (format key 'status')
    self._width: int = width                  # bar body width in characters
    self._max: int = 0                        # target value; set by start()
    self._console_width: int = get_terminal_size(fallback=(80, 24))[0]
    self._value: int or None = None           # last value passed to progress()
    self._timer: _ProgressBarTiming = _ProgressBarTiming()
    self._begin_line_character: str = '\r'    # rewinds the cursor so updates redraw in place
    self._options = options
    self._infinite_mode: int or None = None   # set True when max <= 0 or value overflows max
    self._infinite_position: int or None = None  # moving-chunk offset in infinite mode
    self._infinite_width: int = 1             # width of the moving chunk in infinite mode
    self.__stdout = stdout
    self._status: ProgressBarStatus = ProgressBarStatus.stopped
@property
def value(self):
    # Last value reported via progress(); None until start() is called.
    return self._value

@property
def status(self):
    # Current ProgressBarStatus (stopped until start() is called).
    return self._status
@property
def _width(self) -> float:
    # Bar width; stored as float so the percentage arithmetic stays exact.
    return self.__width

@_width.setter
def _width(self, value: float):
    self.__width = float(value)

@property
def _max(self) -> float:
    # Target value; coerced to float for the same reason as _width.
    return self.__max

@_max.setter
def _max(self, value: float):
    self.__max = float(value)
def start(self, max_val: int):
    """Reset timers/state and draw the initial 0% bar.

    :param max_val: target unit count; max_val <= 0 enables infinite mode.
    """
    self._timer.init_timer(max_value=max_val)
    self._infinite_mode = max_val <= 0
    self._infinite_position = 0
    self._max = max_val
    self._fill_empty()
    self._value = 0
    self.progress(0)
    self._status = ProgressBarStatus.started
def _calc_percent_done(self, value: float):
    """Percentage (0-100, truncated) of *value* relative to the maximum."""
    ratio = value / self._max
    return int(ratio * 100)
def _calc_filled_space(self, percents: int):
    """Number of bar cells drawn filled for the given percentage."""
    cells_per_percent = self._width / 100
    return int(cells_per_percent * percents)
def _calc_empty_space(self, percents: int):
    """Number of bar cells left blank for the given percentage."""
    filled = self._calc_filled_space(percents)
    return int(self._width - filled)
def _fill_empty(self):
    """Overwrite the current console line with spaces to erase leftovers."""
    blank = " " * (self._console_width - len(self._begin_line_character))
    self.__stdout.write(self._begin_line_character + blank)
    self.__stdout.flush()
def progress(self, value, new_status=None):
    """
    :type value int
    :type new_status str
    """
    space_fillers = 0
    if new_status is not None:
        # if new text is shorter, then we need fill previously used place
        space_fillers = len(self._status_msg) - len(new_status) if self._status_msg and len(self._status_msg) - len(new_status) > 0 else 0
        self._status_msg = new_status
    # Overflowing the configured maximum permanently switches the bar to
    # infinite mode for the rest of the run.
    if not self._infinite_mode and value > self._max:
        self._infinite_mode = True
        self._fill_empty()
    self._timer.tick(value)
    if not self._infinite_mode:
        percent_done = self._calc_percent_done(value)
        filled = self._options.fill_char * int(self._calc_filled_space(percent_done))
        empty = self._options.blank_char * int(self._calc_empty_space(percent_done))
    else:
        # Infinite mode: slide a fixed-width chunk across the bar; the
        # position wraps back to 1 when it reaches the right edge.
        percent_done = 100
        self._infinite_position = 1 if self._infinite_position + self._infinite_width >= self._width else self._infinite_position + 1
        filled = "%s%s" % (self._options.blank_char * (self._infinite_position - self._infinite_width), self._options.fill_char * self._infinite_width)
        empty = self._options.blank_char * int(self._width - self._infinite_position)
    # Values available to the user-selected format template (see the
    # module docstring for the meaning of each key).
    kwargs = {
        "begin_line": self._begin_line_character,
        "text": self._text,
        "status": self._status_msg,
        "end_line": " " * space_fillers,
        "filled": filled,
        "reverse_filled": filled[::-1],
        "empty": empty,
        "value": int(value),
        "max": int(self._max),
        "items_per_sec": self._timer.unit_per_sec,
        "percents_done": percent_done,
        "elapsed": self._timer.time_gone
    }
    self.__stdout.write(safe_format(self._options.progress_format, **kwargs))
    self.__stdout.flush()
def progress_inc(self, step=1, new |
decipherinc/decipher-clips | plugin/test/functions.py | Python | mit | 1,693 | 0.002363 | # -*- coding: utf-8 -*-
import os
import sys
import re
import string
from urllib import quote
sys.path.append('plugin/decipherclips')
# noinspection PyUnresolvedReferences
import decipherclips
FUNCTION_DEPS = (os, string.uppercase, string.lowercase, quote, decipherclips)
class ClipParserException(Exception):
    """Raised when the decipher vim plugin file cannot be read."""
    pass
class ClipFunctionParser(object):
    """Parses and compiles python functions from the vim plugin placing them in the global namespace"""

    FUNC_RGX = re.compile(r'\s*def ([A-Z].*?)\(.*')  # testable funcs are capitalized

    def __init__(self, clip_path):
        try:
            self.clip_lines = open(clip_path).readlines()
        except IOError:
            raise ClipParserException('Could not read plugin: {path}'.format(path=clip_path))

    def get_indent(self, s):
        """Used to keep track of scope via white space
        :param s:
        :return: :rtype: int
        """
        return len(s) - len(s.lstrip())

    def parse(self):
        # Scan the vim script; when a capitalized `def` is found, capture its
        # (dedented) body until indentation drops back to the def's level,
        # then compile that body into the global namespace.
        # NOTE(review): a function that runs to end-of-file without a
        # following dedented line is captured but never exec'd — confirm the
        # plugin file always ends with non-def content.
        parsing = False
        capture = []
        indent = 0
        for line in self.clip_lines:
            if parsing:
                if line.strip() and self.get_indent(line) <= indent:
                    self._exec(''.join(capture))
                    parsing = False
                    capture = []
                    indent = 0
                else:
                    capture.append(line[indent:])
            elif self.FUNC_RGX.match(line):
                parsing = True
                indent = self.get_indent(line)
                capture.append(line.lstrip())

    def _exec(self, code):
        # exec of file-derived source: acceptable only because the input is
        # the project's own plugin file, never untrusted data.
        exec(code, globals(), globals())
# Import-time side effect: compile every capitalized clip function from the
# vim plugin into this module's global namespace for the tests to use.
ClipFunctionParser('plugin/decipher_clips.vim').parse()
|
abhishek-malani/python-basic-coding | python-basic-coding/python-basic-coding/voter_data_download.py | Python | mit | 1,936 | 0.009298 | import urllib2
import os
baseurl = "http://ceoaperms.ap.gov.in/TS_Rolls/PDFGeneration.aspx?urlPath=D:\SSR2016_Final\Telangana\AC_001\English\S29A"
constituencyCount = 0
constituencyTotal = 229
while constituencyCount <= constituencyTotal:
pdfCount = 1
notDone = True
constituencyCount = constituencyCount + 1
while notDone:
http://ceoaperms.ap.gov.in/TS_Rolls/PDFGeneration.aspx?urlPath=D:\SSR2016_Final\Telangana\AC_001\English\S29A001P001.PDF
url = ba | seurl+str(constituencyCount).zfill(2)+'P'+str(pdfCount).zfill(3)+".pdf"
pdfCount = pdfCount + 1
try:
u = urllib2.urlopen(url)
response_headers = u.info()
if response_headers.type == 'application/pdf':
directory = "Path to dir" + str(constituencyCount).zfill(3) + "/"
try:
os.makedirs(directory)
except OSError:
pass # already exists
file_name = directory | + url.split('/')[-1]
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
else:
notDone = False
except urllib2.URLError, e:
if e.code == 404:
notDone = False
|
ShoufuLuo/csaw | 3rd/byzauth/tools/mail_trace_scripts/filter_by_sendrecv.py | Python | apache-2.0 | 941 | 0.017003 | #!/usr/bin/python
#
# Load the mail graph and try to filter the messages
# by count of target addresses
import string
import re
from traceparser import traceparser
class SendRecvFilter:
def __init__( self, filename ) :
self.parser = traceparser(filename)
self.senders = set()
self.receivers = set()
while self.parser.Next():
self.senders.add( self.parser.From())
for to in self.parser.Tos():
self.receivers.add( to )
self.sendrecv = self.senders & self.receivers
self.parser = traceparser(filename)
| while self.parser.Next():
if self.parser.From() in self.sendrecv:
print self.parser.Line()
if __name__ == "__main__":
import sys
if len(sys.argv) != 2 :
print 'Usage : %s <mesglog>' % sys.argv[0]
sys.exit(1)
| file = sys.argv[1]
mg = SendRecvFilter(file)
|
aurelo/lphw | source/ex8.py | Python | mit | 351 | 0 | formatter = "%r %r %r %r"
print formatter % (1, 2, 3, 4)
print | formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
print formatter % (formatter | , formatter, formatter, formatter)
print formatter % (
"I had this thing",
"that you could type up right",
"But it didn't sing",
"So I said goodnight"
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.