| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
welex91/ansible-modules-core
|
network/nxos/nxos_facts.py
|
Python
|
gpl-3.0
| 7,749
| 0.000645
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_facts
version_added: "2.1"
short_description: Gets facts about NX-OS switches
description:
- Offers the ability to extract facts from the device
extends_documentation_fragment: nxos
author: Jason Edelman (@jedelman8), Gabriele Gerbino (@GGabriele)
'''
EXAMPLES = '''
# retrieve facts
- nxos_facts: host={{ inventory_hostname }}
'''
RETURN = '''
facts:
description:
- Shows various information about the device, including
interfaces, vlans, module, and environment information.
returned: always
type: dict
sample: {"fan_info": [{"direction":"front-to-back","hw_ver": "--",
"model":"N9K-C9300-FAN2","name":"Fan1(sys_fan1)","status":"Ok"}],
"hostname": "N9K2","interfaces": ["mgmt0","Ethernet1/1"],
"kickstart": "6.1(2)I3(1)","module": [{"model": "N9K-C9396PX",
"ports": "48","status": "active *"}],"os": "6.1(2)I3(1)",
"platform": "Nexus9000 C9396PX Chassis","power_supply_info": [{
"actual_output": "0 W","model": "N9K-PAC-650W","number": "1",
"status":"Shutdown"}],"rr":"Reset Requested by CLI command reload",
"vlan_list":[{"admin_state":"noshutdown","interfaces":["Ethernet1/1"],
"name": "default","state": "active","vlan_id": "1"}]}
'''
def get_cli_body_ssh(command, response, module):
if 'xml' in response[0]:
body = []
else:
body = [json.loads(response[0])]
return body
def execute_show(cmds, module, command_type=None):
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError as clie:
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh(command, response, module)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = execute_show(cmds, module, command_type=command_type)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def get_show_version_facts(module):
command = 'show version'
body = execute_show_command(command, module)[0]
key_map = {
"rr_sys_ver": "os",
"kickstart_ver_str": "kickstart",
"chassis_id": "platform",
"host_name": "hostname",
"rr_reason": "rr"
}
mapped_show_version_facts = apply_key_map(key_map, body)
return mapped_show_version_facts
def get_interface_facts(module):
command = 'show interface status'
body = execute_show_command(command, module)[0]
interface_list = []
interface_table = body['TABLE_interface']['ROW_interface']
if isinstance(interface_table, dict):
interface_table = [interface_table]
for each in interface_table:
interface = str(each.get('interface', None))
if interface:
interface_list.append(interface)
return interface_list
def get_show_module_facts(module):
command = 'show module'
body = execute_show_command(command, module)[0]
module_facts = []
module_table = body['TABLE_modinfo']['ROW_modinfo']
key_map = {
"ports": "ports",
"type": "type",
"model": "model",
"status": "status"
}
if isinstance(module_table, dict):
module_table = [module_table]
for each in module_table:
mapped_module_facts = apply_key_map(key_map, each)
module_facts.append(mapped_module_facts)
return module_facts
def get_environment_facts(module):
command = 'show environment'
body = execute_show_command(command, module)[0]
powersupply = get_powersupply_facts(body)
fan = get_fan_facts(body)
return (powersupply, fan)
def get_powersupply_facts(body):
powersupply_facts = []
powersupply_table = body['powersup']['TABLE_psinfo']['ROW_psinfo']
key_map = {
"psnum": "number",
"psmodel": "model",
"actual_out": "actual_output",
"actual_in": "actual_input",
"total_capa": "total_capacity",
"ps_status": "status"
}
if isinstance(powersupply_table, dict):
powersupply_table = [powersupply_table]
for each in powersupply_table:
mapped_powersupply_facts = apply_key_map(key_map, each)
powersupply_facts.append(mapped_powersupply_facts)
return powersupply_facts
def get_fan_facts(body):
fan_facts = []
fan_table = body['fandetails']['TABLE_faninfo']['ROW_faninfo']
key_map = {
"fanname": "name",
"fanmodel": "model",
"fanhwver": "hw_ver",
"fandir": "direction",
"fanstatus": "status"
}
if isinstance(fan_table, dict):
fan_table = [fan_table]
for each in fan_table:
mapped_fan_facts = apply_key_map(key_map, each)
fan_facts.append(mapped_fan_facts)
return fan_facts
def get_vlan_facts(module):
command = 'show vlan brief'
body = execute_show_command(command, module)[0]
vlan_list = []
vlan_table = body['TABLE_vlanbriefxbrief']['ROW_vlanbriefxbrief']
if isinstance(vlan_table, dict):
vlan_table = [vlan_table]
for each in vlan_table:
vlan = str(each.get('vlanshowbr-vlanid-utf', None))
if vlan:
vlan_list.append(vlan)
return vlan_list
def main():
argument_spec = dict()
module = get_module(argument_spec=argument_spec,
supports_check_mode=True)
# Get 'show version' facts.
show_version = get_show_version_facts(module)
# Get interfaces facts.
interfaces_list = get_interface_facts(module)
# Get module facts.
show_module = get_show_module_facts(module)
# Get environment facts.
powersupply, fan = get_environment_facts(module)
# Get vlans facts.
vlan = get_vlan_facts(module)
facts = dict(
interfaces_list=interfaces_list,
module=show_module,
power_supply_info=powersupply,
fan_info=fan,
vlan_list=vlan)
facts.update(show_version)
module.exit_json(ansible_facts=facts)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.nxos import *
if __name__ == '__main__':
main()
|
c24b/mango
|
database.py
|
Python
|
apache-2.0
| 5,004
| 0.034579
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymongo
from pymongo import MongoClient
from pymongo import errors
import re
class Database(object):
'''Database creation'''
def __init__(self, database_name):
self.client = MongoClient('mongodb://localhost,localhost:27017')
self.db_name = database_name
self.db = self.client[self.db_name]
#self.jobs = self.client[self.db_name].jobs
#self.results = self.db['results']
#self.queue = self.db['queue']
#self.log = self.db['log']
#self.sources = self.db['sources']
#self.jobs = self.db['jobs']
#self.db.x = self.db[x]
# def __repr__(self, database_name):
# print "Using database: %s" %self.client[database_name]
# return self.db
def use_db(self, database_name):
return self.client[str(database_name)]
def show_dbs(self):
return self.client.database_names()
def create_coll(self, coll_name):
setattr(self, str(coll_name), self.db[str(coll_name)])
#print "coll : %s has been created in db:%s " %(self.__dict__[str(coll_name)], self.db_name)
return self.__dict__[str(coll_name)]
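# Illustrative sketch (editor's addition, not part of the original file):
# create_coll both registers the collection as an attribute and returns it,
# so the two handles below refer to the same object.
#     db = Database('RRI')
#     results = db.create_coll('results')
#     assert results is db.results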
def create_colls(self, coll_names=["results","sources", "logs", "queue"]):
for n in coll_names:
setattr(self, n, self.db[str(n)])
# self.queue = self.db['queue']
# self.log = self.db['log']
# self.sources = self.db['sources']
# #print "Creating coll", [n for n in self.db.collection_names()]
return [n for n in self.db.collection_names()]
def show_coll(self):
try:
print "using collection %s in DB : %s" %(self.coll_name, self.db_name)
return self.coll_name
except AttributeError:
return False
#return self.db.collection_names()
def show_coll_items(self, coll_name):
return [n for n in self.db[str(coll_name)].find()]
# def count(self, coll_name):
# self.db_coll = self.db[str(coll_name)]
# return self.db_coll.count()
def drop(self, type, name):
if type == "collection":
return self.db[str(name)].drop()
elif type == "database":
return self.client.drop_database(str(name))
else:
print "Unknown Type"
return False
def drop_all_dbs(self):
'''remove EVERY SINGLE MONGO DATABASE'''
for n in self.show_dbs():
self.use_db(n)
self.drop("database", n)
def stats(self):
'''Output the current stats of database in Terminal'''
title = "===STATS===\n"
name ="Stored results in Mongo Database: %s \n" %(self.db_name)
res = "\t-Nombre de resultats dans la base: %d\n" % (self.db.results.count())
sources = "\t-Nombre de sources: %d\n" % len(self.db.sources.distinct('url'))
url = "\t-urls en cours de traitement: %d\n" % (self.db.queue.count())
url2 = "\t-urls traitees: %d\n" % (self.db.results.count()+ self.db.log.count())
url3 = "\t-urls erronées: %d\n" % (self.db.log.count())
size = "\t-Size of the database %s: %d MB\n" % (self.db_name, (self.db.command('dbStats', 1024)['storageSize'])/1024/1024.)
result = [title, name, res, sources, url, url2, size]
return "".join(result)
def report(self):
''' Output the current stats of database for Email Report'''
res = "<li>Number of results in the database: %d</li>" % (self.db.results.count())
sources = "<li>Number of sources: %d</li>" % len(self.db.sources.distinct('url'))
url = "<li>urls currently being processed: %d\n</li>" % (self.db.queue.count())
url2 = "<li>urls processed: %d</li>" % (self.db.results.count() + self.db.log.count())
size = "<li>Size of the database %s: %d MB</li>" % (self.db_name, (self.db.command('dbStats', 1024)['storageSize'])/1024/1024.)
result = [res, sources, url, url2, size]
return "".join(result)
# Define export gephi inside report option
# def create_node(self):
# label = ["url", "outlink", "backlink"]
# urllist = [n for n in self.db.results.distinct("url")]
# # outlist = [u for u in n['outlinks'] for n in self.db.results.find() if u not in outlist]
# # backlist = [u["url"] for u in n['backlinks'] for n in self.db.results.find() if u["url"] not in backlist]
# outlist = []
# backlist = []
# print len(urllist)
# for n in self.db.results.find():
# if n["outlinks"] is None:
# pass
# for o in n["outlinks"]:
# if o is not None:
# outlist.append([o["url"], "backlink"])
# for n in self.db.results.find():
# if n != []:
# for o in n["backlinks"]:
# if o is not None:
# backlist.append([o["url"], "backlink"])
# return
# def export_outlinks(self):
# '''Output url : outlink'''
# print "source; target"
# for n in self.db.results.find():
# for o in n["outlinks"]:
# if o is not None:
# print n['url']+";"+o
# else:
# print n["url"]+";None"
# return
# def export_backlinks(self):
# print "source;target"
# for n in self.db.results.find():
# if n != []:
# for u in n["backlinks"]:
# print n["url"]+";"+u["url"]
# # for o in n["backlinks"]:
# # if o is not None:
# # print n['url']+";"+o
# # else:
# # print n["url"]+";None"
# return
if __name__ == "__main__":
db = Database('RRI')
db.create_node()
|
ccauet/scikit-optimize
|
skopt/space/space.py
|
Python
|
bsd-3-clause
| 21,426
| 0.000047
|
import numbers
import numpy as np
from scipy.stats.distributions import randint
from scipy.stats.distributions import rv_discrete
from scipy.stats.distributions import uniform
from sklearn.utils import check_random_state
from sklearn.utils.fixes import sp_version
from .transformers import CategoricalEncoder
from .transformers import Normalize
from .transformers import Identity
from .transformers import Log10
from .transformers import Pipeline
# helper class to be able to print [1, ..., 4] instead of [1, '...', 4]
class _Ellipsis:
def __repr__(self):
return '...'
def check_dimension(dimension, transform=None):
"""
Checks that the provided dimension falls into one of the
supported types. For a list of supported types, look at
the documentation of `dimension` below.
Parameters
----------
* `dimension`:
Search space Dimension.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
* `transform` ["identity", "normalize", "onehot" optional]:
- For `Categorical` dimensions, the following transformations are
supported.
- "onehot" (default) one-hot transformation of the original space.
- "identity" same as the original space.
- For `Real` and `Integer` dimensions, the following transformations
are supported.
- "identity", (default) the transformed space is the same as the
original space.
- "normalize", the transformed space is scaled to be between 0 and 1.
Returns
-------
* `dimension`:
Dimension instance.
"""
if isinstance(dimension, Dimension):
return dimension
if not isinstance(dimension, (list, tuple, np.ndarray)):
raise ValueError("Dimension has to be a list or tuple.")
if (len(dimension) == 3 and
isinstance(dimension[0], numbers.Real) and
isinstance(dimension[2], str)):
return Real(*dimension, transform=transform)
if len(dimension) > 2 or isinstance(dimension[0], str):
return Categorical(dimension, transform=transform)
if len(dimension) == 2 and isinstance(dimension[0], numbers.Integral):
return Integer(*dimension, transform=transform)
if len(dimension) == 2 and isinstance(dimension[0], numbers.Real):
return Real(*dimension, transform=transform)
raise ValueError("Invalid dimension %s. Read the documentation for "
"supported types." % dimension)
class Dimension(object):
"""Base class for search space dimensions."""
def rvs(self, n_samples=1, random_state=None):
"""Draw random samples.
Parameters
----------
* `n_samples` [int or None]:
The number of samples to be drawn.
* `random_state` [int, RandomState instance, or None (default)]:
Set random state to something other than None for reproducible
results.
"""
rng = check_random_state(random_state)
samples = self._rvs.rvs(size=n_samples, random_state=rng)
return self.inverse_transform(samples)
def transform(self, X):
"""Transform samples from the original space to a warped space."""
return self.transformer.transform(X)
def inverse_transform(self, Xt):
"""Inverse transform
|
samples from the warped space back into the
original space.
"""
return self.transformer.inverse_transform(Xt)
@property
def size(self):
return 1
@property
def transformed_size(self):
return 1
@property
def bounds(self):
raise NotImplementedError
@property
def transformed_bounds(self):
raise NotImplementedError
def _uniform_inclusive(loc=0.0, scale=1.0):
# like scipy.stats.distributions but inclusive of `high`
# XXX scale + 1. might not actually be a float after scale if
# XXX scale is very large.
return uniform(loc=loc, scale=np.nextafter(scale, scale + 1.))
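# Editor's note (illustration, not part of the original module):
# np.nextafter(scale, scale + 1.) is the smallest float strictly greater than
# `scale`, so _uniform_inclusive(0., 1.).rvs() can return exactly 1.0, whereas
# plain uniform(0., 1.) samples from the half-open interval [0, 1).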
class Real(Dimension):
def __init__(self, low, high, prior="uniform", transform=None):
"""Search space dimension that can take on any real value.
Parameters
----------
* `low` [float]:
Lower bound (inclusive).
* `high` [float]:
Upper bound (inclusive).
* `prior` ["uniform" or "log-uniform", default="uniform"]:
Distribution to use when sampling random points for this dimension.
- If `"uniform"`, points are sampled uniformly between the lower
and upper bounds.
- If `"log-uniform"`, points are sampled uniformly between
`log10(lower)` and `log10(upper)`.
* `transform` ["identity", "normalize", optional]:
The following transformations are supported.
- "identity", (default) the transformed space is the same as the
original space.
- "normalize", the transformed space is scaled to be between
0 and 1.
"""
self.low = low
self.high = high
self.prior = prior
if transform is None:
transform = "identity"
self.transform_ = transform
if self.transform_ not in ["normalize", "identity"]:
raise ValueError(
"transform should be 'normalize' or 'identity' got %s" %
self.transform_)
# Define _rvs and transformer spaces.
# XXX: The _rvs is for sampling in the transformed space.
# The rvs on Dimension calls inverse_transform on the points sampled
# using _rvs
if self.transform_ == "normalize":
# set upper bound to next float after 1. to make the numbers
# inclusive of upper edge
self._rvs = _uniform_inclusive(0., 1.)
if self.prior == "uniform":
self.transformer = Pipeline(
[Identity(), Normalize(low, high)])
else:
self.transformer = Pipeline(
[Log10(), Normalize(np.log10(low), np.log10(high))]
)
else:
if self.prior == "uniform":
self._rvs = _uniform_inclusive(self.low, self.high - self.low)
self.transformer = Identity()
else:
self._rvs = _uniform_inclusive(
np.log10(self.low),
np.log10(self.high) - np.log10(self.low))
self.transformer = Log10()
def __eq__(self, other):
return (type(self) is type(other) and
np.allclose([self.low], [other.low]) and
np.allclose([self.high], [other.high]) and
self.prior == other.prior and
self.transform_ == other.transform_)
def __repr__(self):
return "Real(low={}, high={}, prior={}, transform={})".format(
self.low, self.high, self.prior, self.transform_)
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back into the
original space.
"""
return super(Real, self).inverse_transform(Xt).astype(np.float)
@property
def bounds(self):
return (self.low, self.high)
def __contains__(self, point):
return self.low <= point <= self.high
@property
def transformed_bounds(self):
if self.transform_ == "normalize":
return 0.0, 1.0
else:
if self.prior == "uniform":
return self.low, self.high
else:
return np.log10(self.low), np.log10(self.high)
def distance(self, a, b):
"""Compute distance between point `a` and `b`.
Parameters
----------
* `a` [float]
First point.
* `b` [flo
|
azumimuo/family-xbmc-addon
|
plugin.video.salts/scrapers/filmikz_scraper.py
|
Python
|
gpl-2.0
| 4,359
| 0.004818
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from salts_lib import kodi
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://filmikz.ch'
class Filmikz_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'filmikz.ch'
def resolve_link(self, link):
return link
def format_source_label(self, item):
return '[%s] %s' % (item['quality'], item['host'])
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
pattern = "/watch\.php\?q=([^']+)"
seen_hosts = {}
for match in re.finditer(pattern, html, re.DOTALL):
url = match.group(1)
hoster = {'multi-part': False, 'url': url.decode('base-64'), 'class': self, 'quality': None, 'views': None, 'rating': None, 'direct': False}
hoster['host'] = urlparse.urlsplit(hoster['url']).hostname
# top list is HD, bottom list is SD
if hoster['host'] in seen_hosts:
quality = QUALITIES.HIGH
else:
quality = QUALITIES.HD720
seen_hosts[hoster['host']] = True
hoster['quality'] = scraper_utils.get_quality(video, hoster['host'], quality)
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._default_get_url(video)
def search(self, video_type, title, year):
search_url = urlparse.urljoin(self.base_url, '/index.php?search=%s&image.x=0&image.y=0')
search_url = search_url % (urllib.quote_plus(title))
html = self._http_get(search_url, cache_limit=.25)
results = []
# Are we on a results page?
if not re.search('window\.location', html):
pattern = '<td[^>]+class="movieText"[^>]*>(.*?)</p>.*?href="(/watch/[^"]+)'
for match in re.finditer(pattern, html, re.DOTALL):
match_title_year, match_url = match.groups('')
# skip porn
if '-XXX-' in match_url.upper() or ' XXX:' in match_title_year: continue
match_title_year = re.sub('</?.*?>', '', match_title_year)
match = re.search('(.*?)\s+\(?(\d{4})\)?', match_title_year)
if match:
match_title, match_year = match.groups()
else:
match_title = match_title_year
match_year = ''
if not year or not match_year or year == match_year:
result = {'url': match_url, 'title': match_title, 'year': match_year}
results.append(result)
else:
match = re.search('window\.location\s+=\s+"([^"]+)', html)
if match:
url = match.group(1)
if url != 'movies.php':
result = {'url': scraper_utils.pathify_url(url), 'title': title, 'year': year}
results.append(result)
return results
|
pombredanne/taskflow-1
|
taskflow/utils/misc.py
|
Python
|
apache-2.0
| 20,033
| 0.00005
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import datetime
import errno
import inspect
import os
import re
import sys
import threading
import types
import enum
from oslo_serialization import jsonutils
from oslo_serialization import msgpackutils
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import reflection
import six
from six.moves import map as compat_map
from six.moves import range as compat_range
from taskflow.types import failure
from taskflow.types import notifier
from taskflow.utils import deprecation
NUMERIC_TYPES = six.integer_types + (float,)
# NOTE(imelnikov): regular expression to get scheme from URI,
# see RFC 3986 section 3.1
_SCHEME_REGEX = re.compile(r"^([A-Za-z][A-Za-z0-9+.-]*):")
class StrEnum(str, enum.Enum):
"""An enumeration that is also a string and can be compared to strings."""
def __new__(cls, *args, **kwargs):
for a in args:
if not isinstance(a, str):
raise TypeError("Enumeration '%s' (%s) is not"
" a string" % (a, type(a).__name__))
return super(StrEnum, cls).__new__(cls, *args, **kwargs)
class StringIO(six.StringIO):
"""String buffer with some small additions."""
def write_nl(self, value, linesep=os.linesep):
self.write(value)
self.write(linesep)
def match_type(obj, matchers):
"""Matches a given object using the given matchers list/iterable.
NOTE(harlowja): each element of the provided list/iterable must be
tuple of (valid types, result).
Returns the result (the second element of the provided tuple) if a type
match occurs, otherwise none if no matches are found.
"""
for (match_types, match_result) in matchers:
if isinstance(obj, match_types):
return match_result
else:
return None
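# Editor's sketch (hypothetical matchers list, not from the original module):
#     matchers = [((int, float), "number"), ((str,), "text")]
#     match_type(3.5, matchers)       # -> "number"
#     match_type(object(), matchers)  # -> None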
def countdown_iter(start_at, decr=1):
"""Generator that decrements after each generation until <= zero.
NOTE(harlowja): we can likely remove this when we can use an
``itertools.count`` that takes a step (on py2.6 which we still support
that step parameter does **not** exist and therefore can't be used).
"""
if decr <= 0:
raise ValueError("Decrement value must be greater"
" than zero and not %s" % decr)
while start_at > 0:
yield start_at
start_at -= decr
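# Editor's sketch (illustration only):
#     list(countdown_iter(3))     # -> [3, 2, 1]
#     list(countdown_iter(5, 2))  # -> [5, 3, 1]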
def reverse_enumerate(items):
"""Like reversed(enumerate(items)) but with less copying/cloning..."""
for i in countdown_iter(len(items)):
yield i - 1, items[i - 1]
def merge_uri(uri, conf):
"""Merges a parsed uri into the given configuration dictionary.
Merges the username, password, hostname, port, and query parameters of
a URI into the given configuration dictionary (it does **not** overwrite
existing configuration keys if they already exist) and returns the merged
configuration.
NOTE(harlowja): does not merge the path, scheme or fragment.
"""
uri_port = uri.port
specials = [
('username', uri.username, lambda v: bool(v)),
('password', uri.password, lambda v: bool(v)),
# NOTE(harlowja): A different check function is used since 0 is
# false (when bool(v) is applied), and that is a valid port...
('port', uri_port, lambda v: v is not None),
]
hostname = uri.hostname
if hostname:
if uri_port is not None:
hostname += ":%s" % (uri_port)
specials.append(('hostname', hostname, lambda v: bool(v)))
for (k, v, is_not_empty_value_func) in specials:
if is_not_empty_value_func(v):
conf.setdefault(k, v)
for (k, v) in six.iteritems(uri.params()):
conf.setdefault(k, v)
return conf
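# Editor's sketch (hypothetical URI; uses parse_uri defined below):
#     uri = parse_uri("mysql://fred:secret@example.com:3306/db?timeout=5")
#     merge_uri(uri, {'timeout': '10'})
#     # username, password, port and hostname ("example.com:3306") are merged
#     # via setdefault, so the pre-existing 'timeout' key is left untouched.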
def find_subclasses(locations, base_cls, exclude_hidden=True):
"""Finds subclass types in the given locations.
This will examine the given locations for types which are subclasses of
the base class type provided and returns the found subclasses (or fails
with exceptions if this introspection can not be accomplished).
If a string is provided as one of the locations it will be imported and
examined if it is a subclass of the base class. If a module is given,
all of its members will be examined for attributes which are subclasses of
the base class. If a type itself is given it will be examined for being a
subclass of the base class.
"""
derived = set()
for item in locations:
module = None
if isinstance(item, six.string_types):
try:
pkg, cls = item.split(':')
except ValueError:
module = importutils.import_module(item)
else:
obj = importutils.import_class('%s.%s' % (pkg, cls))
if not reflection.is_subclass(obj, base_cls):
raise TypeError("Object '%s' (%s) is not a '%s' subclass"
% (item, type(item), base_cls))
derived.add(obj)
elif isinstance(item, types.ModuleType):
module = item
elif reflection.is_subclass(item, base_cls):
derived.add(item)
else:
raise TypeError("Object '%s' (%s) is an unexpected type" %
(item, type(item)))
# If it's a module derive objects from it if we can.
if module is not None:
for (name, obj) in inspect.getmembers(module):
if name.startswith("_") and exclude_hidden:
continue
if reflection.is_subclass(obj, base_cls):
derived.add(obj)
return derived
def pick_first_not_none(*values):
"""Returns first of values that is *not* None (or None if all are/were)."""
for val in values:
if val is not None:
return val
return None
def parse_uri(uri):
"""Parses a uri into its components."""
# Do some basic validation before continuing...
if not isinstance(uri, six.string_types):
raise TypeError("Can only parse string types to uri data, "
"and not '%s' (%s)" % (uri, type(uri)))
match = _SCHEME_REGEX.match(uri)
if not match:
raise ValueError("Uri '%s' does not start with a RFC 3986 compliant"
" scheme" % (uri))
return netutils.urlsplit(uri)
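# Editor's sketch (illustration only):
#     parse_uri("rabbit://guest@localhost:5672/")  # scheme 'rabbit',
#                                                  # hostname 'localhost', port 5672
#     parse_uri("/just/a/path")  # raises ValueError: no RFC 3986 scheme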
def look_for(haystack, needles, extractor=None):
"""Find items in haystack and returns matches found (in haystack order).
Given a list of items (the haystack) and a list of items to look for (the
needles) this will look for the needles in the haystack and returns
the found needles (if any). The ordering of the returned needles is in the
order they are located in the haystack.
Example input and output:
>>> from taskflow.utils import misc
>>> hay = [3, 2, 1]
>>> misc.look_for(hay, [1, 2])
[2, 1]
"""
if not haystack:
return []
if extractor is None:
extractor = lambda v: v
matches = []
for i, v in enumerate(needles):
try:
matches.append((haystack.index(extractor(v)), i))
except ValueError:
pass
if not matches:
return []
else:
return [needles[i] for (_hay_i, i) in sorted(matches)]
def disallow_when_frozen(excp_cls):
"""Frozen checking/raising method decorator."""
def decorator
|
jumpstarter-io/cinder
|
cinder/volume/drivers/emc/emc_vmax_common.py
|
Python
|
apache-2.0
| 94,140
| 0
|
# Copyright (c) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
from oslo.config import cfg
import six
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.volume.drivers.emc import emc_vmax_fast
from cinder.volume.drivers.emc import emc_vmax_masking
from cinder.volume.drivers.emc import emc_vmax_provision
from cinder.volume.drivers.emc import emc_vmax_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
try:
import pywbem
pywbemAvailable = True
except ImportError:
pywbemAvailable = False
CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml'
CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_emc_config_'
CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml'
EMC_ROOT = 'root/emc'
POOL = 'storagetype:pool'
ARRAY = 'storagetype:array'
FASTPOLICY = 'storagetype:fastpolicy'
BACKENDNAME = 'volume_backend_name'
COMPOSITETYPE = 'storagetype:compositetype'
STRIPECOUNT = 'storagetype:stripecount'
MEMBERCOUNT = 'storagetype:membercount'
STRIPED = 'striped'
CONCATENATED = 'concatenated'
emc_opts = [
cfg.StrOpt('cinder_emc_config_file',
default=CINDER_EMC_CONFIG_FILE,
help='use this file for cinder emc plugin '
'config data'), ]
CONF.register_opts(emc_opts)
class EMCVMAXCommon(object):
"""Common class for SMI-S based EMC volume drivers.
This common class is for EMC volume drivers based on SMI-S.
It supports VNX and VMAX arrays.
"""
stats = {'driver_version': '1.0',
'free_capacity_gb': 0,
'reserved_percentage': 0,
'storage_protocol': None,
'total_capacity_gb': 0,
'vendor_name': 'EMC',
'volume_backend_name': None}
def __init__(self, prtcl, configuration=None):
if not pywbemAvailable:
LOG.info(_(
'Module PyWBEM not installed. '
'Install PyWBEM using the python-pywbem package.'))
self.protocol = prtcl
self.configuration = configuration
self.configuration.append_config_values(emc_opts)
self.conn = None
self.url = None
self.user = None
self.passwd = None
self.masking = emc_vmax_masking.EMCVMAXMasking(prtcl)
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
self.fast = emc_vmax_fast.EMCVMAXFast(prtcl)
self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl)
def create_volume(self, volume):
"""Creates a EMC(VMAX) volume from a pre-existing storage pool.
For a concatenated compositeType:
If the volume size is over 240GB then a composite is created with
EMCNumberOfMembers > 1; otherwise it defaults to a non-composite.
For a striped compositeType:
The user must supply an extra spec to determine how many metas
will make up the striped volume. If the meta size is greater than
240GB an error is returned to the user. Otherwise the
EMCNumberOfMembers is what the user specifies.
:param volume: volume Object
:returns: volumeInstance, the volume instance
:raises: VolumeBackendAPIException
"""
volumeSize = int(self.utils.convert_gb_to_bits(volume['size']))
volumeName = volume['name']
extraSpecs = self._initial_setup(volume)
memberCount, errorDesc = self.utils.determine_member_count(
volume['size'], extraSpecs[MEMBERCOUNT], extraSpecs[COMPOSITETYPE])
if errorDesc is not None:
exceptionMessage = (_("The striped meta count of %(memberCount)s "
"is too small for volume: %(volumeName)s. "
"with size %(volumeSize)s ")
% {'memberCount': memberCount,
'volumeName': volumeName,
'volumeSize': volume['size']})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
self.conn = self._get_ecom_connection()
poolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(extraSpecs))
LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
"Storage System: %(storageSystem)s "
"Size: %(size)lu "
% {'volume': volumeName,
'pool': poolInstanceName,
'storageSystem': storageSystemName,
'size': volumeSize})
elementCompositionService = (
self.utils.find_element_composition_service(self.conn,
storageSystemName))
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, storageSystemName)
# If FAST is intended to be used we must first check that the pool
# is associated with the correct storage tier
if extraSpecs[FASTPOLICY] is not None:
foundPoolInstanceName = self.fast.get_pool_associated_to_policy(
self.conn, extraSpecs[FASTPOLICY], extraSpecs[ARRAY],
storageConfigService, poolInstanceName)
if foundPoolInstanceName is None:
exceptionMessage = (_("Pool: %(poolName)s. "
"is not associated to storage tier for "
"fast policy %(fastPolicy)s.")
% {'poolName': extraSpecs[POOL],
'fastPolicy': extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
compositeType = self.utils.get_composite_type(
extraSpecs[COMPOSITETYPE])
volumeDict, rc = self.provision.create_composite_volume(
self.conn, elementCompositionService, volumeSize, volumeName,
poolInstanceName, compositeType, memberCount)
# Now that we have already checked that the pool is associated with
# the correct storage tier and the volume was successfully created
# add the volume to the default storage group created for
# volumes in pools associated with this fast policy
if extraSpecs[FASTPOLICY]:
LOG.info(_("Adding volume: %(volumeName)s to default storage group"
" for FAST policy: %(fastPolicyName)s ")
% {'volumeName': volumeName,
'fastPolicyName': extraSpecs[FASTPOLICY]})
defaultStorageGroupInstanceName = (
self._get_or_create_default_storage_group(
self.conn, storageSystemName, volumeDict,
volumeName, extraSpecs[FASTPOLICY]))
if not defaultStorageGroupInstanceName:
exceptionMessage = (_(
"Unable to create or get default storage group for "
"FAST policy: %(fastPolicyName)s. ")
% {'fastPolicyName': extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
self._add_volume_to_default_storage_group_on_create(
volumeDict, volumeName, storageConfigService,
storageSystemName, extraSpecs[FASTPOLICY])
LOG.info(_("Leaving create_volume: %(volumeName)s "
|
batxes/4Cin
|
SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/mtx1_models/SHH_WT_models11760.py
|
Python
|
gpl-3.0
| 17,567
| 0.025104
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((3895.85, 11095.7, 1669.2), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((5049.86, 10106.8, 2628.2), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((4999.85, 8217.57, 2297.15), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((4605.36, 9556.96, 428.799), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((3993.3, 8395.53, -356.255), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((3833.41, 6205.39, 807.458), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((3096.47, 4982.37, 1744.39), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2870.09, 5434.45, 921.9), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((2798.1, 4268.68, 3405.75), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["pa
|
rticle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((1293.
|
54, 4153.85, 4133.28), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2094.48, 3469.18, 5691.58), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((2552.27, 4849.29, 6065.71), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((3334.61, 5772.16, 7017.66), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((2638.81, 6917.15, 6196.15), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2353.26, 8731.83, 7545.49), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((4356.4, 10765.3, 8677.4), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((5980.27, 10106, 7795.84), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((5894.39, 9122.5, 8760.62), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((4858.05, 7848.03, 8322.71), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((3898.69, 6994.31, 9087.45), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((4112.98, 5569.65, 7185.16), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((4518.45, 6983.77, 8595.65), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((5316.25, 6299.71, 8736.21), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((6281.96, 7073.27, 9116.57), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((6269.15, 8381.22, 8572.86), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6343.5, 9949.26, 8990.83), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((5667.65, 8559.46, 8679.1), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4689.48, 7374.43, 7082.07), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((5160.14, 6224.03, 7929.97), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((4572.35, 5142.21, 7625), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((5530.13, 5673.86, 7271.4), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((3922.34, 5132.86, 6790.43), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s=
|
efiop/dvc
|
tests/unit/ui/test_pager.py
|
Python
|
apache-2.0
| 2,747
| 0
|
import pytest
from dvc.env import DVC_PAGER
from dvc.ui.pager import (
DEFAULT_PAGER,
LESS,
PAGER_ENV,
find_pager,
make_pager,
pager,
)
@pytest.fixture(autouse=True)
def clear_envs(monkeypatch):
monkeypatch.delenv(DVC_PAGER, raising=False)
monkeypatch.delenv(PAGER_ENV, raising=False)
monkeypatch.delenv(LESS, raising=False)
def test_find_pager_when_not_isatty(mocker):
mocker.patch("sys.stdout.isatty", return_value=False)
assert find_pager() is None
def test_find_pager_uses_custom_pager_when_dvc_pager_env_var_is_defined(
mocker, monkeypatch
):
monkeypatch.setenv(DVC_PAGER, "my-pager")
mocker.patch("sys.stdout.isatty", return_value=True)
assert find_pager() == "my-pager"
def test_find_pager_uses_custom_pager_when_pager_env_is_defined(
mocker, monkeypatch
):
monkeypatch.setenv(PAGER_ENV, "my-pager")
mocker.patch("sys.stdout.isatty", return_value=True)
assert find_pager() == "my-pager"
def test_find_pager_uses_default_pager_when_found(mocker):
mocker.patch("sys.stdout.isatty", return_value=True)
mocker.patch("os.system", return_value=0)
assert DEFAULT_PAGER in find_pager()
def test_find_pager_fails_to_find_any_pager(mocker):
mocker.patch("os.system", return_value=1)
mocker.patch("sys.stdout.isatty", return_value=True)
assert find_pager() is None
@pytest.mark.parametrize("env", [DVC_PAGER, PAGER_ENV, None])
def test_dvc_sets_default_options_on_less_without_less_env(
mocker, monkeypatch, env
):
if env:
monkeypatch.setenv(env, "less")
mocker.patch("sys.stdout.isatty", return_value=True)
mocker.patch("os.system", return_value=0)
assert find_pager() == (
"less --quit-if-one-screen --RAW-CONTROL-CHARS"
" --chop-long-lines --no-init"
)
@pytest.mark.parametrize("env", [DVC_PAGER, PAGER_ENV, None])
def test_dvc_sets_some_options_on_less_if_less_env_defined(
mocker, monkeypatch, env
):
if env:
monkeypatch.setenv(env, "less")
mocker.patch("sys.stdout.isatty", return_value=True)
mocker.patch("os.system", return_value=0)
monkeypatch.setenv(LESS, "-R")
assert find_pager() == "less --RAW-CONTROL-CHARS --chop-long-lines"
def test_make_pager_when_no_pager_found(mocker, monkeypatch):
assert make_pager(None).__name__ == "plainpager"
def test_pager(mocker, monkeypatch):
monkeypatch.setenv(DVC_PAGER, "my-pager")
mocker.patch("sys.stdout.isatty", return_value=True)
m_make_pager = mocker.patch("dvc.ui.pager.make_pager")
_pager = m_make_pager.return_value = mocker.MagicMock()
pager("hello world")
m_make_pager.assert_called_once_with("my-pager")
_pager.assert_called_once_with("hello world")
|
Mirantis/solar
|
solar/utils.py
|
Python
|
apache-2.0
| 3,496
| 0
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import io
import json
import logging
import os
import subprocess
import uuid
from jinja2 import Environment
import yaml
logger = logging.getLogger(__name__)
def to_json(data):
return json.dumps(data)
def to_pretty_json(data):
return json.dumps(data, indent=4)
def communicate(command, data):
popen = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
return popen.communicate(input=data)[0]
def execute(command, shell=False):
popen = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=shell)
out, err = popen.communicate()
return popen.returncode, out, err
# Configure jinja2 filters
jinja_env_with_filters = Environment()
jinja_env_with_filters.filters['to_json'] = to_json
jinja_env_with_filters.filters['to_pretty_json'] = to_pretty_json
def create_dir(dir_path):
logger.debug(u'Creating directory %s', dir_path)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
def yaml_load(file_path):
with io.open(file_path) as f:
result = yaml.load(f)
return result
def yaml_dump(yaml_data):
return yaml.safe_dump(yaml_data, default_flow_style=False)
def write_to_file(data, file_path):
with open(file_path, 'w') as f:
f.write(data)
def yaml_dump_to(data, file_path):
write_to_file(yaml_dump(data), file_path)
def find_by_mask(mask):
for file_path in glob.glob(mask):
yield os.path.abspath(file_path)
def load_by_mask(mask):
result = []
for file_path in find_by_mask(mask):
result.append(yaml_load(file_path))
return result
def generate_uuid():
return str(uuid.uuid4())
def render_template(template_path, **params):
with io.open(template_path) as f:
temp = jinja_env_with_filters.from_string(f.read())
return temp.render(**params)
def ext_encoder(fpath):
ext = os.path.splitext(os.path.basename(fpath))[1].strip('.')
if ext in ['json']:
return json
elif ext in ['yaml', 'yml']:
return yaml
raise Exception('Unknown extension {}'.format(ext))
def load_file(fpath):
encoder = ext_encoder(fpath)
try:
with open(fpath) as f:
return encoder.load(f)
except IOError:
return {}
def read_config():
CONFIG_FILE = os.environ.get('CONFIG_FILE') or '/vagrant/config.yaml'
return load_file(CONFIG_FILE)
def read_config_file(key):
fpath = read_config()[key]
return load_file(fpath)
def save_to_config_file(key, data):
fpath = read_config()[key]
with open(fpath, 'w') as f:
encoder = ext_encoder(fpath)
encoder.dump(data, f)
def solar_map(funct, args, **kwargs):
return map(funct, args)
def get_local():
import threading
return threading.local
|
manashmndl/dfvfs
|
tests/path/ewf_path_spec.py
|
Python
|
apache-2.0
| 1,121
| 0.005352
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the EWF image path specification implementation."""
import unittest
from dfvfs.path import ewf_path_spec
from tests.path import test_lib
class EwfPathSpecTest(test_lib.PathSpecTestCase):
"""Tests for the EWF image path specification implementation."""
def testInitialize(self):
"""Tests the path specification initialization."""
path_spec = ewf_path_spec.EwfPathSpec(parent=self._path_spec)
self.assertNotEqual(path_spec, None)
with self.assertRaises(ValueError):
_ = ewf_path_spec.EwfPathSpec(parent=None)
with self.assertRaises(ValueError):
_ = ewf_path_spec.EwfPathSpec(parent=self._path_spec, bogus=u'BOGUS')
def testComparable(self):
"""Tests the path specification comparable property."""
path_spec = ewf_path_spec.EwfPathSpec(parent=self._path_spec)
self.assertNotEqual(path_spec, None)
expected_comparable = u'\n'.join([
u'type: TEST',
u'type: EWF',
u''])
self.assertEqual(path_spec.comparable, expected_comparable)
if __name__ == '__main__':
unittest.main()
|
iModels/mbuild
|
mbuild/formats/vasp.py
|
Python
|
mit
| 2,953
| 0
|
"""VASP POSCAR format."""
import numpy as np
__all__ = ["write_poscar"]
def write_poscar(
compound,
filename,
lattice_constant,
bravais=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
sel_dev=False,
coord="cartesian",
):
"""Write a VASP POSCAR file from a Compound.
See https://www.vasp.at for more information.
Parameters
----------
compound: mb.Compound
mBuild Compound
filename: str
Path of the output file
lattice_constant: float
Scaling constant for POSCAR file, used to scale all lattice vectors and
atomic coordinates
bravais: array, default = [[1,0,0],[0,1,0],[0,0,1]]
Array of Bravais lattice vectors that defines the unit cell of the system
sel_dev: boolean, default=False
Turns selective dynamics on. Not currently implemented.
coord: str, default = 'cartesian', other option = 'direct'
Coordinate style of atom positions
Notes
-----
Coordinates are broken up into a list of np.arrays to ensure that the
coordinates of the first atom listed are written to the file first
"""
structure = compound.to_parmed()
atom_names = np.unique([atom.name for atom in structure.atoms])
count_list = list()
xyz_list = list()
if coord == "direct":
for atom in structure.atoms:
atom.xx = atom.xx / lattice_constant
atom.xy = atom.xy / lattice_constant
atom.xz = atom.xz / lattice_constant
for atom_name in atom_names:
atom_count = np.array(
[atom.name for atom in structure.atoms].count(atom_name)
)
count_list.append(atom_count)
xyz = np.array(
[
[atom.xx, atom.xy, atom.xz]
for atom in structure.atoms
if atom.name == atom_name
]
)
xyz = xyz / 10 # unit conversion from angstroms to nm
xyz_list.append(xyz)
with open(filename, "w") as data:
data.write(filename + " - created by mBuild\n")
data.write(" {0:.15f}\n".format(lattice_constant))
data.write(" ")
for item in bravais[0]:
data.write(" {0:.15f}".format(item))
data.write("\n")
data.write(" ")
for item in bravais[1]:
data.write(" {0:.15f}".format(item))
data.write("\n")
data.write(" ")
for item in bravais[2]:
data.write(" {0:.15f}".format(item))
data.write("\n")
data.write("{}\n".format(" ".join(map(str, atom_names))))
data.write("{}\n".format(" ".join(map(str, count_list))))
if sel_dev:
data.write("Selective Dyn\n")
data.write(coord + "\n")
for xyz in xyz_list:
for pos in xyz:
data.write(
"{0:.15f} {1:.15f} {2:.15f}\n".format(
pos[0], pos[1], pos[2]
)
)
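# Editor's usage sketch (hypothetical compound and file names, assumes mbuild
# is importable; not part of the original module):
#     import mbuild as mb
#     co2 = mb.load('CO2.mol2')
#     write_poscar(co2, 'POSCAR', 1.0, coord='direct')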
|
hgranlund/py-chess-engine
|
pavement.py
|
Python
|
mit
| 7,367
| 0.000814
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import time
import subprocess
# Import parameters from the setup file.
sys.path.append('.')
from setup import (
setup_dict, get_project_files, print_success_message,
print_failure_message, _lint, _test, _test_all,
CODE_DIRECTORY, DOCS_DIRECTORY, TESTS_DIRECTORY, PYTEST_FLAGS)
from paver.easy import options, task, needs, consume_args
from paver.setuputils import install_distutils_tasks
options(setup=setup_dict)
install_distutils_tasks()
## Miscellaneous helper functions
def print_passed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=PASSED
print_success_message(r''' ___ _ ___ ___ ___ ___
| _ \/_\ / __/ __| __| \
| _/ _ \\__ \__ \ _|| |) |
|_|/_/ \_\___/___/___|___/
''')
def print_failed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=FAILED
print_failure_message(r''' ___ _ ___ _ ___ ___
| __/_\ |_ _| | | __| \
| _/ _ \ | || |__| _|| |) |
|_/_/ \_\___|____|___|___/
''')
class cwd(object):
"""Class used for temporarily changing directories. Can be though of
as a `pushd /my/dir' then a `popd' at the end.
"""
def __init__(self, newcwd):
""":param newcwd: directory to make the cwd
:type newcwd: :class:`str`
"""
self.newcwd = newcwd
def __enter__(self):
self.oldcwd = os.getcwd()
os.chdir(self.newcwd)
return os.getcwd()
def __exit__(self, type_, value, traceback):
# This acts like a `finally' clause: it will always be executed.
os.chdir(self.oldcwd)
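# Editor's sketch (illustration only):
#     with cwd(DOCS_DIRECTORY) as docs_dir:
#         subprocess.call(['make', 'html'])  # runs inside the docs directory
#     # the previous working directory is restored on exit, even on error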
## Task-related functions
def _doc_make(*make_args):
"""Run make in sphinx' docs directory.
:return: exit code
"""
if sys.platform == 'win32':
# Windows
make_cmd = ['make.bat']
else:
# Linux, Mac OS X, and others
make_cmd = ['make']
make_cmd.extend(make_args)
# Account for a stupid Python "bug" on Windows:
# <http://bugs.python.org/issue15533>
with cwd(DOCS_DIRECTORY):
retcode = subprocess.call(make_cmd)
return retcode
## Tasks
@task
@needs('doc_html', 'setuptools.command.sdist')
def sdist():
"""Build the HTML docs and the tarball."""
pass
@task
def test():
"""Run the unit tests."""
raise SystemExit(_test())
@task
def lint():
# This refuses to format properly when running `paver help' unless
# this ugliness is used.
('Perform PEP8 style check, run PyFlakes, and run McCabe complexity '
'metrics on the code.')
raise SystemExit(_lint())
@task
def test_all():
"""Perform a style check and run all unit tests."""
retcode = _test_all()
if retcode == 0:
print_passed()
else:
print_failed()
raise SystemExit(retcode)
@task
@consume_args
def run(args):
"""Run the package's main script. All arguments are passed to it."""
# The main script expects to get the called executable's name as
# argv[0]. However, paver doesn't provide that in args. Even if it did (or
# we dove into sys.argv), it wouldn't be useful because it would be paver's
# executable. So we just pass the package name in as the executable name,
# since it's close enough. This should never be seen by an end user
# installing through Setuptools anyway.
from pychess_engine.main import main
raise SystemExit(main([CODE_DIRECTORY] + args))
@task
def commit():
"""Commit only if all the tests pass."""
if _test_all() == 0:
subprocess.check_call(['git', 'commit'])
else:
print_failure_message('\nTests failed, not committing.')
@task
def coverage():
"""Run tests and show test coverage report."""
try:
import pytest_cov # NOQA
except ImportError:
print_failure_message(
'Install the pytest coverage plugin to use this task, '
"i.e., `pip install pytest-cov'.")
raise SystemExit(1)
import pytest
pytest.main(PYTEST_FLAGS + [
'--cov', CODE_DIRECTORY,
'--cov-report', 'term-missing',
TESTS_DIRECTORY])
@task # NOQA
def doc_watch():
"""Watch for changes in the docs and rebuild HTML docs when changed."""
try:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
except ImportError:
print_failure_message('Install the watchdog package to use this task, '
"i.e., `pip install watchdog'.")
raise SystemExit(1)
class RebuildDocsEventHandler(FileSystemEventHandler):
def __init__(self, base_paths):
self.base_paths = base_paths
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event: The event object representing the file system event.
:type event: :class:`watchdog.events.FileSystemEvent`
"""
for base_path in self.base_paths:
if event.src_path.endswith(base_path):
super(RebuildDocsEventHandler, self).dispatch(event)
# We found one that matches. We're done.
return
def on_modified(self, event):
print_failure_message('Modification detected. Rebuilding docs.')
# # Strip off the path prefix.
# import os
# if event.src_path[len(os.getcwd()) + 1:].startswith(
# CODE_DIRECTORY):
# # sphinx-build doesn't always pick up changes on code files,
# # even though they are used to generate the documentation. As
# # a workaround, just clean before building.
doc_html()
print_success_message('Docs have been rebuilt.')
print_success_message(
'Watching for changes in project files, press Ctrl-C to cancel...')
handler = RebuildDocsEventHandler(get_project_files())
observer = Observer()
observer.schedule(handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
@task
@needs('doc_html')
def doc_open():
"""Build the HTML docs and open them in a web browser."""
doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html')
if sys.platform == 'darwin':
# Mac OS X
subprocess.check_call(['open', doc_index])
elif sys.platform == 'win32':
# Windows
subprocess.check_call(['start', doc_index], shell=True)
elif sys.platform == 'linux2':
# All freedesktop-compatible desktops
subprocess.check_call(['xdg-open', doc_index])
else:
print_failure_message(
"Unsupported platform. Please open `{0}' manually.".format(
doc_index))
@task
def get_tasks():
"""Get all paver-defined tasks."""
from paver.tasks import environment
for task in environment.get_tasks():
print(task.shortname)
@task
def doc_html():
"""Build the HTML docs."""
retcode = _doc_make('html')
if retcode:
raise SystemExit(retcode)
@task
def doc_clean():
"""Clean (delete) the built docs."""
retcode = _doc_make('clean')
if retcode:
raise SystemExit(retcode)
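# Typical invocation (illustrative): `paver test_all' performs the style
# check, runs the unit tests, and prints the PASSED/FAILED banner defined
# near the top of this file.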
skkwan/IC10X2 | palomar/TSpec_reductions/TSpec basic plot.py | Python | mit | 2,508 | 0.05303
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 1 17:14:20 2016
@author: stephaniekwan
Plot prominent emission lines of IC 10 X-2 from TripleSpec chop-subtracted
data. He I and Pa-Gamma lines fit on one plot, Pa-Beta line goes in a separate
plot (comment/uncomment blocks to plot each set).
"""
import numpy as np
import matplotlib.pyplot as plt
plt.clf()
plt.close()
table = np.genfromtxt('IC10X2_JHK_modified.rtf', delimiter = ' ',
                      comments = '\p', skip_header = 2, skip_footer = 4)
wl = table[:, 0] - 0.0005
counts = table[:, 1]
fig = plt.figure()
normFlux = counts / 0.024
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1,2, sharey = True)
ax1.plot(wl[7100:7500], normFlux[7100:7500], color = 'black')
ax1.invert_xaxis()
ax1.set_xlim([1.075, 1.100])
ax1.set_ylim([0, 2.5])
# Plot and label the original He I line in red
ax1.axvline(x = 1.08303398 - 0.0005, color = 'red', ls = 'dashed')
ax1.text(1.084- 0.0005, 1.5, 'He I (1.0830)', color = 'red', rotation = 90,
fontsize = 12)
# Plot and label the peak emission line in green
ax1.axvline(x = 1.08239- 0.0005, color = 'green', ls = 'dashed')
ax1.text(1.08- 0.0005, 1.88, 'He I blueshifted (1.0819)', color = 'green',
rotation = 90, fontsize = 12)
# Paschen-gamma lines
ax1.axvline(x = 1.093817- 0.0005, color = 'red', ls = 'dashed')
ax1.text(1.095- 0.0005, 1.5, 'Pa$\gamma$ (1.0933)', rotation = 90,
fontsize = 12, color = 'red')
ax1.axvline(x = 1.0931- 0.0005, color = 'green', ls = 'dashed')
ax1.text(1.091- 0.0005, 1.5, 'Pa$\gamma$ (1.0926)',
rotation = 90, fontsize = 12, color = 'green')
# Paschen-beta lines
# Plot the original emission line in red
ax2.plot(wl[5200:5389], normFlux[5200:5389], color = 'black')
ax2.axvline(x = 1.282- 0.0005, color = 'red', ls = 'dashed')
ax2.text(1.283- 0.0005, 1.5, 'Pa $\\beta$ (1.2815)', rotation = 90,
fontsize = 12, color = 'red')
# Plot the peak emission line in green
ax2.axvline(x = 1.28103- 0.0005, color = 'green', ls = 'dashed')
ax2.text(1.278- 0.0005, 1.5, 'Pa $\\beta$ (1.2805)', rotation = 90,
fontsize = 12, color = 'green')
ax2.invert_xaxis()
ax2.set_xlim([1.270, 1.2939])
ax2.set_ylim([0, 2.0])
# Set common labels
f.text(0.5, 0.04, 'Wavelength ($\mu$m)', ha = 'center', va = 'center',
fontsize = 13)
f.text(0.06, 0.5, 'Relative strength to He I line', ha = 'center',
va = 'center', rotation = 'vertical', fontsize = 13)
plt.savefig('170331 TSpec plot.pdf')
ros2/launch | launch/doc/source/conf.py | Python | apache-2.0 | 6,246 | 0
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'launch'
copyright = '2018, Open Source Robotics Foundation, Inc.' # noqa
author = 'Open Source Robotics Foundation, Inc.'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.4.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
# autodoc settings
autodoc_default_options = {
'special-members': '__init__',
'class-doc-from': 'class',
}
autodoc_class_signature = 'separated'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'launchdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'launch.tex', 'launch Documentation',
'Open Source Robotics Foundation, Inc.', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'launch', 'launch Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'launch', 'launch Documentation',
author, 'launch', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
stdweird/aquilon | lib/python2.6/aquilon/worker/anonwrappers.py | Python | apache-2.0 | 2,216 | 0.000451
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide an anonymous access channel to the Site."""
from twisted.web import server, http
class AnonHTTPChannel(http.HTTPChannel):
"""
This adds getPrincipal() to the base channel. Since there is no
knc in use here, it just returns None.
"""
def getPrincipal(self):
"""For any anonymous channel, always returns None."""
return None
class AnonSite(server.Site):
"""
Overrides the basic HTTPChannel protocol with AnonHTTPChannel to
provide a getPrincipal method. Should be kept consistent with
any other changes from kncwrappers.
"""
protocol = AnonHTTPChannel
# Overriding http.HTTPFactory's log() for consistency with KNCSite.
# This is exactly the default server.Site.log() method for now.
def log(self, request):
if hasattr(self, "logFile"):
line = '%s - %s %s "%s" %d %s "%s" "%s"\n' % (
request.getClientIP(),
# request.getUser() or "-", # the remote user is almost never important
"-",
self._logDateTime,
'%s %s %s' % (self._escape(request.method),
self._escape(request.uri),
self._escape(request.clientproto)),
request.code,
request.sentLength or "-",
self._escape(request.getHeader("referer") or "-"),
self._escape(request.getHeader("user-agent") or "-"))
self.logFile.write(line)
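# Minimal usage sketch (assumes a twisted.web resource named `root' and an
# arbitrary port number):
#
#     from twisted.internet import reactor
#     site = AnonSite(root)
#     reactor.listenTCP(8080, site)
#     reactor.run()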
hsoft/aurdiff | update.py | Python | bsd-3-clause | 2,337 | 0.003851
# Copyright 2013 Virgil Dupras (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package.
import os.path as op
from urllib.request import urlopen
import subprocess
import json
from bs4 import BeautifulSoup
HERE = op.dirname(__file__)
AUR_FOLDER = op.join(HERE, 'aur')
BASE_URL = 'https://aur.archlinux.org'
def get_pkg_list():
result = [] # (name, version)
URL = BASE_URL + '/packages/?SB=a&SO=d&O=0&PP=250'
with urlopen(URL) as fp:
contents = fp.read()
soup = BeautifulSoup(contents)
table = soup('table', class_='results')[0]
rows = table.tbody('tr')
for row in rows:
        # Strangely enough, when querying through urlopen, we don't have the checkbox column. Is
# this column added through JS?
pair = (row('td')[1].text, row('td')[2].text)
result.append(pair)
return result
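# get_pkg_list() returns pairs such as [('some-package', '1.0-1'), ...],
# scraped from the first 250 rows of the AUR listing (the PP=250 query
# parameter above); the example package name here is hypothetical.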
def download_pkgbuild(pkgname):
URL = '%s/packages/%s/' % (BASE_URL, pkgname)
with urlopen(URL) as fp:
contents = fp.read()
soup = BeautifulSoup(contents)
    pkgbuild_url = BASE_URL + soup('div', id='actionlist')[0].ul('li')[0].a['href']
with urlopen(pkgbuild_url) as fp:
contents = fp.read()
with open(op.join(AUR_FOLDER, pkgname), 'wb') as fp:
fp.write(contents)
def main():
json_path = op.join(HERE, 'lastupdate.json')
with open(json_path, 'rt') as fp:
info = json.load(fp)
lastname = info['name']
lastversion = info['version']
pkglist = get_pkg_list()
if (lastname, lastversion) in pkglist:
index = pkglist.index((lastname, lastversion))
pkglist = pkglist[:index]
if not pkglist:
print("Nothing to update")
return
for name, version in reversed(pkglist):
print("Updating %s to %s" % (name, version))
download_pkgbuild(name)
subprocess.call(['git', 'add', op.join(AUR_FOLDER, name)])
lastname, lastversion = pkglist[0]
info = {'name': lastname, 'version': lastversion}
with open(json_path, 'wt') as fp:
json.dump(info, fp)
subprocess.call(['git', 'add', json_path])
commit_msg = "Updated %d packages" % len(pkglist)
subprocess.call(['git', 'commit', '-m', commit_msg])
if __name__ == '__main__':
main()
Chasego/cod | lintcode/268-[DUP]-Find-the-Missing-Number/FindtheMissingNumber_001.py | Python | mit | 209 | 0
class Solution:
# @param nums: a list of integers
# @return: an integer
def findMissing(self, nums):
# write your code here
n = len(nums)
        # Gauss sum: 0 + 1 + ... + n equals n * (n + 1) / 2, so the missing
        # number is that total minus the actual sum of nums.
        return n * (n + 1) / 2 - sum(nums)
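# Quick sanity check (illustrative): for nums = [0, 1, 3], n = 3, the Gauss
# sum is 3 * 4 / 2 = 6, and 6 - (0 + 1 + 3) = 2, the missing number:
#
#     print(Solution().findMissing([0, 1, 3]))  # -> 2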
jkandasa/integration_tests | cfme/services/catalogs/ansible_catalog_item.py | Python | gpl-2.0 | 16,192 | 0.002347
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.utils import (Parameter, ParametrizedLocator, ParametrizedString, Version,
VersionPick)
from widgetastic.widget import Checkbox, Table, Text, View
from widgetastic_manageiq import FileInput, SummaryForm, SummaryTable
from widgetastic_patternfly import (
BootstrapSelect as VanillaBootstrapSelect,
BootstrapSwitch,
Button,
Input,
Tab
)
from cfme.services.catalogs.catalog_item import AllCatalogItemView
from cfme.utils.appliance import Navigatable
from cfme.utils.appliance.implementations.ui import navigate_to, navigator, CFMENavigateStep
from cfme.utils.update import Updateable
from cfme.utils.wait import wait_for
from . import ServicesCatalogView
from cfme.common import WidgetasticTaggable, TagPageView
class BootstrapSelect(VanillaBootstrapSelect):
"""BootstrapSelect widget for Ansible Playbook Catalog Item form.
BootstrapSelect widgets don't have `data-id` attribute in this form, so we have to override ROOT
locator.
"""
ROOT = ParametrizedLocator('.//select[normalize-space(@name)={@id|quote}]/..')
class ActionsCell(View):
edit = Button(
**{"ng-click": ParametrizedString(
"vm.editKeyValue('{@tab}', this.key, this.key_value, $index)")}
)
delete = Button(
**{"ng-click": ParametrizedString(
"vm.removeKeyValue('{@tab}', this.key, this.key_value, $index)")}
)
def __init__(self, parent, tab, logger=None):
View.__init__(self, parent, logger=logger)
self.tab = parent.parent.parent.parent.tab
class AnsibleExtraVariables(View):
"""Represents extra variables part of ansible service catalog edit form.
Args:
tab (str): tab name where this view is located. Can be "provisioning" or "retirement".
"""
variable = Input(name=ParametrizedString("{@tab}_key"))
default_value = Input(name=ParametrizedString("{@tab}_value"))
add = Button(**{"ng-click": ParametrizedString("vm.addKeyValue('{@tab}')")})
variables_table = Table(
".//div[@id='variables_div']//table",
column_widgets={"Actions": ActionsCell(tab=Parameter("@tab"))}
)
def __init__(self, parent, tab, logger=None):
View.__init__(self, parent, logger=logger)
self.tab = tab
def _values_to_remove(self, values):
return list(set(self.all_vars) - set(values))
def _values_to_add(self, values):
return list(set(values) - set(self.all_vars))
def fill(self, values):
"""
Args:
values (list): [] to remove all vars or [("var", "value"), ...] to fill the view.
"""
if set(values) == set(self.all_vars):
return False
else:
for value in self._values_to_remove(values):
rows = list(self.variables_table)
for row in rows:
if row[0].text == value[0]:
row["Actions"].widget.delete.click()
break
for value in self._values_to_add(values):
self.variable.fill(value[0])
self.default_value.fill(value[1])
self.add.click()
return True
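    # Example fill values (illustrative, the variable name is hypothetical):
    # [("region", "us-east-1")] adds or keeps that variable, while [] removes
    # every variable currently present in the table.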
@property
def all_vars(self):
if self.variables_table.is_displayed:
return [(row["Variable"].text, row["Default value"].text) for
row in self.variables_table]
else:
return []
def read(self):
return self.all_vars
class AnsibleCatalogItemForm(ServicesCatalogView):
title = Text(".//span[@id='explorer_title_text']")
name = Input("name")
description = Input("description")
display_in_catalog = BootstrapSwitch(name="display")
catalog = BootstrapSelect("catalog_id")
@View.nested
class provisioning(Tab): # noqa
repository = BootstrapSelect("provisioning_repository_id")
playbook = BootstrapSelect("provisioning_playbook_id")
machine_credential = BootstrapSelect("provisioning_machine_credential_id")
cloud_type = BootstrapSelect("provisioning_cloud_type")
localhost = Input(id="provisioning_inventory_localhost")
specify_host_values = Input(id="provisioning_inventory_specify")
hosts = Input("provisioning_inventory")
logging_output = BootstrapSelect("provisioning_log_output")
max_ttl = Input("provisioning_execution_ttl")
escalate_privilege = BootstrapSwitch("provisioning_become_enabled")
verbosity = BootstrapSelect("provisioning_verbosity")
use_exisiting = Checkbox(locator=".//label[normalize-space(.)='Use Existing']/input")
create_new = Checkbox(locator=".//label[normalize-space(.)='Create New']/input")
provisioning_dialog_id = BootstrapSelect("provisioning_dialog_id")
provisioning_dialog_name = Input(name="vm.provisioning_dialog_name")
extra_vars = AnsibleExtraVariables(tab="provisioning")
@View.nested
class retirement(Tab): # noqa
# TODO Somehow need to handle a modal window
copy_from_provisioning = Button("Copy from provisioning")
repository = BootstrapSelect("retirement_repository_id")
playbook = BootstrapSelect("retirement_playbook_id")
machine_credential = BootstrapSelect("retirement_machine_credential_id")
cloud_type = BootstrapSelect("retirement_cloud_type")
localhost = Input(id="retirement_inventory_localhost")
specify_host_values = Input(id="retirement_inventory_specify")
hosts = Input("retirement_inventory")
logging_output = BootstrapSelect("retirement_log_output")
max_ttl = Input("retirement_execution_ttl")
escalate_privilege = BootstrapSwitch("retirement_become_enabled")
verbosity = BootstrapSelect("retirement_verbosity")
remove_resources = VersionPick({
Version.lowest(): BootstrapSelect("vm.catalogItemModel.retirement_remove_resources"),
"5.9": BootstrapSelect("vm.vm.catalogItemModel.retirement_remove_resources")
})
extra_vars = AnsibleExtraVariables(tab="retirement")
cancel = Button("Cancel")
class SelectCatalogItemTypeView(ServicesCatalogView):
title = Text(".//span[@id='explorer_title_text']")
catalog_item_type = BootstrapSelect("st_prov_type", can_hide_on_select=True)
add = Button("Add")
cancel = Button("Cancel")
@property
def is_displayed(self):
return (
self.in_explorer and
self.title.text == "Adding a new Service Catalog Item" and
self.catalog_item_type.is_displayed
)
class AddAnsibleCatalogItemView(AnsibleCatalogItemForm):
add = Button("Add")
@property
def is_displayed(self):
return False
class EditAnsibleCatalogItemView(AnsibleCatalogItemForm):
save = Button("Save")
reset = Button("Reset")
@property
def is_displayed(self):
return False
class DetailsEntitiesAnsibleCatalogItemView(View):
title = Text(".//span[@id='explorer_title_text']")
basic_information = SummaryForm("Basic Information")
custom_image = FileInput("upload_image")
upload = Button("Upload")
smart_management = SummaryTable("Smart Management")
@View.nested
class provisioning(Tab): # noqa
info = SummaryForm("Provisioning Info")
variables_and_default_values = Table(".//div[@id='provisioning']//table")
@View.nested
class retirement(Tab): # noqa
info = SummaryForm("Retirement Info")
variables_and_default_values = Table(".//div[@id='retirement']//table")
class DetailsAnsibleCatalogItemView(ServicesCatalogView):
"""Has to be in view standards, changed for WidgetasticTaggable.get_tags()"""
entities = View.nested(DetailsEntitiesAnsibleCatalogItemView)
@property
def is_displayed(self):
return (
self.in_explorer and
self.entities.title.text == 'Service Catalog Item "{}"'.format(
self.context["object"].name
)
)
class AnsiblePlaybookCatalogItem(Updateable, Navigatable, WidgetasticTaggable):
""
appuio/ansible-role-openshift-zabbix-monitoring | vendor/ansible-module-openshift/library/openshift_resource.py | Python | apache-2.0 | 9,578 | 0.01493
#!/usr/bin/python
import json
import tempfile
import re
import traceback
DOCUMENTATION = '''
---
module: openshift_resource
short_description: Creates and patches OpenShift resources.
description:
- Creates and patches OpenShift resources idempotently
- based on template or strategic merge patch.
options:
namespace:
description:
- The namespace in which to configure resources
default: None
required: true
aliases: []
template:
description:
- Path to template of resources to configure
- Mutually exclusive with I(patch)
required: false
default: None
aliases: []
app_name:
description:
- Name of application resources when instantiating the template,
- corresponds to the C(--name) option of C(oc new-app).
- Only relevant when I(template) parameter is given.
required: false
default: None
aliases: []
arguments:
description:
- Arguments to use when instantiating the template.
- Only relevant when I(template) parameter is given.
required: false
    default: None
aliases: []
patch:
description:
- Strategic merge patch to apply
- Mutually exclusive with I(template)
required: false
default: None
aliases: []
author:
- "Daniel Tschan <tschan@puzzle.ch>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
# TODO
'''
class ResourceModule:
def __init__(self, module):
self.module = module
self.changed = False
self.msg = []
        self.log = []
self.arguments = []
for key in module.params:
setattr(self, key, module.params[key])
def debug(self, msg, *args):
if self.module._verbosity >= 3:
self.log.append(msg % args)
def trace(self, msg, *args):
if self.module._verbosity >= 4:
self.log.append(msg % args)
def run_command(self, args, **kwargs):
if self.module._verbosity < 3 or not kwargs['check_rc']: # Not running in debug mode, call module run_command which filters passwords
return self.module.run_command(args, **kwargs)
kwargs['check_rc'] = False
(rc, stdout, stderr) = self.module.run_command(args, **kwargs)
if rc != 0:
self.module.fail_json(cmd=args, rc=rc, stdout=stdout, stderr=stderr, msg=stderr, debug=self.log)
return (rc, stdout, stderr)
def remove_omitted_keys(self, object, parent = None, object_key = None):
if isinstance(object, dict):
for k, v in object.items():
self.remove_omitted_keys(v, object, k)
elif isinstance(object, list):
for i, v in enumerate(object[:]):
self.remove_omitted_keys(v, object, i)
        elif isinstance(object, basestring):
            if object.startswith('__omit_place_holder__'):
                del parent[object_key]
def exemption(self, kind, current, patch, path):
if patch is None or isinstance(patch, (dict, list)) and not patch:
return True
elif re.match('\.status\..*', path):
return True
elif kind == 'DeploymentConfig' and re.match('.spec.template.spec.containers\[[0-9]+\].image', path):
return "@" in current
return False
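    # Reading of the rules above (illustrative): empty patches and anything
    # under .status are always exempt; a DeploymentConfig container image is
    # exempt when the live value contains "@", i.e. when it has already been
    # resolved to a digest reference.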
def patch_applied(self, kind, name, current, patch, path = ""):
self.trace("patch_applied %s", path)
if current is None:
if not patch is None and not patch is False and not self.exemption(kind, current, patch, path):
self.msg.append(self.namespace + "::" + kind + "/" + name + "{" + path + "}(" + str(patch) + " != " + str(current) + ")")
return False
elif isinstance(patch, dict):
for key, val in patch.iteritems():
if not self.patch_applied(kind, name, current.get(key), val, path + "." + key):
return False
elif isinstance(patch, list):
if not self.strategic_list_compare(kind, name, current, patch, path):
return False
else:
if current != patch and not self.exemption(kind, current, patch, path):
self.msg.append(self.namespace + "::" + kind + "/" + name + "{" + path + "}(" + str(patch) + " != " + str(current) + ")")
return False
return True
def equalList(self, kind, resource, current, patch, path):
"""Compare two lists recursively."""
if len(current) != len(patch):
self.msg.append(self.namespace + "::" + kind + "/" + resource + "{" + path + "}(length mismatch)")
return False
for i, val in enumerate(patch):
if not self.patch_applied(kind, resource, current[i], val, path + "[" + str(i) + "]"):
return False
return True
def strategic_list_compare(self, kind, name, current, patch, path):
if not current and not patch:
return True
elif not current:
self.msg.append(self.namespace + "::" + kind + "/" + name + "{" + path + "}(new)")
return False
elif isinstance(current[0], dict) and 'name' in current[0]:
for i, patchVal in enumerate(patch):
elementName = patchVal.get('name')
if elementName is None: # Patch contains element without name attribute => fall back to plain list comparison.
self.debug("Patch contains element without name attribute => fall back to plain list comparison.")
return self.equalList(kind, name, current, patch, path)
curVals = [curVal for curVal in current if curVal.get('name') == elementName]
if len(curVals) == 0:
self.msg.append(self.namespace + "::" + kind + "/" + name + "{" + path + '[' + str(len(current)) + ']' + "}(new)")
return False
elif len(curVals) == 1:
if not self.patch_applied(kind, name, curVals[0], patchVal, path + '[' + str(i) + ']'):
return False
else:
self.module.fail_json(msg="Patch contains multiple attributes with name '" + elementName + "' under path: " + path, debug=self.log)
else:
return self.equalList(kind, name, current, patch, path)
return True
def export_resource(self, kind, name = None, label = None):
if label:
name = '-l ' + label
(rc, stdout, stderr) = self.module.run_command(['oc', 'get', '-n', self.namespace, kind + '/' + name, '-o', 'json'])
if rc == 0:
result = json.loads(stdout)
else:
result = {}
return result
def create_resource(self, kind, name, object):
if not self.module.check_mode:
file = tempfile.NamedTemporaryFile(prefix=kind + '_' + name, delete=True)
json.dump(object, file)
file.flush()
(rc, stdout, stderr) = self.run_command(['oc', 'create', '-n', self.namespace, '-f', file.name], check_rc=True)
file.close()
def patch_resource(self, kind, name, patch):
if not self.module.check_mode:
(rc, stdout, stderr) = self.run_command(['oc', 'patch', '-n', self.namespace, kind + '/' + name, '-p', json.dumps(patch)], check_rc=True)
def update_resource(self, object, path = ""):
kind = object.get('kind')
name = object.get('metadata', {}).get('name')
self.debug("update_resource %s %s", kind, name)
if not kind:
self.module.fail_json(msg=path + ".kind is undefined!", debug=self.log)
if not name:
self.module.fail_json(msg=path + ".metadata.name is undefined!", debug=self.log)
self.remove_omitted_keys(object)
current = self.export_resource(kind, name)
if not current:
self.changed = True
self.msg.append(self.namespace + "::" + kind + "/" + name + "(new)")
self.create_resource(kind, name, object)
elif not self.patch_applied(kind, name, current, object):
self.changed = True
self.patch_resource(kind, name, object)
return self.changed
def process_template(self, template_name, arguments):
self.debug("process_template")
if arguments:
args = [_ for arg in arguments.items() for _ in ('-v', "=".join(arg))]
else:
args = []
if "\n" in template_name:
(rc, stdout, stderr) = self.run_command(['oc', 'process', '-o', 'json', '-f', '-'] + args, data=template_name, check_rc=True)
else:
(rc, stdout, stderr) = self.run_command(['oc', 'process', '-o', 'json', '-f', template_name] + args, check_rc=True)
if rc != 0:
self.module.fail_json(msg=stderr, debug=self.log)
template = json.
jhdulaney/dnf | tests/support.py | Python | gpl-2.0 | 20,691 | 0.000242
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import contextlib
import logging
import os
import re
import unittest
from functools import reduce
import hawkey
import hawkey.test
from hawkey import SwdbReason, SwdbPkgData
import dnf
import dnf.conf
import dnf.cli.cli
import dnf.cli.demand
import dnf.cli.option_parser
import dnf.comps
import dnf.exceptions
import dnf.goal
import dnf.i18n
import dnf.package
import dnf.persistor
import dnf.pycomp
import dnf.repo
import dnf.sack
if dnf.pycomp.PY3:
from unittest import mock
from unittest.mock import MagicMock, mock_open
else:
from tests import mock
from tests.mock import MagicMock
def mock_open(mock=None, data=None):
if mock is None:
mock = MagicMock(spec=file)
handle = MagicMock(spec=file)
handle.write.return_value = None
if data is None:
handle.__enter__.return_value = handle
else:
handle.__enter__.return_value = data
mock.return_value = handle
return mock
logger = logging.getLogger('dnf')
skip = unittest.skip
TRACEBACK_RE = re.compile(
r'(Traceback \(most recent call last\):\n'
r'(?: File "[^"\n]+", line \d+, in \w+\n'
r'(?: .+\n)?)+'
r'\S.*\n)')
REASONS = {
'hole': 'group',
'pepper': 'group',
'right': 'dep',
'tour': 'group',
'trampoline': 'group',
}
RPMDB_CHECKSUM = '47655615e9eae2d339443fa00065d41900f99baf'
TOTAL_RPMDB_COUNT = 10
SYSTEM_NSOLVABLES = TOTAL_RPMDB_COUNT
MAIN_NSOLVABLES = 9
UPDATES_NSOLVABLES = 4
AVAILABLE_NSOLVABLES = MAIN_NSOLVABLES + UPDATES_NSOLVABLES
TOTAL_GROUPS = 4
TOTAL_NSOLVABLES = SYSTEM_NSOLVABLES + AVAILABLE_NSOLVABLES
# testing infrastructure
def dnf_toplevel():
return os.path.normpath(os.path.join(__file__, '../../'))
def repo(reponame):
return os.path.join(REPO_DIR, reponame)
def resource_path(path):
this_dir = os.path.dirname(__file__)
return os.path.join(this_dir, path)
REPO_DIR = resource_path('repos')
COMPS_PATH = os.path.join(REPO_DIR, 'main_comps.xml')
NONEXISTENT_FILE = resource_path('does-not/exist')
TOUR_44_PKG_PATH = resource_path('repos/rpm/tour-4-4.noarch.rpm')
TOUR_50_PKG_PATH = resource_path('repos/rpm/tour-5-0.noarch.rpm')
TOUR_51_PKG_PATH = resource_path('repos/rpm/tour-5-1.noarch.rpm')
USER_RUNDIR = '/tmp/dnf-user-rundir'
# often used query
def installed_but(sack, *args):
q = sack.query().filter(reponame__eq=hawkey.SYSTEM_REPO_NAME)
return reduce(lambda query, name: query.filter(name__neq=name), args, q)
# patching the stdout
@contextlib.contextmanager
def patch_std_streams():
with mock.patch('sys.stdout', new_callable=dnf.pycomp.StringIO) as stdout, \
mock.patch('sys.stderr', new_callable=dnf.pycomp.StringIO) as stderr:
yield (stdout, stderr)
@contextlib.contextmanager
def wiretap_logs(logger_name, level, stream):
"""Record *logger_name* logs of at least *level* into the *stream*."""
logger = logging.getLogger(logger_name)
orig_level = logger.level
logger.setLevel(level)
handler = logging.StreamHandler(stream)
orig_handlers = logger.handlers
logger.handlers = []
logger.addHandler(handler)
try:
yield stream
finally:
logger.removeHandler(handler)
logger.setLevel(orig_level)
logger.handlers = orig_handlers
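# Illustrative use of wiretap_logs (a sketch): capture messages from the
# 'dnf' logger into an in-memory stream for assertions:
#
#     stream = dnf.pycomp.StringIO()
#     with wiretap_logs('dnf', logging.INFO, stream):
#         logger.info('hello')
#     assert 'hello' in stream.getvalue()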
def command_configure(cmd, args):
parser = dnf.cli.option_parser.OptionParser()
args = [cmd._basecmd] + args
parser.parse_main_args(args)
parser.parse_command_args(cmd, args)
return cmd.configure()
def command_run(cmd, args):
command_configure(cmd, args)
return cmd.run()
def mockSwdbPkg(history, pkg, state="Installed", repo="unknown", reason=SwdbReason.USER):
""" Add DnfPackage into database """
hpkg = history.ipkg_to_pkg(pkg)
pid = history.add_package(hpkg)
pkg_data = SwdbPkgData()
history.swdb.trans_data_beg(0, pid, reason, state, False)
history.update_package_data(pid, 0, pkg_data)
history.set_repo(hpkg, repo)
class Base(dnf.Base):
def __init__(self, *args, **kwargs):
with mock.patch('dnf.rpm.detect_releasever', return_value=69):
super(Base, self).__init__(*args, **kwargs)
# mock objects
def mock_comps(history, seed_persistor):
comps = dnf.comps.Comps()
comps._add_from_xml_filename(COMPS_PATH)
persistor = history.group
if seed_persistor:
name = 'Peppers'
pkg_types = dnf.comps.MANDATORY
p_pep = persistor.new_group(name, name, name, False, pkg_types)
persistor.add_group(p_pep)
p_pep.add_package(['hole', 'lotus'])
name = 'somerset'
pkg_types = dnf.comps.MANDATORY
p_som = persistor.new_group(name, name, name, False, pkg_types)
persistor.add_group(p_som)
p_som.add_package(['pepper', 'trampoline', 'lotus'])
name = 'sugar-desktop-environment'
grp_types = dnf.comps.ALL_TYPES
pkg_types = dnf.comps.ALL_TYPES
p_env = persistor.new_env(name, name, name, pkg_types, grp_types)
persistor.add_env(p_env)
p_env.add_group(['Peppers', 'somerset'])
return comps
def mock_logger():
return mock.create_autospec(logger)
class _BaseStubMixin(object):
"""A reusable class for creating `dnf.Base` stubs.
See also: hawkey/test/python/__init__.py.
Note that currently the used TestSack has always architecture set to
"x86_64". This is to get the same behavior when running unit tests on
different arches.
"""
def __init__(self, *extra_repos):
super(_BaseStubMixin, self).__init__(FakeConf())
for r in extra_repos:
repo = MockRepo(r, self.conf)
repo.enable()
self._repos.add(repo)
self._repo_persistor = FakePersistor()
self._ds_callback = mock.Mock()
self._history = None
self._closing = False
def add_test_dir_repo(self, id_, cachedir):
"""Add a repository located in a directory in the tests."""
repo = dnf.repo.Repo(id_, cachedir)
repo.baseurl = ['file://%s/%s' % (REPO_DIR, repo.id)]
self.repos.add(repo)
return repo
def close(self):
self._closing = True
super(_BaseStubMixin, self).close()
@property
def history(self):
if self._history:
return self._history
else:
self._history = super(_BaseStubMixin, self).history
if not self._closing:
# don't reset db on close, it causes several tests to fail
self._history.reset_db()
return self._history
@property
def sack(self):
if self._sack:
return self._sack
return self.init_sack()
def _build_comps_solver(self):
return dnf.comps.Solver(self.history.group, self._comps,
REASONS.get)
def _activate_persistor(self):
pass
def init_sack(self):
# Create the Sack, tell it how to build packages, passing in the Package
# class and a Base reference.
self._sack = TestSack(REPO_DIR, self)
self._
Hakuba/youtube-dl | youtube_dl/version.py | Python | unlicense | 68 | 0
from __future__ import unicode_literals
__version__ = '2016.01.09'
testmana2/test | Plugins/VcsPlugins/vcsMercurial/HgCopyDialog.py | Python | gpl-3.0 | 2,379 | 0.004203
# -*- coding: utf-8 -*-
# Copyright (c) 2010 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to enter the data for a copy or rename operation.
"""
from __future__ import unicode_literals
import os.path
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from E5Gui.E5PathPicker import E5PathPickerModes
from .Ui_HgCopyDialog import Ui_HgCopyDialog
class HgCopyDialog(QDialog, Ui_HgCopyDialog):
"""
Class implementing a dialog to enter the data for a copy or rename
operation.
"""
def __init__(self, source, parent=None, move=False):
"""
Constructor
@param source name of the source file/directory (string)
@param parent parent widget (QWidget)
@param move flag indicating a move operation (boolean)
"""
super(HgCopyDialog, self).__init__(parent)
self.setupUi(self)
self.source = source
if os.path.isdir(self.source):
self.targetPicker.setMode(E5PathPickerModes.DirectoryMode)
else:
self.targetPicker.setMode(E5PathPickerModes.SaveFileMode)
if move:
self.setWindowTitle(self.tr('Mercurial Move'))
else:
self.forceCheckBox.setEnabled(False)
self.sourceEdit.setText(source)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
msh = self.minimumSizeHint()
self.resize(max(self.width(), msh.width()), msh.height())
def getData(self):
"""
Public method to retrieve the copy data.
@return the target name (string) and a flag indicating
the operation should be enforced (boolean)
"""
        target = self.targetPicker.text()
if not os.path.isabs(target):
            sourceDir = os.path.dirname(self.sourceEdit.text())
target = os.path.join(sourceDir, target)
return target, self.forceCheckBox.isChecked()
@pyqtSlot(str)
def on_targetPicker_textChanged(self, txt):
"""
Private slot to handle changes of the target.
@param txt contents of the target edit (string)
"""
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(
os.path.isabs(txt) or os.path.dirname(txt) == "")
seaglex/garden | learn_theano/tradition/lr.py | Python | gpl-3.0 | 5,860 | 0.003413
import numpy as np
import theano
import theano.tensor as T
import pickle
import timeit
import os.path
import sys
class MultiNomialLR(object):
def __init__(self, in_dim, out_dim):
self.W = theano.shared(
np.zeros((out_dim, in_dim), dtype=np.float64),
name="W",
borrow=True
)
self.b = theano.shared(
np.zeros(out_dim, dtype=np.float64),
name="b",
borrow=True
)
self.X = T.matrix('X')
self.y = T.ivector('y')
self.p_y_given_x = T.nnet.softmax(T.dot(self.X, self.W.T) + self.b)
self.loss = -T.mean(T.log(self.p_y_given_x[T.arange(self.y.shape[0]), self.y]))
self.grad_W, self.grad_b = T.grad(self.loss, wrt=[self.W, self.b])
self.error = T.mean(T.neq(T.argmax(self.p_y_given_x, axis=1), self.y))
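        # Note: self.loss above is the mean negative log-likelihood of the
        # true classes; p_y_given_x[T.arange(N), y] picks, row by row, the
        # predicted probability of each sample's correct label.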
def fit(self, train_X, train_y, validation_X, validation_y):
learning_rate = 0.13
batch_size = 600
n_epochs=1000
index = T.lscalar('index')
train_model = theano.function(
inputs=[index],
outputs=self.loss,
updates=[
(self.W, self.W - learning_rate*self.grad_W),
(self.b, self.b - learning_rate*self.grad_b)
],
givens={
self.X: train_X[index*batch_size: (index+1)*batch_size],
self.y: train_y[index*batch_size: (index+1)*batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs = self.error,
givens={
self.X: validation_X[index*batch_size: (index+1)*batch_size],
self.y: validation_y[index*batch_size: (index+1)*batch_size]
}
)
n_train_batches = train_X.get_value(borrow=True).shape[0] // batch_size
n_valid_batches = validation_X.get_value(borrow=True).shape[0] // batch_size
print('... training the model')
# early-stopping parameters
patience = 5000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience // 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = np.inf
test_score = 0.
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
                    validation_losses = [validate_model(i)
                                         for i in range(n_valid_batches)]
this_validation_loss = np.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %% (%f)' %
(
epoch,
minibatch_index + 1,
                            n_train_batches,
this_validation_loss * 100.0,
minibatch_avg_cost
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
print(
(
' epoch %i, minibatch %i/%i, test error of'
' best model %f %%'
) %
(
epoch,
minibatch_index + 1,
n_train_batches,
test_score * 100.
)
)
# save the best model
with open('best_model.pkl', 'wb') as f:
pickle.dump(self.W.get_value(), f)
pickle.dump(self.b.get_value(), f)
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(
(
'Optimization complete with best validation score of %f %%,'
'with test performance %f %%'
)
% (best_validation_loss * 100., test_score * 100.)
)
print('The code run for %d epochs, with %f epochs/sec' % (
epoch, 1. * epoch / (end_time - start_time)))
print(('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.1fs' % ((end_time - start_time))), file=sys.stderr)
nayas360/pyterm | bin/cat.py | Python | mit | 1,001 | 0
# cat command prints file contents
from lib.utils import *
def _help():
usage = '''
Usage: cat (file)
Print content of (file)
Use '%' in front of global
vars to use value as file
name.
'''
print(usage)
def main(argv):
if len(argv) < 1 or '-h' in argv:
_help()
return
    # The shell doesn't send the
# command name in the arg list
# so the next line is not needed
# anymore
# argv.pop(0)
# The shell does the work of replacing
# vars already. Code segment below
    # is not required anymore.
# argv=replace_vars(argv)
argv = make_s(argv)
path = get_path() + argv
if os.path.isfile(path):
with open(path) as f:
data = f.readlines()
print('_________________<START>_________________\n')
print(make_s2(data))
print('__________________<END>__________________\n')
return
elif os.path.isdir(path):
err(3, add=argv + ' is a directory')
    else:
        err(2, path)
ProjectIDA/ical | gui/analysis_progress_window.py | Python | gpl-3.0 | 4,458 | 0.005384
#######################################################################################################################
# Copyright (C) 2016 Regents of the University of California
#
# This is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License (GNU GPL) as published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# A copy of the GNU General Public License can be found in LICENSE.TXT in the root of the source code repository.
# Additionally, it can be found at http://www.gnu.org/licenses/.
#
# NOTES: Per GNU GPLv3 terms:
# * This notice must be kept in this source file
# * Changes to the source must be clearly noted with date & time of change
#
# If you use this software in a product, an explicit acknowledgment in the product documentation of the contribution
# by Project IDA, Institute of Geophysics and Planetary Physics, UCSD would be appreciated but is not required.
#######################################################################################################################
"""GUI Python code auto-generated from Qt Creator *.ui files by PyQt pyuic utility."""
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'analysis_progress_window.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AnalysisProgressFrm(object):
def setupUi(self, AnalysisProgressFrm):
AnalysisProgressFrm.setObjectName("AnalysisProgressFrm")
AnalysisProgressFrm.resize(312, 130)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(AnalysisProgressFrm.sizePolicy().hasHeightForWidth())
AnalysisProgressFrm.setSizePolicy(sizePolicy)
        AnalysisProgressFrm.setMinimumSize(QtCore.QSize(312, 130))
AnalysisProgressFrm.setMaximumSize(QtCore.QSize(312, 130))
self.progPB = QtWidgets.QProgressBar(AnalysisProgressFrm)
self.progPB.setGeometry(QtCore.QRect(20, 60, 272, 23))
self.progPB.setMaximum(0)
self.progPB.setProperty("value", -1)
self.progPB.setObjectName("progPB")
        self.calDescrLbl = QtWidgets.QLabel(AnalysisProgressFrm)
self.calDescrLbl.setGeometry(QtCore.QRect(20, 10, 271, 21))
font = QtGui.QFont()
font.setFamily("Helvetica Neue")
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.calDescrLbl.setFont(font)
self.calDescrLbl.setStyleSheet("line-height: 150%")
self.calDescrLbl.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.calDescrLbl.setObjectName("calDescrLbl")
self.calDescrLbl_2 = QtWidgets.QLabel(AnalysisProgressFrm)
self.calDescrLbl_2.setGeometry(QtCore.QRect(21, 40, 271, 21))
font = QtGui.QFont()
font.setFamily("Helvetica Neue")
font.setBold(False)
font.setWeight(50)
self.calDescrLbl_2.setFont(font)
self.calDescrLbl_2.setStyleSheet("line-height: 150%")
self.calDescrLbl_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.calDescrLbl_2.setObjectName("calDescrLbl_2")
self.cancelBtn = QtWidgets.QPushButton(AnalysisProgressFrm)
self.cancelBtn.setGeometry(QtCore.QRect(182, 90, 115, 32))
self.cancelBtn.setObjectName("cancelBtn")
self.retranslateUi(AnalysisProgressFrm)
QtCore.QMetaObject.connectSlotsByName(AnalysisProgressFrm)
def retranslateUi(self, AnalysisProgressFrm):
_translate = QtCore.QCoreApplication.translate
AnalysisProgressFrm.setWindowTitle(_translate("AnalysisProgressFrm", "Form"))
self.calDescrLbl.setText(_translate("AnalysisProgressFrm", "Analyzing calibration data..."))
self.calDescrLbl_2.setText(_translate("AnalysisProgressFrm", "This will take several minutes."))
self.cancelBtn.setText(_translate("AnalysisProgressFrm", "Cancel"))
googleapis/python-dialogflow-cx | samples/generated_samples/dialogflow_v3_generated_entity_types_list_entity_types_sync.py | Python | apache-2.0 | 1,518 | 0.000659
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListEntityTypes
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3_generated_EntityTypes_ListEntityTypes_sync]
from google.cloud import dialogflowcx_v3
def sample_list_entity_types():
# Create a client
client = dialogflowcx_v3.EntityTypesClient()
# Initialize request argument(s)
request = dialogflowcx_v3.ListEntityTypesRequest(
parent="par
|
ent_value",
)
# Make the request
page_result = client.list_entity_types(request=request)
# Handle the response
for response in page_result:
print(response)
# [END dialogflow_v3_generated_EntityTypes_ListEntityTypes_sync]
activitycentral/ebookreader | src/ReadTab/epubadapter.py | Python | gpl-2.0 | 17,071 | 0.002285
from gi.repository import Gtk
import os
from gi.repository import GObject
import shutil
from decimal import *
from gettext import gettext as _
from documentviewercommonutils import DocumentViewerCommonUtils
from utils import is_machine_a_xo
from epubview.epub import _Epub
from epubview.webkitbackend import Browser
TOO_FAST_MESSAGE = 'You are scrolling pages way too fast. To ' + \
'navigate to particular locations, use "Bookmarks", or "Search"'
class EpubViewer(Browser, DocumentViewerCommonUtils):
def __init__(self, main_instance, app):
getcontext().prec = 15
Browser.__init__(self, main_instance)
DocumentViewerCommonUtils.__init__(self, main_instance, app)
self._app = app
self._new_file_loaded = None
self._resume_characteristics_done = False
self._first_time_load_flag = False
self._go_to_flag = True
self._view.connect('document-load-finished',
self.perform_actions_upon_loading_if_any)
def __load_file(self, filenum, timeout=5000):
if self._new_file_loaded is False:
return
self._new_file_loaded = False
self._sub_file_number = filenum
self._maximum_offset_calculated = False
self._previous_file_loaded = None
self._current_uri = self._filelist[filenum]
for extension in 'xhtml','xml','htm':
if self._current_uri.endswith(extension):
dest = self._current_uri.replace(extension, 'html')
shutil.copy(self._current_uri.replace('file://', ''), dest)
self._current_uri = dest.replace('file://', '')
self._view.open(self._current_uri)
self._main_instance.window.force_ui_updates()
def perform_actions_upon_loading_if_any(self, first=None, second=None, third=None):
self._main_instance.window.force_ui_updates()
self.get_maximum_offset_possible()
self._maximum_offset_calculated = True
self._first_time_load_flag = True
self._next_file_loaded = True
        if self._resume_characteristics_done is True:
self._new_file_loaded = True
def do_view_specific_sync_operations(self):
self.__sync_in_case_internal_bookmarks_are_navigated()
self.__update_percentage_of_document_completed_reading()
# Always keep calling this function, as this is a
# "GObject.timeout" function.
return True
    def __sync_in_case_internal_bookmarks_are_navigated(self):
        if self._new_file_loaded is False:
            return
uri_to_test = self.get_currently_loaded_uri()
if uri_to_test == self._current_uri:
return
# Sometimes, the URI could be None or "blank". Do nothing in that case.
if uri_to_test is None:
return
if uri_to_test[0] != '/':
return
for i in range(0, len(self._filelist)):
initial_complete_uri_file_path = \
os.path.join(self._document._tempdir, self._filelist[i])
if initial_complete_uri_file_path == uri_to_test:
self._current_uri = initial_complete_uri_file_path
self._sub_file_number = i
return
def __update_percentage_of_document_completed_reading(self):
if self._new_file_loaded == False:
return
current_y_scroll = self.get_y_scroll()
maximum_y_scroll = self.get_maximum_offset_possible()
if maximum_y_scroll != 0:
current_percentage_of_page_navigated = \
(1.0 * current_y_scroll) / maximum_y_scroll
else:
current_percentage_of_page_navigated = 0
effective_share_of_current_page = \
((1.0 * self._filesizes[self._sub_file_number])/(self._total_file_size)) * current_percentage_of_page_navigated
total_percentage = 0
for i in range (0, self._sub_file_number):
total_percentage = total_percentage + ((1.0 * self._filesizes[i])/(self._total_file_size))
total_percentage = total_percentage + effective_share_of_current_page
# Special case : if this is the absolute end of the document,
# show "100%".
if (self._last_y_scroll == self.get_y_scroll()) and \
(self._sub_file_number == (len(self._filelist) - 1)):
total_percentage = 1
self._progress_bar.set_fraction(total_percentage)
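        # In short: fraction read = (bytes of fully-read sub-files + bytes
        # of the current sub-file weighted by its scroll fraction) / total
        # bytes of the document.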
def load_document(self, file_path, sub_file_number, metadata, readtab):
self._metadata = metadata
self._readtab = readtab
self._document = _Epub(file_path.replace('file://', ''))
self._filelist = []
self._filesizes = []
self._coverfile_list = []
self._total_file_size = 0
for i in self._document._navmap.get_flattoc():
self._filelist.append(os.path.join(self._document._tempdir, i))
for j in self._document._navmap.get_cover_files():
self._coverfile_list.append(os.path.join(self._document._tempdir, j))
shutil.copy('./ReadTab/epubview/scripts.js', self._document._tempdir)
for file_path in self._filelist:
size = int(os.stat(file_path).st_size)
self._filesizes.append(size)
self._total_file_size = self._total_file_size + size
try:
#if self._document._navmap.is_tag_present(file_path, 'img') is False:
self._insert_js_reference(file_path, self._document._tempdir)
except:
pass
self._total_sub_files = len(self._filelist)
# Before loading, remove all styling, else the bookmarks will
# start failing way too quickly.
dirname = os.path.dirname(self._filelist[0])
"""
for f in os.listdir(dirname):
if f.endswith('.css'):
os.unlink(os.path.join(dirname, f))
"""
# Finally, load the file.
self.__load_file(sub_file_number, timeout=100)
GObject.timeout_add(100, self.__reload_previous_settings)
def _insert_js_reference(self, file_name, tempdir):
js_reference = '<script type="text/javascript" src="' + tempdir + '/scripts.js"></script>'
o = open(file_name + '.tmp', 'a')
for line in open(file_name):
line = line.replace('</head>', js_reference + '</head>')
o.write(line + "\n")
o.close()
shutil.copy(file_name + '.tmp', file_name)
def __reload_previous_settings(self):
if self._first_time_load_flag is True:
if len(self._metadata.keys()) > 0:
self.resume_previous_characteristics(self._metadata,
self._readtab)
else:
self.resumption_complete()
return False
else:
return True
def resumption_complete(self):
self.get_maximum_offset_possible()
self._maximum_offset_calculated = True
self._resume_characteristics_done = True
self._new_file_loaded = True
self._main_instance.set_ui_sensitive(True)
self._go_to_flag = True
# Initialize and reset the js plugin
self._view.execute_script('reset()');
def __load_previous_file(self, scroll_to_end=True):
if self._sub_file_number > 0:
self.__load_file(self._sub_file_number - 1)
if scroll_to_end is True:
GObject.timeout_add(100, self.__scroll_to_end_of_loaded_file)
else:
self._previous_file_loaded = True
def __scroll_to_end_of_loaded_file(self):
if self._new_file_loaded is True:
if not self.is_current_segment_an_image_segment():
self.scroll_to_page_end()
self._previous_file_loaded = True
return False
else:
return True
def is_current_segment_an_image_segment(self):
return self._current_uri in self._coverfile_list
def previous_page(self):
self.remove_focus_from_location_text()
if sel
agati/chimera | src/chimera/instruments/sk/tests/skdrv_OK_25062015.py | Python | gpl-2.0 | 8,467 | 0.006614
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2015 chimera - observatory automation system
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# *******************************************************************
#This driver is intended to be used with the Emerson Commander SK
#order number SKBD200110 - 15/06/2015 - salvadoragati@gmail.com
from pymodbus.client.sync import ModbusTcpClient
class SKDrv(ModbusTcpClient):
    # Initial variable setup - this is the original setup defined at
    # installation time. It is the same for both Commander SK drives.
    # If you are planning to change these parameters, see Application Note CTAN#293
    ip = '127.0.0.1' # change to the IP address of the Commander SK installed on your network
min_speed = '' #Hz parm1
max_speed = '' #Hz parm2
acc_rate = '' #s/100Hz parm3
dec_rate = '' #s/100 Hz parm4
motor_rated_speed = 0 #rpm parm7 -attention: the ctsoft original parm is 1800 rpm
motor_rated_voltage = 230 #V parm 8
    motor_power_factor = '' # parm 9: can be changed to the motor's nameplate value if it is known.
    # It is the motor cos(phi), with 0.5 < motor_power_factor < 0.97.
    ramp_mode = 2 # parm 30: Standard (Std, 2) when no dynamic braking resistor is fitted;
    # with a braking resistor, set it to 0 (Fast).
    dynamicVtoF = 'OFF' # parm 32 - should not be used when the drive acts as a soft start to full speed; keep it off.
voltage_mode_select = 2 #parm 41 fixed boost mode(2)
low_freq_voltage_boost = 1 #parm 42 0.5< low_freq_voltage_boost<1
__config__ = {'ip': '127.0.0.1', 'min_speed': 0, 'max_speed': 600, 'acc_rate': 50, 'dec_rate': 100,
'motor_rated_speed': 1800,
'motor_rated_voltage': 230, 'motor_power_factor': 85, 'ramp_mode': 1, 'dynamicVtoF': 1,
'voltage_mode_select': 2,
'low_freq_voltage_boost': 10}
def read_parm(self,parm):
"""
        gets a string in the format 'xx.xx' and converts it to a mapped
        commander sk address and returns its contents
"""
parm_menu = parm.split('.')[0]
parm_parm = parm.split('.')[1]
address = int(parm_menu) * 100 + int(parm_parm) - 1
result = self.read_holding_registers(address, 1)
return result.registers[0]
def write_parm(self,parm, value):
"""
        gets a string in the format 'xx.xx' and converts it to a mapped
        commander sk address and writes the value to it
"""
parm_menu = parm.split('.')[0]
parm_parm = parm.split('.')[1]
address = int(parm_menu) * 100 + int(parm_parm) - 1
        self.write_register(address, value)
        result = self.read_holding_registers(address, 1)
        return result.registers[0] == value
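    # Illustrative sketch (not part of the original driver): the 'xx.xx'
    # notation maps menu.parm onto a zero-based Modbus register via
    # address = menu * 100 + parm - 1, e.g. '00.30' -> 29 and '01.05' -> 104.
    # A hypothetical round-trip under that assumption:
    #   drv = SKDrv('127.0.0.1')
    #   drv.write_parm('00.30', 2)   # True once the read-back matches
    #   drv.read_parm('00.30')       # -> 2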
def check_basic(self):
parm_change = []
#check parm1
parm1 = self.read_parm('00.01')
print "parm1=",parm1
min_speed = self.__config__['min_speed']
print "min_speed=", min_speed
if parm1 == min_speed:
print "parm1 ok"
else:
print "parm1 with parm_change"
parm_change.append('parm1')
print "*****************************"
# check parm2
parm2 = self.read_parm("00.02")
print "parm2=",parm2
max_speed = self.__config__['max_speed']
print "max_speed=", max_speed
if parm2 == max_speed:
print "parm2 ok"
else:
print "parm2 with parm_change"
parm_change.append('parm2')
print "*****************************"
#check parm3
parm3 = self.read_parm("00.03")
print "parm3=",parm3
acc_rate = self.__config__['acc_rate']
print "acc_rate=", acc_rate
if parm3 == acc_rate:
print "parm3 ok"
else:
print "parm3 with parm_change"
parm_change.append('parm3')
print "*****************************"
#check parm4
parm4 = self.read_parm("00.04")
print "parm4=",parm4
dec_rate = self.__config__['dec_rate']
print "dec_rate=", dec_rate
if parm4 == dec_rate:
print "parm4 ok"
else:
print "parm4 with parm_change"
parm_change.append('parm4')
print "*****************************"
#check parm7
parm7 = self.read_parm("00.07")
print "parm7=",parm7
motor_rated_speed = self.__config__['motor_rated_speed']
print "motor_rated_speed=", motor_rated_speed
if parm7 == motor_rated_speed:
print "parm7 ok"
else:
print "parm7 with parm_change"
parm_change.append('parm7')
print "*****************************"
#check parm8
parm8 = self.read_parm("00.08")
print "parm8=",parm8
motor_rated_voltage = self.__config__['motor_rated_voltage']
print "motor_rated_voltage=", motor_rated_voltage
if parm8 == motor_rated_voltage:
print "parm8 ok"
else:
print "parm8 with parm_change"
parm_change.append('parm8')
print "*****************************"
#check parm9
parm9 = self.read_parm("00.09")
print "parm9=",parm9
motor_power_factor = self.__config__['motor_power_factor']
print "motor_power_factor=", motor_power_factor
if parm9 == motor_power_factor:
print "parm9 ok"
else:
print "parm9 with parm_change"
            parm_change.append('parm9')
print "*****************************"
#check parm30
parm30 = self.read_parm("00.30")
print "parm30=",parm30
        ramp_mode = self.__config__['ramp_mode']
print "ramp_mode=", ramp_mode
if parm30 == ramp_mode:
print "parm30 ok"
else:
print "parm30 with parm_change"
parm_change.append('parm30')
print "*****************************"
#check parm32
parm32 = self.read_parm("00.32")
print "parm32=",parm32
dynamicVtoF = self.__config__['dynamicVtoF']
print "dynamicVtoF=", dynamicVtoF
if parm32 == dynamicVtoF:
print "parm32 ok"
else:
print "parm32 with parm_change"
parm_change.append('parm32')
print "*****************************"
#check parm41
parm41 = self.read_parm("00.41")
print "parm41=",parm41
voltage_mode_select = self.__config__['voltage_mode_select']
print "voltage_mode_select=", voltage_mode_select
if parm41 == voltage_mode_select:
print "parm41 ok"
else:
print "parm41 with parm_change"
parm_change.append('parm41')
print "*****************************"
#check parm42
parm42 = self.read_parm("00.42")
print "parm42=",parm42
low_freq_voltage_boost = self.__config__['low_freq_voltage_boost']
print "low_freq_voltage_boost=", low_freq_voltage_boost
if parm42 == low_freq_voltage_boost:
print "parm42 ok"
else:
print "parm42 with parm_change"
parm_change.append('parm42')
print "*****************************"
return parm_change
def check_state(self):
"""
TODO
:return:
"""
def power_on(self):
""
|
QiJune/Paddle
|
python/paddle/dataset/tests/flowers_test.py
|
Python
|
apache-2.0
| 1,707
| 0.000586
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.dataset.flowers
import unittest
class TestFlowers(unittest.TestCase):
def check_reader(self, reader):
sum = 0
label = 0
size = 224 * 224 * 3
for l in reader():
self.assertEqual(l[0].size, size)
if l[1] > label:
label = l[1]
sum += 1
return sum, label
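    # check_reader drains a reader once: it asserts every image flattens to
    # 224*224*3 values, counts the instances, and tracks the largest label
    # seen, which lets the tests below pin down the split sizes and the
    # number of flower classes (102).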
def test_train(self):
instances, max_label_value = self.check_reader(
paddle.dataset.flowers.train())
self.assertEqual(instances, 6149)
self.assertEqual(max_label_value, 102)
def test_test(self):
instances, max_label_value = self.check_reader(
paddle.dataset.flowers.test())
self.assertEqual(instances, 1020)
self.assertEqual(max_label_value, 102)
def test_valid(self):
instances, max_label_value = self.check_reader(
paddle.dataset.flowers.valid())
self.assertEqual(instances, 1020)
self.assertEqual(max_label_value, 102)
if __name__ == '__main__':
unittest.main()
|
mjcollin/ibm_bladecenter
|
bc_boot_revert.py
|
Python
|
mit
| 393
| 0
|
#!/usr/bin/python
import sys
import lib.ssh_helper as ssh
host = sys.argv[1]
blade = sys.argv[2]
pw = ssh.prompt_password(host)
chan, sess = ssh.get_channel(host, pw)
# Eat the initial welcome text
ssh.get_output(chan)
ssh.run(chan, 'tcpcmdmode
|
-t 3600 -T system:mm[0]')
ssh.run(chan, 'env -T system:blade[' + blade + ']')
ssh.run(chan, 'bootseq cd usb hd0 nw')
chan.close()
sess.clos
|
e()
|
bossiernesto/melta
|
test/transactions/test_transactional.py
|
Python
|
bsd-3-clause
| 1,365
| 0.002198
|
from unittest import TestCase
from melta.transactions.transactional import Transaction
class TransactionTestClass(Transaction):
def __init__(self):
self.plant_type = 'Pointisera'
self.plant_age = 3
self.plant_pot = 'plastic'
self.combination = 'one\ntwo\nthree'
class TransactionalTestCase(TestCase):
def setUp(self):
self.test_plant = TransactionTestClass()
self.test_plant.start()
    def test_successful_transaction(self):
self.test_plant.age = 4
self.test_plant.commit()
self.assertEqual(self.test_plant.age, 4)
    def test_unsuccessful_transaction(self):
clay = 'Clay'
self.test_plant.plant_pot = clay
self.test_plant.rollback()
self.assertNotEqual(self.test_plant.plant_pot, clay)
self.assertEqual(self.test_plant.plant_pot,'plastic')
def test_multiline_string_transaction(self):
another_combination = 'one\nnine\nfive'
new_combination = 'one\nnine\nthree'
self.test_plant.combination = new_combination
self.test_plant.commit()
self.assertEqual(self.test_plant.combination, new_combination)
        self.test_plant.start()
self.test_plant.combination = another_combination
self.test_plant.rollback()
self.assertEqual(self.test_plant.combination, new_combination)
|
bitmazk/django-hero-slider
|
hero_slider/south_migrations/0001_initial.py
|
Python
|
mit
| 9,348
| 0.007916
|
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SliderItem'
db.create_table('hero_slider_slideritem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File'])),
('position', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('hero_slider', ['SliderItem'])
# Adding model 'SliderItemTitle'
db.create_table('hero_slider_slideritemtitle', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('slider_item', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hero_slider.SliderItem'])),
('language', self.gf('django.db.models.fields.CharField')(max_length=2)),
))
db.send_create_signal('hero_slider', ['SliderItemTitle'])
def backwards(self, orm):
# Deleting model 'SliderItem'
db.delete_table('hero_slider_slideritem')
# Deleting model 'SliderItemTitle'
db.delete_table('hero_slider_slideritemtitle')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'hero_slider.slideritem': {
'Meta': {'object_name': 'SliderItem'},
|
vizcacha/practicalcv
|
chapter_10/find-black-pieces.py
|
Python
|
mit
| 426
| 0.002347
|
from SimpleCV import Image
import time
# Get the template and image
goBoard = Image('go.png')
black = Image('go-black.png')
black.show()
time.sleep(3)
goBoard.show()
time.sleep(3)
# Find the matches and draw them
matches = goBoard.findTemplate(black)
matches.draw()
# Show the board with matches and print the number found
goBoard.show()
print str(len(matches)) + " matches found."
# Should output: 9 matches found.
time.sleep(3)
|
lkash/test
|
dpkt/tns.py
|
Python
|
bsd-3-clause
| 1,079
| 0.000927
|
# $Id: tns.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Transparent Network Substrate."""
import dpkt
class TNS(dpkt.Packet):
__hdr__ = (
('length', 'H', 0),
('pktsum', 'H', 0),
('type', 'B', 0),
('rsvd', 'B', 0),
('hdrsum', 'H', 0),
('msg', '0s', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.length - self.__hdr_len__
if n > len(self.data):
raise dpkt.NeedData('short message (missing %d bytes)' %
(n - len(self.data)))
self.msg = self.data[:n]
self.data = self.data[n:]
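# The fixed TNS header is 8 bytes (H + H + B + B + H), and `length` counts
# the whole packet, so unpack() keeps length - 8 bytes of payload as `msg`
# and raises NeedData when the buffer holds fewer payload bytes than that.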
def test_tns():
    s = ('\x00\x23\x00\x00\x01\x00\x00\x00\x01\x34\x01\x2c\x00\x00\x08\x00\x7f'
         '\xff\x4f\x98\x00\x00\x00\x01\x00\x01\x00\x22\x00\x00\x00\x00\x01\x01X')
t = TNS(s)
assert t.msg.startswith('\x01\x34')
# test a truncated packet
try:
        t = TNS(s[:-10])
except dpkt.NeedData:
pass
if __name__ == '__main__':
test_tns()
print 'Tests Successful...'
|
polltooh/FineGrainedAction
|
nn/cal_accuracy_v3.py
|
Python
|
mit
| 2,845
| 0.003866
|
#! /usr/bin/env python
import utility_function as uf
import sys
import cv2
import numpy as np
def read_file_list(file_name):
# assume the first argument is the image name and the second one is the label
name_list = list()
label_list = list()
with open(file_name, "r") as f:
s = f.read()
s = uf.delete_last_empty_line(s)
        s_l = s.split("\n")
for ss in s_l:
ss_l = ss.split(" ")
assert(len(ss_l) == 2)
name_list.append(ss_l[0])
label_list.append(int((ss_l[1])))
return name_list, label_list
def read_label(label_file):
with open(label_file, "r") as f:
        s = f.read()
s = uf.delete_last_empty_line(s)
s_l = s.split("\n")
s_l = [s == "true" for s in s_l]
return s_l
def read_fine_tune_res_file(file_name, res_list, label_num):
with open(file_name, "r") as f:
file_data = f.read()
file_data = uf.delete_last_empty_line(file_data)
data_list = file_data.split("\n")
if (len(data_list) == 0):
print("empty file " + file_name)
return
for i in range(len(data_list)):
d_l = data_list[i].split(" ")
if (int(float(d_l[1])) == label_num):
index = uf.file_name_to_int(d_l[0])
res_list[index] = True
def read_triplet_res_file(file_name, res_list, radius):
with open(file_name, "r") as f:
file_data = f.read()
file_data = uf.delete_last_empty_line(file_data)
data_list = file_data.split("\n")
if (len(data_list) == 0):
print("empty file " + file_name)
return
for i in range(len(data_list)):
d_l = data_list[i].split(" ")
if (float(d_l[1]) < radius):
index = uf.file_name_to_int(d_l[0])
res_list[index] = True
# image = cv2.imread(d_l[0])
# cv2.imshow("res", image)
# cv2.waitKey(100)
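# Both readers above flag entries in res_list: read_fine_tune_res_file marks
# files whose predicted class equals label_num, while read_triplet_res_file
# marks files whose reported distance falls below the given radius.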
if __name__ == "__main__":
if (len(sys.argv) < 3):
print("Usage: cal_accuracy_v3.py res_file_name.txt label_file.txt")
exit(1)
res_file_name = sys.argv[1]
label_file_name = sys.argv[2]
res_name, res_list = read_file_list(res_file_name)
label_name, label_list = read_file_list(label_file_name)
diff_count = 0
for i in range(len(res_name)):
if (res_name[i] != label_name[i]):
print("n1 is %s n2 is %s"%(n1,n2))
exit(1)
ave_precision = uf.cal_ave_precision(label_list, res_list, 12)
con_mat = uf.cal_confusion_matrix(label_list, res_list, 12)
np.save("ave_precision_v3.npy", ave_precision)
np.save("con_mat_v3.npy", con_mat)
print(uf.cal_ave_precision(label_list, res_list, 12))
print(uf.cal_confusion_matrix(label_list, res_list, 12))
|
doerlbh/Indie-nextflu
|
augur/src/virus_clean.py
|
Python
|
agpl-3.0
| 3,403
| 0.033206
|
# clean sequences after alignment, criteria based on sequences
# make inline with canonical ordering (no extra gaps)
import os, datetime, time, re
from itertools import izip
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from scipy import stats
import numpy as np
class virus_clean(object):
"""docstring for virus_clean"""
def __init__(self,n_iqd = 5, **kwargs):
'''
parameters
n_std -- number of interquartile distances accepted in molecular clock filter
'''
self.n_iqd = n_iqd
def remove_insertions(self):
'''
remove all columns from the alignment in which the outgroup is gapped
'''
outgroup_ok = np.array(self.sequence_lookup[self.outgroup['strain']])!='-'
for seq in self.viruses:
seq.seq = Seq("".join(np.array(seq.seq)[outgroup_ok]).upper())
def clean_gaps(self):
'''
remove viruses with gaps -- not part of the standard pipeline
'''
        self.viruses = filter(lambda x: '-' not in x.seq, self.viruses)
def clean_ambiguous(self):
'''
substitute all ambiguous characters with '-',
ancestral inference will interpret this as missing data
'''
for v in self.viruses:
v.seq = Seq(re.sub(r'[BDEFHIJKLMNOPQRSUVWXYZ]', '-',str(v.seq)))
def unique_date(self):
'''
add a unique numerical date to each leaf. uniqueness is achieved adding a small number
'''
from date_util import numerical_date
og = self.sequence_lookup[self.outgroup['strain']]
if hasattr(og, 'date'):
try:
og.num_date = numerical_date(og.date)
except:
print "cannot parse date"
og.num_date="undefined";
for ii, v in enumerate(self.viruses):
if hasattr(v, 'date'):
try:
v.num_date = numerical_date(v.date, self.date_format['fields']) + 1e-7*(ii+1)
except:
print "cannot parse date"
v.num_date="undefined";
def times_from_outgroup(self):
outgroup_date = self.sequence_lookup[self.outgroup['strain']].num_date
return np.array([x.num_date-outgroup_date for x in self.viruses if x.strain])
def distance_from_outgroup(self):
from seq_util import hamming_distance
outgroup_seq = self.sequence_lookup[self.outgroup['strain']].seq
return np.array([hamming_distance(x.seq, outgroup_seq) for x in self.viruses if x.strain])
def clean_distances(self):
"""Remove viruses that don't follow a loose clock """
times = self.times_from_outgroup()
distances = self.distance_from_outgroup()
slope, intercept, r_value, p_value, std_err = stats.linregress(times, distances)
residuals = slope*times + intercept - distances
r_iqd = stats.scoreatpercentile(residuals,75) - stats.scoreatpercentile(residuals,25)
if self.verbose:
print "\tslope: " + str(slope)
print "\tr: " + str(r_value)
print "\tresidual
|
s iqd: " + str(r_iqd)
new_viruses = []
for (v,r) in izip(self.viruses,residuals):
            # filter out viruses more than n_iqd interquartile distances
            # above or below the clock regression (the outgroup is kept)
if np.abs(r)<self.n_iqd * r_iqd or v.id == self.outgroup["strain"]:
new_viruses.append(v)
else:
if self.verbose>1:
print "\t\tresidual:", r, "\nremoved ",v.strain
self.viruses = MultipleSeqAlignment(new_viruses)
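    # The filter above implements a loose molecular clock: distances to the
    # outgroup are regressed on sampling dates, and a virus is kept only if
    # its residual satisfies |r| < n_iqd * IQR(residuals), a criterion that
    # is robust to the very outliers it is meant to remove.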
def clean_generic(self):
print "Number of viruses before cleaning:",len(self.viruses)
self.unique_date()
self.remove_insertions()
self.clean_ambiguous()
self.clean_distances()
self.viruses.sort(key=lambda x:x.num_date)
print "Number of viruses after outlier filtering:",len(self.viruses)
|
ilogue/niprov
|
tests/test_add.py
|
Python
|
bsd-3-clause
| 7,954
| 0.001886
|
import unittest, os
from mock import Mock, patch, call, sentinel
from tests.ditest import DependencyInjectionTestBase
class AddTests(DependencyInjectionTestBase):
def setUp(self):
super(AddTests, self).setUp()
self.config.dryrun = False
self.repo.byLocation.return_value = None
self.query.copiesOf.return_value = []
self.img = Mock()
self.lastProvenance = None
def locAt(loc, provenance):
self.lastProvenance = provenance
self.lastPath = loc
return self.img
self.fileFactory.locatedAt.side_effect = locAt
patcher = patch('niprov.adding.datetime')
self.datetime = patcher.start()
self.addCleanup(patcher.stop)
def add(self, path, **kwargs):
from niprov.adding import add
with patch('niprov.adding.inheritFrom') as self.inheritFrom:
return add(path, dependencies=self.dependencies, **kwargs)
def assertNotCalledWith(self, m, *args, **kwargs):
c = call(*args, **kwargs)
assert c not in m.call_args_list, "Unexpectedly found call: "+str(c)
def test_Returns_provenance_and_informs_listener(self):
new = '/p/f2'
image = self.add(new)
self.listener.fileAdded.assert_called_with(self.img)
self.assertEqual(image, self.img)
def test_Sets_transient_flag_if_provided(self):
image = self.add('/p/f1', transient=True)
self.assertEqual(self.lastProvenance['transient'],True)
def test_Creates_ImageFile_object_with_factory(self):
image = self.add('p/afile.f')
self.assertIs(self.img, image)
def test_Calls_inspect(self):
image = self.add('p/afile.f')
self.img.inspect.assert_called_with()
def test_If_inspect_raises_exceptions_tells_listener_and_doesnt_save(self):
self.img.inspect.side_effect = IOError
image = self.add('p/afile.f')
assert not self.repo.add.called
assert not self.repo.update.called
self.listener.fileError.assert_called_with(self.img.path)
self.assertEqual(self.img.status, 'failed')
def test_If_dryrun_doesnt_talk_to_repo_and_status_is_test(self):
self.config.dryrun = True
image = self.add('p/afile.f')
assert not self.repo.add.called
assert not self.repo.update.called
assert not self.img.inspect.called
def test_accepts_optional_provenance(self):
image = self.add('p/afile.f', provenance={'fob':'bez'})
self.assertEqual(self.lastProvenance['fob'],'bez')
def test_If_file_doesnt_exists_raises_error(self):
self.filesys.fileExists.return_value = False
self.assertRaises(IOError, self.add, self.img.location.path)
self.filesys.fileExists.assert_called_with(self.img.location.path)
def test_For_nonexisting_transient_file_behaves_normal(self):
self.filesys.fileExists.return_value = False
self.add('p/afile.f', transient=True)
def test_Doesnt_inspect_transient_files(self):
self.add('p/afile.f', transient=True)
assert not self.img.inspect.called
def test_Adds_timestamp(self):
image = self.add('p/afile.f')
self.assertEqual(self.lastProvenance['added'],self.datetime.now())
def test_Adds_uid(self):
with patch('niprov.adding.shortuuid') as shortuuid:
shortuuid.uuid.return_value = 'abcdefghijklmn'
image = self.add('p/afile.f')
self.assertEqual(self.lastProvenance['id'],'abcdef')
def test_If_config_attach_set_calls_attach_on_file(self):
self.config.attach = False
self.add('p/afile.f')
assert not self.img.attach.called, "Shouldnt attach if not configured."
self.config.attach = True
self.config.attach_format = 'abracadabra'
self.add('p/afile.f', transient=True)
assert not self.img.attach.called, "Shouldnt attach to transient file."
self.add('p/afile.f')
self.img.attach.assert_called_with('abracadabra')
def test_If_file_unknown_adds_it(self): # A
self.repo.byLocation.return_value = None
self.repo.getSeries.return_value = None
image = self.add('p/afile.f')
self.repo.add.assert_any_call(self.img)
def test_If_file_is_version_but_not_series(self): # B
previousVersion = Mock()
self.repo.byLocation.return_value = previousVersion
self.repo.getSeries.return_value = None
img = self.add('p/afile.f')
self.img.keepVersionsFromPrevious.assert_called_with(previousVersion)
self.repo.update.assert_any_call(self.img)
def test_If_file_is_version_and_series(self): # C
previousVersion = Mock()
series = Mock()
self.repo.byLocation.return_value = previousVersion
self.repo.getSeries.return_value = series
image = self.add('p/afile.f')
self.img.keepVersionsFromPrevious.assert_called_with(previousVersion)
self.repo.update.assert_any_call(self.img)
def test_If_file_not_version_but_series_and_not_in_there_yet(self): # D1
series = Mock()
series.hasFile.return_value = False
series.mergeWith.return_value = series
self.repo.byLocation.return_value = None
self.repo.getSeries.return_value = series
image = self.add('p/afile.f')
series.mergeWith.assert_called_with(self.img)
self.repo.update.assert_any_call(series)
def test_If_file_not_version_but_series_has_file(self): # D2
series = Mock()
series.hasFile.return_value = True
self.repo.byLocation.return_value = None
self.repo.getSeries.return_value = series
image = self.add('p/afile.f')
assert not series.mergeWith.called
self.img.keepVersionsFromPrevious.assert_called_with(series)
self.repo.update.assert_any_call(self.img)
def test_copiesOf_not_called_before_inspect(self):
        def testIfInspectedAndReturnEmptyList(img):
            img.inspect.assert_called_with()
            return []
        self.query.copiesOf.side_effect = testIfInspectedAndReturnEmptyList
image = self.add('p/afile.f')
def test_getSeries_not_called_before_inspect(self):
self.repo.getSeries.side_effect = lambda img: img.inspect.assert_called_with()
image = self.add('p/afile.f')
def test_copiesOf_not_called_if_parent_available(self):
image = self.add('p/afile.f', provenance={'parents':[sentinel.parent]})
assert not self.query.copiesOf.called
def test_Found_copy_set_as_parent_inherits_and_flags_and_informs_listener(self):
self.img.provenance = {}
copy = Mock()
copy.provenance = {'location':'copy-location'}
self.query.copiesOf.return_value = [self.img, copy]
out = self.add('p/afile.f')
self.inheritFrom.assert_called_with(self.img.provenance, copy.provenance)
self.listener.usingCopyAsParent.assert_called_with(copy)
        self.assertEqual(copy.location.toString(), out.provenance['parents'][0])
self.assertEqual(True, out.provenance['copy-as-parent'])
def test_If_only_copy_is_same_location_ignores_it(self):
self.img.provenance = {}
self.query.copiesOf.return_value = [self.img]
out = self.add('p/afile.f')
assert not self.inheritFrom.called
        assert not self.listener.usingCopyAsParent.called
self.assertNotIn('parents', out.provenance)
self.assertNotIn('copy-as-parent', out.provenance)
def test_Adds_niprov_version(self):
with patch('niprov.adding.pkg_resources') as pkgres:
dist = Mock()
dist.version = '5.4.1'
pkgres.get_distribution.return_value = dist
image = self.add('p/afile.f')
self.assertEqual(self.lastProvenance['version-added'], 5.41)
|
unlessbamboo/grocery-shop
|
language/python/src/command-line/argv-test.py
|
Python
|
gpl-3.0
| 347
| 0.006645
|
#!/usr/bin/env python
#coding:utf-8
##
# @file argv-test.py
# @brief
# The lowest-level command-line parsing; all other modules are
# essentially wrappers around it
# @author unlessbamboo
# @version 1.0
# @date 2016-03-03
import sys
def testSys():
"""testSys"""
    for arg in sys.argv[1:]:
print (arg)
if __name__ == '__main__':
testSys()
|
turbokongen/home-assistant
|
homeassistant/components/aemet/__init__.py
|
Python
|
apache-2.0
| 1,866
| 0.002144
|
"""The AEMET OpenData component."""
import asyncio
import logging
from aemet_opendata.interface import AEMET
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.core import HomeAssistant
from .const import COMPONENTS, DOMAIN, ENTRY_NAME, ENTRY_WEATHER_COORDINATOR
from .weather_update_coordinator import WeatherUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Set up the AEMET OpenData component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry):
"""Set up AEMET OpenData as config entry."""
name = config_entry.data[CONF_NAME]
api_key = config_entry.data[CONF_API_KEY]
latitude = config_entry.data[CONF_LATITUDE]
longitude = config_entry.data[CONF_LONGITUDE]
aemet = AEMET(api_key)
weather_coordinator = WeatherUpdateCoordinator(hass, aemet, latitude, longitude)
await weather_coordinator.async_refresh()
hass.data[DOMAIN][config_entry.entry_id] = {
ENTRY_NAME: name,
ENTRY_WEATHER_COORDINATOR: weather_coordinator,
}
for component in COMPONENTS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in COMPONENTS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
|
vonkarmaninstitute/FreePC
|
server/restriction_system/utils.py
|
Python
|
gpl-3.0
| 5,215
| 0.025503
|
# This file is part of FreePC.
#
# FreePC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FreePC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FreePC. If not, see <http://www.gnu.org/licenses/>.
## @package utils
from restriction_system.models import *
from datetime import datetime, timedelta
from django.utils import timezone
from dateutil import tz
import time
## Method giving a queryset of all the buildings
#
# @return QuerySet of Building
def get_buildings():
return Building.objects.all().order_by('name')
## Method giving a queryset of all rooms present in the building
#
# @param b a Building
# @return QuerySet of Room
def get_rooms_from(b):
return Room.objects.filter(building=b).order_by('name')
## Method giving a queryset of all the workstations present in the room
#
# @param r a Room
# @return QuerySet of Workstation
def get_workstations_from(r):
"""
"""
return Workstation.objects.filter(room=r).order_by('hostname')
## Method verifying if we are in restricted time
#
# @param w a workstation
# @param time a time
# @return True if restricted otherwise False
def is_restricted(w, time):
ti_zone = tz.tzlocal()
time = time.replace(tzinfo=ti_zone)
day = time.isoweekday()
hour = time.time()
# restriction_on_days == False mean no restriction for the day
restriction_on_days = False
rt = RestrictionTime.objects.get(id=w.restriction_time_id)
rd = RestrictionDay.objects.get(id=rt.days_id)
if day == 1:
if rd.monday:
restriction_on_days = True
elif day == 2:
if rd.tuesday:
restriction_on_days = True
elif day == 3:
if rd.wednesday:
restriction_on_days = True
elif day == 4:
if rd.thursday:
restriction_on_days = True
elif day == 5:
if rd.friday:
restriction_on_days = True
elif day == 6:
if rd.saterday:
restriction_on_days = True
elif day == 7:
if rd.sunday:
restriction_on_days = True
if restriction_on_days:
return ((rt.start <= hour) and (hour < rt.end))
else:
return False
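# Note: the day-by-day dispatch above is equivalent to looking the flag up in
# a mapping such as {1: rd.monday, 2: rd.tuesday, ..., 7: rd.sunday}; the
# workstation is restricted only when the day flag is set and
# rt.start <= hour < rt.end.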
## Method verifying if a user can connect on a workstation
#
# @param w a Workstation
# @param wu a WorkstationUser
# @param user a UserSystem
# @param time a time
# @return True if can reconnect
def can_reconnect(w, wu, user, time):
print "dans can_reconnect"
ti_zone = tz.tzlocal()
time = time.replace(tzinfo=ti_zone)
time_start = wu.connection_start
time_end = wu.connection_end
total_connection_day = timedelta()
other_connection_today = False
#wuall = WorkstationUser.objects.filter(workstation_type_id=wu.workstation_type_id).filter(username=user.username).filter(connection_start__startswith=time).exclude(logged=True)
#print "avant calcul total connection"
#for wua in wuall:
# other_connection_today = True
# diff = wua.connection_end - wua.connection_start
# total_connection += diff
if time_end == None:
return None
diff_time = time - time_end
#print "avant other_connection_today"
#if other_connection_today:
# max_hours = timedelta(hours=w.max_hours_connection)
# if total_connection < max_hours:
# return True
timedelta_interval = timedelta(minutes=w.interval_time_not_disconnection)
if diff_time < timedelta_interval:
return None
timedelta_interval = timedelta(hours=w.waiting_time_before_reconnect)
if diff_time >= timedelta_interval:
return None
return diff_time
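# can_reconnect therefore returns None when the user never disconnected, when
# the disconnection is recent enough to be ignored (within
# interval_time_not_disconnection minutes), or when the waiting period of
# waiting_time_before_reconnect hours has already elapsed; otherwise it
# returns the elapsed timedelta so the caller can derive the remaining wait.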
## Method giving the number of limit_connection of a workstation for a connection type.
#
# @param workstation a Worstation
# @param connection_type a ConnectionType
# @param number_connection an Integer
# @param restricted is_restricted
def vki_limit_connection(workstation, connection_type, number_connection, restricted=True):
limit_of_connection = 0
if connection_type.name == "console":
limit_of_connection = 1
elif connection_type.name == "ssh":
if restricted:
limit_of_connection = workstation.max_users_ssh
else:
limit_of_connection = workstation.max_users_ssh_unrestricted
else:
if restricted:
limit_of_connection = workstation.max_users_x2go
else:
limit_of_connection = workstation.max_users_x2go_unrestricted
if limit_of_connection == None:
limit_of_connection = 10
return number_connection < limit_of_connection
## Method giving a restriction time based on a string
#
# @param str_time an interval of hours (hh:mm - hh:mm)
# @return RestrictionTime Object
def get_restriction_time(str_time):
# time received in this format "hh:mm - hh:mm"
split_time = str_time.split(' ')
s_start = split_time[0]
s_end = split_time[-1]
try:
time_start = datetime.strptime(s_start, "%H:%M").time()
time_end = datetime.strptime(s_end, "%H:%M").time()
print time_start
print time_end
rt = RestrictionTime.objects.filter(start=time_start, end=time_end).first()
return rt
except:
return None
|
stackArmor/security_monkey
|
security_monkey/auditors/vpc/vpc.py
|
Python
|
apache-2.0
| 1,852
| 0
|
# Copyright 2016 Bridgewater Associates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditors.vpc
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <opensource@bwater.com>
"""
from security_monkey.auditor import Auditor
from security_monkey.watchers.vpc.vpc import VPC
from security_monkey.watchers.vpc.flow_log import FlowLog
class VPCAuditor(Auditor):
index = VPC.index
i_am_singular = VPC.i_am_singular
i_am_plural = VPC.i_am_plural
support_watcher_indexes = [FlowLog.index]
    def __init__(self, accounts=None, debug=False):
super(VPCAuditor, self).__init__(accounts=accounts, debug=debug)
def check_flow_logs_enabled(self, vpc_item):
"""
alert when flow logs are not enabled for VPC
"""
flow_log_items = self.get_watcher_support_items(
FlowLog.index, vpc_item.account)
vpc_id = vpc_item.config.get("id")
tag = "Flow Logs not enabled for VPC"
severity = 5
flow_logs_enabled = False
for flow_log in flow_log_items:
if vpc_id == flow_log.config.get("resource_id"):
flow_logs_enabled = True
break
if not flow_logs_enabled:
self.add_issue(severity, tag, vpc_item)
|
kz26/uchicago-hvz
|
uchicagohvz/game/dorm_migrations/0001_initial.py
|
Python
|
mit
| 11,763
| 0.003996
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
import uchicagohvz.overwrite_fs
from django.conf import settings
import django.utils.timezone
import uchicagohvz.game.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Award',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('points', models.FloatField(help_text=b'Can be negative, e.g. to penalize players')),
('code', models.CharField(help_text=b'leave blank for automatic (re-)generation', max_length=255, blank=True)),
('redeem_limit', models.IntegerField(help_text=b'Maximum number of players that can redeem award via code entry (set to 0 for awards to be added by moderators only)')),
('redeem_type', models.CharField(max_length=1, choices=[(b'H', b'Humans only'), (b'Z', b'Zombies only'), (b'A', b'All players')])),
],
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('registration_date', models.DateTimeField()),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('rules', models.FileField(storage=uchicagohvz.overwrite_fs.OverwriteFileSystemStorage(), upload_to=uchicagohvz.game.models.gen_rules_filename)),
('picture', models.FileField(storage=uchicagohvz.overwrite_fs.OverwriteFileSystemStorage(), null=True, upload_to=uchicagohvz.game.models.gen_pics_filename, blank=True)),
('color', models.CharField(default=b'#FFFFFF', max_length=64)),
('flavor', models.TextField(default=b'', max_length=6000)),
],
options={
'ordering': ['-start_date'],
},
),
migrations.CreateModel(
name='HighValueDorm',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('dorm', models.CharField(max_length=4, choices=[(b'BJ', b'Burton-Judson Courts'), (b'IH', b'International House'), (b'MAX', b'Max Palevsky'), (b'NC', b'North Campus'), (b'SH', b'Snell-Hitchcock'), (b'SC', b'South Campus'), (b'ST', b'Stony Island'), (b'OFF', b'Off campus')])),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('points', models.IntegerField(default=3)),
('game', models.ForeignKey(to='game.Game')),
],
),
migrations.CreateModel(
name='HighValueTarget',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('kill_points', models.IntegerField(default=3, help_text=b'# of points zombies receive for killing this HVT')),
('award_points', models.IntegerField(default=0, help_text=b'# of points the HVT earns if he/she survives for the entire duration')),
],
),
migrations.CreateModel(
name='Kill',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(default=django.utils.timezone.now)),
('points', models.IntegerField(default=1)),
('notes', models.TextField(blank=True)),
('lat', models.FloatField(null=True, verbose_name=b'latitude', blank=True)),
('lng', models.FloatField(null=True, verbose_name=b'longitude', blank=True)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('hvd', models.ForeignKey(related_name='kills', on_delete=django.db.models.deletion.SET_NULL, verbose_name=b'High-value Dorm', blank=True, to='game.HighValueDorm', null=True)),
('hvt', models.OneToOneField(related_name='kill', null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='game.HighValueTarget', verbose_name=b'High-value target')),
],
options={
'ordering': ['-date'],
},
),
migrations.CreateModel(
name='Mission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=63)),
('description', models.CharField(max_length=255)),
('summary', models.TextField(default=b'', max_length=6000)),
('zombies_win', models.BooleanField(default=False)),
('awards', models.ManyToManyField(help_text=b'Awards associated with this mission.', related_name='missions', to='game.Award', blank=True)),
('game', models.ForeignKey(related_name='missions', to='game.Game')),
],
),
migrations.CreateModel(
name='MissionPicture',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('picture', models.FileField(storage=uchicagohvz.overwrite_fs.OverwriteFileSystemStorage(), upload_to=uchicagohvz.game.models.gen_pics_filename)),
('lat', models.FloatField(null=True, verbose_name=b'latitude', blank=True)),
('lng', models.FloatField(null=True, verbose_name=b'longitude', blank=True)),
('game', models.ForeignKey(related_name='pictures', to='game.Game')),
],
),
migrations.CreateModel(
name='New_Squad',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128)),
('game', models.ForeignKey(related_name='new_squads', to='game.Game')),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('active', models.BooleanField(default=False)),
('bite_code', models.CharField(help_text=b'leave blank for automatic (re-)generation', max_length=255, blank=True)),
('dorm', models.CharField(max_length=4, choices=[(b'BJ', b'Burton-Judson Courts'), (b'IH', b'International House'), (b'MAX', b'Max Palevsky'), (b'NC', b'North Campus'), (b'SH', b'Snell-Hitchcock'), (b'SC', b'South Campus'), (b'ST', b'Stony Island'), (b'OFF', b'Off campus')])),
('major', models.CharField(help_text=b'autopopulates from LDAP', max_length=255, blank=True)),
('human', models.BooleanField(default=True)),
('opt_out_hvt', models.BooleanField(default=False)),
('gun_requested', models.BooleanField(default=False)),
('renting_gun', models.BooleanField(default=False)),
('gun_returned', models.BooleanField(default=False)),
('last_words', models.CharField(max_length=255, blank=True)),
('lead_zombie', models.BooleanField(default=False)),
|
akosyakov/intellij-community
|
python/testData/copyPaste/singleLine/IndentOnTopLevel.after.py
|
Python
|
apache-2.0
| 83
| 0.024096
|
class C:
def foo(self):
x = 1
y = 2
x = 1
def foo():
pass
|
a2ohm/picsou
|
sub/status.py
|
Python
|
gpl-3.0
| 1,333
| 0.003751
|
#! /usr/bin/python3
# -*- coding:utf-8 -*-
"""
Define the "status" sub-command.
"""
from lib.transaction import *
from lib.color import color
import sys
import yaml
def status(conf, args):
"""Print staging transactions.
"""
if not conf:
# The account book is not inited
print("There is no account book here.", end=' ')
print("Create one with: picsou init.")
sys.exit()
# Print basic information
print(color.BOLD + "%s" % conf['name'] + color.END)
if conf['description'] != '.':
print(color.ITALIC + " (%s)" % conf['description'] + color.END)
# Try to open and load the staging file
try:
with open("picsou.stage", 'r') as f:
stage = yaml.load(f)
except IOError:
print("Nothing to commit.")
sys.exit()
if stage:
        if len(stage) == 1:
            print("A transaction is waiting to be committed.")
        else:
            print("Some transactions are waiting to be committed.")
        # List transactions to be committed
transactions = \
[transaction._make(map(t.get, transaction._fields))
for t in stage]
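        # Each staged entry is a plain dict; transaction._make rebuilds the
        # transaction record by pulling its fields out of the dict in
        # declaration order.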
        # Print those transactions
print()
printTransactions(transactions)
else:
print("Nothing to commit.")
sys.exit()
|
dkm/skylines
|
skylines/tests/test_gjslint.py
|
Python
|
agpl-3.0
| 1,110
| 0
|
import os
import sys
import nose
from subprocess import CalledProcessError, check_output as run
from functools import partial
GJSLINT_COMMAND = 'gjslint'
GJSLINT_OPTIONS = ['--strict']
JS_BASE_FOLDER = os.path.join('skylines', 'public', 'js')
JS_FILES = [
'baro.js',
'fix-table.js',
'flight.js',
'general.js',
'map.js',
'phase-table.js',
'topbar.js',
'tracking.js',
'units.js',
]
def test_js_files():
for filename in JS_FILES:
f = partial(run_gjslint, filename)
f.description = 'gjslint {}'.format(filename)
yield f
def run_gjslint(filename):
path = os.path.join(JS_BASE_FOLDER, filename)
args = [GJSLINT_COMMAND]
args.extend(GJSLINT_OPTIONS)
args.append(path)
try:
run(args)
except CalledProcessError, e:
print e.output
raise AssertionError('gjslint has found errors.')
except OSError:
raise OSError('Failed to run gjslint. Please check that you have '
'installed it properly.')
if __name__ == "__main__":
sys.argv.append(__name__)
nose.run()
|
rtrembecky/roots
|
problems/migrations/0001_initial.py
|
Python
|
mit
| 12,637
| 0.00459
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import downloads.models
import base.storage
import base.models
from django.conf import settings
import problems.models
import sortedm2m.fields
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('leaflets', '0001_initial'),
('schools', '0001_initial'),
('competitions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OrgSolution',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('added_at', models.DateTimeField(auto_now_add=True, verbose_name='added at')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='modified at')),
('added_by', models.ForeignKey(related_name='OrgSolution_created', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='author')),
('modified_by', models.ForeignKey(related_name='OrgSolution_modified', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='last modified by')),
('organizer', models.ForeignKey(verbose_name='organizer', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'organizer solution',
'verbose_name_plural': 'organizer solutions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Problem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField(help_text='The problem itself. Please insert it in a valid TeX formatting.', verbose_name='problem text')),
('result', models.TextField(help_text='The result of the problem. For problems that do not have simple results, a hint or short outline of the solution.', null=True, verbose_name='Result / short solution outline', blank=True)),
('source', models.CharField(help_text='Source where you found the problem(if not original).', max_length=500, null=True, verbose_name='problem source', blank=True)),
('image', models.ImageField(storage=base.storage.OverwriteFileSystemStorage(), upload_to=b'problems/', blank=True, help_text='Image added to the problem text.', null=True, verbose_name='image')),
('additional_files', models.FileField(storage=base.storage.OverwriteFileSystemStorage(), upload_to=b'problems/', blank=True, help_text='Additional files stored with the problem (such as editable images).', null=True, verbose_name='additional files')),
('rating_votes', models.PositiveIntegerField(default=0, editable=False, blank=True)),
('rating_score', models.IntegerField(default=0, editable=False, blank=True)),
('added_at', models.DateTimeField(auto_now_add=True, verbose_name='added at')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='modified at')),
('added_by', models.ForeignKey(related_name='Problem_created', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='author')),
],
options={
'verbose_name': 'problem',
'verbose_name_plural': 'problems',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProblemCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='name')),
('competition', models.ForeignKey(verbose_name='competition', to='competitions.Competition', help_text='The reference to the competition that uses this category. It makes sense to have categories specific to each competition, since problem types in competitions may differ significantly.')),
],
options={
'ordering': ['name'],
'verbose_name': 'category',
'verbose_name_plural': 'categories',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProblemInSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('position', models.PositiveSmallIntegerField(verbose_name='position')),
('problem', models.ForeignKey(verbose_name='problem', to='problems.Problem')),
],
options={
'ordering': ['position'],
'verbose_name': 'problem',
'verbose_name_plural': 'problems',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProblemSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name=b'name')),
('description', models.CharField(max_length=400, null=True, verbose_name=b'description', blank=True)),
('added_at', models.DateTimeField(auto_now_add=True, verbose_name='added at')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='modified at')),
('added_by', models.ForeignKey(related_name='ProblemSet_created', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='author')),
('competition', models.ForeignKey(verbose_name='competition', to='competitions.Competition')),
('event', models.ForeignKey(verbose_name='event', blank=True, to='events.Event', null=True)),
('leaflet', models.ForeignKey(verbose_name='leaflet', blank=True, to='leaflets.Leaflet', null=True)),
('modified_by', models.ForeignKey(related_name='ProblemSet_modified', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='last modified by')),
('problems', sortedm2m.fields.SortedManyToManyField(help_text=None, to='problems.Problem', sort_value_field_name=b'position', verbose_name='problems', through='problems.ProblemInSet')),
],
options={
'verbose_name': 'Problem set',
'verbose_name_plural': 'Problem sets',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProblemSeverity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='name')),
('level', models.IntegerField(verbose_name='level')),
('competition', models.ForeignKey(verbose_name='competition', to='competitions.Competition', help_text='The reference to the competition that uses this severity. It makes sense to have severities specific to each competition, since organizers might have different ways of sorting the problems regarding their severity.')),
],
options={
'ordering': ['level'],
'verbose_name': 'severity',
'verbose_name_plural': 'severities',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserSolution',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('solution', base.models.ContentTypeRestrictedFileField(storage=base.storage.OverwriteFileSystemStorage(base_url=b'/protected/', location=b'/home/tbabej/Projects/roots-env/roots/protected/'), upload_to=problems.models.get_solution_path_global, null=T
|
Calvinxc1/neural_nets
|
Controller.py
|
Python
|
gpl-3.0
| 7,833
| 0.010596
|
#%% Libraries: Built-In
from copy import deepcopy as copy
import pandas as pd
import numpy as np
from datetime import datetime as dt
from datetime import timedelta as td
#%% Libraries: Custom
from Clusters.Data import DataCluster
from Clusters.ClusterGroup import ClusterGroup
#%%
class NetworkController(object):
defaults = {
'data_cluster': DataCluster,
'data_name': 'data_cluster'
}
def __init__(self, control_name, data_frame = None, **kwargs):
self.control_name = control_name
self.clusters = {}
self.epocs = 0
self.error_record = []
if data_frame is not None:
self.add_cluster(self.defaults['data_name'], self.defaults['data_cluster'], data_frame, **kwargs)
self.set_data_cluster(self.defaults['data_name'])
else:
self.data_cluster = None
def parse(self):
snapshot = {
'control_name': self.control_name,
'data_cluster': self.data_cluster,
'epocs': self.epocs,
'error_record': self.error_record,
}
snapshot['clusters'] = [cluster.parse() for cluster in self.clusters.values()]
return snapshot
def add_cluster(self, cluster_name, cluster_class, *args, **kwargs):
if cluster_name in self.clusters.keys():
raise Exception('cluster_name %s already exists' % cluster_name)
if type(cluster_class) is ClusterGroup:
self.clusters[cluster_name] = copy(cluster_class)
else:
self.clusters[cluster_name] = cluster_class(cluster_name, *args, **kwargs)
def set_data_cluster(self, cluster_name):
if cluster_name not in self.clusters.keys():
raise Exception('cluster_name %s not in clusters' % cluster_name)
self.data_cluster = cluster_name
def get_cluster(self, connect_type, cluster_name):
return self.clusters[cluster_name].return_cluster(connect_type)
def get_data_cluster(self):
return self.get_cluster(None, self.data_cluster)[0]
def connect_clusters(self, from_cluster_name, to_cluster_name, *args):
for from_clust in self.get_cluster('output', from_cluster_name):
for to_clust in self.get_cluster('input', to_cluster_name):
from_clust.connect_add('output', to_clust, *args)
to_clust.connect_add('input', from_clust, *args)
from_cluster = self.clusters[from_cluster_name]
to_cluster = self.clusters[to_cluster_name]
        if (type(from_cluster) is ClusterGroup) and (type(to_cluster) is ClusterGroup):
for back_channel in from_cluster.back_channels.keys():
if back_channel in to_cluster.back_channels.keys():
from_back_connects = from_cluster.back_channels[back_channel]['output']
                    to_back_connects = to_cluster.back_channels[back_channel]['input']
for from_back_cluster in from_back_connects:
for to_back_cluster in to_back_connects:
from_clust = from_cluster.clusters[from_cluster.grouped_cluster_name(from_back_cluster)]
to_clust = to_cluster.clusters[to_cluster.grouped_cluster_name(to_back_cluster)]
from_clust.connect_add('output', to_clust, *args)
to_clust.connect_add('input', from_clust, *args)
def connect_clusters_many(self, connect_items):
for connect_item in connect_items:
self.connect_clusters(*connect_item)
def add_with_connects(self, cluster_name, cluster_class, connect_items, *args, **kwargs):
def gen_new_connects(connect_items):
connect_list = []
for connect_item in connect_items:
direction = connect_item[0]
connect_name = connect_item[1]
if direction == 'input':
connect_list.append((connect_name, cluster_name, *connect_item[2:]))
elif direction == 'output':
connect_list.append((cluster_name, connect_name, *connect_item[2:]))
return connect_list
self.add_cluster(cluster_name, cluster_class, *args, **kwargs)
self.connect_clusters_many(gen_new_connects(connect_items))
def init_network(self):
obs_count = self.get_data_cluster().get_obs_count()
train_index, self.train_split = self.get_data_cluster().get_train_index()
self.valid_split = 1
for cluster in self.clusters.values():
cluster.init_cluster(obs_count, train_index)
self.error_record = []
self.epocs = 0
print('%s network initialized, %s total coefficients' % (self.control_name, self.get_coef_count()))
def learn_network(self, epoc_limit = 100, learn_weight = 1e-0, verbose = False):
while self.epocs < epoc_limit:
start_time = dt.utcnow()
self.epoc_network(learn_weight = learn_weight)
if np.any(pd.isnull(self.model_error())):
print('Divergent Pattern, halting build on %s' % self.control_name)
break
self.epocs += 1
end_seconds = (dt.utcnow() - start_time).total_seconds()
self.print_error(end_seconds)
def print_error(self, run_seconds):
        train_error = self.model_error()[self.train_split]
valid_error = self.model_error()[self.valid_split]
        print('\r %s completed epoc %s in %s sec.\tTrain Error: %s.\tValid Error:%s' % (self.control_name, self.epocs, round(run_seconds, 1), train_error, valid_error), end = '')
def epoc_network(self, learn_weight = 1e-0):
self.get_data_cluster().send_forward()
self.error_record.append(self.model_error())
self.get_data_cluster().send_backprop(learn_weight = learn_weight)
def predictions(self):
return self.get_data_cluster().return_predicts()
def model_error(self):
return self.get_data_cluster().get_model_error()
def get_trained_errors(self):
return np.array(self.error_record)
def get_coef_count(self):
coefs = sum([cluster.coef_count() for cluster in self.clusters.values()])
return coefs
def setup_recurrence(self, recur_cluster, feature_cols, label_cols, recurrences, recur_offset = 1):
new_feature_cols, new_label_cols = self.get_data_cluster().setup_data_recur(
feature_cols,
label_cols,
recurrences,
recur_offset = recur_offset
)
self.add_recurrence_clusters(recur_cluster, new_feature_cols, new_label_cols, recurrences, recur_offset = recur_offset)
def add_recurrence_clusters(self, cluster_class, feature_cols, label_cols, recurrences, recur_offset = 1, **kwargs):
for recur in range(recurrences):
cluster = copy(cluster_class)
if type(cluster) is ClusterGroup:
cluster_root_name = cluster.cluster_name
cluster.change_cluster_name('%s_%s' % (cluster_root_name, recur))
else:
cluster_root_name = 'rnn_cluster'
cluster_connects = [
('input', 'data_cluster', feature_cols[recur]),
('output', 'data_cluster', label_cols[recur])
]
if recur >= recur_offset:
cluster_connects.append(('input', '%s_%s' % (cluster_root_name, recur - recur_offset)))
self.add_with_connects('%s_%s' % (cluster_root_name, recur), cluster, cluster_connects, **kwargs)
#%%
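# Editor's usage sketch (hypothetical, not part of the original module):
# `df` and `HiddenCluster` are placeholder assumptions standing in for a
# pandas DataFrame and a concrete cluster class from the Clusters package.
# controller = NetworkController('demo', data_frame=df)
# controller.add_with_connects('hidden', HiddenCluster, [
#     ('input', 'data_cluster'),
#     ('output', 'data_cluster'),
# ])
# controller.init_network()
# controller.learn_network(epoc_limit=10, learn_weight=1e-2)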
|
amlannayak/apollo
|
src/mic_test.py
|
Python
|
gpl-3.0
| 521
| 0.015355
|
import speech_recognition as sr
# Obtain audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
    audio = r.listen(source, phrase_time_limit=5)
# Recognize using wit.ai
WIT_AI_KEY = "GP3LO2LIQ2Y4OSKOXZN6OAOONB55ZLN5"
try:
print("wit.ai thinks you said " + r.recognize_wit(audio, key=WIT_AI_KEY))
except sr.UnknownValueError:
print("wit.ai could not understand audio")
except sr.RequestError as e:
print("Could not request results from wit.ai servicel {0}".format(e))
|
messagebird/python-rest-api
|
examples/voice_message_create.py
|
Python
|
bsd-2-clause
| 1,444
| 0.018006
|
#!/usr/bin/env python
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import messagebird
ACCESS_KEY = 'test_gshuPaZoeEG6ovbc8M79w0QyM'
try:
# Create a MessageBird client with the specified ACCESS_KEY.
client = messagebird.Client(ACCESS_KEY)
# Send a new voice message.
vmsg = client.voice_message_create('31612345678', 'Hello World', { 'reference' : 'Foobar' })
# Print the object information.
print('\nThe following information was returned as a VoiceMessage object:\n')
print(' id : %s' % vmsg.id)
print(' href : %s' % vmsg.href)
print(' originator : %s' % vmsg.originator)
print(' body : %s' % vmsg.body)
print(' reference : %s' % vmsg.reference)
    print(' language : %s' % vmsg.language)
print(' voice : %s' % vmsg.voice)
print(' repeat : %s' % vmsg.repeat)
print(' ifMachine : %s' % vmsg.ifMachine)
print(' scheduledDatetime : %s' % vmsg.scheduledDatetime)
    print(' createdDatetime : %s' % vmsg.createdDatetime)
print(' recipients : %s\n' % vmsg.recipients)
except messagebird.client.ErrorException as e:
    print('\nAn error occurred while requesting a VoiceMessage object:\n')
for error in e.errors:
print(' code : %d' % error.code)
print(' description : %s' % error.description)
print(' parameter : %s\n' % error.parameter)
|
pkg-ime/ibus-anthy
|
setup/anthyprefs.py
|
Python
|
gpl-2.0
| 30,030
| 0.01582
|
# -*- coding: utf-8 -*-
# vim:set noet ts=4:
#
# ibus-anthy - The Anthy engine for IBus
#
# Copyright (c) 2007-2008 Peng Huang <shawn.p.huang@gmail.com>
# Copyright (c) 2009 Hideaki ABE <abe.sendai@gmail.com>
# Copyright (c) 2007-2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import gtk
import sys
from prefs import Prefs
N_ = lambda a : a
__all__ = ['AnthyPrefs']
class AnthyPrefs(Prefs):
    _prefix = 'engine/anthy'
def __init__(self, bus=None, config=None):
super(AnthyPrefs, self).__init__(bus, config)
self.default = _config
# The keys will be EOSL in the near future.
self.__update_key ("common",
"behivior_on_focus_out",
"behavior_on_focus_out")
self.__update_key ("common",
"
|
behivior_on_period",
"behavior_on_period")
self.fetch_all()
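    # Editor's note: __update_key below migrates a value stored under a
    # deprecated (misspelled) key to its corrected name: if the old key is
    # set and the new key is not, the old value is copied across and
    # committed, with a warning printed to stderr.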
def __update_key (self, section, old_key, new_key):
file = __file__
if __file__.find('/') >= 0:
file = __file__[__file__.rindex('/') + 1:]
warning_message = \
"(" + file + ") ibus-anthy-WARNING **: " \
"The key (" + old_key + ") will be removed in the future. " \
"Currently the key (" + new_key + ") is used instead. " \
"The ibus keys are defined in " + \
"/".join(["/desktop/ibus", self._prefix, section]) + " ."
if not self.fetch_item(section, old_key, True):
return
print >> sys.stderr, warning_message
if self.fetch_item(section, new_key, True):
return
self.fetch_item(section, old_key)
value = self.get_value(section, old_key)
self.set_value(section, new_key, value)
self.commit_item(section, new_key)
self.undo_item(section, new_key)
def keys(self, section):
if section.startswith('shortcut/'):
return _cmd_keys
return self.default[section].keys()
def get_japanese_ordered_list(self):
return _japanese_ordered_list
def get_version(self):
return '1.2.6'
# Sad! dict.keys() doesn't return the saved order.
# locale.strcoll() also just returns the Unicode code point.
# Unicode order is wrong in Japanese large 'a' and small 'a'.
# The workaround is to save the order here...
_japanese_ordered_list = [
"あ", "い", "う", "え", "お",
"ぁ", "ぃ", "ぅ", "ぇ", "ぉ",
"いぇ",
"うぁ", "うぃ", "うぅ", "うぇ", "うぉ",
"うゃ", "うゅ", "うょ",
"か", "き", "く", "け", "こ",
"ゕ", "ゖ", "ヵ", "ヶ",
"が", "ぎ", "ぐ", "げ", "ご",
"きゃ", "きぃ", "きゅ", "きぇ", "きょ",
"くぁ", "くぃ", "くぅ", "くぇ", "くぉ",
"ぎゃ", "ぎぃ", "ぎゅ", "ぎぇ", "ぎょ",
"ぐぁ", "ぐぃ", "ぐぅ", "ぐぇ", "ぐぉ",
"さ", "し", "す", "せ", "そ",
"ざ", "じ", "ず", "ぜ", "ぞ",
"しゃ", "しぃ", "しゅ", "しぇ", "しょ",
"じゃ", "じぃ", "じゅ", "じぇ", "じょ",
"すぅぃ", "すぇ",
"ずぇ",
"た", "ち", "つ", "て", "と",
"だ", "ぢ", "づ", "で", "ど",
"っ",
"ちゃ", "ちぃ", "ちゅ", "ちぇ", "ちょ",
"ぢぃ", "ぢぇ",
"ぢゃ", "ぢゅ", "ぢょ",
"つぁ", "つぃ", "つぇ", "つぉ",
"つゃ", "つぃぇ", "つゅ", "つょ",
"づぁ", "づぃ", "づぇ", "づぉ",
"づゃ", "づぃぇ", "づゅ", "づょ",
"てぃ", "てぇ",
"てゃ", "てゅ", "てょ",
"とぅ",
"でぃ", "でぇ",
"でゃ", "でゅ", "でょ",
"どぅ",
"な", "に", "ぬ", "ね", "の",
"にぃ", "にぇ",
"にゃ", "にゅ", "にょ",
"は", "ひ", "ふ", "へ", "ほ",
"ば", "び", "ぶ", "べ", "ぼ",
"ぱ", "ぴ", "ぷ", "ぺ", "ぽ",
"ひぃ", "ひぇ",
"ひゃ", "ひゅ", "ひょ",
"びぃ", "びぇ",
"びゃ", "びゅ", "びょ",
"ぴぃ", "ぴぇ",
"ぴゃ", "ぴゅ", "ぴょ",
"ふぁ", "ふぃ", "ふぇ", "ふぉ",
"ふゃ", "ふゅ", "ふょ",
"ぶぁ", "ぶぇ", "ぶぉ",
"ぷぁ", "ぷぇ", "ぷぉ",
"ま", "み", "む", "め", "も",
"みぃ", "みぇ",
"みゃ", "みゅ", "みょ",
"や", "ゆ", "よ",
"ゃ", "ゅ", "ょ",
"ら", "り", "る", "れ", "ろ",
"りぃ", "りぇ",
"りゃ", "りゅ", "りょ",
"わ", "を", "ん",
"ゎ",
"ゐ", "ゑ",
"ー",
"ヴぁ", "ヴぃ", "ヴ", "ヴぇ", "ヴぉ",
"ヴゃ", "ヴぃぇ", "ヴゅ", "ヴょ",
]
_cmd_keys = [
"on_off",
"circle_input_mode",
"circle_kana_mode",
"latin_mode",
"wide_latin_mode",
"hiragana_mode",
"katakana_mode",
"half_katakana_mode",
# "cancel_pseudo_ascii_mode_key",
"circle_typing_method",
"circle_dict_method",
"insert_space",
"insert_alternate_space",
"insert_half_space",
"insert_wide_space",
"backspace",
"delete",
"commit",
"convert",
"predict",
"cancel",
"cancel_all",
"reconvert",
# "do_nothing",
"select_first_candidate",
"select_last_candidate",
"select_next_candidate",
"select_prev_candidate",
"candidates_page_up",
"candidates_page_down",
"move_caret_first",
"move_caret_last",
"move_caret_forward",
"move_caret_backward",
"select_first_segment",
"select_last_segment",
"select_next_segment",
"select_prev_segment",
"shrink_segment",
"expand_segment",
"commit_first_segment",
"commit_selected_segment",
"select_candidates_1",
"select_candidates_2",
"select_candidates_3",
"select_candidates_4",
"select_candidates_5",
"select_candidates_6",
"select_candidates_7",
"select_candidates_8",
"select_candidates_9",
"select_candidates_0",
"convert_to_char_type_forward",
"convert_to_char_type_backward",
"convert_to_hiragana",
"convert_to_katakana",
"convert_to_half",
"convert_to_half_katakana",
"convert_to_wide_latin",
"convert_to_latin",
"dict_admin",
"add_word",
"start_setup",
]
_config = {
'common': {
'input_mode': 0,
'typing_method': 0,
'conversion_segment_mode': 0,
'period_style': 0,
'symbol_style': 1,
'ten_key_mode': 1,
'behavior_on_focus_out': 0,
'behavior_on_period': 0,
'page_size': 10,
'half_width_symbol': False,
'half_width_number': False,
'half_width_space': False,
'shortcut_type': 'default',
'dict_admin_command': ['/usr/local/bin/kasumi', 'kasumi'],
'add_word_command': ['/usr/local/bin/kasumi', 'kasumi', '-a'],
'dict_config_icon': '/usr/local/share/pixmaps/kasumi.png',
},
'romaji_typing_rule': {
'method': 'default',
# The newkeys list is saved for every romaji_typing_rule/$method
# so that prefs.get_value_direct() is not used.
# prefs.fetch_section() doesn't get the keys if they exist
# in gconf only.
'newkeys': [],
},
##0 MS-IME
# http://www.filibeto.org/sun/lib/solaris10-docs/E19253-01/819-7844/appe-1-4/index.html
##1 ATOK
# http://www.filibeto.org/sun/lib/solaris10-docs/E19253-01/819-7844/appe-1-3/index.html
##2 Gairaigo http://ja.wikipedia.org/wiki/%E5%A4%96%E6%9D%A5%E8%AA%9E
##3 ANSI/BSI Suggestions http://en.wikipedia.org/wiki/Katakana
# Maybe we need a compatibility between MS-IME and ibus-anthy.
'romaji_typing_rule/default': {
"-": "ー",
"a" : "あ",
"i" : "い",
"u" : "う",
"e" : "え",
"o" : "お",
"xa" : "ぁ",
"xi" : "ぃ",
"xu" : "ぅ",
"xe" : "ぇ",
"xo" : "ぉ",
"la" : "ぁ",
"li" : "ぃ",
"lu" : "ぅ",
"le" : "ぇ",
"lo" : "ぉ",
"wha" : "うぁ",
"whi" : "うぃ",
"whe" : "うぇ",
"who" : "うぉ",
"wya" : "うゃ", ##2
"wyu" : "うゅ", ##2
"wyo" : "うょ", ##2
"va" : "ヴぁ",
"vi" : "ヴぃ",
"vu" : "ヴ",
"ve" : "ヴぇ",
"vo" : "ヴぉ",
"vya" : "ヴゃ", ##2
|
testvidya11/ejrf
|
questionnaire/migrations/0025_auto__add_field_questiongroup_allow_multiples.py
|
Python
|
bsd-3-clause
| 17,416
| 0.007407
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'QuestionGroup.allow_multiples'
db.add_column(u'questionnaire_questiongroup', 'allow_multiples',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'QuestionGroup.allow_multiples'
db.delete_column(u'questionnaire_questiongroup', 'allow_multiples')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.answer': {
'Meta': {'object_name': 'Answer'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Question']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Draft'", 'max_length': '15'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'})
},
'questionnaire.answergroup': {
'Meta': {'object_name': 'AnswerGroup'},
'answer': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['questionnaire.Answer']", 'null': 'True', 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'grouped_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionGroup']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'questionnaire.comment': {
'Meta': {'object_name': 'Comment'},
'answer_group': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'comments'", 'symmetrical': 'False', 'to': "orm['questionnaire.AnswerGroup']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'questionnaire.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'countries'", 'null': 'True', 'to': "orm['questionnaire.Region']"})
},
'questionnaire.dateanswer': {
'Meta': {'object_name': 'DateAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.DateField', [], {})
},
'questionnaire.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionOption']"})
},
'questionnaire.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.A
|
infectiious/Pharaoh_script
|
Markets/KuCoin/kucoin_api.py
|
Python
|
mit
| 1,648
| 0.01517
|
#!/usr/bin/env python3
import argparse
import requests
import time
import datetime
import random
# import pymysql
from connections import hostname, username, password, portnumber, database
class MarketKuCoin(object):
# Set variables for API String.
domain = "https://api.kucoin.com"
url = ""
uri = ""
# Function to build API string.
def __init__(self, uri, name, market):
super(MarketKuCoin, self).__init__()
self.name = name
self.uri = uri
self.url = self.domain + uri
self.market = market
        self.dbstr = market.lower() + "_" + name.lower()
# Function to query API string and write to mysql database.
def update_data(self):
# db = pymysql.connect(host=hostname, user=username,
# passwd=password, port=portnumber, db=database)
# db.autocommit(True)
# cur = db.cursor()
r = requests.get(self.url, verify=True)
rdata = (r.json()["data"])
ask = str(rdata.get("sell", "none"))
bid = str(rdata.get("buy", "none"))
last = str(rdata.get("lastDealPrice", "none"))
tstampstr = str(rdata.get("datetime", "none"))
tstampint = tstampstr.replace(' ', '')[:-3]
tstampint = float(tstampint)
ltime = time.ctime(tstampint)
utime = time.asctime(time.gmtime(tstampint))
print (ask)
print (str(r.json()))
# query = "INSERT INTO " + dbstr + "(ask,bid,lastsale,recorded_time) " \
# "VALUES(%s,%s,%s,FROM_UNIXTIME(%s))" % (ask, bid, last, tstamp)
# print (query)
# cur.execute(query)
# cur.close()
# db.close()
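# Editor's note (hypothetical usage): update_data() issues a GET against
# self.domain + uri and reads sell/buy/lastDealPrice from the JSON "data"
# field, e.g. MarketKuCoin('/v1/open/tick?symbol=ETH-BTC', 'eth', 'btc');
# the endpoint shown here is an assumption, not verified against the API.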
|
ellisonbg/altair
|
tools/schemapi/utils.py
|
Python
|
bsd-3-clause
| 12,696
| 0.001024
|
"""Utilities for working with schemas"""
import json
import keyword
import pkgutil
import re
import textwrap
import jsonschema
EXCLUDE_KEYS = ('definitions', 'title', 'description', '$schema', 'id')
def load_metaschema():
schema = pkgutil.get_data('schemapi', 'jsonschema-draft04.json')
schema = schema.decode()
return json.loads(schema)
def resolve_references(schema, root=None):
"""Resolve References within a JSON schema"""
resolver = jsonschema.RefResolver.from_schema(root or schema)
while '$ref' in schema:
with resolver.resolving(schema['$ref']) as resolved:
schema = resolved
return schema
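# Editor's note (illustrative): given root = {'definitions': {'Foo': {'type':
# 'string'}}} and schema = {'$ref': '#/definitions/Foo'},
# resolve_references(schema, root) returns {'type': 'string'}.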
def get_valid_identifier(prop, replacement_character='', allow_unicode=False):
"""Given a string property, generate a valid Python identifier
Parameters
----------
replacement_character: string, default ''
The character to replace invalid characters with.
allow_unicode: boolean, default False
If True, then allow Python 3-style unicode identifiers.
Examples
--------
>>> get_valid_identifier('my-var')
'myvar'
>>> get_valid_identifier('if')
'if_'
>>> get_valid_identifier('$schema', '_')
'_schema'
>>> get_valid_identifier('$*#$')
'_'
"""
# First substitute-out all non-valid characters.
flags = re.UNICODE if allow_unicode else re.ASCII
    valid = re.sub(r'\W', replacement_character, prop, flags=flags)
# If nothing is left, use just an underscore
if not valid:
valid = '_'
# first character must be a non-digit. Prefix with an underscore
# if needed
    if re.match(r'^[\d\W]', valid):
valid = '_' + valid
# if the result is a reserved keyword, then add an underscore at the end
if keyword.iskeyword(valid):
valid += '_'
return valid
def is_valid_identifier(var, allow_unicode=False):
"""Return true if var contains a valid Python identifier
Parameters
----------
val : string
identifier to check
allow_unicode : bool (default: False)
if True, then allow Python 3 style unicode identifiers.
"""
flags = re.UNICODE if allow_unicode else re.ASCII
    is_valid = re.match(r"^[^\d\W]\w*\Z", var, flags)
return is_valid and not keyword.iskeyword(var)
class SchemaProperties(object):
"""A wrapper for properties within a schema"""
def __init__(self, properties, schema, rootschema=None):
self._properties = properties
self._schema = schema
self._rootschema = rootschema or schema
def __bool__(self):
return bool(self._properties)
def __dir__(self):
return list(self._properties.keys())
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
return super(SchemaProperties, self).__getattr__(attr)
def __getitem__(self, attr):
dct = self._properties[attr]
if 'definitions' in self._schema and 'definitions' not in dct:
dct = dict(definitions=self._schema['definitions'], **dct)
return SchemaInfo(dct, self._rootschema)
def __iter__(self):
return iter(self._properties)
def items(self):
return ((key, self[key]) for key in self)
def keys(self):
return self._properties.keys()
def values(self):
return (self[key] for key in self)
class SchemaInfo(object):
"""A wrapper for inspecting a JSON schema"""
def __init__(self, schema, rootschema=None, validate=False):
if hasattr(schema, '_schema'):
if hasattr(schema, '_rootschema'):
schema, rootschema = schema._schema, schema._rootschema
else:
schema, rootschema = schema._schema, schema._schema
elif not rootschema:
rootschema = schema
if validate:
metaschema = load_metaschema()
jsonschema.validate(schema, metaschema)
jsonschema.validate(rootschema, metaschema)
self.raw_schema = schema
self.rootschema = rootschema
self.schema = resolve_references(schema, rootschema)
def child(self, schema):
return self.__class__(schema, rootschema=self.rootschema)
def __repr__(self):
keys = []
for key in sorted(self.schema.keys()):
val = self.schema[key]
rval = repr(val).replace('\n', '')
if len(rval) > 30:
rval = rval[:30] + '...'
if key == 'definitions':
rval = "{...}"
elif key == 'properties':
rval = '{\n ' + '\n '.join(sorted(map(repr, val))) + '\n }'
keys.append('"{0}": {1}'.format(key, rval))
return "SchemaInfo({\n " + '\n '.join(keys) + "\n})"
@property
def title(self):
if self.is_reference():
return get_valid_identifier(self.refname)
else:
return ''
@property
def short_description(self):
return self.title or self.medium_description
@property
def medium_description(self):
_simple_types = {'string': 'string',
'number': 'float',
'integer': 'integer',
'object': 'mapping',
'boolean': 'boolean',
'array': 'list',
'null': 'None'}
if self.is_empty():
return 'any object'
elif self.is_enum():
            return 'enum({0})'.format(', '.join(map(repr, self.enum)))
elif self.is_anyOf():
return 'anyOf({0})'.format(', '.join(s.short_description
for s in self.anyOf))
elif self.is_oneOf():
return 'oneOf({0})'.format(', '.join(s.short_description
for s in self.oneOf))
elif self.is_allOf():
            return 'allOf({0})'.format(', '.join(s.short_description
for s in self.allOf))
elif self.is_not():
return 'not {0}'.format(self.not_.short_description)
elif isinstance(self.type, list):
options = []
subschema = SchemaInfo(dict(**self.schema))
for typ_ in self.type:
subschema.schema['type'] = typ_
options.append(subschema.short_description)
return "anyOf({0})".format(', '.join(options))
elif self.is_object():
return "Mapping(required=[{0}])".format(', '.join(self.required))
elif self.is_array():
return "List({0})".format(self.child(self.items).short_description)
elif self.type in _simple_types:
return _simple_types[self.type]
elif not self.type:
import warnings
warnings.warn("no short_description for schema\n{0}"
"".format(self.schema))
return 'any'
@property
def long_description(self):
# TODO
return 'Long description including arguments and their types'
@property
def properties(self):
return SchemaProperties(self.schema.get('properties', {}),
self.schema, self.rootschema)
@property
def definitions(self):
return SchemaProperties(self.schema.get('definitions', {}),
self.schema, self.rootschema)
@property
def required(self):
return self.schema.get('required', [])
@property
def patternProperties(self):
return self.schema.get('patternProperties', {})
@property
def additionalProperties(self):
return self.schema.get('additionalProperties', True)
@property
def type(self):
return self.schema.get('type', None)
@property
def anyOf(self):
return [self.child(s) for s in self.schema.get('anyOf', [])]
@property
def oneOf(self):
return [self.child(s) for s in self.schema.get('oneOf', [])]
@property
def allOf(self):
return [self.child(s) for s in self.schema.get('allOf', [])]
@property
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/wearables/pants/shared_pants_s14.py
|
Python
|
mit
| 478
| 0.031381
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/pants/shared_pants_s14.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","pants_s14")
#### BEGIN MODIFICATIONS ####
result.max_condition = 1000
#### END MODIFICATIONS ####
return result
|
Azure/WALinuxAgent
|
tests/common/osutil/test_clearlinux.py
|
Python
|
apache-2.0
| 1,180
| 0.000847
|
# Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import unittest
from azurelinuxagent.common.osutil.clearlinux import ClearLinuxUtil
from tests.tools import AgentTestCase
from .test_default import osutil_get_dhcp_pid_should_return_a_list_of_pids
class TestClearLinuxUtil(AgentTestCase):
def setUp(self):
AgentTestCase.setUp(self)
def tearDown(self):
AgentTestCase.tearDown(self)
def test_get_dhcp_pid_should_return_a_list_of_pids(self):
osutil_get_dhcp_pid_should_return_a_list_of_pids(self, ClearLinuxUtil())
if __name__ == '__main__':
unittest.main()
|
cryspy-team/cryspy
|
tests/test_utils.py
|
Python
|
gpl-3.0
| 1,181
| 0.005927
|
import pytest
import sys
sys.path.append("../src/")
import cryspy
from cryspy.fromstr import fromstr as fs
import numpy as np
def test_Karussell():
metric = cryspy.geo.Cellparameters(1, 1, 1, 90, 90, 90).to_Metric()
k = cryspy.utils.Karussell(metric, fs("d 1 0 0"), fs("d 0 1 0"))
d1 = k.direction(0)
assert float(metric.length(d1 - fs("d 1.0 0.0 0"))) < 1e-9
d2 = k.direction(np.pi / 2)
assert float(metric.length(d2 - fs("d 0 1 0"))) < 1e-9
metric = cryspy.geo.Cellparameters(1, 1, 1, 90, 90, 45).to_Metric()
k = cryspy.utils.Karussell(metric, fs("d 1 0 0"), fs("d 0 1 0"))
d1 = k.direction(0)
    assert float(metric.length(d1 - fs("d 1.0 0.0 0"))) < 1e-9
d2 = k.direction(np.pi / 4)
assert float(metric.length(d2 - fs("d 0 1 0"))) < 1e-9
def test_fill():
atomset = cryspy.crystal.Atomset({cryspy.crystal.Atom("Fe1", "Fe", fs("p 1/2 1/2 1/2"))})
    atomset = cryspy.utils.fill(atomset, [0.6, 0.6, 0.6])
assert len(atomset.menge) == 27
atomset = cryspy.crystal.Atomset({cryspy.crystal.Atom("Fe1", "Fe", fs("p 0 0 0"))})
atomset = cryspy.utils.fill(atomset, [0.1, 0.1, 0.1])
assert len(atomset.menge) == 8
|
shawnhermans/cyborgcrm
|
cyidentity/cyfullcontact/tests/test_activity_stream.py
|
Python
|
bsd-2-clause
| 433
| 0
|
from actstream.models import Action
from django.test import TestCase
from cyidentity.cyfullcontact.tests.util import create_sample_contact_info
class FullContactActivityStreamTestCase(TestCase):
def test_contact_create(self):
contact_info = create_sample_contact_info()
action = Action.objects.actor(contact_info).latest('timestamp')
self.assertEqual(action.verb, 'FullContact information was created')
| |
qbeenslee/Nepenthes-Server
|
config/configuration.py
|
Python
|
gpl-3.0
| 1,315
| 0.001668
|
# coding:utf-8
"""
Author : qbeenslee
Created : 2014/12/12
"""
import re
# Client ID
CLIENT_ID = "TR5kVmYeMEh9M"
'''
Transport token format:
hash_method$iterations$salt$result
For example:
====start====
md5$23$YUXQ_-2GfwhzVpt5IQWp$3ebb6e78bf7d0c1938578855982e2b1c
====end====
'''
MATCH_PWD = r"md5\$(\d\d)\$([a-zA-Z0-9_\-]{20})\$([a-f0-9]{32})"
REMATCH_PWD = re.compile(MATCH_PWD)
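# Editor's note (illustrative): the sample token in the docstring above
# matches this pattern; the groups are the iteration count, the 20-character
# salt, and the 32-character hex digest:
# >>> REMATCH_PWD.match("md5$23$YUXQ_-2GfwhzVpt5IQWp$3ebb6e78bf7d0c1938578855982e2b1c").groups()
# ('23', 'YUXQ_-2GfwhzVpt5IQWp', '3ebb6e78bf7d0c1938578855982e2b1c')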
# Supported upload file formats
SUPPORT_IMAGE_TYPE_LIST = ['image/gif', 'image/jpeg', 'image/png', 'image/bmp', 'image/x-png',
'application/octet-stream']
# Maximum upload size
MAX_UPLOAD_FILE_SIZE = 10485760 # 10*1024*1024 =10M
# Minimum upload image size
MIN_IMAGE_SIZE = {'w': 10, 'h': 10}
MAX_IMAGE_SIZE = {'w': 4000, 'h': 4000}
# Image crop sizes (THUMBNAIL)
THUMB_SIZE_SMALL = {'w': 100, 'h': 100, 'thumb': 's'}
THUMB_SIZE_NORMAL = {'w': 480, 'h': 480, 'thumb': 'n'}
THUMB_SIZE_LARGE = {'w': 3000, 'h': 3000, 'thumb': 'l'}
THUMB_SIZE_ORIGIN = {'w': 0, 'h': 0, 'thumb': 'r'}
MAX_SHARE_DESCRIPTION_SIZE = 140
NOW_ANDROID_VERSION_CODE = 7
NOW_VERSION_DOWNLOAD_URL = "/static/download/nepenthes-beta0.9.3.apk"
MAX_RAND_EMAIL_CODE = 99999
MIN_RAND_EMAIL_CODE = 10000
# Location precision
PRECISION = 12
LOACTION_PRECISION = 4
PAGE_SIZE = 10
|
TeamProxima/predictive-fault-tracker
|
board/board_client.py
|
Python
|
mit
| 909
| 0
|
#!/usr/bin/python
import argparse
from board_manager import BoardManager
from constants import *
def main():
parser = argparse.ArgumentParser(description='Board client settings')
parser.add_argument('-sp', '--PORT', help='server port', type=int,
default=80, required=False)
    parser.add_argument('-sip', '--IP', help='server ip', type=str,
default='', required=False)
parser.add_argument('-pt', '--TO', help='phone to', type=str,
default='', required=False)
parser.add_argument('-pf', '--FROM', help='phone from', type=str,
default='', required=False)
parser.add_argument('-tk', '--TWKEY', help='twilio key', type=str,
default='', required=False)
args = parser.parse_args()
bm = BoardManager(args)
bm.activate()
if __name__ == "__main__":
main()
|
GJL/flink
|
flink-python/pyflink/dataset/tests/test_execution_environment_completeness.py
|
Python
|
apache-2.0
| 3,235
| 0.004019
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.dataset import ExecutionEnvironment
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase
class ExecutionEnvironmentCompletenessTests(PythonAPICompletenessTestCase,
unittest.TestCase):
@classmethod
def python_class(cls):
return ExecutionEnvironment
@classmethod
    def java_class(cls):
return "org.apache.flink.api.java.ExecutionEnvironment"
@classmethod
def excluded_methods(cls):
# Exclude these methods for the time being, because current
# ExecutionEnvironment/StreamExecutionEnvironment do not apply to the
# DataSet/DataStream API, but to the Table API configuration.
# Currently only the methods for configuration is added.
# 'setSessionTimeout', 'getSessionTimeout', 'setNumberOfExecutionRetries',
# 'getNumberOfExecutionRetries' is deprecated, exclude them.
# 'access$000' is generated by java compiler, exclude it too.
return {'resetContextEnvironment', 'getSessionTimeout', 'fromParallelCollection',
'getId', 'registerCachedFile', 'setNumberOfExecutionRetries', 'readTextFile',
'getNumberOfExecutionRetries', 'registerCachedFilesWithPlan',
'getLastJobExecutionResult', 'readCsvFile', 'initializeContextEnvironment',
'createLocalEnvironment', 'createLocalEnvironmentWithWebUI', 'createProgramPlan',
'getIdString', 'setSessionTimeout', 'fromElements', 'createRemoteEnvironment',
'startNewSession', 'fromCollection', 'readTextFileWithValue', 'registerDataSink',
'createCollectionsEnvironment', 'readFile', 'readFileOfPrimitives',
'generateSequence', 'areExplicitEnvironmentsAllowed', 'createInput',
'getUserCodeClassLoader', 'getExecutorServiceLoader', 'getConfiguration',
'executeAsync', 'registerJobListener', 'clearJobListeners', 'configure'}
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
klingtnet/dh-project-ws14
|
data/particle_parser.py
|
Python
|
mit
| 1,420
| 0.005634
|
#!/usr/bin/env python3
from pathlib import Path
import pprint
pp = pprint.PrettyPrinter()
import logging
log = logging.getLogger(__name__)
def main():
p = Path('particles.txt')
if p.exists() and p.is_file():
parse(str(p))
def parse(filepath):
raw = ''
try:
with open(filepath) as f:
raw = f.read()
except IOError as e:
log.exception(e)
return 1
else:
parse_lines(raw.splitlines())
def parse_lines(lines):
'''
parser for particle list of stylianos
'''
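    # Editor's note (input format inferred from the branches below, assumed):
    #   * <category name>
    #   ** <particle>
    # e.g. "* modal particles" opens a category and "** ara" files the
    # particle "ara" under it.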
data = {}
category = ''
particle = ''
simple_particle_lemma = []
for line in lines:
parts = line.split()
if parts[0] == '*':
category = ' '.join(parts[1:])
if category not in data:
data[category] = {}
else:
log.warn('Category "{}" already defined!'.format(category))
        elif parts[0] == '**':
if category:
if parts[1] not in data[category]:
particle = parts[1]
data[category][particle] = []
else:
log.warn('Particle "{}" already contained in category: "{}"'.format(parts[1], category))
else:
log.warn('particle without previous category specification: "{}"'.format(parts[1]))
pp.pprint(data)
if __name__ == '__main__':
main()
|
watsonpy/watson-auth
|
watson/auth/providers/abc.py
|
Python
|
bsd-3-clause
| 3,673
| 0.000272
|
import abc
from sqlalchemy.orm import exc
from watson.auth import crypto
from watson.auth.providers import exceptions
from watson.common import imports
from watson.common.decorators import cached_property
class Base(object):
config = None
session = None
def __init__(self, config, session):
self._validate_configuration(config)
self.config = config
self.session = session
# Configuration
def _validate_configuration(self, config):
if 'class' not in config['model']:
raise exceptions.InvalidConfiguration(
'User model not specified, ensure "class" key is set on provider["model"].')
common_keys = [
'system_email_from_address',
'reset_password_route',
'forgotten_password_route']
for key in common_keys:
if key not in config:
raise exceptions.InvalidConfiguration(
'Ensure "{}" key is set on the provider.'.format(key))
# User retrieval
@property
def user_model_identifier(self):
return self.config['model']['identifier']
@cached_property
def user_model(self):
return imports.load_definition_from_string(
self.config['model']['class'])
@property
def user_query(self):
return self.session.query(self.user_model)
def get_user(self, username):
"""Retrieves a user from the database based on their username.
Args:
username (string): The username of the user to find.
"""
user_field = getattr(self.user_model, self.user_model_identifier)
try:
return self.user_query.filter(user_field == username).one()
except exc.NoResultFound:
return None
def get_user_by_email_address(self, email_address):
email_column = getattr(
self.user_model, self.config['model']['email_address'])
try:
return self.user_query.filter(email_column == email_address).one()
except exc.NoResultFound:
return None
# Authentication
def authenticate(self, username, password):
"""Validate a user against a supplied username and password.
Args:
username (string): The username of the user.
            password (string): The password of the user.
"""
password_config = self.config['password']
if len(password) > password_config['max_length']:
return None
user = self.get_user(username)
if user:
if crypto.check_password(password, user.password, user.salt,
self.config['encoding']):
return user
return None
def user_meets_requirements(self, user, requires):
for require in requires or []:
if not require(user):
return False
return True
# Authorization
def is_authorized(self, user, roles=None, permissions=None, requires=None):
no_role = roles and not user.acl.has_role(roles)
no_permission = permissions and not user.acl.has_permission(
permissions)
no_requires = self.user_meets_requirements(user, requires)
return False if no_role or no_permission or not no_requires else True
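    # Editor's note: is_authorized above succeeds only when the user holds
    # the requested roles and permissions (when given) and satisfies every
    # callable in `requires`.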
# Actions
@abc.abstractmethod
def logout(self, request):
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def login(self, user, request):
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def handle_request(self, request):
raise NotImplementedError # pragma: no cover
|
sebrandon1/nova
|
nova/tests/unit/virt/hyperv/test_vmops.py
|
Python
|
apache-2.0
| 76,595
| 0.000052
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from eventlet import timeout as etimeout
import mock
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import units
from nova.compute import vm_states
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import flavor as flavor_obj
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_virtual_interface
from nova.tests.unit.virt.hyperv import test_base
from nova.virt import hardware
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
CONF = cfg.CONF
class VMOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V VMOps class."""
_FAKE_TIMEOUT = 2
FAKE_SIZE = 10
FAKE_DIR = 'fake_dir'
FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s'
FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso'
FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd'
FAKE_UUID = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
FAKE_LOG = 'fake_log'
_WIN_VERSION_6_3 = '6.3.0'
_WIN_VERSION_10 = '10.0'
ISO9660 = 'iso9660'
_FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd'
def setUp(self):
super(VMOpsTestCase, self).setUp()
self.context = 'fake-context'
self._vmops = vmops.VMOps()
self._vmops._vmutils = mock.MagicMock()
self._vmops._metricsutils = mock.MagicMock()
self._vmops._vhdutils = mock.MagicMock()
self._vmops._pathutils = mock.MagicMock()
self._vmops._hostutils = mock.MagicMock()
self._vmops._serial_console_ops = mock.MagicMock()
self._vmops._block_dev_man = mock.MagicMock()
    @mock.patch('nova.network.is_neutron')
@mock.patch('nova.virt.hyperv.vmops.importutils.import_object')
def test_load_vif_driver_neutron(self, mock_import_object, is_neutron):
is_neutron.return_value = True
self._vmops._load_vif_driver_class()
mock_import_object.assert_called_once_with(
vmops.NEUTRON_VIF_DRIVER)
@mock.patch('nova.network.is_neutron')
@mock.patch('nova.virt.hyperv.vmops.importutils.import_object')
    def test_load_vif_driver_nova(self, mock_import_object, is_neutron):
is_neutron.return_value = False
self._vmops._load_vif_driver_class()
mock_import_object.assert_called_once_with(
vmops.NOVA_VIF_DRIVER)
def test_list_instances(self):
mock_instance = mock.MagicMock()
self._vmops._vmutils.list_instances.return_value = [mock_instance]
response = self._vmops.list_instances()
self._vmops._vmutils.list_instances.assert_called_once_with()
self.assertEqual(response, [mock_instance])
def test_estimate_instance_overhead(self):
instance_info = {'memory_mb': 512}
overhead = self._vmops.estimate_instance_overhead(instance_info)
self.assertEqual(0, overhead['memory_mb'])
self.assertEqual(1, overhead['disk_gb'])
instance_info = {'memory_mb': 500}
overhead = self._vmops.estimate_instance_overhead(instance_info)
self.assertEqual(0, overhead['disk_gb'])
def _test_get_info(self, vm_exists):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_info = mock.MagicMock(spec_set=dict)
fake_info = {'EnabledState': 2,
'MemoryUsage': mock.sentinel.FAKE_MEM_KB,
'NumberOfProcessors': mock.sentinel.FAKE_NUM_CPU,
'UpTime': mock.sentinel.FAKE_CPU_NS}
def getitem(key):
return fake_info[key]
mock_info.__getitem__.side_effect = getitem
expected = hardware.InstanceInfo(state=constants.HYPERV_POWER_STATE[2],
max_mem_kb=mock.sentinel.FAKE_MEM_KB,
mem_kb=mock.sentinel.FAKE_MEM_KB,
num_cpu=mock.sentinel.FAKE_NUM_CPU,
cpu_time_ns=mock.sentinel.FAKE_CPU_NS)
self._vmops._vmutils.vm_exists.return_value = vm_exists
self._vmops._vmutils.get_vm_summary_info.return_value = mock_info
if not vm_exists:
self.assertRaises(exception.InstanceNotFound,
self._vmops.get_info, mock_instance)
else:
response = self._vmops.get_info(mock_instance)
self._vmops._vmutils.vm_exists.assert_called_once_with(
mock_instance.name)
self._vmops._vmutils.get_vm_summary_info.assert_called_once_with(
mock_instance.name)
self.assertEqual(response, expected)
def test_get_info(self):
self._test_get_info(vm_exists=True)
def test_get_info_exception(self):
self._test_get_info(vm_exists=False)
@mock.patch.object(vmops.VMOps, 'check_vm_image_type')
@mock.patch.object(vmops.VMOps, '_create_root_vhd')
def test_create_root_device_type_disk(self, mock_create_root_device,
mock_check_vm_image_type):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_root_disk_info = {'type': constants.DISK}
self._vmops._create_root_device(self.context, mock_instance,
mock_root_disk_info,
mock.sentinel.VM_GEN_1)
mock_create_root_device.assert_called_once_with(
self.context, mock_instance)
mock_check_vm_image_type.assert_called_once_with(
mock_instance.uuid, mock.sentinel.VM_GEN_1,
mock_create_root_device.return_value)
def _prepare_create_root_device_mocks(self, use_cow_images, vhd_format,
vhd_size):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.flavor.root_gb = self.FAKE_SIZE
self.flags(use_cow_images=use_cow_images)
self._vmops._vhdutils.get_vhd_info.return_value = {'VirtualSize':
vhd_size * units.Gi}
self._vmops._vhdutils.get_vhd_format.return_value = vhd_format
root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
get_size.return_value = root_vhd_internal_size
self._vmops._pathutils.exists.return_value = True
return mock_instance
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_exception(self, mock_get_cached_image,
vhd_format):
mock_instance = self._prepare_create_root_device_mocks(
use_cow_images=False, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE + 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
self.assertRaises(exception.FlavorDiskSmallerThanImage,
self._vmops._create_root_vhd, self.context,
mock_instance)
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
self._vmops._pathutils.exists.assert_called_once_with(
fake_root_path)
se
|
Natgeoed/django-broadcasts
|
broadcasts/admin.py
|
Python
|
mit
| 1,181
| 0.001693
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from broadcasts.models import BroadcastMessage
from broadcasts.forms import BroadcastMessageForm
class BroadcastAdmin(admin.ModelAdmin):
"""Admin class for the broadcast messages"""
form = BroadcastMessageForm
list_display = (
'title', 'user_target', 'show_frequency', 'start_time',
'end_time', 'is_published')
list_filter = ('is_published', 'show_frequency', 'user_target')
search_fields = ['message', 'title']
fieldsets = (
(None, {
'fields': ('title', 'message', 'message_type',)
}),
(_('Message Targeting'), {
'fields': ('user_target', 'url_target')
}),
(_("Message Display"), {
'description': _(
"Messages will display only if they are published, "
"it is between the start and end times, and the show "
"frequency has not been exceeded."),
'fields': ('show_frequency', 'is_published',
('start_time', 'end_time'))
})
)
admin.site.register(BroadcastMessage, BroadcastAdmin)
|
Dymaxion00/KittenGroomer
|
fs_filecheck/usr/local/bin/pdfid.py
|
Python
|
bsd-3-clause
| 37,276
| 0.004614
|
#!/usr/bin/env python
__description__ = 'Tool to test a PDF file'
__author__ = 'Didier Stevens'
__version__ = '0.2.1'
__date__ = '2014/10/18'
"""
Tool to test a PDF file
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
History:
2009/03/27: start
2009/03/28: scan option
2009/03/29: V0.0.2: xml output
2009/03/31: V0.0.3: /ObjStm suggested by Dion
2009/04/02: V0.0.4: added ErrorMessage
2009/04/20: V0.0.5: added Dates
2009/04/21: V0.0.6: added entropy
2009/04/22: added disarm
2009/04/29: finished disarm
2009/05/13: V0.0.7: added cPDFEOF
2009/07/24: V0.0.8: added /AcroForm and /RichMedia, simplified %PDF header regex, extra date format (without TZ)
2009/07/25: added input redirection, option --force
2009/10/13: V0.0.9: added detection for CVE-2009-3459; added /RichMedia to disarm
2010/01/11: V0.0.10: relaxed %PDF header checking
2010/04/28: V0.0.11: added /Launch
2010/09/21: V0.0.12: fixed cntCharsAfterLastEOF bug; fix by Russell Holloway
2011/12/29: updated for Python 3, added keyword /EmbeddedFile
2012/03/03: added PDFiD2JSON; coded by Brandon Dixon
2013/02/10: V0.1.0: added http/https support; added support for ZIP file with password 'infected'
2013/03/11: V0.1.1: fixes for Python 3
2013/03/13: V0.1.2: Added error handling for files; added /XFA
2013/11/01: V0.2.0: Added @file & plugins
2013/11/02: continue
2013/11/04: added options -c, -m, -v
2013/11/06: added option -S
2013/11/08: continue
2013/11/09: added option -o
2013/11/15: refactoring
2014/09/30: added CSV header
2014/10/16: V0.2.1: added output when plugin & file not pdf
2014/10/18: some fixes for Python 3
Todo:
- update XML example (entropy, EOF)
- code review, cleanup
"""
import optparse
import os
import re
import xml.dom.minidom
import traceback
import math
import operator
import os.path
import sys
import json
import zipfile
import collections
import glob
try:
import urllib2
urllib23 = urllib2
except:
import urllib.request
    urllib23 = urllib.request
#Convert 2 Bytes If Python 3
def C2BIP3(string):
if sys.version_info[0] > 2:
return bytes([ord(x) for x in string])
else:
return string
class cBinaryFile:
def __init__(self, file):
self.file = file
if file == '':
self.infile = sys.stdin
elif file.lower().startswith('http://') or file.lower().startswith('https://'):
try:
if sys.hexversion >= 0x020601F0:
self.infile = urllib23.urlopen(file, timeout=5)
else:
self.infile = urllib23.urlopen(file)
except urllib23.HTTPError:
print('Error accessing URL %s' % file)
print(sys.exc_info()[1])
sys.exit()
elif file.lower().endswith('.zip'):
try:
self.zipfile = zipfile.ZipFile(file, 'r')
self.infile = self.zipfile.open(self.zipfile.infolist()[0], 'r', C2BIP3('infected'))
except:
print('Error opening file %s' % file)
print(sys.exc_info()[1])
sys.exit()
else:
try:
self.infile = open(file, 'rb')
except:
print('Error opening file %s' % file)
print(sys.exc_info()[1])
sys.exit()
self.ungetted = []
def byte(self):
if len(self.ungetted) != 0:
return self.ungetted.pop()
inbyte = self.infile.read(1)
if not inbyte or inbyte == '':
self.infile.close()
return None
return ord(inbyte)
def bytes(self, size):
if size <= len(self.ungetted):
result = self.ungetted[0:size]
del self.ungetted[0:size]
return result
inbytes = self.infile.read(size - len(self.ungetted))
if inbytes == '':
self.infile.close()
if type(inbytes) == type(''):
result = self.ungetted + [ord(b) for b in inbytes]
else:
result = self.ungetted + [b for b in inbytes]
self.ungetted = []
return result
def unget(self, byte):
self.ungetted.append(byte)
def ungets(self, bytes):
bytes.reverse()
self.ungetted.extend(bytes)
class cPDFDate:
def __init__(self):
self.state = 0
def parse(self, char):
if char == 'D':
self.state = 1
return None
elif self.state == 1:
if char == ':':
self.state = 2
self.digits1 = ''
else:
self.state = 0
return None
elif self.state == 2:
if len(self.digits1) < 14:
if char >= '0' and char <= '9':
self.digits1 += char
return None
else:
self.state = 0
return None
elif char == '+' or char == '-' or char == 'Z':
self.state = 3
self.digits2 = ''
self.TZ = char
return None
elif char == '"':
self.state = 0
self.date = 'D:' + self.digits1
return self.date
elif char < '0' or char > '9':
self.state = 0
self.date = 'D:' + self.digits1
return self.date
else:
self.state = 0
return None
elif self.state == 3:
if len(self.digits2) < 2:
if char >= '0' and char <= '9':
self.digits2 += char
return None
else:
self.state = 0
return None
elif len(self.digits2) == 2:
if char == "'":
self.digits2 += char
return None
else:
self.state = 0
return None
elif len(self.digits2) < 5:
if char >= '0' and char <= '9':
self.digits2 += char
if len(self.digits2) == 5:
self.state = 0
self.date = 'D:' + self.digits1 + self.TZ + self.digits2
return self.date
else:
return None
else:
self.state = 0
return None
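# Editor's note (illustrative): cPDFDate.parse consumes one character at a
# time and returns a completed PDF date string such as D:20141018120000+02'00
# (a 'D', a colon, up to 14 digits, then an optional Z/+/- timezone part);
# it returns None while a date is still being accumulated.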
def fEntropy(countByte, countTotal):
x = float(countByte) / countTotal
if x > 0:
return - x * math.log(x, 2)
else:
return 0.0
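# Editor's note: fEntropy above returns a single Shannon-entropy term,
# -p*log2(p) with p = countByte/countTotal; cEntropy below accumulates byte
# counts and sums these terms over all 256 byte values.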
class cEntropy:
def __init__(self):
self.allBucket = [0 for i in range(0, 256)]
self.streamBucket = [0 for i in range(0, 256)]
def add(self, byte, insideStream):
self.allBucket[byte] += 1
if insideStream:
self.streamBucket[byte] += 1
def removeInsideStream(self, byte):
if self.streamBucket[byte] > 0:
self.streamBucket[byte] -= 1
def calc(self):
self.nonStreamBucket = map(operator.sub, self.allBucket, self.streamBucket)
allCount = sum(self.allBucket)
streamCount = sum(self.streamBucket)
nonStreamCount = sum(self.nonStreamBucket)
return (allCount, sum(map(lambda x: fEntropy(x, allCount), self.allBucket)), streamCount, sum(map(lambda x: fEntropy(x, streamCount), self.streamBucket)), nonStreamCount, sum(map(lambda x: fEntropy(x, nonStreamCount), self.nonStreamBucket)))
class cPDFEOF:
def __init__(self):
self.token = ''
self.cntEOFs = 0
def parse(self, char):
if self.cntEOFs > 0:
self.cntCharsAfterLastEOF += 1
if self.token == '' and char == '%':
self.token += char
return
elif self.token == '%' and char == '%':
self.token += char
return
elif self.token == '%%' and char == 'E':
self.token += char
return
elif self.token == '%%E' and char == 'O':
|
joaander/hoomd-blue
|
hoomd/pytest/test_box.py
|
Python
|
bsd-3-clause
| 5,889
| 0.00017
|
from math import isclose
import numpy as np
from pytest import fixture
from hoomd.box import Box
@fixture
def box_dict():
return dict(Lx=1, Ly=2, Lz=3, xy=1, xz=2, yz=3)
def test_base_constructor(box_dict):
box = Box(**box_dict)
for key in box_dict:
assert getattr(box, key) == box_dict[key]
@fixture
def base_box(box_dict):
return Box(**box_dict)
def test_cpp_python_correspondence(base_box):
cpp_obj = base_box._cpp_obj
cpp_L = cpp_obj.getL()
assert base_box.Lx == cpp_L.x and base_box.Ly == cpp_L.y \
and base_box.Lz == cpp_L.z
assert base_box.xy == cpp_obj.getTiltFactorXY()
assert base_box.xz == cpp_obj.getTiltFactorXZ()
assert base_box.yz == cpp_obj.getTiltFactorYZ()
def test_setting_lengths(base_box):
for attr in ['Lx', 'Ly', 'Lz']:
for L in np.linspace(1, 100, 10):
setattr(base_box, attr, L)
assert getattr(base_box, attr) == L
for L in np.linspace(1, 100, 10):
base_box.L = L
assert all(base_box.L == L)
base_box.L = [3, 2, 1]
assert all(base_box.L == [3, 2, 1])
def test_setting_tilts(base_box):
for attr in ['xy', 'xz', 'yz']:
for tilt in np.linspace(1, 100, 10):
setattr(base_box, attr, tilt)
assert getattr(base_box, attr) == tilt
for tilt in np.linspace(1, 100, 10):
base_box.tilts = tilt
assert all(base_box.tilts == tilt)
base_box.tilts = [3, 2, 1]
assert all(base_box.tilts == [3, 2, 1])
def test_is2D(base_box): # noqa: N802 - allow function name
base_box.Lz = 0
assert base_box.is2D
for L in np.linspace(1, 100, 10):
base_box.Lz = L
assert not base_box.is2D
def test_dimensions(base_box):
base_box.Lz = 0
assert base_box.dimensions == 2
for L in np.linspace(1, 100, 10):
base_box.Lz = L
assert base_box.dimensions == 3
def test_lattice_vectors(base_box):
expected_vectors = np.array([[1, 0, 0], [2, 2, 0], [6, 9, 3]],
dtype=np.float64)
assert np.allclose(base_box.lattice_vectors, expected_vectors)
box = Box.cube(4)
lattice_vectors = np.array([[4, 0, 0], [0, 4, 0], [0, 0, 4]])
assert np.allclose(box.lattice_vectors, lattice_vectors)
def get_aspect(L):
return np.array([L[0] / L[1], L[0] / L[2], L[1] / L[2]])
def test_scale(base_box):
aspect = get_aspect(base_box.L)
for s in np.linspace(0.5, 1.5, 10):
prev_vol = base_box.volume
base_box.scale(s)
assert np.allclose(aspect, get_aspect(base_box.L))
assert not isclose(prev_vol, base_box.volume)
L = base_box.L
s = np.array([1, 0.75, 0.5])
base_box.scale(s)
assert np.allclose(aspect * get_aspect(s), get_aspect(base_box.L))
assert np.allclose(base_box.L, L * s)
def test_volume(base_box):
assert isclose(base_box.volume, np.product(base_box.L))
for L in np.linspace(1, 10, 10):
box = Box.cube(L)
assert isclose(box.volume, L**3)
box = Box(L, L + 1, L + 2)
assert isclose(box.volume, L * (L + 1) * (L + 2))
def test_volume_setting(base_box):
aspect = get_aspect(base_box.L)
for v in np.linspace(1, 100, 10):
base_box.volume = v
assert np.allclose(aspect, get_aspect(base_box.L))
assert isclose(base_box.volume, v)
def test_periodic(base_box):
assert all(base_box.periodic)
@fixture
def expected_matrix(box_dict):
return np.array([
[
box_dict['Lx'], box_dict['Ly'] * box_dict['xy'],
box_dict['Lz'] * box_dict['xz']
],
[0, box_dict['Ly'], box_dict['Lz'] * box_dict['yz']],
[0, 0, box_dict['Lz']],
])
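# Added note (illustrative, not part of the test suite): the fixture above
# encodes HOOMD's upper-triangular cell matrix
#   h = [[Lx, xy*Ly, xz*Lz],
#        [0,  Ly,    yz*Lz],
#        [0,  0,     Lz   ]]
# A hypothetical helper that builds it directly:
def _demo_box_matrix(Lx, Ly, Lz, xy, xz, yz):
    return np.array([[Lx, xy * Ly, xz * Lz],
                     [0.0, Ly, yz * Lz],
                     [0.0, 0.0, Lz]])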
def test_matrix(base_box, expected_matrix):
assert np.allclose(base_box.matrix, expected_matrix)
base_box.xy *= 2
assert isclose(base_box.matrix[0, 1], 2 * expected_matrix[0, 1])
base_box.yz *= 0.5
assert isclose(base_box.matrix[1, 2], 0.5 * expected_matrix[1, 2])
base_box.Lx *= 3
assert isclose(base_box.matrix[0, 0], 3 * expected_matrix[0, 0])
@fixture
def new_box_matrix_dict():
Lx, Ly, Lz = 2, 4, 8
xy, xz, yz = 1, 3, 5
new_box_matrix = np.array([[Lx, Ly * xy, Lz * xz], [0, Ly, Lz * yz],
[0, 0, Lz]])
return dict(Lx=Lx, Ly=Ly, Lz=Lz, xy=xy, xz=xz, yz=yz, matrix=new_box_matrix)
def test_matrix_setting(base_box, new_box_matrix_dict):
base_box.matrix = new_box_matrix_dict['matrix']
assert np.allclose(new_box_matrix_dict['matrix'], base_box.matrix)
assert np.allclose(base_box.L, [
new_box_matrix_dict['Lx'], new_box_matrix_dict['Ly'],
new_box_matrix_dict['Lz']
])
assert np.allclose(base_box.tilts, [
new_box_matrix_dict['xy'], new_box_matrix_dict['xz'],
new_box_matrix_dict['yz']
])
def test_cube():
for L in np.linspace(1, 100, 10):
box = Box.cube(L)
assert all(box.L == L)
assert box.Lx == box.Ly == box.Lz == L
def test_square():
for L in np.linspace(1, 100, 10):
box = Box.square(L)
assert all(box.L == [L, L, 0])
assert box.Lx == box.Ly == L and box.Lz == 0
def test_from_matrix(new_box_matrix_dict):
box = Box.from_matrix(new_box_matrix_dict['matrix'])
    assert np.allclose(new_box_matrix_dict['matrix'], box.matrix)
assert np.allclose(box.L, [
        new_box_matrix_dict['Lx'], new_box_matrix_dict['Ly'],
new_box_matrix_dict['Lz']
])
assert np.allclose(box.tilts, [
new_box_matrix_dict['xy'], new_box_matrix_dict['xz'],
new_box_matrix_dict['yz']
])
def test_eq(base_box, box_dict):
box2 = Box(**box_dict)
assert base_box == box2
box2.Lx = 2
assert not base_box == box2
def test_neq(base_box, box_dict):
box2 = Box(**box_dict)
assert not base_box != box2
box2.Lx = 2
assert base_box != box2
|
Impavidity/text-classification-cnn
|
configurable.py
|
Python
|
mit
| 4,672
| 0.026327
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
from configparser import SafeConfigParser
class Configurable(object):
"""
Configuration processing for the network
"""
def __init__(self, *args, **kwargs):
self._name = kwargs.pop("name", "Unknown")
if args and kwargs:
raise TypeError('Configurable must take either a config parser or keyword args')
if len(args) > 1:
raise TypeError('Configurable takes at most one argument')
if args:
self._config = args[0]
else:
self._config = self._configure(**kwargs)
return
@property
def name(self):
return self._name
def _configure(self, **kwargs):
config = SafeConfigParser()
config_file = kwargs.pop("config_file", "")
config.read(config_file)
# Override the config setting if the (k,v) specified in command line
for option, value in kwargs.items():
assigned = False
for section in config.sections():
if option in config.options(section):
config.set(section, option, str(value))
assigned = True
break
if not assigned:
raise ValueError("%s is not a valid option" % option)
return config
    argparser = argparse.ArgumentParser()
argparser.add_argument('--config_file')
# ======
# [OS]
@property
def model_type(self):
        return self._config.get('OS', 'model_type')
argparser.add_argument('--model_type')
@property
def mode(self):
return self._config.get('OS', 'mode')
argparser.add_argument('--mode')
@property
def save_dir(self):
return self._config.get('OS', 'save_dir')
argparser.add_argument('--save_dir')
@property
def word_file(self):
return self._config.get('OS', 'word_file')
argparser.add_argument('--word_file')
@property
def target_file(self):
return self._config.get('OS', 'target_file')
argparser.add_argument('--target_file')
@property
def train_file(self):
return self._config.get('OS', 'train_file')
argparser.add_argument('--train_file')
@property
def valid_file(self):
return self._config.get('OS', 'valid_file')
argparser.add_argument('--valid_file')
@property
def test_file(self):
return self._config.get('OS', 'test_file')
argparser.add_argument('--test_file')
@property
def save_model_file(self):
return self._config.get('OS', 'save_model_file')
argparser.add_argument('--save_model_file')
@property
def restore_from(self):
return self._config.get('OS', 'restore_from')
argparser.add_argument('--restore_from')
@property
def embed_file(self):
return self._config.get('OS', 'embed_file')
argparser.add_argument('--embed_file')
@property
def use_gpu(self):
return self._config.getboolean('OS', 'use_gpu')
argparser.add_argument('--use_gpu')
# [Dataset]
@property
def n_bkts(self):
return self._config.getint('Dataset', 'n_bkts')
argparser.add_argument('--n_bkts')
@property
def n_valid_bkts(self):
return self._config.getint('Dataset', 'n_valid_bkts')
argparser.add_argument('--n_valid_bkts')
@property
def dataset_type(self):
return self._config.get('Dataset', 'dataset_type')
argparser.add_argument('--dataset_type')
@property
def min_occur_count(self):
return self._config.getint('Dataset', 'min_occur_count')
argparser.add_argument('--min_occur_count')
# [Learning rate]
@property
def learning_rate(self):
return self._config.getfloat('Learning rate', 'learning_rate')
argparser.add_argument('--learning_rate')
@property
def epoch_decay(self):
return self._config.getint('Learning rate', 'epoch_decay')
argparser.add_argument('--epoch_decay')
@property
def dropout(self):
return self._config.getfloat('Learning rate', 'dropout')
argparser.add_argument('--dropout')
# [Sizes]
@property
def words_dim(self):
return self._config.getint('Sizes', 'words_dim')
argparser.add_argument('--words_dim')
# [Training]
@property
def log_interval(self):
return self._config.getint('Training', 'log_interval')
argparser.add_argument('--log_interval')
@property
def valid_interval(self):
return self._config.getint('Training', 'valid_interval')
argparser.add_argument('--valid_interval')
@property
def train_batch_size(self):
return self._config.getint('Training', 'train_batch_size')
argparser.add_argument('--train_batch_size')
@property
def test_batch_size(self):
return self._config.getint('Training', 'test_batch_size')
argparser.add_argument('--test_batch_size')
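# Hedged usage sketch (added; 'config.ini' and the 'train' override below are
# hypothetical): build a Configurable from a file, letting keyword arguments
# override matching options found in any config section.
def _demo_configurable():
    return Configurable(config_file='config.ini', mode='train')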
|
xieyajie/BackHatPython
|
backhatpython02/server-tcp.py
|
Python
|
apache-2.0
| 707
| 0.001414
|
import socket
import threading
bind_ip = ""
bind_port = 60007
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print("[*] Listening on %s:%d" % (bind_ip, bind_port))
def handle_client(client_socket):
request = client_socket.recv(1024).decode()
print("[*] Received: %s" % request)
send_data = "ACK!"
    client_socket.send(send_data.encode())
print(client_socket.getpeername())
client_socket.close()
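# Added client sketch (not in the original script): how a peer would exercise
# this server; host/port mirror the bind settings above.
def _demo_client(host="127.0.0.1", port=bind_port):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((host, port))
    c.send(b"hello server")
    response = c.recv(4096)  # expect b"ACK!"
    c.close()
    return response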
while True:
client, addr = server.accept()
print("[*] Accepted connect from: %s:%d" % (addr[0], addr[1]))
    client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
|
0x47d/atd.id
|
src/print.py
|
Python
|
gpl-3.0
| 989
| 0.026342
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
def get_color(color):
if 'default'==color:
return '\x1b[39;01m'
elif 'black'==color:
return '\x1b[30;01m'
elif 'red'==color:
return '\x1b[31;01m'
elif 'green'==color:
return '\x1b[32;01m'
elif 'yellow'==color:
return '\x1b[33;01m'
elif 'blue'==color:
return '\x1b[34;01m'
elif 'magenta'==color:
return '\x1b[35;01m'
elif 'cyan'==color:
return '\x1b[36;01m'
return '\x1b[34;01m'
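# Added sketch: get_color() returns a bold ANSI SGR escape ('\x1b[<n>;01m');
# wrapping text between it and the reset code '\x1b[0m' colors terminal output.
def _demo_colored(text, color='green'):
    return '%s%s\x1b[0m' % (get_color(color), text)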
def main():
if 4==len(sys.argv):
color,cmd,action=get_color(sys.argv[1]),sys.argv[2],sys.argv[3]
if action=='stop':
action='exit'
template='\x1b[1m%s[ ΔOS : %s : make : %s ]\x1b[0m'
else:
action='init'
template='\x1b[1m%s[ ΔOS : %s : make : %s ]\x1b[0m'
print(template%(color,action,cmd))
if __name__=="__main__":
main()
|
fenderglass/ABruijn
|
flye/polishing/polish.py
|
Python
|
bsd-3-clause
| 11,547
| 0.002598
|
#(c) 2016 by Authors
#This file is a part of ABruijn program.
#Released under the BSD license (see LICENSE file)
"""
Runs polishing binary in parallel and concatenates output
"""
from __future__ import absolute_import
from __future__ import division
import logging
import subprocess
import os
from collections import defaultdict
from flye.polishing.alignment import (make_alignment, get_contigs_info,
merge_chunks, split_into_chunks)
from flye.utils.sam_parser import SynchronizedSamReader
from flye.polishing.bubbles import make_bubbles
import flye.utils.fasta_parser as fp
from flye.utils.utils import which
import flye.config.py_cfg as cfg
from flye.six import iteritems
from flye.six.moves import range
POLISH_BIN = "flye-modules"
logger = logging.getLogger()
class PolishException(Exception):
pass
def check_binaries():
if not which(POLISH_BIN):
raise PolishException("polishing binary was not found. "
"Did you run 'make'?")
try:
devnull = open(os.devnull, "w")
subprocess.check_call([POLISH_BIN, "polisher", "-h"], stderr=devnull)
    except subprocess.CalledProcessError as e:
if e.returncode == -9:
logger.error("Looks like the system ran out of memory")
raise PolishException(str(e))
except OSError as e:
raise PolishException(str(e))
def polish(contig_seqs, read_seqs, work_dir, num_iters, num_threads, error_mode,
output_progress):
"""
High-level polisher interface
"""
logger_state = logger.disabled
if not output_progress:
logger.disabled = True
subs_matrix = os.path.join(cfg.vals["pkg_root"],
cfg.vals["err_modes"][error_mode]["subs_matrix"])
hopo_matrix = os.path.join(cfg.vals["pkg_root"],
cfg.vals["err_modes"][error_mode]["hopo_matrix"])
stats_file = os.path.join(work_dir, "contigs_stats.txt")
prev_assembly = contig_seqs
contig_lengths = None
coverage_stats = None
for i in range(num_iters):
logger.info("Polishing genome (%d/%d)", i + 1, num_iters)
#split into 1Mb chunks to reduce RAM usage
#slightly vary chunk size between iterations
CHUNK_SIZE = 1000000 - (i % 2) * 100000
chunks_file = os.path.join(work_dir, "chunks_{0}.fasta".format(i + 1))
chunks = split_into_chunks(fp.read_sequence_dict(prev_assembly),
CHUNK_SIZE)
fp.write_fasta_dict(chunks, chunks_file)
####
logger.info("Running minimap2")
alignment_file = os.path.join(work_dir, "minimap_{0}.bam".format(i + 1))
make_alignment(chunks_file, read_seqs, num_threads,
work_dir, error_mode, alignment_file,
reference_mode=True, sam_output=True)
#####
logger.info("Separating alignment into bubbles")
contigs_info = get_contigs_info(chunks_file)
bubbles_file = os.path.join(work_dir,
"bubbles_{0}.fasta".format(i + 1))
coverage_stats, mean_aln_error = \
make_bubbles(alignment_file, contigs_info, chunks_file,
error_mode, num_threads,
bubbles_file)
logger.info("Alignment error rate: %f", mean_aln_error)
consensus_out = os.path.join(work_dir, "consensus_{0}.fasta".format(i + 1))
polished_file = os.path.join(work_dir, "polished_{0}.fasta".format(i + 1))
if os.path.getsize(bubbles_file) == 0:
logger.info("No reads were aligned during polishing")
if not output_progress:
logger.disabled = logger_state
open(stats_file, "w").write("#seq_name\tlength\tcoverage\n")
open(polished_file, "w")
return polished_file, stats_file
#####
logger.info("Correcting bubbles")
_run_polish_bin(bubbles_file, subs_matrix, hopo_matrix,
consensus_out, num_threads, output_progress)
polished_fasta, polished_lengths = _compose_sequence(consensus_out)
merged_chunks = merge_chunks(polished_fasta)
fp.write_fasta_dict(merged_chunks, polished_file)
#Cleanup
os.remove(chunks_file)
os.remove(bubbles_file)
os.remove(consensus_out)
os.remove(alignment_file)
contig_lengths = polished_lengths
prev_assembly = polished_file
#merge information from chunks
contig_lengths = merge_chunks(contig_lengths, fold_function=sum)
coverage_stats = merge_chunks(coverage_stats,
fold_function=lambda l: sum(l) // len(l))
with open(stats_file, "w") as f:
f.write("#seq_name\tlength\tcoverage\n")
for ctg_id in contig_lengths:
f.write("{0}\t{1}\t{2}\n".format(ctg_id,
contig_lengths[ctg_id], coverage_stats[ctg_id]))
if not output_progress:
logger.disabled = logger_state
return prev_assembly, stats_file
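# Hedged usage sketch (added; the file paths and the "nano" error-mode label
# are hypothetical - see cfg.vals["err_modes"] for the real keys):
def _demo_polish(work_dir):
    return polish(contig_seqs="contigs.fasta", read_seqs=["reads.fastq"],
                  work_dir=work_dir, num_iters=1, num_threads=4,
                  error_mode="nano", output_progress=True)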
def generate_polished_edges(edges_file, gfa_file, polished_contigs, work_dir,
error_mode, num_threads):
"""
Generate polished graph edges sequences by extracting them from
polished contigs
"""
logger.debug("Generating polished GFA")
alignment_file = os.path.join(work_dir, "edges_aln.bam")
polished_dict = fp.read_sequence_dict(polished_contigs)
make_alignment(polished_contigs, [edges_file], num_threads,
work_dir, error_mode, alignment_file,
reference_mode=True, sam_output=True)
aln_reader = SynchronizedSamReader(alignment_file,
polished_dict,
cfg.vals["max_read_coverage"])
aln_by_edge = defaultdict(list)
#getting one best alignment for each contig
while not aln_reader.is_eof():
_, ctg_aln = aln_reader.get_chunk()
for aln in ctg_aln:
aln_by_edge[aln.qry_id].append(aln)
aln_reader.close()
MIN_CONTAINMENT = 0.9
updated_seqs = 0
edges_dict = fp.read_sequence_dict(edges_file)
for edge in edges_dict:
if edge in aln_by_edge:
main_aln = aln_by_edge[edge][0]
map_start = main_aln.trg_start
map_end = main_aln.trg_end
for aln in aln_by_edge[edge]:
if aln.trg_id == main_aln.trg_id and aln.trg_sign == main_aln.trg_sign:
map_start = min(map_start, aln.trg_start)
map_end = max(map_end, aln.trg_end)
new_seq = polished_dict[main_aln.trg_id][map_start : map_end]
if main_aln.qry_sign == "-":
new_seq = fp.reverse_complement(new_seq)
#print edge, main_aln.qry_len, len(new_seq), main_aln.qry_start, main_aln.qry_end
if len(new_seq) / aln.qry_len > MIN_CONTAINMENT:
edges_dict[edge] = new_seq
updated_seqs += 1
    #writes fasta file with polished edges
#edges_polished = os.path.join(work_dir, "polished_edges.fasta")
#fp.write_fasta_dict(edges_dict, edges_polished)
#writes gfa file with polished edges
with open(os.path.join(work_dir, "polished_edges.gfa"), "w") as gfa_polished, \
open(gfa_file, "r") as gfa_in:
for line in gfa_in:
if line.startswith("S"):
seq_id = line.split()[1]
coverage_tag = line.split()[3]
gfa_polished.write("S\t{0}\t{1}\t{2}\n"
.format(seq_id, edges_dict[seq_id], coverage_tag))
else:
gfa_polished.write(line)
logger.debug("%d sequences remained unpolished",
len(edges_dict) - updated_seqs)
os.remove(alignment_file)
def filter_by_coverage(args, stats_in, contigs_in, stats_out, contigs_out):
"""
Filters out contigs with low coverage
"""
SUBASM_MIN_COVERAGE = 1
HARD_MIN_COVERAGE = cfg.vals["hard_minimum_coverage"]
RELATIVE_MIN_COVERAGE = cfg.vals["relative_minimum_coverage"]
|
chippey/gaffer
|
python/GafferTest/ArrayPlugTest.py
|
Python
|
bsd-3-clause
| 12,836
| 0.064194
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import gc
import IECore
import Gaffer
import GafferTest
class ArrayPlugTest( GafferTest.TestCase ) :
def test( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
self.assertTrue( n["in"]["e1"].isSame( n["in"][0] ) )
n["in"][0].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 2 )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" in n["in"] )
n["in"][0].setInput( None )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
def testConnectionGaps( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
n["in"][0].setInput( a["sum"] )
n["in"][1].setInput( a["sum"] )
n["in"][2].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
self.assertTrue( n["in"]["e1"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e2"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e3"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e4"].getInput() is None )
n["in"][1].setInput( None )
self.assertEqual( len( n["in"] ), 4 )
self.assertTrue( n["in"]["e1"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e2"].getInput() is None )
self.assertTrue( n["in"]["e3"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e4"].getInput() is None )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
s["n"]["in"][2].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( None )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].isSame( s["n"]["in"][0] ) )
self.assertTrue( s["n"]["in"]["e2"].isSame( s["n"]["in"][1] ) )
self.assertTrue( s["n"]["in"]["e3"].isSame( s["n"]["in"][2] ) )
self.assertTrue( s["n"]["in"]["e4"].isSame( s["n"]["in"][3] ) )
self.assertTrue( s["n"]["in"]["e1"].getInput(), s["a"]["sum"] )
self.assertTrue( s["n"]["in"]["e2"].getInput() is None )
self.assertTrue( s["n"]["in"]["e3"].getInput(), s["a"]["sum"] )
self.assertTrue( s["n"]["in"]["e4"].getInput() is None )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["n"]["in"] ), 4 )
self.assertTrue( s2["n"]["in"]["e1"].isSame( s2["n"]["in"][0] ) )
self.assertTrue( s2["n"]["in"]["e2"].isSame( s2["n"]["in"][1] ) )
self.assertTrue( s2["n"]["in"]["e3"].isSame( s2["n"]["in"][2] ) )
self.assertTrue( s2["n"]["in"]["e4"].isSame( s2["n"]["in"][3] ) )
self.assertTrue( s2["n"]["in"]["e1"].getInput(), s2["a"]["sum"] )
self.assertTrue( s2["n"]["in"]["e2"].getInput() is None )
self.assertTrue( s2["n"]["in"]["e3"].getInput(), s2["a"]["sum"] )
self.assertTrue( s2["n"]["in"]["e4"].getInput() is None )
def testMaximumInputs( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
# connect all inputs
for i in range( 0, 6 ) :
n["in"][i].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
# check that removing the one before the last
# leaves the last in place.
n["in"][4].setInput( None )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
if i != 4 :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
else :
self.assertTrue( n["in"][i].getInput() is None )
def testMakeConnectionAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
with Gaffer.UndoContext( s ) :
s["n"]["in"][0].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
s.redo()
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( "in" in s["n"] )
self.assertFalse( "in1" in s["n"] )
def testMinimumInputs( self ) :
a = GafferTest.AddNode()
n = Gaffer.Node()
n["in"] = Gaffer.ArrayPlug( "in", element = Gaffer.IntPlug( "e1" ), minSize=3 )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the middle input shouldn't create
# any new inputs, because there is still one free on the end
n["in"]["e2"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the last input should create a new
        # one - there should always be one free input on the
# end (until the maximum is reached).
n["in"]["e3"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
n["in"]["e3"].setInput( None )
self.assertEqual( len( n["in"] ), 3 )
def testDeleteAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayP
|
lugNode()
s["n"]["in"]["e1"].setInput( s["a"]["sum"] )
s["n"]["in"]["e2"].setInput( s["a"]["sum"] )
s["n"]["in"]["e3"].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( s, Gaffer.StandardSet( [ s["n"] ] ) )
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
s.redo()
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"]
|
hacktobacillus/fermenter
|
kettle/scripts/formatData.py
|
Python
|
mit
| 5,376
| 0.013207
|
import simplejson as json, os
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from kettle.utils import get_beers
import numpy as np
class BeerMLData(list):
def __init__(self):
self.proj = None
self.arr = None
self.beer_mapping = None
try:
self.load()
except: pass
important_keys = [
('hop_varieties',list),
('dry_hop_varieties',list),
('malt_varieties',list),
('yeast_varieties',list),
('descriptors',list),
('categories',list),
('abv',float),
('style',str),
('price_per_growler',float)
]
    def from_model(self):
self.extend(get_beers(False))
def from_file(self,fpath):
with open(fpath,'r') as fp:
self.extend(json.load(fp))
def fields(self):
return [key for key in self[0]['beer'].keys()]
def get_mapping_asarray(self):
num_samples = len(self.beer_mapping)
self.arr = np.zeros((num_samples,self.fs_dim),dtype=float)
        for i,(k,v) in enumerate(self.beer_mapping.items()):
self.arr[i] = v
self.compute_pca()
return self.arr
def compute_pca(self):
self.proj = PCA(n_components=2)
self.proj.fit(self.arr)
def project(self):
return self.proj.transform(self.arr)
def create_beer_mapping(self):
data = {}
self.feature_space_keys = {}
for key,dtype in self.important_keys:
self.feature_space_keys[key] = set()
self.fscales = {}
# Figure out feature space dimensionality
self.descriptions = []
for beer in self:
for key,dtype in self.important_keys:
fsk = self.feature_space_keys[key]
dat = dtype(beer[key])
if dat == 100:
continue
if dtype != list:
dat = set([dat])
self.feature_space_keys[key] = fsk.union(dat)
self.descriptions = [beer['description'] for beer in self]
self.count_vect = CountVectorizer(stop_words='english')
X_train_counts = self.count_vect.fit_transform(self.descriptions)
self.tfidf_transformer = TfidfTransformer()
self.X_train_tfidf = self.tfidf_transformer.fit_transform(X_train_counts)
#print(self.X_train_tfidf[0])
#print(dir(self.X_train_tfidf[0]))
self.fs_dim = 0
for k,v in self.feature_space_keys.items():
if k in ('abv','price_per_growler'):
self.fs_dim += 1
continue
v = list(v)
v.sort()
self.feature_space_keys[k] = v
self.fs_dim += len(v)
self.fs_dim += self.X_train_tfidf.shape[1] # For the text description.
#compute floating point scales for continuous data
for k,dtype in self.important_keys:
if dtype != float: continue
mx = max(self.feature_space_keys[k])
self.fscales[k] = mx
# Map each beer into the binary feature space.
num_beers = len(self)
self.beer_mapping = {}
for beer in self:
#beer = x['beer']
beer_id = beer['id']
self.beer_mapping[beer_id] = self.map_beer(beer)
def get_beer_by_id(self,beer_id):
beers = [beer for beer in self if beer['id'] == beer_id]
return beers[0]
def map_beer(self,x):
if isinstance(x,str):
beer = self.get_beer_by_id(x)
else:
beer = x
record = np.zeros(self.fs_dim)
idx = 0
for key,dtype in self.important_keys:
beer_vals = beer[key]
fsk = self.feature_space_keys[key]
if dtype == list:
for k in fsk:
qual = k in beer_vals
if qual:
record[idx] = 1
idx += 1
elif dtype == str:
for k in fsk:
qual = k == beer_vals
if qual:
record[idx] = 1
idx += 1
# divide by their scales...
else:
record[idx] = min(dtype(beer_vals) / self.fscales[key],1.0)
idx += 1
cv = self.count_vect.transform([beer['description']])
cv = self.tfidf_transformer.transform(cv).todense()
#print( cv)
record[idx:] = cv
return record
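# Added sketch (hypothetical helper, not in the original script): cosine
# similarity over the feature vectors produced by map_beer, returning the id
# of the most similar other beer.
def _demo_nearest_beer(data, beer_id):
    v = data.beer_mapping[beer_id]
    best_id, best_sim = None, -1.0
    for k, w in data.beer_mapping.items():
        if k == beer_id:
            continue
        sim = float(np.dot(v, w)) / (np.linalg.norm(v) * np.linalg.norm(w) + 1e-12)
        if sim > best_sim:
            best_id, best_sim = k, sim
    return best_id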
if __name__ == "__main__":
path = os.path.expanduser('~/Downloads/beer_data.json')
data = BeerMLData()
data.from_model()
#data.from_file(path)
data.create_beer_mapping()
X = data.get_mapping_asarray()
Y = data.project()
print (data.feature_space_keys['descriptors'])
print (data.feature_space_keys['categories'])
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(X)
plt.figure()
plt.gca().set_axis_bgcolor('k')
plt.plot(Y[:,0],Y[:,1],'ro')
mapping = data.beer_mapping
for i,(k,v) in enumerate(mapping.items()):
plt.text(Y[i,0],Y[i,1],k,color='w')
plt.show()
#print(data.fields())
#print(data[0])
|
devilry/devilry-django
|
devilry/devilry_cradmin/tests/test_devilry_listfilter/test_assignmentgroup_listfilter.py
|
Python
|
bsd-3-clause
| 21,241
| 0.000047
|
from django import test
from model_bakery import baker
from devilry.apps.core.models import AssignmentGroup
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_cradmin.devilry_listfilter.assignmentgroup import ExaminerCountFilter, CandidateCountFilter
class TestExaminerCountFilter(test.TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
self.testgroup0 = self.__create_group_with_examiners(num_examiners=0)
self.testgroup1 = self.__create_group_with_examiners(num_examiners=1)
self.testgroup2 = self.__create_group_with_examiners(num_examiners=2)
self.testgroup3 = self.__create_group_with_examiners(num_examiners=3)
self.testgroup4 = self.__create_group_with_examiners(num_examiners=4)
self.testgroup5 = self.__create_group_with_examiners(num_examiners=5)
self.testgroup6 = self.__create_group_with_examiners(num_examiners=6)
self.testgroup7 = self.__create_group_with_examiners(num_examiners=7)
def __create_group_with_examiners(self, num_examiners=0):
assignment_group = baker.make('core.AssignmentGroup')
for num in range(num_examiners):
            baker.make('core.Examiner', assignmentgroup=assignment_group)
return assignment_group
def __filter_examiners(self, filter_value):
queryset = AssignmentGroup.objects.all()
examinercountfilter = ExaminerCountFilter()
examinercountfilter.values = [filter_value]
return examinercountfilter.filter(queryobject=queryset)
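    # Added helper (hypothetical, for illustration): filter values follow an
    # '<op>-<count>' grammar with op in {eq, lt, gt}, as the tests below show.
    def __demo_filter_value(self, op, count):
        assert op in ('eq', 'lt', 'gt')
        return '%s-%d' % (op, count)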
def test_exact_0(self):
        filtered_queryset = self.__filter_examiners(filter_value='eq-0')
self.assertEqual(filtered_queryset.count(), 1)
self.assertEqual(filtered_queryset[0].id, self.testgroup0.id)
def test_exact_1(self):
filtered_queryset = self.__filter_examiners(filter_value='eq-1')
self.assertEqual(filtered_queryset.count(), 1)
self.assertEqual(filtered_queryset[0].id, self.testgroup1.id)
def test_exact_2(self):
filtered_queryset = self.__filter_examiners(filter_value='eq-2')
self.assertEqual(filtered_queryset.count(), 1)
self.assertEqual(filtered_queryset[0].id, self.testgroup2.id)
def test_exact_3(self):
filtered_queryset = self.__filter_examiners(filter_value='eq-3')
self.assertEqual(filtered_queryset.count(), 1)
self.assertEqual(filtered_queryset[0].id, self.testgroup3.id)
def test_exact_4(self):
filtered_queryset = self.__filter_examiners(filter_value='eq-4')
self.assertEqual(filtered_queryset.count(), 1)
self.assertEqual(filtered_queryset[0].id, self.testgroup4.id)
def test_exact_5(self):
filtered_queryset = self.__filter_examiners(filter_value='eq-5')
self.assertEqual(filtered_queryset.count(), 1)
self.assertEqual(filtered_queryset[0].id, self.testgroup5.id)
def test_exact_6(self):
filtered_queryset = self.__filter_examiners(filter_value='eq-6')
self.assertEqual(filtered_queryset.count(), 1)
self.assertEqual(filtered_queryset[0].id, self.testgroup6.id)
def test_less_than_2(self):
filtered_queryset = self.__filter_examiners(filter_value='lt-2')
self.assertEqual(filtered_queryset.count(), 2)
filtered_group_ids = [group.id for group in filtered_queryset]
self.assertIn(self.testgroup0.id, filtered_group_ids)
self.assertIn(self.testgroup1.id, filtered_group_ids)
def test_less_than_3(self):
filtered_queryset = self.__filter_examiners(filter_value='lt-3')
self.assertEqual(filtered_queryset.count(), 3)
filtered_group_ids = [group.id for group in filtered_queryset]
self.assertIn(self.testgroup0.id, filtered_group_ids)
self.assertIn(self.testgroup1.id, filtered_group_ids)
self.assertIn(self.testgroup2.id, filtered_group_ids)
def test_less_than_4(self):
filtered_queryset = self.__filter_examiners(filter_value='lt-4')
self.assertEqual(filtered_queryset.count(), 4)
filtered_group_ids = [group.id for group in filtered_queryset]
self.assertIn(self.testgroup0.id, filtered_group_ids)
self.assertIn(self.testgroup1.id, filtered_group_ids)
self.assertIn(self.testgroup2.id, filtered_group_ids)
self.assertIn(self.testgroup3.id, filtered_group_ids)
def test_less_than_5(self):
filtered_queryset = self.__filter_examiners(filter_value='lt-5')
self.assertEqual(filtered_queryset.count(), 5)
filtered_group_ids = [group.id for group in filtered_queryset]
self.assertIn(self.testgroup0.id, filtered_group_ids)
self.assertIn(self.testgroup1.id, filtered_group_ids)
self.assertIn(self.testgroup2.id, filtered_group_ids)
self.assertIn(self.testgroup3.id, filtered_group_ids)
self.assertIn(self.testgroup4.id, filtered_group_ids)
def test_less_than_6(self):
filtered_queryset = self.__filter_examiners(filter_value='lt-6')
self.assertEqual(filtered_queryset.count(), 6)
filtered_group_ids = [group.id for group in filtered_queryset]
self.assertIn(self.testgroup0.id, filtered_group_ids)
self.assertIn(self.testgroup1.id, filtered_group_ids)
self.assertIn(self.testgroup2.id, filtered_group_ids)
self.assertIn(self.testgroup3.id, filtered_group_ids)
self.assertIn(self.testgroup4.id, filtered_group_ids)
self.assertIn(self.testgroup5.id, filtered_group_ids)
def test_greater_than_0(self):
filtered_queryset = self.__filter_examiners(filter_value='gt-0')
self.assertEqual(filtered_queryset.count(), 7)
filtered_group_ids = [group.id for group in filtered_queryset]
self.assertNotIn(self.testgroup0.id, filtered_group_ids)
self.assertIn(self.testgroup1.id, filtered_group_ids)
self.assertIn(self.testgroup2.id, filtered_group_ids)
self.assertIn(self.testgroup3.id, filtered_group_ids)
self.assertIn(self.testgroup4.id, filtered_group_ids)
self.assertIn(self.testgroup5.id, filtered_group_ids)
self.assertIn(self.testgroup6.id, filtered_group_ids)
self.assertIn(self.testgroup7.id, filtered_group_ids)
def test_greater_than_1(self):
filtered_queryset = self.__filter_examiners(filter_value='gt-1')
self.assertEqual(filtered_queryset.count(), 6)
filtered_group_ids = [group.id for group in filtered_queryset]
self.assertNotIn(self.testgroup0.id, filtered_group_ids)
self.assertNotIn(self.testgroup1.id, filtered_group_ids)
self.assertIn(self.testgroup2.id, filtered_group_ids)
self.assertIn(self.testgroup3.id, filtered_group_ids)
self.assertIn(self.testgroup4.id, filtered_group_ids)
self.assertIn(self.testgroup5.id, filtered_group_ids)
self.assertIn(self.testgroup6.id, filtered_group_ids)
self.assertIn(self.testgroup7.id, filtered_group_ids)
def test_greater_than_2(self):
filtered_queryset = self.__filter_examiners(filter_value='gt-2')
self.assertEqual(filtered_queryset.count(), 5)
filtered_group_ids = [group.id for group in filtered_queryset]
self.assertNotIn(self.testgroup0.id, filtered_group_ids)
self.assertNotIn(self.testgroup1.id, filtered_group_ids)
self.assertNotIn(self.testgroup2.id, filtered_group_ids)
self.assertIn(self.testgroup3.id, filtered_group_ids)
self.assertIn(self.testgroup4.id, filtered_group_ids)
self.assertIn(self.testgroup5.id, filtered_group_ids)
self.assertIn(self.testgroup6.id, filtered_group_ids)
self.assertIn(self.testgroup7.id, filtered_group_ids)
def test_greater_than_3(self):
filtered_queryset = self.__filter_examiners(filter_value='gt-3')
self.assertEqual(filtered_queryset.count(), 4)
filtered_group_ids = [group.id for group in filtered_queryset]
self.assertNotIn(self.testgroup0.id, filtered_group_ids)
self.assertNotIn(self.testgroup1.id, filtered_group_ids)
        self.assertNotIn(self.testgroup2.id, filtered_group_ids)
|
jarble/EngScript
|
libraries/schedulePrioritizer.py
|
Python
|
mit
| 2,752
| 0.015262
|
'''
http://jsfiddle.net/nvYZ8/1/
'''
from functionChecker import functionChecker
functionChecker("schedulePrioritizer.py", "getAllPossibleSchedules")
"function name: getNonOverlappingRanges"
"requires functions: containsOverlappingRanges, overlapsWithOthers(theArr,theIndex)"
"is defined: True"
"description: Return all ranges in the array that do not overlap with any of the other ranges."
"function name: removeNonOverlappingRanges"
"requires functions: getNonOverlappingRanges"
"is defined: False"
"description: Remove all ranges from the array that do not overlap with any of the other ranges."
"function name: containsOverlappingRanges"
"requires functions: rangesOverlap"
"is defined: True"
"description: Return true if the array contains more than zero overlapping ranges, and otherwise return false."
"function name: rangesOverlap"
"requires functions: False"
"is defined: True"
"description: Check whether two 2D arrays are overlapping ranges."
"function name: convertToBinary"
"requires functions: False"
"is defined: True"
"description: Convert from decimal to binary."
"function name: overlapsWithOthers(theArr,theIndex)"
"requires functions: rangesOverlap"
"is defined: True"
"description: Check whether one element in the array overlaps with at least one of the elements that follows it."
def convertToBinary(x):
return int(bin(x)[2:])
def rangesOverlap(r1, r2):
#The events are also considered to be overlapping if one event happens immediately after the other.
return (r1[0] <= r2[1]) and (r2[0] <= r1[1])
def containsOverlappingRanges(arrayOfRanges):
for current in arrayOfRanges:
for current2 in arrayOfRanges:
if(rangesOverlap(current, current2) and current != current2):
return True
return False
def overlapsWithOthers(arr1, index):
for current in arr1:
if((current != arr1[index])):
if(rangesOverlap(current, arr1[index])):
return True
return False
def getNonOverlappingRanges(arr1):
arrayToReturn = []
for idx, current in enumerate(arr1):
if(not overlapsWithOthers(arr1, idx)):
arrayToReturn += [current]
return arrayToReturn
print convertToBinary(2)
print rangesOverlap([1, 3], [2, 5])
print rangesOverlap([1, 3], [3.1, 5])
print(overlapsWithOthers([[1,3], [4,5], [2,4], [7,8]],3))
print containsOverlappingRanges([[1, 3], [3.1, 5]])
print containsOverlappingRanges([[1, 3], [3.1, 5], [6,8], [1,7]])
print getNonOverlappingRanges([[1, 3], [3.1, 5], [9, 10], [7, 10]])
"function name: getAllPossibleSchedules"
"requires functions: containsOverlappingRanges, convertToBinary"
"is defined: False"
"description: Return true if the array contains more than zero overlapping ranges, and otherwise return false."
def getPossibleSchedules(theArray):
for current in theArray:
pass
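# Hedged sketch (added) of the undefined getAllPossibleSchedules declared
# above: enumerate every subset of the ranges by binary counting (hence the
# convertToBinary dependency) and keep those with no overlapping ranges.
def _demo_getAllPossibleSchedules(theArray):
    schedules = []
    for mask in range(1, 2 ** len(theArray)):
        subset = [r for i, r in enumerate(theArray) if (mask >> i) & 1]
        if not containsOverlappingRanges(subset):
            schedules.append(subset)
    return schedules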
|
auready/django
|
tests/view_tests/tests/test_csrf.py
|
Python
|
bsd-3-clause
| 4,007
| 0.000749
|
from django.template import TemplateDoesNotExist
from django.test import (
Client, RequestFactory, SimpleTestCase, override_settings,
)
from django.utils.translation import override
from django.views.csrf import CSRF_FAILURE_TEMPLATE_NAME, csrf_failure
@override_settings(ROOT_URLCONF='view_tests.urls')
class CsrfViewTests(SimpleTestCase):
def setUp(self):
super().setUp()
self.client = Client(enforce_csrf_checks=True)
@override_settings(
USE_I18N=True,
MIDDLEWARE=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
],
)
def test_translation(self):
"""
An invalid request is rejected with a localized error message.
"""
response = self.client.post('/')
self.assertContains(response, "Forbidden", status_code=403)
        self.assertContains(response,
"CSRF verification failed. Request aborted.",
status_code=403)
with self.settings(LANGUAGE_CODE='nl'), override('en-us'):
response = self.client.post('/')
self.assertContains(response, "Verboden", status_code=403)
self.assertContains(response,
"CSRF-verificatie mislukt. Verzoek afgebroken.",
status_code=403)
@override_settings(
SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO', 'https')
)
def test_no_referer(self):
"""
Referer header is strictly checked for POST over HTTPS. Trigger the
exception by sending an incorrect referer.
"""
response = self.client.post('/', HTTP_X_FORWARDED_PROTO='https')
self.assertContains(response,
"You are seeing this message because this HTTPS "
"site requires a 'Referer header' to be "
"sent by your Web browser, but none was sent.",
status_code=403)
def test_no_cookies(self):
"""
The CSRF cookie is checked for POST. Failure to send this cookie should
provide a nice error message.
"""
response = self.client.post('/')
self.assertContains(response,
"You are seeing this message because this site "
"requires a CSRF cookie when submitting forms. "
"This cookie is required for security reasons, to "
"ensure that your browser is not being hijacked "
"by third parties.",
status_code=403)
@override_settings(TEMPLATES=[])
def test_no_django_template_engine(self):
"""
The CSRF view doesn't depend on the TEMPLATES configuration (#24388).
"""
response = self.client.post('/')
self.assertContains(response, "Forbidden", status_code=403)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
CSRF_FAILURE_TEMPLATE_NAME: 'Test template for CSRF failure'
}),
],
},
}])
def test_custom_template(self):
"""
A custom CSRF_FAILURE_TEMPLATE_NAME is used.
"""
response = self.client.post('/')
self.assertContains(response, "Test template for CSRF failure", status_code=403)
def test_custom_template_does_not_exist(self):
"""
An exception is raised if a nonexistent template is supplied.
"""
factory = RequestFactory()
request = factory.post('/')
with self.assertRaises(TemplateDoesNotExist):
csrf_failure(request, template_name="nonexistent.html")
|
Mausy5043/ubundiagd
|
daemon15.py
|
Python
|
mit
| 4,996
| 0.015212
|
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015] (deprecated)
# daemon15.py measures the size of selected logfiles.
# These are all counters, therefore no averaging is needed.
import syslog, traceback
import os, sys, time, math, commands
from subprocess import check_output
from libdaemon import Daemon
import ConfigParser
DEBUG = False
IS_SYSTEMD = os.path.isfile('/bin/journalctl')
leaf = os.path.realpath(__file__).split('/')[-2]
os.nice(10)
class MyDaemon(Daemon):
def run(self):
iniconf = ConfigParser.ConfigParser()
inisection = "15"
        home = os.path.expanduser('~')
s = iniconf.read(home + '/' + leaf + '/config.ini')
if DEBUG: print "config file : ", s
if DEBUG: print iniconf.items(inisection)
        reportTime = iniconf.getint(inisection, "reporttime")
cycles = iniconf.getint(inisection, "cycles")
samplesperCycle = iniconf.getint(inisection, "samplespercycle")
flock = iniconf.get(inisection, "lockfile")
fdata = iniconf.get(inisection, "resultfile")
samples = samplesperCycle * cycles # total number of samples averaged
sampleTime = reportTime/samplesperCycle # time [s] between samples
cycleTime = samples * sampleTime # time [s] per cycle
data = [] # array for holding sampledata
while True:
try:
startTime = time.time()
result = do_work().split(',')
data = map(int, result)
if (startTime % reportTime < sampleTime):
do_report(data, flock, fdata)
waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
if (waitTime > 0):
if DEBUG:print "Waiting {0} s".format(waitTime)
time.sleep(waitTime)
except Exception as e:
if DEBUG:
print "Unexpected error:"
print e.message
syslog.syslog(syslog.LOG_ALERT,e.__doc__)
syslog_trace(traceback.format_exc())
raise
def do_work():
# 3 datapoints gathered here
kernlog=messlog=syslog=0
if IS_SYSTEMD:
# -p, --priority=
# Filter output by message priorities or priority ranges. Takes either a single numeric or textual log level (i.e.
# between 0/"emerg" and 7/"debug"), or a range of numeric/text log levels in the form FROM..TO. The log levels are the
# usual syslog log levels as documented in syslog(3), i.e. "emerg" (0), "alert" (1), "crit" (2), "err" (3),
# "warning" (4), "notice" (5), "info" (6), "debug" (7). If a single log level is specified, all messages with this log
# level or a lower (hence more important) log level are shown. If a range is specified, all messages within the range
# are shown, including both the start and the end value of the range. This will add "PRIORITY=" matches for the
# specified priorities.
critlog = commands.getoutput("journalctl --since=00:00:00 --no-pager -p 0..3 |wc -l").split()[0]
warnlog = commands.getoutput("journalctl --since=00:00:00 --no-pager -p 4 |wc -l").split()[0]
syslog = commands.getoutput("journalctl --since=00:00:00 --no-pager |wc -l").split()[0]
else:
critlog = wc("/var/log/kern.log")
warnlog = wc("/var/log/smartd.log")
syslog = wc("/var/log/syslog")
return '{0}, {1}, {2}'.format(critlog, warnlog, syslog)
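# Added sketch (not in the original daemon): do_work() yields the three
# counters as one CSV string, e.g. '12, 3, 4567', which the main loop parses
# back with map(int, ...) (this is Python 2 code, so map returns a list).
def _demo_counts():
    return map(int, do_work().split(','))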
def wc(filename):
return int(check_output(["wc", "-l", filename]).split()[0])
def do_report(result, flock, fdata):
# Get the time and date in human-readable form and UN*X-epoch...
outDate = commands.getoutput("date '+%F %H:%M:%S, %s'")
result = ', '.join(map(str, result))
lock(flock)
with open(fdata, 'a') as f:
f.write('{0}, {1}\n'.format(outDate, result) )
unlock(flock)
def lock(fname):
open(fname, 'a').close()
def unlock(fname):
if os.path.isfile(fname):
os.remove(fname)
def syslog_trace(trace):
# Log a python stack trace to syslog
log_lines = trace.split('\n')
for line in log_lines:
if line:
syslog.syslog(syslog.LOG_ALERT,line)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/' + leaf + '/15.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print "Debug-mode started. Use <Ctrl>+C to stop."
DEBUG = True
if DEBUG:
logtext = "Daemon logging is ON"
syslog.syslog(syslog.LOG_DEBUG, logtext)
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: {0!s} start|stop|restart|foreground".format(sys.argv[0])
sys.exit(2)
|
rogeriofalcone/treeio
|
script/testmodel.py
|
Python
|
mit
| 810
| 0.008642
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
#!/usr/bin/python
OBJECTS_NUM = 100
# setup environment
import sys, os
sys.path.append('../')
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.core.management import setup_environ
from treeio import settings
from treeio.core.models import Object, User
from treeio.projects.models import Project
setup_environ(settings)
user = User.objects.all()[0]
for i in range(0, OBJECTS_NUM):
project = Project(name='test'+unicode(i))
project.set_user(user)
project.save()
objects = Object.filter_permitted(user, Project.objects)
allowed = 0
for obj in objects:
if user.has_permission(obj):
allowed += 1
print len(list(objects)), ':', allowed
|
chainer/chainercv
|
tests/visualizations_tests/test_vis_bbox.py
|
Python
|
mit
| 4,256
| 0
|
import unittest
import numpy as np
from chainer import testing
from chainercv.utils import generate_random_bbox
from chainercv.visualizations import vis_bbox
try:
import matplotlib # NOQA
_available = True
except ImportError:
_available = False
@testing.parameterize(
*testing.product_dict([
{
        'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': None,
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': None,
'label_names': None},
{
'n_bbox': 3, 'label': (0, 1, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 0, 'label': (), 'score': (),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'), 'no_img': True},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'),
'instance_colors': [
(255, 0, 0), (0, 255, 0), (0, 0, 255), (100, 100, 100)]},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisBbox(unittest.TestCase):
def setUp(self):
if hasattr(self, 'no_img'):
self.img = None
else:
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.bbox = generate_random_bbox(
self.n_bbox, (48, 32), 8, 16)
if self.label is not None:
self.label = np.array(self.label, dtype=int)
if self.score is not None:
self.score = np.array(self.score)
if not hasattr(self, 'instance_colors'):
self.instance_colors = None
def test_vis_bbox(self):
ax = vis_bbox(
self.img, self.bbox, self.label, self.score,
label_names=self.label_names,
instance_colors=self.instance_colors,
sort_by_score=self.sort_by_score)
self.assertIsInstance(ax, matplotlib.axes.Axes)
@testing.parameterize(*testing.product_dict([
{
'n_bbox': 3, 'label': (0, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1, 0.75),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 3), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (-1, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisBboxInvalidInputs(unittest.TestCase):
def setUp(self):
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.bbox = np.random.uniform(size=(self.n_bbox, 4))
if self.label is not None:
self.label = np.array(self.label, dtype=int)
if self.score is not None:
self.score = np.array(self.score)
if not hasattr(self, 'instance_colors'):
self.instance_colors = None
def test_vis_bbox_invalid_inputs(self):
with self.assertRaises(ValueError):
vis_bbox(
self.img, self.bbox, self.label, self.score,
label_names=self.label_names,
instance_colors=self.instance_colors,
sort_by_score=self.sort_by_score)
testing.run_module(__name__, __file__)
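# Added usage sketch (not part of the test module): calling vis_bbox directly
# on a random CHW image with random boxes; requires matplotlib.
def _demo_vis_bbox():
    img = np.random.randint(0, 255, size=(3, 32, 48))
    bbox = generate_random_bbox(3, (48, 32), 8, 16)
    return vis_bbox(img, bbox, label=np.array([0, 1, 2]),
                    label_names=('c0', 'c1', 'c2'))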
|
kylemvz/magichour-old
|
StringKernel/kernel_kmeans.py
|
Python
|
apache-2.0
| 4,707
| 0.002337
|
"""Kernel K-means"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import logging
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.kernel_approximation import Nystroem  # needed by _get_kernel_approx
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils import check_random_state
logger = logging.getLogger(__name__)
class KernelKMeans(BaseEstimator, ClusterMixin):
"""
Kernel K-means
Reference
---------
Kernel k-means, Spectral Clustering and Normalized Cuts.
Inderjit S. Dhillon,
Yuqiang Guan, Brian Kulis.
KDD 2004.
"""
def __init__(self, n_clusters=3, max_iter=50, tol=1e-3, random_state=None,
kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None, verbose=0, nystroem=-1):
self.n_clusters = n_clusters
self.max_iter = max_iter
self.tol = tol
self.random_state = random_state
self.kernel = kernel
self.gamma = gamma
self.degree = degree
        self.coef0 = coef0
self.kernel_params = kernel_params
self.verbose = verbose
self.nystroem = nystroem
@property
def _pairwise(self):
return self.kernel == "precomputed"
def _get_kernel_approx(self, X, Y=None):
n = Nystroem(kernel=self.kernel, n_components=self.nystroem, kernel_params=self.kernel_params).fit(X)
z_transformed = n.transform(X)
return np.dot(z_transformed, z_transformed.T)
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
def fit(self, X, y=None, sample_weight=None):
n_samples = X.shape[0]
if self.nystroem == -1:
logger.debug("Nystroem kernel approximation not enabled. Computing full kernel.")
K = self._get_kernel(X)
else:
logger.debug("Enabled Nystroem kernel approximation (num_components=%s)." % self.nystroem)
K = self._get_kernel_approx(X)
        sw = sample_weight if sample_weight is not None else np.ones(n_samples)
self.sample_weight_ = sw
rs = check_random_state(self.random_state)
self.labels_ = rs.randint(self.n_clusters, size=n_samples)
dist = np.zeros((n_samples, self.n_clusters))
self.within_distances_ = np.zeros(self.n_clusters)
for it in xrange(self.max_iter):
dist.fill(0)
self._compute_dist(K, dist, self.within_distances_,
update_within=True)
labels_old = self.labels_
self.labels_ = dist.argmin(axis=1)
# Compute the number of samples whose cluster did not change
# since last iteration.
n_same = np.sum((self.labels_ - labels_old) == 0)
if 1 - float(n_same) / n_samples < self.tol:
if self.verbose:
print "Converged at iteration", it + 1
break
self.X_fit_ = X
return self
def _compute_dist(self, K, dist, within_distances, update_within):
"""Compute a n_samples x n_clusters distance matrix using the
kernel trick."""
sw = self.sample_weight_
for j in xrange(self.n_clusters):
mask = self.labels_ == j
if np.sum(mask) == 0:
raise ValueError("Empty cluster found, try smaller n_cluster.")
denom = sw[mask].sum()
denomsq = denom * denom
if update_within:
KK = K[mask][:, mask] # K[mask, mask] does not work.
dist_j = np.sum(np.outer(sw[mask], sw[mask]) * KK / denomsq)
within_distances[j] = dist_j
dist[:, j] += dist_j
else:
dist[:, j] += within_distances[j]
dist[:, j] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / denom
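    # Added note: _compute_dist realizes the kernel-trick expansion
    #   ||phi(x) - mu_j||^2 = K(x, x)
    #       - (2 / w_j) * sum_{i in C_j} w_i * K(x, x_i)
    #       + (1 / w_j^2) * sum_{i,l in C_j} w_i * w_l * K(x_i, x_l)
    # where w_j is the total sample weight of cluster j; the constant K(x, x)
    # term is dropped because it does not change the argmin over clusters.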
def predict(self, X):
K = self._get_kernel(X, self.X_fit_)
n_samples = X.shape[0]
dist = np.zeros((n_samples, self.n_clusters))
self._compute_dist(K, dist, self.within_distances_,
update_within=False)
return dist.argmin(axis=1)
if __name__ == '__main__':
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=1000, centers=5, random_state=0)
km = KernelKMeans(n_clusters=5, max_iter=100, random_state=0, verbose=1)
print km.fit_predict(X)[:10]
print km.predict(X[:10])
|
networks-lab/metaknowledge
|
metaknowledge/proquest/recordProQuest.py
|
Python
|
gpl-2.0
| 4,491
| 0.006903
|
import collections
import io
import itertools
from ..mkExceptions import BadProQuestRecord, RecordsNotCompatible
from ..mkRecord import ExtendedRecord
from .tagProcessing.specialFunctions import proQuestSpecialTagToFunc
from .tagProcessing.tagFunctions import proQuestTagToFunc
class ProQuestRecord(ExtendedRecord):
"""Class for full ProQuest entries.
This class is an [ExtendedRecord](./ExtendedRecord.html#metaknowledge.ExtendedRecord) capable of generating its own id number. You should not create them directly, but instead use [proQuestParser()](../modules/proquest.html#metaknowledge.proquest.proQuestHandlers.proQuestParser) on a ProQuest file.
"""
def __init__(self, inRecord, recNum = None, sFile = "", sLine = 0):
bad = False
error = None
fieldDict = None
try:
if isinstance(inRecord, dict) or isinstance(inRecord, collections.OrderedDict):
fieldDict = collections.OrderedDict(inRecord)
elif isinstance(inRecord, enumerate) or isinstance(inRecord, itertools.chain):
#Already enumerated
#itertools.chain is for the parser upstream to insert stuff into the stream
                fieldDict = proQuestRecordParser(inRecord, recNum)
elif isinstance(inRecord, io.IOBase):
fieldDict = proQuestRecordParser(enumerate(inRecord), recNum)
elif isinstance(inRecord, str):
                #Probably a better way to do this, but it isn't going to be used much, so no need to improve it
def addCharToEnd(lst):
for s in lst:
yield s + '\n'
fieldDict = proQuestRecordParser(enumerate(addCharToEnd(inRecord.split('\n')), start = 1), recNum)
#string io
else:
raise TypeError("Unsupported input type '{}', ProQuestRecords cannot be created from '{}'".format(inRecord, type(inRecord)))
except BadProQuestRecord as b:
self.bad = True
self.error = b
fieldDict = collections.OrderedDict()
try:
self._proID = "PROQUEST:{}".format(fieldDict["ProQuest document ID"][0])
except KeyError:
self._proID = "PROQUEST:MISSING"
bad = True
error = BadProQuestRecord("Missing ProQuest document ID")
ExtendedRecord.__init__(self, fieldDict, self._proID, bad, error, sFile =sFile, sLine = sLine)
def encoding(self):
return 'utf-8'
@staticmethod
def getAltName(tag):
return None
@staticmethod
def tagProcessingFunc(tag):
#Should not raise an exception
#It might be faster to do this as a class attribute
return proQuestTagToFunc(tag)
def specialFuncs(self, key):
return proQuestSpecialTagToFunc[key](self)
#raise KeyError("There are no special functions given by default.")
def writeRecord(self, infile):
raise RecordsNotCompatible("ProQuest's data format cannot be written back to file. You can still write out a csv with writeCSV().")
def proQuestRecordParser(enRecordFile, recNum):
"""The parser [ProQuestRecords](../classes/ProQuestRecord.html#metaknowledge.proquest.ProQuestRecord) use. This takes an entry from [proQuestParser()](#metaknowledge.proquest.proQuestHandlers.proQuestParser) and parses it a part of the creation of a `ProQuestRecord`.
# Parameters
_enRecordFile_ : `enumerate object`
> a file wrapped by `enumerate()`
_recNum_ : `int`
> The number given to the entry in the first section of the ProQuest file
# Returns
`collections.OrderedDict`
    > An ordered dictionary of the key-value pairs in the entry
"""
tagDict = collections.OrderedDict()
currentEntry = 'Name'
while True:
lineNum, line = next(enRecordFile)
if line == '_' * 60 + '\n':
break
elif line == '\n':
pass
        elif currentEntry == 'Name' or currentEntry == 'url':
tagDict[currentEntry] = [line.rstrip()]
currentEntry = None
elif ':' in line and not line.startswith('http://'):
splitLine = line.split(': ')
currentEntry = splitLine[0]
tagDict[currentEntry] = [': '.join(splitLine[1:]).rstrip()]
if currentEntry == 'Author':
currentEntry = 'url'
else:
tagDict[currentEntry].append(line.rstrip())
return tagDict
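
A synthetic walk-through of the parser (the entry below is invented for illustration and assumes proQuestRecordParser above is in scope; real ProQuest exports carry many more fields):

entry = iter([
    "An example title\n",            # the first line is stored under 'Name'
    "Author: J. Doe\n",              # 'Author' switches the parser to 'url' mode
    "http://example.com/doc\n",      # so this line is stored under 'url'
    "ProQuest document ID: 12345\n",
    "_" * 60 + "\n",                 # a line of 60 underscores ends the entry
])
fields = proQuestRecordParser(enumerate(entry, start=1), 1)
# fields -> OrderedDict with:
#   'Name'                 : ['An example title']
#   'Author'               : ['J. Doe']
#   'url'                  : ['http://example.com/doc']
#   'ProQuest document ID' : ['12345']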
|
netzulo/qacode
|
tests/001_functionals/suite_004_navbase.py
|
Python
|
gpl-3.0
| 16,880
| 0
|
# -*- coding: utf-8 -*-
"""Package for suites and tests related to bots.modules package"""
import pytest
from qacode.core.bots.modules.nav_base import NavBase
from qacode.core.exceptions.core_exception import CoreException
from qacode.core.testing.asserts import Assert
from qacode.core.testing.test_info import TestInfoBotUnique
from qacode.utils import settings
from selenium.webdriver.remote.webelement import WebElement
ASSERT = Assert()
SETTINGS = settings(file_path="qacode/configs/")
SKIP_NAVS = SETTINGS['tests']['skip']['bot_navigations']
SKIP_NAVS_MSG = 'bot_navigations DISABLED by config file'
class TestNavBase(TestInfoBotUnique):
"""Test Suite for class NavBase"""
app = None
page = None
@classmethod
def setup_class(cls, **kwargs):
"""Setup class (suite) to be executed"""
super(TestNavBase, cls).setup_class(
config=settings(file_path="qacode/configs/"),
skip_force=SKIP_NAVS)
def setup_method(self, test_method, close=True):
"""Configure self.attribute"""
super(TestNavBase, self).setup_method(
test_method,
config=settings(file_path="qacode/configs/"))
self.add_property('app', self.cfg_app('qadmin'))
self.add_property('page', self.cfg_page('qacode_login'))
self.add_property('txt_username', self.cfg_control('txt_username'))
self.add_property('txt_password', self.cfg_control('txt_password'))
self.add_property('btn_submit', self.cfg_control('btn_submit'))
self.add_property('lst_ordered', self.cfg_control('lst_ordered'))
self.add_property(
'lst_ordered_child', self.cfg_control('lst_ordered_child'))
self.add_property('dd_menu_data', self.cfg_control('dd_menu_data'))
self.add_property(
'dd_menu_data_lists', self.cfg_control('dd_menu_data_lists'))
self.add_property(
'btn_click_invisible', self.cfg_control('btn_click_invisible'))
self.add_property(
'btn_click_visible', self.cfg_control('btn_click_visible'))
self.add_property('title_buttons', self.cfg_control('title_buttons'))
def setup_login_to_inputs(self):
"""Do login before to exec some testcases"""
# setup_login
self.bot.navigation.get_url(self.page.get('url'), wait_for_load=10)
txt_username = self.bot.navigation.find_element(
self.txt_username.get("selector"))
txt_password = self.bot.navigation.find_element(
self.txt_password.get("selector"))
btn_submit = self.bot.navigation.find_element(
self.btn_submit.get("selector"))
self.bot.navigation.ele_write(txt_username, "admin")
self.bot.navigation.ele_write(txt_password, "admin")
self.bot.navigation.ele_click(btn_submit)
# end setup_login
def setup_login_to_data(self):
"""Do login before to exec some testcases"""
# setup_login
self.bot.navigation.get_url(self.page.get('url'), wait_for_load=10)
txt_username = self.bot.navigation.find_element(
self.txt_username.get("selector"))
txt_password = self.bot.navigation.find_element(
self.txt_password.get("selector"))
btn_submit = self.bot.navigation.find_element(
self.btn_submit.get("selector"))
self.bot.navigation.ele_write(txt_username, "admin")
self.bot.navigation.ele_write(txt_password, "admin")
self.bot.navigation.ele_click(btn_submit)
self.bot.navigation.ele_click(
self.bot.navigation.find_element_wait(
self.dd_menu_data.get("selector")))
self.bot.navigation.ele_click(
self.bot.navigation.find_element_wait(
self.dd_menu_data_lists.get("selector")))
# end setup_login
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_navbase_instance(self):
"""Testcase: test_navbase_instance"""
ASSERT.is_instance(self.bot.navigation, NavBase)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_gourl_withoutwaits(self):
"""Testcase: test_gourl_withoutwaits"""
self.bot.navigation.get_url(self.page.get('url'))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_gourl_withwaits(self):
"""Testcase: test_gourl_withwaits"""
self.bot.navigation.get_url(
self.page.get('url'), wait_for_load=1)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getcurrenturl_ok(self):
"""Testcase: test_getcurrenturl_ok"""
ASSERT.equals(
self.bot.navigation.get_current_url(),
self.page.get('url'))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_isurl_true(self):
"""Testcase: test_isurl_true"""
ASSERT.true(
self.bot.navigation.is_url(
self.bot.navigation.get_current_url()))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_isurl_false(self):
"""Testcase: test_isurl_false"""
ASSERT.false(self.bot.navigation.is_url(""))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_isurl_raiseswhenurlreturnfalse(self):
"""Testcase: test_isurl_false"""
with pytest.raises(CoreException):
self.bot.navigation.is_url("", ignore_raises=False)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_reload_ok(self):
"""Testcase: test_reload_ok"""
self.bot.navigation.reload()
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_forward_ok(self):
"""Testcase: test_reload_ok"""
self.bot.navigation.forward()
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getmaximizewindow_ok(self):
"""Testcase: test_getmaximizewindow_ok"""
self.bot.navigation.get_maximize_window()
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getcapabilities_ok(self):
"""Testcase: test_getcapabilities_ok"""
caps = self.bot.navigation.get_capabilities()
ASSERT.is_instance(caps, dict)
ASSERT.is_instance(caps['chrome'], dict)
ASSERT.equals(caps['browserName'], 'chrome')
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_getlog_ok(self):
"""Testcase: test_getlog_ok"""
self.bot.navigation.get_url(self.page.get('url'))
log_data = self.bot.navigation.get_log()
ASSERT.not_none(log_data)
self.log.debug("selenium logs, browser={}".format(log_data))
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
@pytest.mark.parametrize(
"log_name", [None, 'browser', 'driver', 'client', 'server'])
def test_getlog_lognames(self, log_name):
"""Testcase: test_getlog_lognames"""
self.bot.navigation.get_url(self.page.get('url'))
if log_name is None:
with pytest.raises(CoreException):
self.bot.navigation.get_log(log_name=log_name)
return True
log_data = self.bot.navigation.get_log(log_name=log_name)
ASSERT.not_none(log_data)
msg = "selenium logs, log_name={}, log_data={}".format(
log_name, log_data)
self.log.debug(msg)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelement_ok(self):
"""Testcase: test_findelement_ok"""
ASSERT.is_instance(
self.bot.navigation.find_element("body"),
WebElement)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelement_notfound(self):
"""Testcase: test_findelement_notfound"""
with pytest.raises(CoreException):
self.bot.navigation.find_element("article")
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelement_notlocator(self):
"""Testcase: test_findelement_notlocator"""
with pytest.raises(CoreException):
self.bot.navigation.find_element(
"body", locator=None)
@pytest.mark.skipIf(SKIP_NAVS, SKIP_NAVS_MSG)
def test_findelementwait_ok(self):
"""Testcase: test_findelementwait_ok"""
ASSERT.is_instance(
self.bot.navigation.find_element_wait("bod
|
edineicolli/daruma-exemplo-python
|
scripts/fiscal/ui_fiscal_icnfefetuarpagamento.py
|
Python
|
gpl-2.0
| 5,558
| 0.0036
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_fiscal_icnfefetuarpagamento.ui'
#
# Created: Mon Nov 24 22:25:57 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
from pydaruma.pydaruma import iCNFEfetuarPagamento_ECF_Daruma
from scripts.fiscal.retornofiscal import tratarRetornoFiscal
class Ui_ui_FISCAL_iCNFEfetuarPagamento(QtGui.QWidget):
def __init__(self):
super(Ui_ui_FISCAL_iCNFEfetuarPagamento, self).__init__()
self.setupUi(self)
self.pushButtonEnviar.clicked.connect(self.on_pushButtonEnviar_clicked)
self.pushButtonCancelar.clicked.connect(self.on_pushButtonCancelar_clicked)
def on_pushButtonEnviar_clicked(self):
StrFormaPGTO = self.lineEditForma.text()
StrValor = self.lineEditValor.text()
StrInfo = self.lineEditInfo.text()
tratarRetornoFiscal(iCNFEfetuarPagamento_ECF_Daruma(StrFormaPGTO,StrValor,StrInfo), self)
def on_pushButtonCancelar_clicked(self):
self.close()
def setupUi(self, ui_FISCAL_iCNFEfetuarPagamento):
ui_FISCAL_iCNFEfetuarPagamento.setObjectName("ui_FISCAL_iCNFEfetuarPagamento")
ui_FISCAL_iCNFEfetuarPagamento.resize(531, 123)
self.verticalLayout = QtGui.QVBoxLayout(ui_FISCAL_iCNFEfetuarPagamento)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.lineEditForma = QtGui.QLineEdit(ui_FISCAL_iCNFEfetuarPagamento)
self.lineEditForma.setMaximumSize(QtCore.QSize(100, 16777215))
self.lineEditForma.setObjectName("lineEditForma")
self.gridLayout.addWidget(self.lineEditForma, 0, 1, 1, 1)
self.labelValor = QtGui.QLabel(ui_FISCAL_iCNFEfetuarPagamento)
self.labelValor.setObjectName("labelValor")
self.gridLayout.addWidget(self.labelValor, 1, 0, 1, 1)
self.lineEditValor = QtGui.QLineEdit(ui_FISCAL_iCNFEfetuarPagamento)
self.lineEditValor.setMaximumSize(QtCore.QSize(70, 25))
self.lineEditValor.setObjectName("lineEditValor")
self.gridLayout.addWidget(self.lineEditValor, 1, 1, 1, 1)
self.labelInformacao = QtGui.QLabel(ui_FISCAL_iCNFEfetuarPagamento)
self.labelInformacao.setObjectName("labelInformacao")
self.gridLayout.addWidget(self.labelInformacao, 2, 0, 1, 1)
self.lineEditInfo = QtGui.QLineEdit(ui_FISCAL_iCNFEfetuarPagamento)
self.lineEditInfo.setMinimumSize(QtCore.QSize(401, 20))
self.lineEditInfo.setObjectName("lineEditInfo")
self.gridLayout.addWidget(self.lineEditInfo, 2, 1, 1, 1)
self.labelForma = QtGui.QLabel(ui_FISCAL_iCNFEfetuarPagamento)
self.labelForma.setObjectName("labelForma")
self.gridLayout.addWidget(self.labelForma, 0, 0, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
        self.pushButtonEnviar = QtGui.QPushButton(ui_FISCAL_iCNFEfetuarPagamento)
        self.pushButtonEnviar.setObjectName("pushButtonEnviar")
self.horizontalLayout.addWidget(self.pushButtonEnviar)
self.pushButtonCancelar = QtGui.QPushButton(ui_FISCAL_iCNFEfetuarPagamento)
self.pushButtonCancelar.setObjectName("pushButtonCancelar")
self.horizontalLayout.addWidget(self.pushButtonCancelar)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(ui_FISCAL_iCNFEfetuarPagamento)
QtCore.QMetaObject.connectSlotsByName(ui_FISCAL_iCNFEfetuarPagamento)
def retranslateUi(self, ui_FISCAL_iCNFEfetuarPagamento):
ui_FISCAL_iCNFEfetuarPagamento.setWindowTitle(QtGui.QApplication.translate("ui_FISCAL_iCNFEfetuarPagamento", "Método iCNFEfetuarPagamento_ECF_Daruma", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditForma.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFEfetuarPagamento", "Dinheiro", None, QtGui.QApplication.UnicodeUTF8))
self.labelValor.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFEfetuarPagamento", "Valor:", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditValor.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFEfetuarPagamento", "10,00", None, QtGui.QApplication.UnicodeUTF8))
self.labelInformacao.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFEfetuarPagamento", "Informação Adicional:", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditInfo.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFEfetuarPagamento", "Obrigado Volte Sempre! DFW Efetua Forma pagamento com mensagem adicional.", None, QtGui.QApplication.UnicodeUTF8))
self.labelForma.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFEfetuarPagamento", "Forma Pagto:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonEnviar.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFEfetuarPagamento", "Enviar", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonCancelar.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFEfetuarPagamento", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
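
A minimal launch sketch for the generated widget (illustrative; it assumes PySide and the pydaruma bindings imported above are actually installed):

if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    window = Ui_ui_FISCAL_iCNFEfetuarPagamento()
    window.show()
    sys.exit(app.exec_())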
|
twstrike/le_for_patching
|
acme/acme/client.py
|
Python
|
apache-2.0
| 23,969
| 0.000167
|
"""ACME client API."""
import collections
import datetime
import heapq
import logging
import time
import six
from six.moves import http_client # pylint: disable=import-error
import OpenSSL
import requests
import sys
import werkzeug
from acme import errors
from acme import jose
from acme import jws
from acme import messages
logger = logging.getLogger(__name__)
# Prior to Python 2.7.9 the stdlib SSL module did not allow a user to configure
# many important security related options. On these platforms we use PyOpenSSL
# for SSL, which does allow these options to be configured.
# https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning
if sys.version_info < (2, 7, 9): # pragma: no cover
requests.packages.urllib3.contrib.pyopenssl.inject_into_urllib3()
class Client(object): # pylint: disable=too-many-instance-attributes
"""ACME client.
.. todo::
Clean up raised error types hierarchy, document, and handle (wrap)
instances of `.DeserializationError` raised in `from_json()`.
:ivar messages.Directory directory:
:ivar key: `.JWK` (private)
:ivar alg: `.JWASignature`
:ivar bool verify_ssl: Verify SSL certificates?
:ivar .ClientNetwork net: Client network. Useful for testing. If not
supplied, it will be initialized using `key`, `alg` and
`verify_ssl`.
"""
DER_CONTENT_TYPE = 'application/pkix-cert'
def __init__(self, directory, key, alg=jose.RS256, verify_ssl=True,
net=None):
"""Initialize.
:param directory: Directory Resource (`.messages.Directory`) or
URI from which the resource will be downloaded.
"""
self.key = key
self.net = ClientNetwork(key, alg, verify_ssl) if net is None else net
if isinstance(directory, six.string_types):
self.directory = messages.Directory.from_json(
self.net.get(directory).json())
else:
self.directory = directory
@classmethod
def _regr_from_response(cls, response, uri=None, new_authzr_uri=None,
terms_of_service=None):
if 'terms-of-service' in response.links:
terms_of_service = response.links['terms-of-service']['url']
if 'next' in response.links:
new_authzr_uri = response.links['next']['url']
if new_authzr_uri is None:
raise errors.ClientError('"next" link missing')
return messages.RegistrationResource(
body=messages.Registration.from_json(response.json()),
uri=response.headers.get('Location', uri),
new_authzr_uri=new_authzr_uri,
terms_of_service=terms_of_service)
def register(self, new_reg=None):
"""Register.
:param .NewRegistration new_reg:
:returns: Registration Resource.
:rtype: `.RegistrationResource`
:raises .UnexpectedUpdate:
"""
new_reg = messages.NewRegistration() if new_reg is None else new_reg
assert isinstance(new_reg, messages.NewRegistration)
response = self.net.post(self.directory[new_reg], new_reg)
# TODO: handle errors
assert response.status_code == http_client.CREATED
# "Instance of 'Field' has no key/contact member" bug:
# pylint: disable=no-member
regr = self._regr_from_response(response)
if (regr.body.key != self.key.public_key() or
regr.body.contact != new_reg.contact):
raise errors.UnexpectedUpdate(regr)
return regr
def _send_recv_regr(self, regr, body):
response = self.net.post(regr.uri, body)
# TODO: Boulder returns httplib.ACCEPTED
#assert response.status_code == httplib.OK
# TODO: Boulder does not set Location or Link on update
# (c.f. acme-spec #94)
return self._regr_from_response(
response, uri=regr.uri, new_authzr_uri=regr.new_authzr_uri,
terms_of_service=regr.terms_of_service)
def update_registration(self, regr, update=None):
"""Update registration.
:param messages.RegistrationResource regr: Registration Resource.
:param messages.Registration update: Updated body of the
resource. If not provided, body will be taken from `regr`.
:returns: Updated Registration Resource.
:rtype: `.RegistrationResource`
"""
update = regr.body if update is None else update
updated_regr = self._send_recv_regr(
regr, body=messages.UpdateRegistration(**dict(update)))
if updated_regr != regr:
raise errors.UnexpectedUpdate(regr)
return updated_regr
def query_registration(self, regr):
"""Query server about registration.
:param messages.RegistrationResource: Existing Registration
Resource.
"""
return self._send_recv_regr(regr, messages.UpdateRegistration())
def agree_to_tos(self, regr):
"""Agree to the terms-of-service.
Agree to the terms-of-service in a Registration Resource.
:param regr: Registration Resource.
:type regr: `.RegistrationResource`
:returns: Updated Registration Resource.
:rtype: `.RegistrationResource`
"""
return self.update_registration(
regr.update(body=regr.body.update(agreement=regr.terms_of_service)))
def _authzr_from_response(self, response, identifier,
uri=None, new_cert_uri=None):
# pylint: disable=no-self-use
if new_cert_uri is None:
try:
new_cert_uri = response.links['next']['url']
except KeyError:
raise errors.ClientError('"next" link missing')
authzr = messages.AuthorizationResource(
body=messages.Authorization.from_json(response.json()),
uri=response.headers.get('Location', uri),
            new_cert_uri=new_cert_uri)
if authzr.body.identifier != identifier:
raise errors.UnexpectedUpdate(authzr)
return authzr
def request_challenges(self, identifier, new_authzr_uri):
"""Request challenges.
        :param identifier: Identifier to be challenged.
:type identifier: `.messages.Identifier`
:param str new_authzr_uri: new-authorization URI
:returns: Authorization Resource.
:rtype: `.AuthorizationResource`
"""
new_authz = messages.NewAuthorization(identifier=identifier)
response = self.net.post(new_authzr_uri, new_authz)
# TODO: handle errors
assert response.status_code == http_client.CREATED
return self._authzr_from_response(response, identifier)
def request_domain_challenges(self, domain, new_authz_uri):
"""Request challenges for domain names.
This is simply a convenience function that wraps around
`request_challenges`, but works with domain names instead of
generic identifiers.
:param str domain: Domain name to be challenged.
:param str new_authzr_uri: new-authorization URI
:returns: Authorization Resource.
:rtype: `.AuthorizationResource`
"""
return self.request_challenges(messages.Identifier(
typ=messages.IDENTIFIER_FQDN, value=domain), new_authz_uri)
def answer_challenge(self, challb, response):
"""Answer challenge.
:param challb: Challenge Resource body.
:type challb: `.ChallengeBody`
:param response: Corresponding Challenge response
:type response: `.challenges.ChallengeResponse`
:returns: Challenge Resource with updated body.
:rtype: `.ChallengeResource`
:raises .UnexpectedUpdate:
"""
response = self.net.post(challb.uri, response)
try:
authzr_uri = response.links['up']['url']
except KeyError:
raise errors.ClientError('"up" Link header missing')
challr = messages.ChallengeResource(
authzr_uri=authzr_uri,
body=messages.ChallengeBody.from_json(response.json()))
# TODO:
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/lib-scriptpackages/Netscape/WorldWideWeb_suite.py
|
Python
|
mit
| 16,104
| 0.005899
|
"""Suite WorldWideWeb suite, as defined in Spyglass spec.:
Level 1, version 1
Generated from /Volumes/Sap/Applications (Mac OS 9)/Netscape Communicator\xe2\x84\xa2 Folder/Netscape Communicator\xe2\x84\xa2
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'WWW!'
class WorldWideWeb_suite_Events:
_argmap_OpenURL = {
'to' : 'INTO',
'toWindow' : 'WIND',
'flags' : 'FLGS',
'post_data' : 'POST',
'post_type' : 'MIME',
'progressApp' : 'PROG',
}
def OpenURL(self, _object, _attributes={}, **_arguments):
"""OpenURL: Opens a URL. Allows for more options than GetURL event
Required argument: URL
Keyword argument to: file destination
Keyword argument toWindow: window iD
Keyword argument flags: Binary: any combination of 1, 2 and 4 is allowed: 1 and 2 mean force reload the document. 4 is ignored
Keyword argument post_data: Form posting data
Keyword argument post_type: MIME type of the posting data. Defaults to application/x-www-form-urlencoded
Keyword argument progressApp: Application that will display progress
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: ID of the loading window
"""
_code = 'WWW!'
_subcode = 'OURL'
aetools.keysubst(_arguments, self._argmap_OpenURL)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_ShowFile = {
'MIME_type' : 'MIME',
'Window_ID' : 'WIND',
'URL' : 'URL ',
}
def ShowFile(self, _object, _attributes={}, **_arguments):
"""ShowFile: Similar to OpenDocuments, except that it specifies the parent URL, and MIME type of the file
Required argument: File to open
Keyword argument MIME_type: MIME type
Keyword argument Window_ID: Window to open the file in
Keyword argument URL: Use this as a base URL
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: Window ID of the loaded window. 0 means ShowFile failed, FFFFFFF means that data was not appropriate type to display in the browser.
"""
_code = 'WWW!'
_subcode = 'SHWF'
aetools.keysubst(_arguments, self._argmap_ShowFile)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_cancel_progress = {
'in_window' : 'WIND',
}
def cancel_progress(self, _object=None, _attributes={}, **_arguments):
"""cancel progress: Interrupts the download of the document in the given window
Required argument: progress ID, obtained from the progress app
Keyword argument in_window: window ID of the progress to cancel
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'WWW!'
_subcode = 'CNCL'
aetools.keysubst(_arguments, self._argmap_cancel_progress)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def find_URL(self, _object, _attributes={}, **_arguments):
"""find URL: If the file was downloaded by Netscape,
|
you can call FindURL to find out the URL used to download the file.
Required argument: File spec
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: The URL
"""
_code = 'WWW!'
_subcode = 'FURL'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def get_window_info(self, _object=None, _attributes={}, **_arguments):
"""get window info: Returns the information about the window as a list. Currently the list contains the window title and the URL. You can get the same information using standard Apple Event GetProperty.
Required argument: window ID
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: undocumented, typecode 'list'
"""
_code = 'WWW!'
_subcode = 'WNFO'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def list_windows(self, _no_object=None, _attributes={}, **_arguments):
"""list windows: Lists the IDs of all the hypertext windows
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: List of unique IDs of all the hypertext windows
"""
_code = 'WWW!'
_subcode = 'LSTW'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_parse_anchor = {
'relative_to' : 'RELA',
}
def parse_anchor(self, _object, _attributes={}, **_arguments):
"""parse anchor: Resolves the relative URL
Required argument: Main URL
Keyword argument relative_to: Relative URL
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: Parsed URL
"""
_code = 'WWW!'
_subcode = 'PRSA'
aetools.keysubst(_arguments, self._argmap_parse_anchor)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def register_URL_echo(self, _object=None, _attributes={}, **_arguments):
"""register URL echo: Registers the \xd2echo\xd3 application. Each download from now on will be echoed to this application.
Required argument: Application signature
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'WWW!'
_subcode = 'RGUE'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
|
acaldero/moon
|
app-mon.py
|
Python
|
gpl-3.0
| 5,026
| 0.015519
|
#!/usr/bin/python -u
#
# Application Monitoring (version 1.5)
# Alejandro Calderon @ ARCOS.INF.UC3M.ES
# GPL 3.0
#
import math
import time
import psutil
import threading
import multiprocessing
import subprocess
import os
import sys
import getopt
import json
def print_record ( format, data ):
try:
if (format == 'json'):
print(json.dumps(data))
if (format == 'csv'):
for item in data:
if item != 'type':
sys.stdout.write('"' + str(data[item]) + '";')
print '"' + data['type'] + '"'
sys.stdout.flush()
except IOError, e:
sys.exit()
def mon ():
global format, rrate, delta, p_id, p_obj
global last_info_m_time, last_info_m_usage
global last_info_c_time, last_info_c_usage
global last_info_n_time, last_info_n_usage
global last_info_d_time, last_info_d_usage
info_time = time.time()
# 1.- Check Memory
info_m_usage = p_obj.memory_percent(memtype="vms")
info_delta = math.fabs(info_m_usage - last_info_m_usage)
if info_delta >= delta:
data = { "type": "memory",
"timestamp": info_time,
"timedelta": info_time - last_info_m_time,
"usagepercent": last_info_m_usage,
"usageabsolute": p_obj.memory_info()[1] } ;
print_record(format, data)
last_info_m_time = info_time
last_info_m_usage = info_m_usage
# 2.- Check CPU
info_c_usage = p_obj.cpu_percent()
info_delta = math.fabs(info_c_usage - last_info_c_usage)
if info_delta >= delta:
info_ncores = multiprocessing.cpu_count()
info_cpufreq = 0.0
proc = subprocess.Popen(["cat","/proc/cpuinfo"],stdout=subprocess.PIPE)
out, err = proc.communicate()
for line in out.split("\n"):
if "cpu MHz" in line:
info_cpufreq = info_cpufreq + float(line.split(":")[1])
info_cpufreq = info_cpufreq / info_ncores
# CPU freq * time * CPU usage * # cores
data = { "type": "compute",
"timestamp": info_time,
"cpufreq": info_cpufreq,
"timedelta": info_time - last_info_c_time,
"usagepercent": last_info_c_usage,
"usageabsolute": info_cpufreq * (info_time - last_info_c_time) * last_info_c_usage * info_ncores,
"ncores": info_ncores } ;
print_record(format, data)
last_info_c_time = info_time
last_info_c_usage = info_c_usage
# 3.- Check Network
netinfo = p_obj.connections()
info_n_usage = len(netinfo)
info_delta = math.fabs(info_n_usage - last_info_n_usage)
if info_delta > 0:
# connections
data = { "type": "network",
"timestamp": info_time,
"timedelta": info_time - last_info_n_time,
"usageabsolute": last_info_n_usage } ;
print_record(format, data)
last_info_n_time = info_time
last_info_n_usage = info_n_usage
    # 4.- Schedule the next check...
threading.Timer(rrate, mon).start()
def main(argv):
global format, rrate, delta, p_id, p_obj
global last_info_m_usage, last_info_c_usage, last_info_n_usage, last_info_d_usage
# get parameters
try:
        opts, args = getopt.getopt(argv,"h:f:r:d:p:",["format=","rate=","delta=","pid="])
except getopt.GetoptError:
print 'app-mon.py -f <format> -r <rate> -d <delta> -p <pid>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'app-mon.py -f <format> -r <rate> -d <delta> -p <pid>'
sys.exit()
elif opt in ("-f", "--format"):
format = str(arg)
elif opt in ("-p", "--pid"):
p_id = int(arg)
elif opt in ("-r", "--rate"):
rrate = float(arg)
elif opt in ("-d", "--delta"):
delta = float(arg)
# get proccess object from pid
p_obj = psutil.Process(p_id)
# get initial information
last_info_m_usage = p_obj.memory_percent()
last_info_c_usage = p_obj.cpu_percent()
last_info_n_usage = len(p_obj.connections())
# start simulation
threading.Timer(rrate, mon).start()
# initial values
start_time = time.time()
last_info_m_time = start_time
last_info_c_time = start_time
last_info_n_time = start_time
format = 'csv'
rrate = 1.0
delta = 0.5
p_id = os.getpid()
if __name__ == "__main__":
try:
main(sys.argv[1:])
except psutil.NoSuchProcess:
print "app-mon: the execution of process with pid '" + str(p_id) + "' has ended."
|
gmr/queries
|
tests/utils_tests.py
|
Python
|
bsd-3-clause
| 4,097
| 0
|
"""
Tests for functionality in the utils module
"""
import platform
import unittest
import mock
import queries
from queries import utils
class GetCurrentUserTests(unittest.TestCase):
@mock.patch('pwd.getpwuid')
def test_get_current_user(self, getpwuid):
"""get_current_user returns value from pwd.getpwuid"""
getpwuid.return_value = ['mocky']
self.assertEqual(utils.get_current_user(), 'mocky')
class PYPYDetectionTests(unittest.TestCase):
def test_pypy_flag(self):
"""PYPY flag is set properly"""
self.assertEqual(queries.utils.PYPY,
platform.python_implementation() == 'PyPy')
class URICreationTests(unittest.TestCase):
def test_uri_with_password(self):
expectation = 'postgresql://foo:bar@baz:5433/qux'
self.assertEqual(queries.uri('baz', 5433, 'qux', 'foo', 'bar'),
expectation)
def test_uri_without_password(self):
expectation = 'postgresql://foo@baz:5433/qux'
self.assertEqual(queries.uri('baz', 5433, 'qux', 'foo'),
expectation)
def test_default_uri(self):
expectation = 'postgresql://postgres@localhost:5432/postgres'
self.assertEqual(queries.uri(), expectation)
class URLParseTestCase(unittest.TestCase):
URI = 'postgresql://foo:bar@baz:5444/qux'
def test_urlparse_hostname(self):
"""hostname should match expectation"""
self.assertEqual(utils.urlparse(self.URI).hostname, 'baz')
def test_urlparse_port(self):
"""port should match expectation"""
self.assertEqual(utils.urlparse(self.URI).port, 5444)
def test_urlparse_path(self):
"""path should match expectation"""
self.assertEqual(utils.urlparse(self.URI).path, '/qux')
def test_urlparse_username(self):
"""username should match expectation"""
self.assertEqual(utils.urlparse(self.URI).username, 'foo')
def test_urlparse_password(self):
"""password should match expectation"""
self.assertEqual(utils.urlparse(self.URI).password, 'bar')
class URIToKWargsTestCase(unittest.TestCase):
URI = ('postgresql://foo:c%23%5E%25%23%27%24%40%3A@baz:5444/qux?'
'options=foo&options=bar&keepalives=1&invalid=true')
def test_uri_to_kwargs_host(self):
"""hostname should match expectation"""
self.assertEqual(utils.uri_to_kwargs(self.URI)['host'], 'baz')
def test_uri_to_kwargs_port(self):
"""port should match expectation"""
self.assertEqual(utils.uri_to_kwargs(self.URI)['port'], 5444)
def test_uri_to_kwargs_dbname(self):
"""dbname should match expectation"""
self.assertEqual(utils.uri_to_kwargs(self.URI)['dbname'], 'qux')
def test_uri_to_kwargs_username(self):
"""user should match expectation"""
self.assertEqual(utils.uri_to_kwargs(self.URI)['user'], 'foo')
def test_uri_to_kwargs_password(self):
"""password should match expectation"""
self.assertEqual(utils.uri_to_kwargs(self.URI)['password'],
'c#^%#\'$@:')
def test_uri_to_kwargs_options(self):
"""options should match expectation"""
self.assertEqual(utils.uri_to_kwargs(self.URI)['options'],
['foo', 'bar'])
def test_uri_to_kwargs_keepalive(self):
"""keepalive should match expectation"""
self.assertEqual(utils.uri_to_kwargs(self.URI)['keepalives'], 1)
def test_uri_to_kwargs_invalid(self):
"""invalid query argument should not be in kwargs"""
        self.assertNotIn('invalid', utils.uri_to_kwargs(self.URI))
def test_unix_socket_path_format_one(self):
socket_path = 'postgresql://%2Fvar%2Flib%2Fpostgresql/dbname'
result = utils.uri_to_kwargs(socket_path)
self.assertEqual(result['host'], '/var/lib/postgresql')
def test_unix_socket_path_format2(self):
socket_path = 'postgresql:///postgres?host=/tmp/'
result = utils.uri_to_kwargs(socket_path)
self.assertEqual(result['host'], '/tmp/')
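
Condensed, the behaviour these tests pin down looks like this (values mirror the fixtures above; assumes the queries package is importable):

from queries import utils

kwargs = utils.uri_to_kwargs('postgresql://foo:bar@baz:5444/qux')
print(kwargs['host'], kwargs['port'], kwargs['dbname'])
# -> baz 5444 qux, with user='foo' and password='bar' also present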
|
explosiveduck/ed2d
|
ed2d/debug.py
|
Python
|
bsd-2-clause
| 203
| 0.004926
|
from __future__ import print_function
from ed2d.cmdargs import CmdArgs
debugEnabled = CmdArgs.add_arg('debug', bool, 'Enable debug output.')
def debug(*args):
if debugEnabled:
print(*args)
|
mstoppert/adventofcode
|
23/answer.py
|
Python
|
mit
| 728
| 0.005495
|
file = open('input.txt')
instructions = []
for line in file.readlines():
    instructions.append(line.replace(',', '').strip().split(' '))
regs = {
'a': 0,
'b': 0
}
ptr = 0
while True:
if ptr not in range(len(instructions)):
break
instr = instructions[ptr]
inst, r = instr[0], instr[1]
di = 1
if inst == 'inc':
regs[r] += 1
elif inst == 'tpl':
regs[r] *= 3
elif inst == 'hlf':
regs[r] //= 2
elif inst == 'jie':
offset = instr[2]
if regs[r] % 2 == 0: di = int(offset)
elif inst == 'jio':
offset = instr[2]
        if regs[r] == 1: di = int(offset)
elif inst == 'jmp':
di = int(r)
ptr += di
print regs
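
For reference, the instruction shapes this loop expects in input.txt, shown as parsed tokens (illustrative lines, not the actual puzzle input):

# inc a       -> ['inc', 'a']
# hlf a       -> ['hlf', 'a']
# tpl a       -> ['tpl', 'a']
# jmp +7      -> ['jmp', '+7']
# jie a, +4   -> ['jie', 'a', '+4']   (the comma is stripped by replace(',', ''))
# jio a, +8   -> ['jio', 'a', '+8']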
|
psywolf/cardfight
|
cardfight.py
|
Python
|
gpl-3.0
| 6,620
| 0.033384
|
#!/usr/bin/python3
import random
import copy
import enum
import jsonpickle
import pickle
import argparse
from sharedlib import Attr, Card, Config
import json
class Die:
def __init__(self, attack, defense, magic, mundane, numSides=12):
self.attack = attack
self.defense = defense
self.magic = magic
self.mundane = mundane
self.numSides = numSides
class Winner(enum.Enum):
attacker = 1
defender = -1
draw = 0
def fight(attacker, defender):
winner = attack(attacker, defender)
if winner != None:
return winner
if Attr.doubleAttack in attacker.attrs:
return attack(attacker, defender)
def is_rampage():
redDie = diceTypes["red"]
greenDie = diceTypes["green"]
return random.randint(1, redDie.numSides) <= redDie.mundane or random.randint(1, greenDie.numSides) <= greenDie.mundane
def calculateDamage(attacker, defender):
if Attr.ethereal in defender.attrs and roll({"black": 2}, "magic") > 0:
return 0
totalAttack = roll(attacker.dice, "attack")
totalDefense = roll(defender.dice, "defense")
if Attr.damageReduction in defender.attrs:
totalDefense += 1
damage = max(0, totalAttack - totalDefense)
if Attr.anaconda in attacker.attrs:
damage += roll({"orange": damage}, "mundane")
return damage
def roll(dice, successSide):
total = 0
for key in dice:
diceType = diceTypes[key]
for _ in range(0,dice[key]):
if random.randint(1,diceType.numSides) <= getattr(diceType,successSide):
total += 1
return total
def attack(attacker, defender):
damage = None
if Attr.theroll in defender.attrs:
damage = 1
else:
damage = calculateDamage(attacker, defender)
if Attr.magus in attacker.attrs and damage == 0:
damage = roll({"orange":1}, "magic")
else:
if damage == 0:
if Attr.counterstrike in defender.attrs:
attacker.wounds += 1
else:
if Attr.gorgon in attacker.attrs and roll(attacker.dice, "magic") >= 2:
return attacker
if damage > defender.currentLife():
damage = defender.currentLife()
if Attr.lifedrain in attacker.attrs and Attr.construct not in defender.attrs:
attacker.wounds = max(0, attacker.wounds - damage)
defender.wounds += damage
if defender.currentLife() <= 0:
return attacker
    if attacker.currentLife() <= 0:
return defender
return None
def is_odd(x):
return x % 2 != 0
def getStats(attacker, defender, numFights, maxTurns, scriptable):
outcomes = dict()
for w in Winner:
outcomes[w] = []
for i in range(0,numFights):
a = copy.copy(attacker)
d = copy.copy(defender)
winner, turns = fightToTheBitterEnd(a, d, maxTurns)
outcomes[winner].append(turns)
wins = len(outcomes[Winner.attacker])
losses = len(outcomes[Winner.defender])
draws = len(outcomes[Winner.draw])
if scriptable:
output = dict()
output["WINS"] = wins
output["LOSSES"] = losses
output["DRAWS"] = draws
print(json.dumps(output))
else:
print("attacker ({}) winrate: {}%\n\tavg win on turn {}".format(
attacker.name, 100 * wins/numFights, winsToAvgTurn(outcomes[Winner.attacker])))
print("defender ({}) winrate: {}%\n\tavg win on turn {}".format(
defender.name, 100 * losses/numFights, winsToAvgTurn(outcomes[Winner.defender])))
if draws > 0:
print("drawrate (after {} turns): {}%".format(maxTurns, 100 * draws/numFights))
if wins > losses:
return True
elif losses > wins:
return False
else:
return None
def winsToAvgTurn(winTimes):
if len(winTimes) == 0:
return "N/A"
return round(sum(winTimes)/len(winTimes))
def fightToTheBitterEnd(attacker, defender, maxTurns):
w, t = fightToTheDeath(attacker, defender, maxTurns)
deadCard = None
winCard = None
if w == Winner.attacker:
winCard = attacker
deadCard = defender
elif w == Winner.defender:
winCard = defender
deadCard = attacker
if deadCard != None and (Attr.isle in deadCard.attrs or (Attr.wyrm in deadCard.attrs and winCard.currentLife() <= 1)):
return Winner.draw, t
return w, t
def takeTurn(attacker, defender, distance):
if Attr.theroll in attacker.attrs:
attacker.wounds += 1
if attacker.currentLife() <= 0:
return defender, distance
#print("turn",i)
if distance > attacker.range:
distance = max(1,distance - attacker.move, attacker.range)
#print("{} moved. dintance is now {}".format(attacker.name, distance))
if distance > attacker.range:
return None, distance
winner = fight(attacker, defender)
#print("{}({}) attacked {}({})".format(attacker.name, attacker.life, defender.name, defender.life))
if winner != None:
return winner, distance
if Attr.falconer in attacker.attrs and defender.range + defender.move < distance + attacker.move:
#move just out of reach
distance = defender.range + defender.move + 1
return None, distance
def fightToTheDeath(initialAttacker, initialDefender, maxTurns):
distance = max(initialAttacker.range, initialDefender.range) + 1
#print("distance:",distance)
winner = None
i = 1
for i in range(1,maxTurns+1):
attacker = None
defender = None
if is_odd(i):
attacker = initialAttacker
defender = initialDefender
else:
attacker = initialDefender
defender = initialAttacker
winner, distance = takeTurn(attacker, defender, distance)
if winner != None:
break
if Attr.theroll in attacker.attrs or (Attr.rampage in attacker.attrs and is_rampage()):
winner, distance = takeTurn(attacker, defender, distance)
if winner != None:
break
if winner == None:
return Winner.draw, i
elif winner.name == initialAttacker.name:
return Winner.attacker, i
else:
return Winner.defender, i
if __name__ == '__main__':
diceTypes = None
with open("dice.json") as f:
diceTypes = jsonpickle.decode(f.read())
parser = argparse.ArgumentParser(description='Fight two cards to the death')
parser.add_argument('card1', metavar='Card_1', type=str, help='the file path of card #1')
parser.add_argument('card2', metavar='Card_2', type=str, help='the file path of card #2')
parser.add_argument('-s','--scriptable', action="store_true", help='print output in a more easily parsable way')
parser.add_argument('-a', '--attack-only', action="store_true", help='attack only (don\'t run the simulation both ways)')
args = parser.parse_args()
card1 = None
with open(args.card1, 'rb') as f:
card1 = pickle.load(f)
card2 = None
with open(args.card2, 'rb') as f:
card2 = pickle.load(f)
config = None
with open("config.json") as f:
config = jsonpickle.decode(f.read())
print()
getStats(card1, card2, config.numFights, config.maxTurns, args.scriptable)
print()
if not args.attack_only:
getStats(card2, card1, config.numFights, config.maxTurns, args.scriptable)
print()
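
A hedged sketch of the dice.json shape the jsonpickle load above implies; the class path and face counts here are guesses for illustration only:

# {
#   "red":   {"py/object": "cardfight.Die", "attack": 4, "defense": 3,
#             "magic": 2, "mundane": 2, "numSides": 12},
#   "green": {"py/object": "cardfight.Die", "attack": 3, "defense": 4,
#             "magic": 1, "mundane": 2, "numSides": 12},
#   ...
# }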
|
mpirnat/adventofcode
|
day04/test.py
|
Python
|
mit
| 521
| 0.005758
|
#!/usr/bin/env python
import unittest
from day04 import find_integer
class Test(unittest.TestCase):
cases = (
        ('abcdef', 609043),
('pqrstuv', 1048970),
)
def test_gets_integer(self):
for (key, expected) in self.cases:
result = find_integer(key, zeroes=5)
self.assertEqual(result, expected,
"Expected {key} to yield {expected}, but got {result}".\
format(**locals()))
if __name__ == '__main__':
unittest.main()
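
Both cases are satisfied by MD5 mining; a self-contained sketch consistent with them (the real find_integer lives in day04.py and may differ):

import hashlib
import itertools

def find_integer_sketch(key, zeroes=5):
    """Smallest positive i such that md5(key + str(i)) starts with `zeroes` hex zeroes."""
    prefix = '0' * zeroes
    for i in itertools.count(1):
        digest = hashlib.md5((key + str(i)).encode()).hexdigest()
        if digest.startswith(prefix):
            return i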
|
google-research/google-research
|
gfsa/model/edge_supervision_models_test.py
|
Python
|
apache-2.0
| 8,558
| 0.00187
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for gfsa.model.edge_supervision_models."""
import functools
import textwrap
from absl.testing import absltest
from absl.testing import parameterized
import dataclasses
import flax
import gin
import jax
import jax.numpy as jnp
import numpy as np
from gfsa import automaton_builder
from gfsa import sparse_operator
from gfsa.datasets import graph_bundle
from gfsa.model import edge_supervision_models
class EdgeSupervisionModelsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
gin.clear_config()
def test_variants_from_edges(self):
example = graph_bundle.zeros_like_padded_example(
graph_bundle.PaddingConfig(
static_max_metadata=automaton_builder.EncodedGraphMetadata(
num_nodes=5, num_input_tagged_nodes=0),
max_initial_transitions=0,
max_in_tagged_transitions=0,
max_edges=8))
example = dataclasses.replace(
example,
graph_metadata=automaton_builder.EncodedGraphMetadata(
num_nodes=4, num_input_tagged_nodes=0),
edges=sparse_operator.SparseCoordOperator(
input_indices=jnp.array([[0], [0], [0], [1], [1], [2], [0], [0]]),
output_indices=jnp.array([[1, 2], [2, 3], [3, 0], [2, 0], [0, 2],
[0, 3], [0, 0], [0, 0]]),
values=jnp.array([1, 1, 1, 1, 1, 1, 0, 0])))
weights = edge_supervision_models.variants_from_edges(
example,
automaton_builder.EncodedGraphMetadata(
num_nodes=5, num_input_tagged_nodes=0),
variant_edge_type_indices=[2, 0],
num_edge_types=3)
expected = np.array([
[[1, 0, 0], [1, 0, 0], [1, 0, 0], [0, 1, 0]],
[[1, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]],
[[1, 0, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]],
[[0, 0, 1], [1, 0, 0], [1, 0, 0], [1, 0, 0]],
], np.float32)
# Only assert on the non-padded part.
np.testing.assert_allclose(weights[:4, :4], expected)
def test_ggtnn_steps(self):
gin.parse_config(
textwrap.dedent("""\
edge_supervision_models.ggnn_steps.iterations = 10
graph_layers.LinearMessagePassing.message_dim = 5
"""))
_, params = edge_supervision_models.ggnn_steps.init(
jax.random.PRNGKey(0),
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32))
# This component should only contain one step block, with two sublayers.
self.assertEqual(set(params.keys()), {"step"})
self.assertLen(params["step"], 2)
# Gradients should work.
outs, vjpfun = jax.vjp(
functools.partial(
edge_supervision_models.ggnn_steps.call,
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32)),
params,
)
vjpfun(outs)
@parameterized.named_parameters(
{
"testcase_name":
"shared",
"expected_block_count":
1,
"config":
textwrap.dedent("""\
transformer_steps.layers = 3
transformer_steps.share_weights = True
transformer_steps.mask_to_neighbors = False
NodeSelfAttention.heads = 2
NodeSelfAttention.query_key_dim = 3
NodeSelfAttention.value_dim = 4
"""),
}, {
"testcase_name":
"unshared",
"expected_block_count":
3,
"config":
textwrap.dedent("""\
transformer_steps.layers = 3
transformer_steps.share_weights = False
transformer_steps.mask_to_neighbors = False
NodeSelfAttention.heads = 2
NodeSelfAttention.query_key_dim = 3
NodeSelfAttention.value_dim = 4
"""),
}, {
"testcase_name":
"shared_masked",
"expected_block_count":
1,
"config":
textwrap.dedent("""\
transformer_steps.layers = 3
transformer_steps.share_weights = True
transformer_steps.mask_to_neighbors = True
NodeSelfAttention.heads = 2
NodeSelfAttention.query_key_dim = 3
NodeSelfAttention.value_dim = 4
"""),
})
def test_transformer_steps(self, config, expected_block_count):
gin.parse_config(config)
_, params = edge_supervision_models.transformer_steps.init(
jax.random.PRNGKey(0),
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32),
neighbor_mask=jnp.zeros((5, 5), jnp.float32),
num_real_nodes_per_graph=4)
# This component should contain the right number of blocks.
    self.assertLen(params, expected_block_count)
for block in params.values():
# Each block contains 4 sublayers.
self.assertLen(block, 4)
# Gradients should work.
outs, vjpfun = jax.vjp(
functools.partial(
edge_supervision_models.transformer_steps.call,
            node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32),
neighbor_mask=jnp.zeros((5, 5), jnp.float32),
num_real_nodes_per_graph=4),
params,
)
vjpfun(outs)
def test_transformer_steps_masking(self):
"""Transformer should mask out padding even if not masked to neigbors."""
gin.parse_config(
textwrap.dedent("""\
transformer_steps.layers = 1
transformer_steps.share_weights = False
transformer_steps.mask_to_neighbors = False
NodeSelfAttention.heads = 2
NodeSelfAttention.query_key_dim = 3
NodeSelfAttention.value_dim = 4
"""))
with flax.deprecated.nn.capture_module_outputs() as outputs:
edge_supervision_models.transformer_steps.init(
jax.random.PRNGKey(0),
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32),
neighbor_mask=jnp.zeros((5, 5), jnp.float32),
num_real_nodes_per_graph=4)
attention_weights, = (v[0]
for k, v in outputs.as_dict().items()
if k.endswith("attend/attention_weights"))
expected = np.array([[[0.25, 0.25, 0.25, 0.25, 0.0]] * 5] * 2)
np.testing.assert_allclose(attention_weights, expected)
def test_nri_steps(self):
gin.parse_config(
textwrap.dedent("""\
graph_layers.NRIEdgeLayer.allow_non_adjacent = True
graph_layers.NRIEdgeLayer.mlp_vtoe_dims = [4, 4]
nri_steps.mlp_etov_dims = [8, 8]
nri_steps.with_residual_layer_norm = True
nri_steps.layers = 3
"""))
_, params = edge_supervision_models.nri_steps.init(
jax.random.PRNGKey(0),
node_embeddings=jnp.zeros((5, 3), jnp.float32),
edge_embeddings=jnp.zeros((5, 5, 4), jnp.float32),
num_real_nodes_per_graph=4)
# This component should contain the right number of blocks.
self.assertLen(params, 3)
for block in params.values():
# Each block contains 5 sublayers:
# - NRI message pass
# - Three dense layers (from mlp_etov_dims, then back to embedding space)
# - Layer norm
self.assertLen(block, 5)
# Gr
|
pvsousalima/marolo
|
models/20_validators.py
|
Python
|
mit
| 6,559
| 0
|
# coding: utf-8
from smarthumb import SMARTHUMB
from gluon.contrib.imageutils import RESIZE
# Noticias
db.noticias.titulo.requires = [
IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
IS_NOT_IN_DB(db, db.noticias.titulo,
error_message=T('Título deve ser único.')),
IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.noticias.resumo.requires = [
IS_NOT_EMPTY(
error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.noticias.conteudo.requires = [
IS_NOT_EMPTY(
error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(5000, error_message=T('Tamanho máximo de 5000 caracteres.'))
]
db.noticias.permalink.compute = lambda registro: IS_SLUG()(registro.titulo)[0]
db.noticias.foto.requires = [
IS_EMPTY_OR(IS_IMAGE(
error_message=T('Arquivo enviado deve ser uma imagem.'))),
IS_LENGTH(100 * 1024, # 100kb
error_message=T('Arquivo muito grande!'
'Tamanho máximo permitido é 100kb'))
]
db.noticias.thumbnail.compute = lambda registro: SMARTHUMB(registro.foto,
(200, 200))
db.noticias.status.requires = IS_IN_SET(
['publicado', 'não publicado'],
error_message=T('Por favor selecione uma das opções')
)
# Membros
db.membros.nome.requires = [
IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
IS_NOT_IN_DB(db, db.membros.nome,
error_message=T('Nome deve ser único.')),
IS_LENGTH(64, error_message=T('Tamanho máximo de 64 caracteres.'))
]
db.membros.foto.requires = [
IS_EMPTY_OR(
IS_IMAGE(error_message=T('Arquivo enviado deve ser uma imagem.'))
),
IS_LENGTH(100 * 1024, # 100kb
error_message=T('Arquivo muito grande!'
'Tamanho máximo permitido é 100kb')),
IS_EMPTY_OR(RESIZE(200, 200))
]
db.membros.email.requires = IS_EMAIL(error_message=T("Entre um email válido"))
# Eventos
db.eventos.nome.requires = [
IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.eventos.endereco.requires = [
IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.eventos.descricao.requires = [
IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(256, error_message=T('Tamanho máximo de 256 caracteres.'))
]
db.eventos.banner.requires = [
IS_EMPTY_OR(IS_IMAGE(
error_message=T('Arquivo enviado deve ser uma imagem.'))),
IS_LENGTH(100 * 1024, # 100kb
error_message=T('Arquivo muito grande!'
'Tamanho máximo permitido é 100kb'))
]
db.eventos.banner_thumb.compute = lambda registro: SMARTHUMB(registro.banner,
(200, 200))
# Apoiadores
db.apoiadores.nome.requires = [
IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(64, error_message=T('Tamanho máximo de 64 caracteres.'))
]
db.apoiadores.tipo.requires = IS_IN_SET(
['apoiador', 'patrocinador', 'parceiro'],
error_message=T('Por favor selecione uma das opções')
)
db.apoiadores.logo.requires = [
IS_EMPTY_OR(
IS_IMAGE(error_message=T('Arquivo enviado deve ser uma imagem.'))
),
IS_LENGTH(100 * 1024, # 100kb
error_message=T('Arquivo muito grande!'
'Tamanho máximo permitido é 100kb'))
]
db.apoiadores.logo_thumb.compute = lambda registro: SMARTHUMB(registro.logo,
(200, 200))
db.apoiadores.url.requires = [
IS_NOT_EMPTY(error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(256, error_message=T('Tamanho máximo de 256 caracteres.')),
IS_URL()
]
# Produtos
db.produtos.nome.requires = [
IS_NOT_EMPTY(
error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(64, error_message=T('Tamanho máximo de 64 caracteres.'))
]
db.produtos.descricao.requires = [
IS_NOT_EMPTY(
error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(128, error_message=T('Tamanho máximo de 128 caracteres.'))
]
db.produtos.foto.requires = [
IS_EMPTY_OR(
IS_IMAGE(error_message=T('Arquivo enviado deve ser uma imagem.'))
),
IS_LENGTH(100 * 1024, # 100kb
error_message=T('Arquivo muito grande!'
'Tamanho máximo permitido é 100kb'))
]
db.produtos.thumb.compute = lambda registro: SMARTHUMB(registro.foto,
(200, 200))
db.produtos.preco.requires = IS_EMPTY_OR(IS_FLOAT_IN_RANGE(
minimum=0.1,
dot=',',
error_message=T('Valor inválido para preço. '
'Quando especificado deve ser maior do que 0'
' e no formato 2,50.')
))
# Carousel
db.carousel.nome_aba.requires = [
IS_NOT_EMPTY(
error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(16, error_message=T('Tamanho máximo de 16 caracteres.'))
]
db.carousel.descricao_aba.requires = [
IS_NOT_EMPTY(
error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(24, error_message=T('Tamanho máximo de 24 caracteres.'))
]
db.carousel.titulo.requires = [
IS_NOT_EMPTY(
error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(16, error_message=T('Tamanho máximo de 16 caracteres.'))
]
db.carousel.descricao.requires = [
IS_NOT_EMPTY(
error_message=T('Este campo não pode ficar vazio!')),
    IS_LENGTH(256, error_message=T('Tamanho máximo de 256 caracteres.'))
]
db.carousel.imagem.requires = [
IS_EMPTY_OR(
IS_IMAGE(error_message=T('Arquivo enviado deve ser uma imagem.'))
),
IS_LENGTH(100 * 1024, # 100kb
              error_message=T('Arquivo muito grande!'
'Tamanho máximo permitido é 100kb')),
IS_EMPTY_OR(RESIZE(1200, 400))
]
db.carousel.url.requires = [
IS_NOT_EMPTY(
error_message=T('Este campo não pode ficar vazio!')),
IS_LENGTH(256, error_message=T('Tamanho máximo de 256 caracteres.')),
IS_URL()
]
db.carousel.status.requires = IS_IN_SET(
['ativo', 'inativo'],
error_message=T('Por favor selecione uma das opções')
)
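
For context, web2py validators are callables that return a (value, error) pair, which is how the chains above are applied field by field; a sketch (shown as comments because it needs a gluon environment):

# value, error = IS_LENGTH(16)('um nome comprido demais')
# error is not None    -> validation failed, the message is shown to the user
# value, error = IS_LENGTH(16)('curto')
# error is None        -> value accepted, the next validator in the list runs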
|
alexgorban/models
|
research/object_detection/core/box_list_ops.py
|
Python
|
apache-2.0
| 43,889
| 0.004124
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List operations.
Example box operations that are supported:
* areas: compute bounding box areas
* iou: pairwise intersection-over-union scores
* sq_dist: pairwise distances between bounding boxes
Whenever box_list_ops functions output a BoxList, the fields of the incoming
BoxList are retained unless documented otherwise.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow as tf
from object_detection.core import box_list
from object_detection.utils import ops
from object_detection.utils import shape_utils
class SortOrder(object):
"""Enum class for sort order.
Attributes:
ascend: ascend order.
descend: descend order.
"""
ascend = 1
descend = 2
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
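# Editor's usage sketch (assumed toy data, not from the original file):
#   boxes = box_list.BoxList(
#       tf.constant([[0., 0., 1., 1.], [0.25, 0.25, 0.75, 0.75]]))
#   area(boxes)  # evaluates to [1.0, 0.25]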
def height_width(boxlist, scope=None):
"""Computes height and width of boxes in boxlist.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
Height: A tensor with shape [N] representing box heights.
Width: A tensor with shape [N] representing box widths.
"""
with tf.name_scope(scope, 'HeightWidth'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1])
def scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
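# Editor's usage sketch: boxes in normalized coordinates can be mapped to
# absolute pixel coordinates for an image of height 480 and width 640 with:
#   absolute_boxes = scale(normalized_boxes, 480.0, 640.0)
# Extra fields (scores, classes, ...) are carried over by _copy_extra_fields.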
def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None):
"""Clip bounding boxes to a window.
This op clips any input bounding boxes (represented by bounding box
corners) to a window, optionally filtering out boxes that do not
overlap at all with the window.
Args:
boxlist: BoxList holding M_in boxes
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window to which the op should clip boxes.
filter_nonoverlapping: whether to filter out boxes that do not overlap at
all with the window.
scope: name scope.
Returns:
a BoxList holding M_out boxes where M_out <= M_in
"""
with tf.name_scope(scope, 'ClipToWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)
y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min)
x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min)
x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)
clipped = box_list.BoxList(
tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],
1))
clipped = _copy_extra_fields(clipped, boxlist)
if filter_nonoverlapping:
areas = area(clipped)
nonzero_area_indices = tf.cast(
tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)
clipped = gather(clipped, nonzero_area_indices)
return clipped
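# Editor's usage sketch: clipping to the unit window and (by default)
# discarding boxes whose clipped area is zero:
#   window = tf.constant([0.0, 0.0, 1.0, 1.0])
#   clipped = clip_to_window(boxes, window)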
def prune_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall outside a given window.
This function prunes bounding boxes that even partially fall outside the given
window. See also clip_to_window which only prunes bounding boxes that fall
completely outside the window, and clips any bounding boxes that partially
overflow.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
of the window
scope: name scope.
Returns:
pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
with tf.name_scope(scope, 'PruneOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
def prune_completely_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall completely outside of the given window.
The function clip_to_window prunes bounding boxes that fall
completely outside the window, but also clips any bounding boxes that
partially overflow. This function does not clip partially overflowing boxes.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
of the window
scope: name scope.
Returns:
pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
the window.
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
  with tf.name_scope(scope, 'PruneCompletelyOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
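# Editor's worked example contrasting the three window ops above, for the
# window [0, 0, 1, 1] and a box [0.5, 0.5, 1.5, 1.5]:
#   clip_to_window                  -> box becomes [0.5, 0.5, 1.0, 1.0]
#   prune_outside_window            -> box is dropped (it sticks out)
#   prune_completely_outside_window -> box is kept unchanged (it overlaps)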
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
  Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
    intersect_heights = tf.maximum(
        0.0, all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
    all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
    intersect_widths = tf.maximum(
        0.0, all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths
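# Editor's note: for boxlists of sizes N and M the result is [N, M]. These
# pairwise intersections are the building block for IOU-style scores, e.g.
# intersection / (area1 + area2 - intersection) with broadcast-expanded areas.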
|
hrashk/sympy
|
sympy/concrete/tests/test_gosper.py
|
Python
|
bsd-3-clause
| 7,307
| 0.002053
|
"""Tests for Gosper's algorithm for hypergeometric summation. """
from sympy import binomial, factorial, gamma, Poly, S, simplify, sqrt, exp, log, Symbol
from sympy.abc import a, b, j, k, m, n, r, x
from sympy.concrete.gosper import gosper_normal, gosper_sum, gosper_term
def test_gosper_normal():
assert gosper_normal(4*n + 5, 2*(4*n + 1)*(2*n + 3), n) == \
(Poly(S(1)/4, n), Poly(n + S(3)/2), Poly(n + S(1)/4))
def test_gosper_term():
assert gosper_term((4*k + 1)*factorial(
k)/factorial(2*k + 1), k) == (-k - S(1)/2)/(k + S(1)/4)
def test_gosper_sum():
assert gosper_sum(1, (k, 0, n)) == 1 + n
assert gosper_sum(k, (k, 0, n)) == n*(1 + n)/2
assert gosper_sum(k**2, (k, 0, n)) == n*(1 + n)*(1 + 2*n)/6
assert gosper_sum(k**3, (k, 0, n)) == n**2*(1 + n)**2/4
assert gosper_sum(2**k, (k, 0, n)) == 2*2**n - 1
assert gosper_sum(factorial(k), (k, 0, n)) is None
assert gosper_sum(binomial(n, k), (k, 0, n)) is None
assert gosper_sum(factorial(k)/k**2, (k, 0, n)) is None
assert gosper_sum((k - 3)*factorial(k), (k, 0, n)) is None
assert gosper_sum(k*factorial(k), k) == factorial(k)
assert gosper_sum(
k*factorial(k), (k, 0, n)) == n*factorial(n) + factorial(n) - 1
assert gosper_sum((-1)**k*binomial(n, k), (k, 0, n)) == 0
assert gosper_sum((
-1)**k*binomial(n, k), (k, 0, m)) == -(-1)**m*(m - n)*binomial(n, m)/n
assert gosper_sum((4*k + 1)*factorial(k)/factorial(2*k + 1), (k, 0, n)) == \
(2*factorial(2*n + 1) - factorial(n))/factorial(2*n + 1)
# issue 2934:
assert gosper_sum(
n*(n + a + b)*a**n*b**n/(factorial(n + a)*factorial(n + b)), \
(n, 0, m)) == -a*b*(exp(m*log(a))*exp(m*log(b))*factorial(a)* \
factorial(b) - factorial(a + m)*factorial(b + m))/(factorial(a)* \
factorial(b)*factorial(a + m)*factorial(b + m))
def test_gosper_sum_indefinite():
assert gosper_sum(k, k) == k*(k - 1)/2
assert gosper_sum(k**2, k) == k*(k - 1)*(2*k - 1)/6
assert gosper_sum(1/(k*(k + 1)), k) == -1/k
assert gosper_sum(-(27*k**4 + 158*k**3 + 430*k**2 + 678*k + 445)*gamma(2*k + 4)/(3*(3*k + 7)*gamma(3*k + 6)), k) == \
(3*k + 5)*(k**2 + 2*k + 5)*gamma(2*k + 4)/gamma(3*k + 6)
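# Editor's sketch (not from the original suite): an indefinite Gosper sum
# F = gosper_sum(f, k) should telescope, i.e. F(k + 1) - F(k) == f(k).
# The check below only uses results already asserted above.
def test_gosper_sum_indefinite_telescopes():
    F = gosper_sum(k, k)
    assert simplify(F.subs(k, k + 1) - F - k) == 0
    F = gosper_sum(k**2, k)
    assert simplify(F.subs(k, k + 1) - F - k**2) == 0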
def test_gosper_sum_parametric():
assert gosper_sum(binomial(S(1)/2, m - j + 1)*binomial(S(1)/2, m + j), (j, 1, n)) == \
n*(1 + m - n)*(-1 + 2*m + 2*n)*binomial(S(1)/2, 1 + m - n)* \
        binomial(S(1)/2, m + n)/(m*(1 + 2*m))
def test_gosper_sum_algebraic():
assert gosper_sum(
        n**2 + sqrt(2), (n, 0, m)) == (m + 1)*(2*m**2 + m + 6*sqrt(2))/6
def test_gosper_sum_iterated():
f1 = binomial(2*k, k)/4**k
f2 = (1 + 2*n)*binomial(2*n, n)/4**n
f3 = (1 + 2*n)*(3 + 2*n)*binomial(2*n, n)/(3*4**n)
f4 = (1 + 2*n)*(3 + 2*n)*(5 + 2*n)*binomial(2*n, n)/(15*4**n)
f5 = (1 + 2*n)*(3 + 2*n)*(5 + 2*n)*(7 + 2*n)*binomial(2*n, n)/(105*4**n)
assert gosper_sum(f1, (k, 0, n)) == f2
assert gosper_sum(f2, (n, 0, n)) == f3
assert gosper_sum(f3, (n, 0, n)) == f4
assert gosper_sum(f4, (n, 0, n)) == f5
# the AeqB tests test expressions given in
# www.math.upenn.edu/~wilf/AeqB.pdf
def test_gosper_sum_AeqB_part1():
f1a = n**4
f1b = n**3*2**n
f1c = 1/(n**2 + sqrt(5)*n - 1)
f1d = n**4*4**n/binomial(2*n, n)
f1e = factorial(3*n)/(factorial(n)*factorial(n + 1)*factorial(n + 2)*27**n)
f1f = binomial(2*n, n)**2/((n + 1)*4**(2*n))
f1g = (4*n - 1)*binomial(2*n, n)**2/((2*n - 1)**2*4**(2*n))
f1h = n*factorial(n - S(1)/2)**2/factorial(n + 1)**2
g1a = m*(m + 1)*(2*m + 1)*(3*m**2 + 3*m - 1)/30
g1b = 26 + 2**(m + 1)*(m**3 - 3*m**2 + 9*m - 13)
g1c = (m + 1)*(m*(m**2 - 7*m + 3)*sqrt(5) - (
3*m**3 - 7*m**2 + 19*m - 6))/(2*m**3*sqrt(5) + m**4 + 5*m**2 - 1)/6
g1d = -S(2)/231 + 2*4**m*(m + 1)*(63*m**4 + 112*m**3 + 18*m**2 -
22*m + 3)/(693*binomial(2*m, m))
g1e = -S(9)/2 + (81*m**2 + 261*m + 200)*factorial(
3*m + 2)/(40*27**m*factorial(m)*factorial(m + 1)*factorial(m + 2))
g1f = (2*m + 1)**2*binomial(2*m, m)**2/(4**(2*m)*(m + 1))
g1g = -binomial(2*m, m)**2/4**(2*m)
g1h = -(2*m + 1)**2*(3*m + 4)*factorial(m - S(1)/2)**2/factorial(m + 1)**2
g = gosper_sum(f1a, (n, 0, m))
assert g is not None and simplify(g - g1a) == 0
g = gosper_sum(f1b, (n, 0, m))
assert g is not None and simplify(g - g1b) == 0
g = gosper_sum(f1c, (n, 0, m))
assert g is not None and simplify(g - g1c) == 0
g = gosper_sum(f1d, (n, 0, m))
assert g is not None and simplify(g - g1d) == 0
g = gosper_sum(f1e, (n, 0, m))
assert g is not None and simplify(g - g1e) == 0
g = gosper_sum(f1f, (n, 0, m))
assert g is not None and simplify(g - g1f) == 0
g = gosper_sum(f1g, (n, 0, m))
assert g is not None and simplify(g - g1g) == 0
g = gosper_sum(f1h, (n, 0, m))
assert g is not None and simplify(g - g1h) == 0
def test_gosper_sum_AeqB_part2():
f2a = n**2*a**n
f2b = (n - r/2)*binomial(r, n)
f2c = factorial(n - 1)**2/(factorial(n - x)*factorial(n + x))
g2a = -a*(a + 1)/(a - 1)**3 + a**(
m + 1)*(a**2*m**2 - 2*a*m**2 + m**2 - 2*a*m + 2*m + a + 1)/(a - 1)**3
g2b = (m - r)*binomial(r, m)/2
ff = factorial(1 - x)*factorial(1 + x)
g2c = 1/ff*(
1 - 1/x**2) + factorial(m)**2/(x**2*factorial(m - x)*factorial(m + x))
g = gosper_sum(f2a, (n, 0, m))
assert g is not None and simplify(g - g2a) == 0
g = gosper_sum(f2b, (n, 0, m))
assert g is not None and simplify(g - g2b) == 0
g = gosper_sum(f2c, (n, 1, m))
assert g is not None and simplify(g - g2c) == 0
def test_gosper_nan():
a = Symbol('a', positive=True)
b = Symbol('b', positive=True)
n = Symbol('n', integer=True)
m = Symbol('m', integer=True)
f2d = n*(n + a + b)*a**n*b**n/(factorial(n + a)*factorial(n + b))
g2d = 1/(factorial(a - 1)*factorial(
b - 1)) - a**(m + 1)*b**(m + 1)/(factorial(a + m)*factorial(b + m))
g = gosper_sum(f2d, (n, 0, m))
assert simplify(g - g2d) == 0
def test_gosper_sum_AeqB_part3():
f3a = 1/n**4
f3b = (6*n + 3)/(4*n**4 + 8*n**3 + 8*n**2 + 4*n + 3)
f3c = 2**n*(n**2 - 2*n - 1)/(n**2*(n + 1)**2)
f3d = n**2*4**n/((n + 1)*(n + 2))
f3e = 2**n/(n + 1)
f3f = 4*(n - 1)*(n**2 - 2*n - 1)/(n**2*(n + 1)**2*(n - 2)**2*(n - 3)**2)
f3g = (n**4 - 14*n**2 - 24*n - 9)*2**n/(n**2*(n + 1)**2*(n + 2)**2*
(n + 3)**2)
# g3a -> no closed form
g3b = m*(m + 2)/(2*m**2 + 4*m + 3)
g3c = 2**m/m**2 - 2
g3d = S(2)/3 + 4**(m + 1)*(m - 1)/(m + 2)/3
# g3e -> no closed form
g3f = -(-S(1)/16 + 1/((m - 2)**2*(m + 1)**2)) # the AeqB key is wrong
g3g = -S(2)/9 + 2**(m + 1)/((m + 1)**2*(m + 3)**2)
g = gosper_sum(f3a, (n, 1, m))
assert g is None
g = gosper_sum(f3b, (n, 1, m))
assert g is not None and simplify(g - g3b) == 0
g = gosper_sum(f3c, (n, 1, m - 1))
assert g is not None and simplify(g - g3c) == 0
g = gosper_sum(f3d, (n, 1, m))
assert g is not None and simplify(g - g3d) == 0
g = gosper_sum(f3e, (n, 0, m - 1))
assert g is None
g = gosper_sum(f3f, (n, 4, m))
assert g is not None and simplify(g - g3f) == 0
g = gosper_sum(f3g, (n, 1, m))
assert g is not None and simplify(g - g3g) == 0
|
ygol/odoo
|
addons/purchase_stock/tests/test_fifo_price.py
|
Python
|
agpl-3.0
| 16,403
| 0.002744
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from .common import PurchaseTestCommon
from odoo.addons.stock_account.tests.common import StockAccountTestCommon
from odoo.tests import Form
class TestFifoPrice(PurchaseTestCommon, StockAccountTestCommon):
def test_00_test_fifo(self):
""" Test product cost price with fifo removal strategy."""
res_partner_3 = self.env['res.partner'].create({
'name': 'Gemini Partner',
})
# Set a product as using fifo price
product_cable_management_box = self.env['product.product'].create({
'default_code': 'FIFO',
'name': 'FIFO Ice Cream',
'type': 'product',
'categ_id': self.env.ref('product.product_category_1').id,
'list_price': 100.0,
'standard_price': 70.0,
'uom_id': self.env.ref('uom.product_uom_kgm').id,
'uom_po_id': self.env.ref('uom.product_uom_kgm').id,
'supplier_taxes_id': [],
'description': 'FIFO Ice Cream',
})
product_cable_management_box.categ_id.property_cost_method = 'fifo'
product_cable_management_box.categ_id.property_valuation = 'real_time'
product_cable_management_box.categ_id.property_stock_account_input_categ_id = self.o_expense
product_cable_management_box.categ_id.property_stock_account_output_categ_id = self.o_income
# I create a draft Purchase Order for first in move for 10 kg at 50 euro
purchase_order_1 = self.env['purchase.order'].create({
'partner_id': res_partner_3.id,
'order_line': [(0, 0, {
'name': 'FIFO Ice Cream',
'product_id': product_cable_management_box.id,
'product_qty': 10.0,
'product_uom': self.env.ref('uom.product_uom_kgm').id,
'price_unit': 50.0,
'date_planned': time.strftime('%Y-%m-%d')})],
})
# Confirm the first purchase order
purchase_order_1.button_confirm()
# Check the "Purchase" status of purchase order 1
self.assertEqual(purchase_order_1.state, 'purchase')
# Process the reception of purchase order 1 and set date
picking = purchase_order_1.picking_ids[0]
res = picking.button_validate()
Form(self.env[res['res_model']].with_context(res['context'])).save().process()
# Check the standard price of the product (fifo icecream), that should have changed
# because the unit cost of the purchase order is 50
self.assertAlmostEqual(product_cable_management_box.standard_price, 50.0)
self.assertEqual(product_cable_management_box.value_svl, 500.0, 'Wrong stock value')
# I create a draft Purchase Order for second shipment for 30 kg at 80 euro
purchase_order_2 = self.env['purchase.order'].create({
'partner_id': res_partner_3.id,
'order_line': [(0, 0, {
'name': 'FIFO Ice Cream',
'product_id': product_cable_management_box.id,
'product_qty': 30.0,
'product_uom': self.env.ref('uom.product_uom_kgm').id,
'price_unit': 80.0,
'date_planned': time.strftime('%Y-%m-%d')})],
})
# Confirm the second purchase order
purchase_order_2.button_confirm()
# Process the reception of purchase order 2
picking = purchase_order_2.picking_ids[0]
res = picking.button_validate()
Form(self.env[res['res_model']].with_context(res['context'])).save().process()
# Check the standard price of the product, that should have not changed because we
# still have icecream in stock
self.assertEqual(product_cable_management_box.standard_price, 50.0, 'Standard price as fifo price of second reception incorrect!')
self.assertEqual(product_cable_management_box.value_svl, 2900.0, 'Stock valuation should be 2900')
# Let us send some goods
outgoing_shipment = self.env['stock.picking'].create({
'picking_type_id': self.env.ref('stock.picking_type_out').id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id,
'move_lines': [(0, 0, {
'name': product_cable_management_box.name,
'product_id': product_cable_management_box.id,
'product_uom_qty': 20.0,
'product_uom': self.env.ref('uom.product_uom_kgm').id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id,
'picking_type_id': self.env.ref('stock.picking_type_out').id})]
})
# I assign this outgoing shipment
outgoing_shipment.action_assign()
# Process the delivery of the outgoing shipment
res = outgoing_shipment.button_validate()
Form(self.env[res['res_model']].with_context(res['context'])).save().process()
# Check stock value became 1600 .
self.assertEqual(product_cable_management_box.value_svl, 1600.0, 'Stock valuation should be 1600')
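        # Editor's note on the FIFO arithmetic: stock held 10 kg @ 50 and
        # 30 kg @ 80 (total value 2900). Shipping 20 kg consumes the oldest
        # layer first: 10 kg @ 50 + 10 kg @ 80 = 1300, so 2900 - 1300 = 1600.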
# Do a delivery of an extra 500 g (delivery order)
outgoing_shipment_uom = self.env['stock.picking'].create({
'picking_type_id': self.env.ref('stock.picking_type_out').id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id,
'move_lines': [(0, 0, {
'name': product_cable_management_box.name,
'product_id': product_cable_management_box.id,
'product_uom_qty': 500.0,
'product_uom': self.env.ref('uom.product_uom_gram').id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id,
'picking_type_id': self.env.ref('stock.picking_type_out').id})]
})
# I assign this outgoing shipment
outgoing_shipment_uom.action_assign()
# Process the delivery of the outgoing shipment
res = outgoing_shipment_uom.button_validate()
Form(self.env[res['res_model']].with_context(res['context'])).save().process()
# Check stock valuation and qty in stock
self.assertEqual(product_cable_management_box.value_svl, 1560.0, 'Stock valuation should be 1560')
self.assertEqual(product_cable_management_box.qty_available, 19.5, 'Should still have 19.5 in stock')
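        # Editor's note: the 500 g delivery is 0.5 kg taken from the 80 euro
        # layer, so the value drops by 0.5 * 80 = 40 (1600 - 40 = 1560) and
        # the quantity drops to 20 - 0.5 = 19.5 kg.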
# We will temporarily change the currency rate on the sixth of June to have the same results all year
NewUSD = self.env['res.currency'].create({
'name': 'new_usd',
'symbol': '$²',
'rate_ids': [(0, 0, {'rate': 1.2834, 'name': time.strftime('%Y-%m-%d')})],
})
# Create PO for 30000 g at 0.150$/g and 10 kg at 150$/kg
purchase_order_usd = self.env['purchase.order'].create({
'partner_id': res_partner_3.id,
'currency_id': NewUSD.id,
'order_line': [(0, 0, {
'name': 'FIFO Ice Cream',
'product_id': product_cable_management_box.id,
'product_qty': 30,
'product_uom': self.env.ref('uom.product_uom_kgm').id,
'price_unit': 0.150,
'date_planned': time.strftime('%Y-%m-%d')}),
(0, 0, {
'name': product_cable_management_box.name,
'product_id': product_cable_management_box.id,
'product_qty': 10.0,
'product_uom': self.env.ref('uom.product_uom_kgm').id,
'price_unit': 150.0,
'date_planned': time.strftime('%Y-%m-%d')})]
})
# Confirm the purchase order in USD
purchase_order_usd.button_confirm()
        # Process the reception of the purchase order in USD
|
MisanthropicBit/colorise
|
src/colorise/win/win32_functions.py
|
Python
|
bsd-3-clause
| 10,715
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Windows API functions."""
import ctypes
import os
import sys
from ctypes import WinError, wintypes
from colorise.win.winhandle import WinHandle
# Create a separate WinDLL instance since the one from ctypes.windll.kernel32
# can be manipulated by other code that also imports it
#
# See
# https://stackoverflow.com/questions/34040123/ctypes-cannot-import-windll#comment55835311_34040124
kernel32 = ctypes.WinDLL('kernel32', use_errno=True, use_last_error=True)
# Handle IDs for stdout and stderr
_STDOUT_HANDLE_ID = -11
_STDERR_HANDLE_ID = -12
# Console modes for console virtual terminal sequences
DISABLE_NEWLINE_AUTO_RETURN = 0x0008
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
ERROR_INVALID_HANDLE = 6
# Struct defined in wincon.h
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): # noqa: D101
_fields_ = [
('dwSize', wintypes._COORD),
('dwCursorPosition', wintypes._COORD),
('wAttributes', ctypes.c_ushort),
('srWindow', wintypes._SMALL_RECT),
('dwMaximumWindowSize', wintypes._COORD),
]
# Struct defined in wincon.h
class CONSOLE_SCREEN_BUFFER_INFOEX(ctypes.Structure): # noqa: D101
_fields_ = [
('cbSize', wintypes.ULONG),
('dwSize', wintypes._COORD),
('dwCursorPosition', wintypes._COORD),
('wAttributes', ctypes.c_ushort),
('srWindow', wintypes._SMALL_RECT),
('dwMaximumWindowSize', wintypes._COORD),
('wPopupAttributes', wintypes.WORD),
('bFullscreenSupported', wintypes.BOOL),
('ColorTable', wintypes.COLORREF * 16),
]
if not hasattr(wintypes, 'LPDWORD'):
LPDWORD = ctypes.POINTER(wintypes.DWORD)
else:
LPDWORD = wintypes.LPDWORD
# Set argument and return types for Windows API calls
kernel32.GetConsoleScreenBufferInfo.argtypes =\
[wintypes.HANDLE, ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
kernel32.GetConsoleScreenBufferInfo.restype = wintypes.BOOL
kernel32.GetStdHandle.argtypes = [wintypes.DWORD]
kernel32.GetStdHandle.restype = wintypes.HANDLE
kernel32.GetConsoleMode.argtypes = [wintypes.HANDLE, LPDWORD]
kernel32.GetConsoleMode.restype = wintypes.BOOL
kernel32.SetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.DWORD]
kernel32.SetConsoleMode.restype = wintypes.BOOL
kernel32.SetLastError.argtypes = [wintypes.DWORD]
kernel32.SetLastError.restype = None # void
kernel32.FormatMessageW.argtypes = [
wintypes.DWORD,
wintypes.LPCVOID,
wintypes.DWORD,
wintypes.DWORD,
wintypes.LPWSTR,
wintypes.DWORD,
wintypes.LPVOID
]
kernel32.FormatMessageW.restype = wintypes.DWORD
kernel32.LocalFree.argtypes = [wintypes.HLOCAL]
kernel32.LocalFree.restype = wintypes.HLOCAL
kernel32.SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
kernel32.SetConsoleTextAttribute.restype = wintypes.BOOL
if kernel32.SetConsoleScreenBufferInfoEx is not None:
# We can query RGB values of console colors on Windows
kernel32.GetConsoleScreenBufferInfoEx.argtypes =\
[wintypes.HANDLE, ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFOEX)]
kernel32.GetConsoleScreenBufferInfoEx.restype = wintypes.BOOL
def isatty(handle):
"""Check if a handle is a valid console handle.
For example, if a handle is redirected to a file, it is not a valid console
handle and all win32 console API calls will fail.
"""
if not handle or not handle.valid:
return False
console_mode = wintypes.DWORD(0)
# We use GetConsoleMode here but it could be any function that expects a
# valid console handle
retval = kernel32.GetConsoleMode(handle.value, ctypes.byref(console_mode))
if retval == 0:
errno = ctypes.get_last_error()
if errno == ERROR_INVALID_HANDLE:
return False
else:
# Another error happened
raise WinError()
else:
return True
def can_redefine_colors(file):
"""Return whether the terminal allows redefinition of colors."""
handle = get_win_handle(WinHandle.from_sys_handle(file))
return kernel32.SetConsoleScreenBufferInfoEx is not None and isatty(handle)
def create_std_handle(handle_id):
"""Create a Windows standard handle from an identifier."""
handle = kernel32.GetStdHandle(handle_id)
if handle == WinHandle.INVALID:
raise WinError()
csbi = CONSOLE_SCREEN_BUFFER_INFO()
retval = kernel32.GetConsoleScreenBufferInfo(
handle,
ctypes.byref(csbi),
)
win_handle = None
if retval == 0:
errno = ctypes.get_last_error()
if errno == ERROR_INVALID_HANDLE:
# Return a special non-console handle
win_handle = WinHandle.get_nonconsole_handle(handle_id)
else:
raise WinError()
else:
win_handle = WinHandle(handle)
    # Set default color values
# TODO: Do these need to be reread when colors are redefined?
win_handle.default_fg = csbi.wAttributes & 0xf
win_handle.default_bg = (csbi.wAttributes >> 4) & 0xf
# Set the color for the handle
win_handle.fg = win_handle.default_fg
win_handle.bg = win_handle.default_bg
return win_handle
def get_win_handle(target):
"""Return the Windows handle corresponding to a Python handle."""
    if WinHandle.validate(target):
# We create a new handle each time since the old handle may have been
# invalidated by a redirection
return create_std_handle(target)
raise ValueError("Invalid handle identifier '{0}'".format(target))
def get_windows_clut():
"""Query and return the internal Windows color
|
look-up table."""
# On Windows Vista and beyond you can query the current colors in the
# color table. On older platforms, use the default color table
csbiex = CONSOLE_SCREEN_BUFFER_INFOEX()
csbiex.cbSize = ctypes.sizeof(CONSOLE_SCREEN_BUFFER_INFOEX)
retval = kernel32.GetConsoleScreenBufferInfoEx(
get_win_handle(WinHandle.STDOUT).value,
ctypes.byref(csbiex),
)
if retval == 0:
raise WinError()
clut = {}
# Update according to the currently set colors
for i in range(16):
clut[i] = (
csbiex.ColorTable[i] & 0xff,
(csbiex.ColorTable[i] >> 8) & 0xff,
(csbiex.ColorTable[i] >> 16) & 0xff,
)
return clut
def enable_virtual_terminal_processing(handle):
"""Enable Windows processing of ANSI escape sequences."""
if not handle or not handle.valid:
raise ValueError('Invalid handle')
if not isatty(handle):
return False
console_mode = wintypes.DWORD(0)
if kernel32.GetConsoleMode(handle.value, ctypes.byref(console_mode)) == 0:
raise WinError()
handle.console_mode = console_mode
target_mode = wintypes.DWORD(
console_mode.value
| ENABLE_VIRTUAL_TERMINAL_PROCESSING
| DISABLE_NEWLINE_AUTO_RETURN
)
# First attempt to set console mode to interpret ANSI escape codes and
# disable immediately jumping to the next console line
if kernel32.SetConsoleMode(handle.value, target_mode) == 0:
# If that fails, try just setting the mode for ANSI escape codes
target_mode = wintypes.DWORD(
console_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING
)
if kernel32.SetConsoleMode(handle.value, target_mode) == 0:
return None
# Return the original console mode so we can restore it later
return console_mode
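# Editor's usage sketch (hypothetical driver code, not part of this module):
#   handle = get_win_handle(WinHandle.STDOUT)
#   old_mode = enable_virtual_terminal_processing(handle)
#   if old_mode:
#       print('\x1b[31mred\x1b[0m')  # ANSI escapes are now interpreted
#       restore_console_mode(handle, old_mode)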
def restore_console_mode(handle, restore_mode):
"""Restore the console mode for a handle to its original mode."""
if not handle or handle == WinHandle.INVALID:
raise ValueError('Invalid handle')
if not kernel32.SetConsoleMode(handle.value, restore_mode):
raise WinError()
def restore_console_modes():
"""Restore console modes for stdout and stderr to their original mode."""
if can_interpret_ansi(sys.stdout):
stdout = get_win_handle(WinHandle.STDOUT)
restore_console_mode(stdout, stdout.console_mode)
if can_interpret_ansi(sys.stderr):
stderr = get_win_handle(WinHandle.STDERR)
        restore_console_mode(stderr, stderr.console_mode)
|
iZonex/aioactor
|
examples/accounts/app.py
|
Python
|
apache-2.0
| 1,531
| 0
|
import asyncio
import uvloop
from aioactor.transports import NatsTransport
from aioactor.service import Service
from aioactor.broker import ServiceBroker
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
# TODO ADD possible actions list!
# TODO ADD abstractions to Message Handler!
# MessageHandler must be able to call methods of Service and control requests
# TODO Aggregate data about user [userinfo, location, photo]
class UsersService(Service):
def __init__(self):
self.name = "users"
self.actions = {
'get': self.get_user_name
}
async def get_user_name(self, user_id: int) -> dict:
users = {
1: {
'firstname': 'Antonio',
'lastname': 'Rodrigas'
}
}
user_obj = users.get(user_id, {})
return user_obj
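# Editor's sketch: the action is an ordinary coroutine, so it can be
# exercised without a broker, e.g.:
#   asyncio.get_event_loop().run_until_complete(
#       UsersService().get_user_name(1))
#   # -> {'firstname': 'Antonio', 'lastname': 'Rodrigas'}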
# TODO Add protected types for registration
# TODO Add protocols accepted types for services
def register_services(broker, services):
for service in services:
broker.create_service(service())
async def main():
settings = {
'logger': 'console',
'message_transport': {
'handler': NatsTransport
}
}
broker = ServiceBroker(io_loop=loop, **settings)
services = [UsersService]
    register_services(broker, services)
print(broker.available_services())
await broker.start()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.run_forever()
loop.close()
|
qtumproject/qtum
|
test/functional/wallet_hd.py
|
Python
|
mit
| 7,811
| 0.005121
|
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
import os
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
assert_raises_rpc_error
)
from test_framework.qtumconfig import COINBASE_MATURITY
from test_framework.qtum import generatesynchronized
class WalletHDTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ['-keypool=0']]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].getaddressinfo(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/88'/1'/0'")  # first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, "hd.bak"))
#self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, "hd.dump"))
# Derive some HD addresses and remember the last
        # Also send funds to each address
generatesynchronized(self.nodes[0], COINBASE_MATURITY+1, None, self.nodes)
hd_add = None
NUM_HD_ADDS = 10
for i in range(NUM_HD_ADDS):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].getaddressinfo(hd_add)
assert_equal(hd_info["hdkeypath"], "m/88'/0'/"+str(i)+"'")
assert_equal(hd_info["hdseedid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].getaddressinfo(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/88'/1'/1'")  # second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
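        # Editor's note: NUM_HD_ADDS sends of 1 coin to the HD addresses plus
        # the single 1-coin send to non_hd_add give the expected balance of 11.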
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete chain directory
        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "blocks"))
shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "chainstate"))
shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, self.chain, "wallets", "wallet.dat"))
self.start_node(1)
# Assert that derivation is deterministic
hd_add_2 = None
for i in range(NUM_HD_ADDS):
hd_add_2 = self.nodes[1].getnewaddress()
            hd_info_2 = self.nodes[1].getaddressinfo(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/88'/0'/"+str(i)+"'")
assert_equal(hd_info_2["hdseedid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
connect_nodes(self.nodes[0], 1)
self.sync_all()
# Needs rescan
        self.stop_node(1)
self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
# Try a RPC based rescan
self.stop_node(1)
shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "blocks"))
shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "chainstate"))
shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, self.chain, "wallets", "wallet.dat"))
self.start_node(1, extra_args=self.extra_args[1])
connect_nodes(self.nodes[0], 1)
self.sync_all()
# Wallet automatically scans blocks older than key on startup
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
out = self.nodes[1].rescanblockchain(0, 1)
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], 1)
out = self.nodes[1].rescanblockchain()
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], self.nodes[1].getblockcount())
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
        # send a tx and make sure it's using the internal chain for the change output
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].getaddressinfo(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:8], "m/88'/1'")
# Generate a new HD seed on node 1 and make sure it is set
orig_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
self.nodes[1].sethdseed()
new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
assert orig_masterkeyid != new_masterkeyid
addr = self.nodes[1].getnewaddress()
assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/88\'/0\'/0\'') # Make sure the new address is the first from the keypool
self.nodes[1].keypoolrefill(1) # Fill keypool with 1 key
# Set a new HD seed on node 1 without flushing the keypool
new_seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress())
orig_masterkeyid = new_masterkeyid
self.nodes[1].sethdseed(False, new_seed)
new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
assert orig_masterkeyid != new_masterkeyid
addr = self.nodes[1].getnewaddress()
assert_equal(orig_masterkeyid, self.nodes[1].getaddressinfo(addr)['hdseedid'])
assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/88\'/0\'/1\'') # Make sure the new address continues previous keypool
# Check that the next address is from the new seed
self.nodes[1].keypoolrefill(1)
next_addr = self.nodes[1].getnewaddress()
assert_equal(new_masterkeyid, self.nodes[1].getaddressinfo(next_addr)['hdseedid'])
assert_equal(self.nodes[1].getaddressinfo(next_addr)['hdkeypath'], 'm/88\'/0\'/0\'') # Make sure the new address is not from previous keypool
assert next_addr != addr
# Sethdseed parameter validity
assert_raises_rpc_error(-1, 'sethdseed', self.nodes[0].sethdseed, False, new_seed, 0)
assert_raises_rpc_error(-5, "Invalid private key", self.nodes[1].sethdseed, False, "not_wif")
assert_raises_rpc_error(-1, "JSON value is not a boolean as expected", self.nodes[1].sethdseed, "Not_bool")
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[1].sethdseed, False, True)
assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, new_seed)
assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, self.nodes[1].dumpprivkey(self.nodes[1].getnewaddress()))
if __name__ == '__main__':
    WalletHDTest().main()
|
phildini/logtacts
|
contacts/tests/test_models.py
|
Python
|
mit
| 9,784
| 0.000511
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from common.factories import UserFactory
import contacts as contact_constants
from contacts import factories
from contacts import models
class ContactModelTests(TestCase):
def setUp(self):
self.book = factories.BookFactory.create()
self.contact = factories.ContactFactory.create(
name="Philip James",
book=self.book,
)
def test_contact_name(self):
"""String repr of contact should be name."""
self.assertEqual(self.contact.name, str(self.contact))
def test_contact_url(self):
expected_url = reverse('contacts-view', kwargs={
'pk': self.contact.id,
'book': self.book.id,
})
self.assertEqual(self.contact.get_absolute_url(), expected_url)
def test_contact_last_contacted(self):
log = factories.LogFactory.create(contact=self.contact)
self.contact.update_last_contact_from_log(log)
self.assertEqual(self.contact.last_contacted(), log.created)
def test_contact_can_be_viewed_by(self):
bookowner = factories.BookOwnerFactory.create(book=self.book)
user = bookowner.user
self.assertTrue(self.contact.can_be_viewed_by(user))
def test_contact_can_be_edited_by(self):
bookowner = factories.BookOwnerFactory.create(book=self.book)
user = bookowner.user
self.assertTrue(self.contact.can_be_edited_by(user))
def test_contact_cant_be_viewed_by_bad(self):
user = UserFactory.create(username='asheesh')
        self.assertFalse(self.contact.can_be_viewed_by(user))
def test_contact_cant_be_edited_by_bad(self):
user = UserFactory.create(username='asheesh')
self.assertFalse(self.contact.can_be_edited_by(user))
def test_get_contacts_for_user(self):
bookowner = factories.BookOwnerFactory.create(book=self.book)
user = bookowner.user
self.assertEqual(
[self.contact],
list(models.Contact.objects.get_contacts_for_user(user)),
)
def test_get_contacts_for_user_bad_user(self):
user = UserFactory.create(username="nicholle")
self.assertFalse(
list(models.Contact.objects.get_contacts_for_user(user)),
)
def test_preferred_address_with_preferred(self):
field = factories.ContactFieldFactory(
contact=self.contact,
kind=contact_constants.FIELD_TYPE_ADDRESS,
value='1600 Pennsylvania Ave.',
preferred=True,
)
self.assertEqual(self.contact.preferred_address, field.value)
def test_preferred_address_without_preferred(self):
field = factories.ContactFieldFactory(
contact=self.contact,
kind=contact_constants.FIELD_TYPE_ADDRESS,
value='1600 Pennsylvania Ave.',
)
self.assertEqual(self.contact.preferred_address, field.value)
def test_preferred_address_no_address(self):
self.assertEqual(self.contact.preferred_address, '')
def test_preferred_email_with_preferred(self):
field = factories.ContactFieldFactory(
contact=self.contact,
kind=contact_constants.FIELD_TYPE_EMAIL,
value='1600 Pennsylvania Ave.',
preferred=True,
)
self.assertEqual(self.contact.preferred_email, field.value)
def test_preferred_email_without_preferred(self):
field = factories.ContactFieldFactory(
contact=self.contact,
kind=contact_constants.FIELD_TYPE_EMAIL,
value='1600 Pennsylvania Ave.',
)
self.assertEqual(self.contact.preferred_email, field.value)
def test_preferred_email_no_email(self):
self.assertEqual(self.contact.preferred_email, '')
def test_preferred_phone_with_preferred(self):
field = factories.ContactFieldFactory(
contact=self.contact,
kind=contact_constants.FIELD_TYPE_PHONE,
value='1600 Pennsylvania Ave.',
preferred=True,
)
self.assertEqual(self.contact.preferred_phone, field.value)
def test_preferred_phone_without_preferred(self):
field = factories.ContactFieldFactory(
contact=self.contact,
kind=contact_constants.FIELD_TYPE_PHONE,
value='1600 Pennsylvania Ave.',
)
self.assertEqual(self.contact.preferred_phone, field.value)
def test_preferred_phone_no_phone(self):
self.assertEqual(self.contact.preferred_phone, '')
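# Editor's sketch of the preferred_* behaviour the tests above imply (assumed
# model logic with a hypothetical related name, not copied from the source):
#   def _preferred_value(self, kind):
#       fields = self.contactfield_set.filter(kind=kind)
#       chosen = fields.filter(preferred=True).first() or fields.first()
#       return chosen.value if chosen else ''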
class TagModelTests(TestCase):
def setUp(self):
self.book = factories.BookFactory.create()
self.contact = factories.ContactFactory.create(
name="Philip James",
book=self.book,
)
self.tag = factories.TagFactory.create(
tag='Family',
book=self.book,
)
def test_tag_name(self):
self.assertEqual(self.tag.tag, str(self.tag))
def test_get_tags_for_user(self):
bookowner = factories.BookOwnerFactory.create(book=self.book)
user = bookowner.user
self.assertEqual(
[self.tag],
list(models.Tag.objects.get_tags_for_user(user)),
)
def test_tag_can_be_viewed_by(self):
bookowner = factories.BookOwnerFactory.create(book=self.book)
user = bookowner.user
self.assertTrue(self.tag.can_be_viewed_by(user))
def test_tag_can_be_edited_by(self):
bookowner = factories.BookOwnerFactory.create(book=self.book)
user = bookowner.user
self.assertTrue(self.tag.can_be_edited_by(user))
def test_tag_cant_be_viewed_by_bad(self):
user = UserFactory.create(username='asheesh')
self.assertFalse(self.tag.can_be_viewed_by(user))
def test_tag_cant_be_edited_by_bad(self):
user = UserFactory.create(username='asheesh')
self.assertFalse(self.tag.can_be_edited_by(user))
def test_corrected_color(self):
self.assertEqual(self.tag.corrected_color, '#123456')
self.tag.color = '#c0ffee'
self.assertEqual(self.tag.corrected_color, '#c0ffee')
self.tag.color = 'c0ffee'
self.assertEqual(self.tag.corrected_color, '#c0ffee')
class BookModelTests(TestCase):
def setUp(self):
self.book = factories.BookFactory.create(name="James Family")
def test_book_name(self):
self.assertEqual(self.book.name, str(self.book))
def test_book_can_be_viewed_by(self):
bookowner = factories.BookOwnerFactory.create(book=self.book)
user = bookowner.user
self.assertTrue(self.book.can_be_viewed_by(user))
def test_book_can_be_edited_by(self):
bookowner = factories.BookOwnerFactory.create(book=self.book)
user = bookowner.user
self.assertTrue(self.book.can_be_edited_by(user))
def test_book_cant_be_viewed_by_bad(self):
user = UserFactory.create(username='asheesh')
self.assertFalse(self.book.can_be_viewed_by(user))
def test_book_cant_be_edited_by_bad(self):
user = UserFactory.create(username='asheesh')
self.assertFalse(self.book.can_be_edited_by(user))
class BookOwnerModelTests(TestCase):
def setUp(self):
self.book = factories.BookFactory.create(name="James Family")
self.user = UserFactory(username="phildini")
def test_book_owner_repr(self):
bookowner = factories.BookOwnerFactory(book=self.book, user=self.user)
expected = "{} is an owner of {}".format(self.user, self.book)
self.assertEqual(str(bookowner), expected)
class LogEntryModelTests(TestCase):
def setUp(self):
self.book = factories.BookFactory.create(name="James Family")
self.user = UserFactory(username="phildini")
self.bookowner = factories.BookOwnerFactory(book=self.book, user=self.user)
self.contact = factories.ContactFactory.create(
name="Philip James",
book=self.book,
)
self.log = factories.LogFactory.create(contact=self.contact)
self.contact.update_last_contact_from_log(self.log)
def
|
GenericStudent/home-assistant
|
tests/components/logbook/test_init.py
|
Python
|
apache-2.0
| 61,935
| 0.001405
|
"""The tests for the logbook component."""
# pylint: disable=protected-access,invalid-name
import collections
from datetime import datetime, timedelta
import json
import unittest
import pytest
import voluptuous as vol
from homeassistant.components import logbook, recorder
from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
from homeassistant.components.automation import EVENT_AUTOMATION_TRIGGERED
from homeassistant.components.recorder.models import process_timestamp_to_utc_isoformat
from homeassistant.components.script import EVENT_SCRIPT_STARTED
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_NAME,
ATTR_SERVICE,
CONF_DOMAINS,
CONF_ENTITIES,
CONF_EXCLUDE,
CONF_INCLUDE,
EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.helpers.entityfilter import CONF_ENTITY_GLOBS
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import Mock, patch
from tests.common import get_test_home_assistant, init_recorder_component, mock_platform
from tests.components.recorder.common import trigger_db_commit
class TestComponentLogbook(unittest.TestCase):
"""Test the History component."""
EMPTY_CONFIG = logbook.CONFIG_SCHEMA({logbook.DOMAIN: {}})
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
init_recorder_component(self.hass) # Force an in memory DB
with patch("homeassistant.components.http.start_http_server_and_save_config"):
assert setup_component(self.hass, logbook.DOMAIN, self.EMPTY_CONFIG)
self.addCleanup(self.hass.stop)
def test_service_call_create_logbook_entry(self):
"""Test if service call create log book entry."""
calls = []
@ha.callback
def event_listener(event):
"""Append on event."""
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
self.hass.services.call(
logbook.DOMAIN,
"log",
{
logbook.ATTR_NAME: "Alarm",
logbook.ATTR_MESSAGE: "is triggered",
logbook.ATTR_DOMAIN: "switch",
logbook.ATTR_ENTITY_ID: "switch.test_switch",
},
True,
)
self.hass.services.call(
logbook.DOMAIN,
"log",
{
logbook.ATTR_NAME: "This entry",
logbook.ATTR_MESSAGE: "has no domain or entity_id",
},
True,
)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
trigger_db_commit(self.hass)
self.hass.block_till_done()
self.hass.data[recorder.DATA_INSTANCE].block_till_done()
events = list(
logbook._get_events(
self.hass,
dt_util.utcnow() - timedelta(hours=1),
dt_util.utcnow() + timedelta(hours=1),
)
)
assert len(events) == 2
assert len(calls) == 2
first_call = calls[-2]
assert first_call.data.get(logbook.ATTR_NAME) == "Alarm"
assert first_call.data.get(logbook.ATTR_MESSAGE) == "is triggered"
assert first_call.data.get(logbook.ATTR_DOMAIN) == "switch"
assert first_call.data.get(logbook.ATTR_ENTITY_ID) == "switch.test_switch"
last_call = calls[-1]
assert last_call.data.get(logbook.ATTR_NAME) == "This entry"
assert last_call.data.get(logbook.ATTR_MESSAGE) == "has no domain or entity_id"
assert last_call.data.get(logbook.ATTR_DOMAIN) == "logbook"
def test_service_call_create_log_book_entry_no_message(self):
"""Test if service call create log book entry without message."""
calls = []
@ha.callback
def event_listener(event):
"""Append on event."""
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
with pytest.raises(vol.Invalid):
self.hass.services.call(logbook.DOMAIN, "log", {}, True)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
self.hass.block_till_done()
assert len(calls) == 0
def test_humanify_filter_sensor(self):
"""Test humanify filter too frequent sensor values."""
entity_id = "sensor.bla"
pointA = dt_util.utcnow().replace(minute=2)
pointB = pointA.replace(minute=5)
pointC = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
entity_attr_cache = logbook.EntityAttributeCache(self.hass)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id, 20)
eventC = self.create_state_changed_event(pointC, entity_id, 30)
entries = list(
logbook.humanify(self.hass, (eventA, eventB, eventC), entity_attr_cache, {})
)
assert len(entries) == 2
self.assert_entry(entries[0], pointB, "bla", entity_id=entity_id)
self.assert_entry(entries[1], pointC, "bla", entity_id=entity_id)
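    # Editor's note: with GROUP_BY_MINUTES grouping, eventA and eventB fall in
    # the same window, so only the latest of the two (pointB) plus the later
    # eventC survive, giving the two entries asserted above.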
def test_home_assistant_start_stop_grouped(self):
"""Test if HA start and stop events are grouped.
Events that are occurring in the same minute.
"""
entity_attr_cache = logbook.EntityAttributeCache(self.hass)
entries = list(
logbook.humanify(
self.hass,
(
MockLazyEventPartialState(EVENT_HOMEASSISTANT_STOP),
MockLazyEventPartialState(EVENT_HOMEASSISTANT_START),
),
entity_attr_cache,
{},
),
)
assert len(entries) == 1
self.assert_entry(
entries[0], name="Home Assistant", message="restarted", domain=ha.DOMAIN
)
def test_home_assistant_start(self):
"""Test if HA start is not filtered or converted into a restart."""
entity_id = "switch.bla"
pointA = dt_util.utcnow()
entity_attr_cache = logbook.EntityAttributeCache(self.hass)
entries = list(
logbook.humanify(
self.hass,
(
MockLazyEventPartialState(EVENT_HOMEASSISTANT_START),
self.create_state_changed_event(pointA, entity_id, 10),
),
entity_attr_cache,
{},
)
)
assert len(entries) == 2
self.assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
self.assert_entry(entries[1], pointA, "bla", entity_id=entity_id)
def test_process_custom_logbook_entries(self):
"""Test if custom log book entries get added as an entry."""
name = "Nice name"
message = "has a custom entry"
entity_id = "sun.sun"
entity_attr_cache = logbook.EntityAttributeCache(self.hass)
entries = list(
logbook.humanify(
self.hass,
(
MockLazyEventPartialState(
logbook.EVENT_LOGBOOK_ENTRY,
{
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_ENTITY_ID: entity_id,
},
),
),
entity_attr_cache,
{},
)
)
|