repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ReactiveX/RxPY | docs/conf.py | Python | mit | 5,837 | 0 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
from distutils.command.config import config
import guzzle_sphinx_theme
import tomli
from dunamai import Version
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, root)
# -- Project information -----------------------------------------------------
# General project metadata is stored in pyproject.toml
with open(os.path.join(root, "pyproject.toml"), "rb") as f:
config = tomli.load(f)
project_meta = config["tool"]["poetry"]
print(project_meta)
project = project_meta["name"]
author = project_meta["authors"][0]
description = project_meta["description"]
url = project_meta["homepage"]
title = project + " Documentation"
_version = Version.from_git()
# The full version, including alpha/beta/rc tags
release = _version.serialize(metadata=False)
# The short X.Y.Z version
version = _version.base
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "2.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"guzzle_sphinx_theme",
"sphinxcontrib_dooble",
]
# Include a separate entry for special methods, like __init__, where provided.
autodoc_default_options = {
"member-order": "bysource",
"special-members": True,
"exclude-members": "__dict__,__weakref__",
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_translator_class = "guzzle_sphinx_theme.HTMLTranslator"
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = "guzzle_sphinx_theme"
html_title = title
html_short_title = project + " " + version
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {"projectlink": url}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "de | fault.css" will overwrite the builtin "default.css".
html_static_ | path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, project + ".tex", title, author, "manual")]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, project.lower(), title, [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, title, author, project, description, "Miscellaneous")
]
# -- Extension configuration -------------------------------------------------
|
cedriclaunay/gaffer | python/GafferImageTest/FilterTest.py | Python | bsd-3-clause | 2,759 | 0.022472 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Image Engine Design nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferImage
import os
class FilterTest( unittest.TestCase ) :
def testDefaultFilter( self ) :
filters = GafferImage.Filter.filters()
default = GafferImage.Filter.defaultFilter()
self.assertTrue( default in filters )
| def testFilterList( self ) :
filters = GafferImage.Filter.filters()
self.assertTrue( len(filters) == 9 )
self.assertTrue( "Box" in filters )
self.assertTrue( "BSpline" in filters )
self.assertTrue( "Bilinear" in filters )
self.assertTrue( "Hermite" in filters )
self.assertTrue( "Mitchell" in filters )
self.assertTrue( "CatmullRom" in filters )
self.assertTrue( "Cubic" in filters )
self.assertTrue( "Lanczos" in filters )
self.assertTrue( "Sinc" in filters )
def testCreators( self ) :
filters = GafferImage.Filter.filters()
for name in filters :
f = GafferImage.Filter.create( name )
self.assertTrue( f.typeName(), name+"Filter" )
|
cineuse/CNCGToolKit | cgtkLibs/cgtk_py/implant_method.py | Python | mit | 238 | 0 | # cod | ing=utf8
from types import MethodType
def implant_method(obj, func, func_name):
base_class = obj.__class__
event = MethodType(func, obj, base_class)
setattr(obj, func_name, event)
if __name__ == "__main__":
| pass
|
uroslates/django-allauth | allauth/socialaccount/providers/oauth2/urls.py | Python | mit | 483 | 0.00207 | from django.conf.urls.defaults import patterns, url, include
def default_urlpatterns(provider):
urlpatterns = patterns(provider.package + '.views',
url('^login/$', 'oauth2_login',
name=provider.id + "_login"),
| url('^login/done/$', 'oauth2_complete',
name=provider.id + "_complete"))
return patterns | ('', url('^' + provider.id + '/', include(urlpatterns)))
|
fmenabe/python-unix | unix/linux/modules.py | Python | mit | 862 | 0 | class Modules(object):
def __init__(self, host):
self._host = host
def list(self):
status, stdout, stderr = self._host.execute('lsmod')
if not status:
raise LinuxError(stderr)
return [line.split()[0] for line in stdout.splitlines()[1:]]
def tree(self):
pass
def loaded(self, module):
return module in self.list()
def load(self, module, force=False, **params):
return self._host.execute('modprobe', module,
' '.join('%s=%s' % (param, v | alue)
for param, value in params.items()),
force=force)
def unload(self, module, force=False):
return self._host.execute('modprobe', module, rem | ove=True, force=force)
def options(self, module):
pass
|
tomvand/paparazzi-gazebo | sw/supervision/python/processes.py | Python | gpl-2.0 | 8,578 | 0 | # Paparazzi center utilities
#
# Copyright (C) 2016 ENAC, Florian BITARD (intern student)
#
# This file is part of paparazzi.
#
# paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
# [Imports]
import lib.environment as env
import lib.console as cs
import PyQt5.QtCore as Core
import logging
import os
import signal
#### | ###########################################################################
# [C | onstants]
LOGGER = logging.getLogger("[PROCESSES]")
AC_MAKEFILE_NAME = "Makefile.ac"
CONF_FLAG_NAME = "AIRCRAFT"
DEVICE_FLAG_NAME = "FLASH_MODE"
DEFAULT_EXIT_CODE = 2
INTERRUPTED_EXIT_CODE = -1
SUCCESS_EXIT_CODE = 0
CLEAN = "clean"
BUILD = "build"
UPLOAD = "upload"
PROGRAM = "program"
TOOL = "tool"
CLEAN_TARGET_KEY = "clean_ac"
BUILD_TARGET_KEY = ".compile"
UPLOAD_TARGET_KEY = ".upload"
CONF_FLAG = "@" + CONF_FLAG_NAME
TARGET_FLAG = "@TARGET"
CONF_ID_FLAG = "@AC_ID"
###############################################################################
# [Stream class]
class LoggerStream(Core.QObject):
""" Class to define a Stream object."""
logger_log_sent = Core.pyqtSignal(str, str, str)
def __init__(self):
super(LoggerStream, self).__init__()
# Reimplemented method in order to write the logs in the console :
def write(self, line):
log, flag = cs.analyse_log_line(line)
log_type = cs.APPLICATION_MESSAGE_TYPE
self.logger_log_sent.emit(log, flag, log_type)
###############################################################################
# [Process class]
class Process(Core.QObject):
"""Class to upload the built code to a device."""
process_killed = Core.pyqtSignal()
process_log_sent = Core.pyqtSignal(str, str, str)
def __init__(self, process_type,
configuration=None, target=None, device=None, program=None):
"""
:param process_type:
:param configuration:
:param target:
:param device:
:param program:
-> Declare a Process object as a QObject derivative.
-> Give it a name and the necessary parameters for its type.
-> Generate a command for the system call by the Popen object that
manages the subprocess and allows to redirect the output.
-> The process runs into an independent thread.
-> A queue is used to collect the logs from the Popen output and
an other one is used to send it to the QTextEdit integrated console.
-> Logs flags are collected for information.
-> Exit code is initialized to default value. Must change in case of
normal exit, error or user interruption.
"""
super(Process, self).__init__()
self.type = process_type
self.name = None
self.config = configuration
self.target = target
self.device = device
self.program = program
if self.type == CLEAN:
self.command = self.generate_make_command(CLEAN_TARGET_KEY)
self.name = " - ".join([self.type.upper(),
self.config.name])
elif self.type == BUILD:
self.command = self.generate_make_command(BUILD_TARGET_KEY)
self.name = " - ".join([self.type.upper(), self.config.name,
self.target.name])
elif self.type == UPLOAD:
self.command = self.generate_make_command(UPLOAD_TARGET_KEY)
self.name = " - ".join([self.type.upper(), self.config.name,
self.target.name, self.device.name])
else:
self.command = self.generate_program_command()
self.name = " - ".join([self.type.upper(),
self.program.name])
self.subprocess = Core.QProcess()
self.subprocess.setProcessChannelMode(Core.QProcess.MergedChannels)
self.subprocess.setReadChannel(Core.QProcess.StandardOutput)
self.process_killed.connect(self.emergency_stop)
self.exit_code = DEFAULT_EXIT_CODE
self.flags = {cs.ERROR_FLAG: 0,
cs.WARNING_FLAG: 0,
cs.INFO_FLAG: 0}
def generate_make_command(self, target_key):
"""
:param target_key:
-> Generate a system command to compile files by a Makefile and
putting the right arguments if given.
"""
if self.config is not None:
aircraft_term = CONF_FLAG_NAME + "=" + self.config.name
if self.target is not None:
target_key = self.target.name + target_key
command_terms = ["make", "-C", env.PAPARAZZI_HOME, "-f",
AC_MAKEFILE_NAME,
aircraft_term, target_key]
if self.device is not None and self.device.variable[1]:
device_term = DEVICE_FLAG_NAME + "=" + self.device.variable[1]
command_terms.insert(-1, device_term)
return " ".join(command_terms)
def generate_program_command(self):
"""
-> Generate a system command to run a program and add its options
if it has some.
"""
if self.program is not None:
full_command = os.path.join(env.PAPARAZZI_HOME,
self.program.command)
for option in self.program.options:
if type(option) is tuple:
flag, value = option
if value == CONF_FLAG:
full_command += " " + flag + " " + self.config.name
elif value == TARGET_FLAG:
full_command += " " + flag + " " + self.target.name
elif value == CONF_ID_FLAG:
full_command += " " + flag + " " + self.config.id
else:
full_command += " " + flag + " " + value
else:
full_command += " " + option
return full_command
def check_before_start(self):
# TODO IF NECESSARY !!!
return self == self
def start(self):
"""
-> Start the thread => start the worker => call the run method.
"""
LOGGER.info("'%s' process running ... (command='%s')",
self.name, self.command)
self.subprocess.start(self.command)
self.subprocess.readyReadStandardOutput.connect(self.send_text)
self.subprocess.finished.connect(self.finish_process)
def send_text(self):
"""
-> Analyse an process output line to find a flag in it.
-> Send the item by the sending queue object.
-> Collect the flag found.
"""
q_byte_array = self.subprocess.readAllStandardOutput()
string = str(q_byte_array, encoding="utf-8").strip()
for line in string.split("\n"):
log, flag = cs.analyse_log_line(line)
log_type = cs.PROCESS_MESSAGE_TYPE
self.process_log_sent.emit(log, flag, log_type)
if flag != cs.DEFAULT_FLAG:
self.flags[flag] += 1
def finish_process(self):
"""
-> If the process finished, get the exit code.
-> Else, the process crashed...
"""
if self.subprocess.exitStatus() == Core.QProcess.NormalExit:
self.exit_code = self.subprocess.exitCode()
LOGGER.info("'%s' process finished with exit code %s.\n",
|
gordonb3/domoticz | plugins/examples/HTTP.py | Python | gpl-3.0 | 8,304 | 0.00855 | # Google Home page example
#
# Author: Dnpwwo, 2017 - 2018
#
# Demonstrates HTTP/HTTPS connectivity.
# After connection it performs a GET on www.google.com and receives a 302 (Page Moved) response
# It then does a subsequent GET on the Location specified in the 302 response and receives a 200 response.
#
"""
<plugin key="Google" name="Google Home page example" author="Dnpwwo" version="2.2.7" externallink="https://www.google.com">
<description>
<h2>Google Home page example</h2><br/>
Will hit the supplied URL every 5 heartbeats in the request protocol. Redirects are handled.
</description>
<params>
<param field="Address" label="IP Address" width="200px" required="true" default="www.google.com"/>
<param field="Mode1" label="Protocol" width="75px">
<options>
<option label="HTTPS" value="443"/>
<option label="HTTP" value="80" default= | "true" />
</options>
</param>
<param field="Mode6" label="Debug" width="150px">
<options>
<option label="None" value="0" default="true" />
<option label="Python Only" value="2"/>
<option label="Basic Debugging" value="62"/>
<option label="Basic+Messages" value="126"/>
<option label="Connections Only" value="16"/>
| <option label="Connections+Python" value="18"/>
<option label="Connections+Queue" value="144"/>
<option label="All" value="-1"/>
</options>
</param>
</params>
</plugin>
"""
import Domoticz
class BasePlugin:
httpConn = None
runAgain = 6
disconnectCount = 0
sProtocol = "HTTP"
def __init__(self):
return
def onStart(self):
if Parameters["Mode6"] != "0":
Domoticz.Debugging(int(Parameters["Mode6"]))
DumpConfigToLog()
if (Parameters["Mode1"] == "443"): self.sProtocol = "HTTPS"
self.httpConn = Domoticz.Connection(Name=self.sProtocol+" Test", Transport="TCP/IP", Protocol=self.sProtocol, Address=Parameters["Address"], Port=Parameters["Mode1"])
self.httpConn.Connect()
def onStop(self):
Domoticz.Log("onStop - Plugin is stopping.")
def onConnect(self, Connection, Status, Description):
if (Status == 0):
Domoticz.Debug("Google connected successfully.")
sendData = { 'Verb' : 'GET',
'URL' : '/',
'Headers' : { 'Content-Type': 'text/xml; charset=utf-8', \
'Connection': 'keep-alive', \
'Accept': 'Content-Type: text/html; charset=UTF-8', \
'Host': Parameters["Address"]+":"+Parameters["Mode1"], \
'User-Agent':'Domoticz/1.0' }
}
Connection.Send(sendData)
else:
Domoticz.Log("Failed to connect ("+str(Status)+") to: "+Parameters["Address"]+":"+Parameters["Mode1"]+" with error: "+Description)
def onMessage(self, Connection, Data):
DumpHTTPResponseToLog(Data)
strData = Data["Data"].decode("utf-8", "ignore")
Status = int(Data["Status"])
LogMessage(strData)
if (Status == 200):
if ((self.disconnectCount & 1) == 1):
Domoticz.Log("Good Response received from Google, Disconnecting.")
self.httpConn.Disconnect()
else:
Domoticz.Log("Good Response received from Google, Dropping connection.")
self.httpConn = None
self.disconnectCount = self.disconnectCount + 1
elif (Status == 302):
Domoticz.Log("Google returned a Page Moved Error.")
sendData = { 'Verb' : 'GET',
'URL' : Data["Headers"]["Location"],
'Headers' : { 'Content-Type': 'text/xml; charset=utf-8', \
'Connection': 'keep-alive', \
'Accept': 'Content-Type: text/html; charset=UTF-8', \
'Host': Parameters["Address"]+":"+Parameters["Mode1"], \
'User-Agent':'Domoticz/1.0' },
}
Connection.Send(sendData)
elif (Status == 400):
Domoticz.Error("Google returned a Bad Request Error.")
elif (Status == 500):
Domoticz.Error("Google returned a Server Error.")
else:
Domoticz.Error("Google returned a status: "+str(Status))
def onCommand(self, Unit, Command, Level, Hue):
Domoticz.Debug("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
def onDisconnect(self, Connection):
Domoticz.Log("onDisconnect called for connection to: "+Connection.Address+":"+Connection.Port)
def onHeartbeat(self):
#Domoticz.Trace(True)
if (self.httpConn != None and (self.httpConn.Connecting() or self.httpConn.Connected())):
Domoticz.Debug("onHeartbeat called, Connection is alive.")
else:
self.runAgain = self.runAgain - 1
if self.runAgain <= 0:
if (self.httpConn == None):
self.httpConn = Domoticz.Connection(Name=self.sProtocol+" Test", Transport="TCP/IP", Protocol=self.sProtocol, Address=Parameters["Address"], Port=Parameters["Mode1"])
self.httpConn.Connect()
self.runAgain = 6
else:
Domoticz.Debug("onHeartbeat called, run again in "+str(self.runAgain)+" heartbeats.")
#Domoticz.Trace(False)
global _plugin
_plugin = BasePlugin()
def onStart():
global _plugin
_plugin.onStart()
def onStop():
global _plugin
_plugin.onStop()
def onConnect(Connection, Status, Description):
global _plugin
_plugin.onConnect(Connection, Status, Description)
def onMessage(Connection, Data):
global _plugin
_plugin.onMessage(Connection, Data)
def onCommand(Unit, Command, Level, Hue):
global _plugin
_plugin.onCommand(Unit, Command, Level, Hue)
def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):
global _plugin
_plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile)
def onDisconnect(Connection):
global _plugin
_plugin.onDisconnect(Connection)
def onHeartbeat():
global _plugin
_plugin.onHeartbeat()
# Generic helper functions
def LogMessage(Message):
if Parameters["Mode6"] == "File":
f = open(Parameters["HomeFolder"]+"http.html","w")
f.write(Message)
f.close()
Domoticz.Log("File written")
def DumpConfigToLog():
for x in Parameters:
if Parameters[x] != "":
Domoticz.Debug( "'" + x + "':'" + str(Parameters[x]) + "'")
Domoticz.Debug("Device count: " + str(len(Devices)))
for x in Devices:
Domoticz.Debug("Device: " + str(x) + " - " + str(Devices[x]))
Domoticz.Debug("Device ID: '" + str(Devices[x].ID) + "'")
Domoticz.Debug("Device Name: '" + Devices[x].Name + "'")
Domoticz.Debug("Device nValue: " + str(Devices[x].nValue))
Domoticz.Debug("Device sValue: '" + Devices[x].sValue + "'")
Domoticz.Debug("Device LastLevel: " + str(Devices[x].LastLevel))
return
def DumpHTTPResponseToLog(httpResp, level=0):
if (level==0): Domoticz.Debug("HTTP Details ("+str(len(httpResp))+"):")
indentStr = ""
for x in range(level):
indentStr += "----"
if isinstance(httpResp, dict):
for x in httpResp:
if not isinstance(httpResp[x], dict) and not isinstance(httpResp[x], list):
Domoticz.Debug(indentStr + ">'" + x + "':'" + str(httpResp[x]) + "'")
else:
Domoticz.Debug(indentStr + ">'" + x + "':")
DumpHTTPResponseToLog(httpResp[x], level+1)
elif isinstance(httpResp, list):
for x in httpResp:
Domoticz |
marvinglenn/asnn-mda | asnn_reduce_spamrating.py | Python | gpl-2.0 | 7,003 | 0.016422 | #!/usr/bin/env python
'''
This file is part of the ASNN eMail Suite.
ASNN-MDA is free software: you can redistribute it and/or modify
it under the terms of the version 2 GNU General Public License
as published by the Free Software Foundation.
ASNN-MDA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
If you don't know about the GNU General Public License by now,
you can find information about it at http://www.gnu.org/licenses/
'''
# ASNN Reduction of Spam-Rating Process
'''
The intention of this tool is that it be run on a regular interval (like in a
'cron' job) to slightly reduce the spam-rating of each IP address or domain.
This is so the current spam-rating of each entry will slowly taper off unless
the entry is further bolstered by improper activity of the entity associated
with the entry.
Because the 'spamrating' field is an integer number, the minimum amount that
rating should be reduced is one unit per cycle. If this is not done, then
a 'spamrating' will never reach zero. If that is the intention, use the
provided option when invoking this program.
The rating may be reduced by a percentage or fixed integer amount. If reduced
by a decimal percentage, the reduction value is truncated to the next lower
integer value, but a minimum of '1' unless the 'zero' option is selected.
'''
# -----------------------------------------------------------------------------
def convert_from_int(numericalip):
addressstr = ''
for index in range(4):
addressstr = (str(numericalip & 255) + '.' + addressstr)
numericalip /= 256
return addressstr[:-1]
# -----------------------------------------------------------------------------
import os, sys, time, socket, argparse
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asnn.settings")
import django
django.setup()
from asnn import models
from django.core.exceptions import ObjectDoesNotExist
parser = argparse.ArgumentParser()
parser.add_argument("-v", dest='debug', action="count", default = 0, \
help="debug vebosity")
parser.add_argument("-t", dest='testrun', action = "store_true", \
help="test run, don't store values")
parser.add_argument("-z", dest='zero', action = "store_true", \
help="don't enforce minimum reduction of '1'")
parser.add_argument("-i", dest='ipaddrs', action = "store_true", \
help="process against IP addresses")
parser.add_argument("-d", dest='domains', action = "store_true", \
help="process against domains")
parser.add_argument("-l", dest='logfile', type=str, \
default = './asnn_reduce_spamrating.log', \
help="log file to save to")
parser.add_argument("-L", dest='nolog', action = "store_true", \
help="don't save to log file")
parser.add_argument("-p", dest='perreduct', type=str, \
default = None, help="percentage reduction of value")
parser.add_argument("-c", dest='linreduct', type=str, \
default = None, help="linear reduction of value")
args = parser.parse_args()
debug = args.debug
if debug > 0:
print >> sys.stderr, "Debug level set to", debug
if args.testrun:
print >> sys.stderr, "Test run, nothing to be saved"
if not args.ipaddrs and not args.domains:
print >> sys.stderr, "you need to specify whether to process against IPs or Domains"
sys.exit(1)
if not args.perreduct and not args.linreduct:
print >> sys.stderr, "you need to specify the reduction amount"
sys.exit(1)
if args.perreduct and args.linreduct:
print >> sys.stderr, "linear and percentage reduction cannot be used together"
sys.exit(1)
if args.perreduct:
try:
perreduct = float(args.perreduct) / 100.0
except:
print >> sys.stderr, "percentage reduction value is bad"
sys.exit(1)
else:
linreduct = None
if args.linreduct:
try:
linreduct = int(args.linreduct)
except:
print >> sys.stderr, "percentage reduction value is bad"
sys.exit(1)
else:
perreduct = None
if not args.nolog:
logfile = open(args.logfile, 'a')
else:
logfile = None
# -------------------------------------------
if args.ipaddrs:
if debug > 0:
print >> sys.stderr, "Processing against IP addresses"
if logfile | :
logfile.write(time.strftime('%Y-%m | -%d %H:%M:%S', time.gmtime()) + \
' PID ' + str(os.getpid()) + ': IP addresses: ')
if args.testrun:
logfile.write('test run: ')
if perreduct:
logfile.write('percentage reduction of ' + str(perreduct * 100) + '%\n')
if linreduct:
logfile.write('linear reduction of ' + str(linreduct) + '\n')
for dobj in models.IPs.objects.filter(spamrating__gt = 0):
if debug > 1:
if dobj.addrlower == dobj.addrupper:
print >> sys.stderr, "address", convert_from_int(dobj.addrlower),
else:
print >> sys.stderr, "range", convert_from_int(dobj.addrlower) + '-' + \
convert_from_int(dobj.addrupper),
if perreduct:
if not args.zero:
reduction = max(int(perreduct * dobj.spamrating), 1)
else:
reduction = int(perreduct * dobj.spamrating)
else: # assumes 'linreduct' exists
reduction = linreduct
if debug > 1:
print >> sys.stderr, "reduced by", reduction
if not args.testrun:
dobj.spamrating -= reduction
dobj.save()
if logfile:
logfile.write(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) + \
' PID ' + str(os.getpid()) + ': completed run\n')
# -------------------------------------------
if args.domains:
if debug > 0:
print >> sys.stderr, "Processing against domains"
if logfile:
logfile.write(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) + \
' PID ' + str(os.getpid()) + ': Domains: ')
if args.testrun:
logfile.write('test run: ')
if perreduct:
logfile.write('percentage reduction of ' + str(perreduct * 100) + '%\n')
if linreduct:
logfile.write('linear reduction of ' + str(linreduct) + '\n')
for dobj in models.Domains.objects.filter(spamrating__gt = 0):
if debug > 1:
print >> sys.stderr, dobj.domain,
if perreduct:
if not args.zero:
reduction = max(int(perreduct * dobj.spamrating), 1)
else:
reduction = int(perreduct * dobj.spamrating)
else: # assumes 'linreduct' exists
reduction = linreduct
if debug > 1:
print >> sys.stderr, "reduced by", reduction
if not args.testrun:
dobj.spamrating -= reduction
dobj.save()
if logfile:
logfile.write(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) + \
' PID ' + str(os.getpid()) + ': completed run\n')
|
Dawny33/Code | Hackerrank/Back2School/stockmax.py | Python | gpl-3.0 | 299 | 0.010033 | T = int(input | ())
for _ in range(T):
T1 = int(input())
s = map(int, raw_input().split())
count = 0
if T1==len(s):
for i in range(len(s)-1):
if s[i+1]>s[i]:
count = s[i]-s[i+1]
if s[i+1]<=s[i]:
count = 0
print count
| |
Darthone/bug-free-octo-parakeet | technical-analysis/oneDayOHLC/sma_ema_vol_ohlc_rsi.py | Python | mit | 6,893 | 0.025098 | #!/usr/bin/env python
import matplotlib
# matplotlib.use('Agg')
import time
import datetime
import numpy as np
import matplotlib.pyplot as mplot
import matplotlib.ticker as mticker
import matplotlib.dates as mdates
from matplotlib.finance import candlestick_ochl
# custom matplotlib parameters
matplotlib.rcParams.update({'font.size': 9})
import urllib2
stocks = 'AAPL', 'FB', 'UAA'
'''
compute the n period relative strength indicator
n=14 (periods) as a default developed by J. Welles Wilder
momentum oscillator that measures the speed and change of price movements
'''
def rsiFunction(prices, n=14):
deltas = np.diff(prices)
seed = deltas[:n+1]
up = seed[seed >= 0].sum()/n
down = -seed[seed < 0].sum()/n
rs = up/down
rsi = np.zeros_like(prices)
rsi[:n] = 100. - 100./(1. + rs)
for i in range(n, len(prices)):
delta = deltas[i-1] # diff is 1 shorter
if delta > 0:
upval = delta
downval = 0.
else:
upval = 0.
downval = -delta
up = (up * (n - 1) + upval)/n
down = (down * (n - 1) + downval)/n
rs = up/down
rsi[i] = 100. - 100./(1. + rs)
return rsi
def movingaverage(values, window):
weights = np.repeat(1.0, window) / window
# line smoothening
smas = np.convolve(values, weights, 'valid')
# list of values being returned as numpy array
return smas
def ema(values, window):
weights = np.exp(np.linspace(-1., 0., window))
weights /= weights.sum()
a = np.convolve(values, weights, mode='full')[:len(values)]
a[:window] = a[window]
return a
'''
macd line = 12ema - 26ema
signal line = 9ema of the macd line
histogram = macd line - signal line
12 - two trading weeks
26 - one trading month
9 - one and half trading week
http://www.forexabode.com/forex-school/technical-indicators/macd/
5-day trading week -> 10,22,7 or 10,22,8
'''
def computeMACD(x, slow=26, fast=12):
    """Return (slow EMA, fast EMA, MACD line) for the price series *x*.

    The MACD line is the fast EMA minus the slow EMA (classically the
    12- and 26-period EMAs).

    Fix: the original body immediately overwrote the parameters with the
    module-level globals ``nslow``/``nfast`` (raising NameError when they
    were unset, and silently ignoring any keyword arguments passed by the
    caller). The parameters are now honoured; callers that want the
    user-entered periods should pass them explicitly, e.g.
    ``computeMACD(closePrice, slow=nslow, fast=nfast)``.
    """
    emaslow = ema(x, slow)
    emafast = ema(x, fast)
    return emaslow, emafast, emafast - emaslow
def graphData(stock, MA1, MA2, dateRange):
    """Pull OHLC/volume data for *stock* from the Yahoo chart API and plot
    candlesticks with SMA overlays, RSI, volume and MACD panels.

    MA1/MA2 are the two simple-moving-average windows; dateRange is the
    Yahoo range string (e.g. '1y', '10d'). Python 2 code; also reads the
    module globals nslow/nfast/nema for the MACD panel.
    """
    try:
        try:
            print 'pulling data on', stock
            urlToVisit = 'http://chartapi.finance.yahoo.com/instrument/1.0/' + stock + '/chartdata;type=quote;range=' + dateRange + '/csv'
            stockFile = []
            try:
                sourceCode = urllib2.urlopen(urlToVisit).read()
                splitSource = sourceCode.split('\n')
                for eachLine in splitSource:
                    splitLine = eachLine.split(',')
                    # Keep only 6-column data rows, skipping the header
                    # section (lines containing 'values').
                    if len(splitLine) == 6:
                        if 'values' not in eachLine:
                            stockFile.append(eachLine)
            except Exception, e:
                print str(e), 'error in organization of pulled data'
        except Exception, e:
            print str(e), 'error in pulling price data'
        # load values and format the date
        date, closePrice, highPrice, lowPrice, openPrice, volume = np.loadtxt(stockFile, delimiter=',', unpack=True, converters={0: mdates.strpdate2num('%Y%m%d')})
        # add dates to data for candlestick to be plotted
        i = 0
        k = len(date)
        candles = []
        while i < k:
            newLine = date[i], openPrice[i], closePrice[i], highPrice[i], lowPrice[i], volume[i]
            candles.append(newLine)
            i = i + 1
        av1 = movingaverage(closePrice, MA1)
        av2 = movingaverage(closePrice, MA2)
        # starting point, plot exactly same amount of data
        SP = len(date[MA2-1:])
        label_1 = str(MA1) + ' SMA'
        label_2 = str(MA2) + ' SMA'
        f = mplot.figure()
        # Main price panel of the 6x4 grid (rows 1-4).
        a = mplot.subplot2grid((6,4), (1,0), rowspan=4, colspan=4)
        # using matplotlib's candlestick charting
        candlestick_ochl(a, candles[-SP:], width=0.5, colorup='g', colordown='r')
        # moving average applied to data
        a.plot(date[-SP:], av1[-SP:], label=label_1, linewidth=1.5)
        a.plot(date[-SP:], av2[-SP:], label=label_2, linewidth=1.5)
        mplot.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
        mplot.ylabel('Stock Price ($) and Volume')
        mplot.legend(loc=9, ncol=2, prop={'size':7}, fancybox=True)
        a.grid(True)
        minVolume = 0
        # RSI panel (top row).
        rsiCol = '#1a8782'
        posCol = '#386d13'
        negCol = '#8f2020'
        c = mplot.subplot2grid((6,4), (0,0), sharex=a, rowspan=1, colspan=4)
        rsi = rsiFunction(closePrice)
        c.plot(date[-SP:], rsi[-SP:], rsiCol, linewidth=1.5)
        c.axhline(70, color=negCol)
        c.axhline(30, color=posCol)
        c.fill_between(date[-SP:], rsi[-SP:], 70, where=(rsi[-SP:]>=70), facecolor=negCol, edgecolor=negCol)
        c.fill_between(date[-SP:], rsi[-SP:], 30, where=(rsi[-SP:]<=30), facecolor=posCol, edgecolor=posCol)
        # 70 --> red, overbought
        # 30 --> green, oversold
        c.text(0.015, 0.95, 'RSI (14)', va='top', transform=c.transAxes)
        c.tick_params(axis='x')
        c.tick_params(axis='y')
        c.set_yticks([30,70])
        # mplot.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='lower'))
        # fit 10 dates into graph and formatt properly
        a.xaxis.set_major_locator(mticker.MaxNLocator(10))
        a.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        # Volume drawn on a twinned y-axis under the price candles.
        avol = a.twinx()
        avol.fill_between(date[-SP:], minVolume, volume[-SP:], facecolor='b', alpha=.5)
        avol.axes.yaxis.set_ticklabels([])
        avol.grid(False)
        avol.set_ylim(0,2*volume.max())
        avol.tick_params(axis='x')
        avol.tick_params(axis='y')
        # MACD panel (bottom row); periods come from the module globals
        # nslow/nfast/nema set by the interactive prompts.
        d = mplot.subplot2grid((6,4), (5,0), sharex=a, rowspan=1, colspan=4)
        d.tick_params(axis='x')
        d.tick_params(axis='y')
        # nslow = 26
        # nfast = 12
        # nema = 9
        emaslow, emafast, macd = computeMACD(closePrice)
        ema9 = ema(macd, nema)
        d.plot(date[-SP:], macd[-SP:])
        d.plot(date[-SP:], ema9[-SP:])
        d.fill_between(date[-SP:], macd[-SP:]-ema9[-SP:], 0, alpha=0.5)
        d.text(0.015, 0.95, 'MACD ' + str(nfast) + ' ' + str(nslow) + ' ' + str(nema), va='top', transform=d.transAxes)
        d.yaxis.set_major_locator(mticker.MaxNLocator(nbins=5, prune='upper'))
        # rotating angles by 90 degrees to fit properly
        for label in d.xaxis.get_ticklabels():
            label.set_rotation(45)
        # subplot profile parameters
        mplot.subplots_adjust(left=.10, bottom=.19, right=.93, top=.95, wspace=.20, hspace=.07)
        # plot profiling
        mplot.xlabel('Date (YYYY-MM-DD)')
        # mplot.ylabel('Stock Price ($)')
        mplot.suptitle(stock + ' Stock Price')
        # remove x axis from first graph, used at bottom already
        mplot.setp(c.get_xticklabels(), visible=False)
        mplot.setp(a.get_xticklabels(), visible=False)
        # adjusting plots in a clean manner
        mplot.subplots_adjust(left=.09, bottom=.18, right=.94, top=.94, wspace=.20, hspace=0)
        mplot.show()
        f.savefig('financial_graph.png')
    except Exception, e:
        print 'error in main:', str(e)
# --- interactive entry point (Python 2) ---
# Prompts with sensible defaults; nslow/nfast/nema are module globals that
# graphData() (and the original computeMACD) read directly.
stockToUse = raw_input('Stock to chart: ')
# Simple Moving Averages (SMA) - 10, 30
sma1 = raw_input('SMA 1: ') or "10"
sma2 = raw_input('SMA 2: ') or "30"
# date range - 1y for 1 year, 10d for 10 days
dateRange = raw_input('Length of Process: ') or "1y"
# EMA Vars
nslow = raw_input('Slow EMA: ') or "26"
nfast = raw_input('Fast EMA: ') or "12"
nema = raw_input('EMA Signal: ') or "9"
nslow = int(nslow)
nfast = int(nfast)
nema = int(nema)
graphData(stockToUse, int(sma1), int(sma2), dateRange)
|
noironetworks/group-based-policy | gbpservice/neutron/extensions/apic_reuse_bd.py | Python | apache-2.0 | 1,646 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import extensions
from gbpservice._i18n import _
from gbpservice.neutron.extensions import group_policy as gp
CISCO_APIC_GBP_REUSE_BD_EXT = 'cisco_apic_gbp_reuse_bd'

# Attribute map merged into the GBP L2-policy resource: adds a create-only
# (`allow_post` without `allow_put`) `reuse_bd` UUID attribute.
EXTENDED_ATTRIBUTES_2_0 = {
    gp.L2_POLICIES: {
        'reuse_bd': {
            'allow_post': True, 'allow_put': False, 'default': None,
            'validate': {'type:uuid_or_none': None},
            'is_visible': True},
    },
}
class Apic_reuse_bd(extensions.ExtensionDescriptor):
    """Extension descriptor for the GBP reuse-BD L2-policy attribute."""

    @classmethod
    def get_name(cls):
        return "APIC GBP Reuse BD Extension"

    @classmethod
    def get_alias(cls):
        return CISCO_APIC_GBP_REUSE_BD_EXT

    @classmethod
    def get_description(cls):
        return _("This extension enables creating L2 policy objects that "
                 "use the same BridgeDomain on APIC")

    @classmethod
    def get_updated(cls):
        return "2016-11-11T04:20:00-00:00"

    def get_extended_resources(self, version):
        # Only API version 2.0 carries the extended attribute map.
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
|
SgtFlame/pyzen-fabric | src/zen/fabric/batch_service_client.py | Python | mit | 200 | 0.01 | from zen.fabric.se | rvice_client import ServiceClient
# Marker subclass: currently adds no behaviour beyond ServiceClient.
class BatchServiceClient(ServiceClient):
    ''' Batch Service Client
        Service Client for batch / head-less processes
    '''
    pass
|
teeheee/RobocupSoccerSimulator | debugger.py | Python | gpl-3.0 | 4,848 | 0.01382 | import pygame
from gameconfig import gc
import numpy as np
# This is an optional visualisation of the sensor values of an specific robot.
# It can be disabled via the config.yml Gui->Debugger->False
class Debugger:
    """Side-panel renderer that draws one robot's sensor readings (motors,
    ground/ball IR rings, compass, ultrasound, light barrier or raw Pixy
    blocks) in the area of the display to the right of the field."""

    def __init__(self, display, robots):
        # display: pygame surface; robots: list of robot objects to inspect.
        self._display = display
        self._robots = robots
        self._id = 0  # index of the robot currently in focus
        # Field geometry derived from the surface: assumes a square field on
        # the left, panel on the right -- TODO confirm against Gui setup.
        self.field_width = self._display.get_height();
        self.field_height = self._display.get_width()-self._display.get_height();
        self.ppcm = self._display.get_height() / self.field_width  # pixels per cm
        self.font = pygame.font.SysFont('Calibri', 20, True, False)
        # Decagon outline used to draw the focused-robot marker.
        self.polygonList = [[3,5],[5,3],[3,1],[3,-1],[5,-3],[3,-5],[-3,-5],[-5,-3],[-5,3],[-3,5]]
        self._pixyModeFlag = False

    def setFocusedRobot(self, id):
        # Select which robot's sensors are visualised.
        self._id = id

    def togglePixyMode(self):
        # Switch between the sensor overview and the raw Pixy-camera view.
        self._pixyModeFlag = not self._pixyModeFlag

    def draw(self):
        """Render the debug panel for the focused robot onto the display."""
        RED = 255, 0, 0
        BLACK = 0, 0, 0
        BLUE = 0, 0, 255
        # Panel centre, in pixels.
        pos = int((self.field_height + self.field_width / 2) * self.ppcm), \
              int(self.field_width / 2 * self.ppcm)
        # ID
        tmp = (pos[0] - 20, pos[1] - 250)
        state = self._robots[self._id].getRobotState()
        text = self.font.render("id: " + str(self._id + 1) + " state: " + str(state), True, BLACK)
        self._display.blit(text, tmp)
        if self._pixyModeFlag:
            # Pixy mode: draw camera blocks inside a 320x200 frame and stop.
            blocks = self._robots[self._id].getPixy()
            #Resolution is 320x200
            topleft = [int(pos[0]+100-320),int(pos[1]+100-200)]
            Rect = [topleft[0],topleft[1],320,200]
            pygame.draw.rect(self._display,BLACK,Rect,1)
            for block in blocks:
                point = [int(topleft[0]+block["y"]),int(topleft[1]+block["x"])]
                # signature 1: ball (circle); 2/3: goal markers (squares).
                if block["signature"] == 1:
                    pygame.draw.circle(self._display, RED, point , int(10 * self.ppcm))
                elif block["signature"] == 2:
                    pygame.draw.rect(self._display, BLACK, [point[0]-5,point[1]-5,10,10])
                elif block["signature"] == 3:
                    pygame.draw.rect(self._display, BLACK,[point[0]-5,point[1]-5,10,10])
            return
        # ROBOT marker: blue for robots 0/1 (one team), red otherwise.
        pos = int((self.field_height+self.field_width/2) * self.ppcm), \
              int(self.field_width/2* self.ppcm)
        newpolygon = []
        scale = 15 * self.ppcm
        for p in self.polygonList:
            newpolygon.append([(scale*p[0])+pos[0], (scale*p[1])+pos[1]])
        if self._id == 0 or self._id == 1:
            pygame.draw.polygon(self._display, BLUE , newpolygon, 0)
        else:
            pygame.draw.polygon(self._display, RED , newpolygon, 0)
        # Motors: four wheel power values (scaled to percent) at the corners.
        motors = self._robots[self._id]._motors*100
        tmp = (pos[0] + 120-20, pos[1] + 120)
        text = self.font.render("m0: "+str(int(motors[0])), True, BLACK)
        self._display.blit(text, tmp)
        tmp = (pos[0] - 120-30, pos[1] + 120)
        text = self.font.render("m1: "+str(int(motors[1])), True, BLACK)
        self._display.blit(text, tmp)
        tmp = (pos[0] - 120-30, pos[1] - 120)
        text = self.font.render("m2: "+str(int(motors[2])), True, BLACK)
        self._display.blit(text, tmp)
        tmp = (pos[0] + 120-20, pos[1] - 120)
        text = self.font.render("m3: "+str(int(motors[3])), True, BLACK)
        self._display.blit(text, tmp)
        # Ground (Boden) sensors: 16 values on an inner ring.
        boden = self._robots[self._id].getBodenSensors()
        for i in range(0,16):
            winkel = np.deg2rad(i*360/16)
            tmp = (pos[0]+np.cos(winkel)*100-20, pos[1]+np.sin(winkel)*100)
            text = self.font.render(str(i)+": "+str(int(boden[i])), True, BLACK)
            self._display.blit(text, tmp)
        # IR ball sensors: 16 values on an outer ring.
        ball = self._robots[self._id].getIRBall()
        for i in range(0,16):
            winkel = np.deg2rad(i*360/16)
            tmp = (pos[0]+np.cos(winkel)*200-20, pos[1]+np.sin(winkel)*200)
            text = self.font.render(str(i)+": "+str(int(ball[i])), True, BLACK)
            self._display.blit(text, tmp)
        # Compass heading.
        kompass = self._robots[self._id].getKompass()
        tmp = (pos[0] -40, pos[1] )
        text = self.font.render("cmp: " + str(int(kompass)), True, BLACK)
        self._display.blit(text, tmp)
        # Ultrasound: four range readings.
        US = self._robots[self._id].getUltraschall()
        for i in range(0,4):
            winkel = np.deg2rad(i*360/4)
            tmp = (pos[0]+np.cos(winkel)*200-30, pos[1]+np.sin(winkel)*160+20)
            text = self.font.render("US"+str(i)+": "+str(int(US[i])), True, BLACK)
            self._display.blit(text, tmp)
        # Light barrier: show "BALL" when the barrier is triggered.
        lb = self._robots[self._id].getLightBarrier()
        if lb:
            text = self.font.render("BALL", True, BLACK)
            self._display.blit(text, (20,20))
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/google/appengine/api/mail_service_pb.py | Python | bsd-3-clause | 25,492 | 0.020987 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""

# Newer proto1 runtimes expose ExtendableProtocolMessage (extension support);
# fall back to the plain ProtocolMessage base when it is unavailable.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.api.api_base_pb import *
import google.appengine.api.api_base_pb
class MailServiceError(ProtocolBuffer.ProtocolMessage):
  # Generated protobuf message (App Engine proto1 runtime). It carries no
  # fields of its own -- it only namespaces the mail-service ErrorCode enum.
  # Hand edits are limited to comments.

  OK = 0
  INTERNAL_ERROR = 1
  BAD_REQUEST = 2
  UNAUTHORIZED_SENDER = 3
  INVALID_ATTACHMENT_TYPE = 4
  INVALID_HEADER_NAME = 5
  INVALID_CONTENT_ID = 6

  # Reverse lookup from numeric code to symbolic name.
  _ErrorCode_NAMES = {
    0: "OK",
    1: "INTERNAL_ERROR",
    2: "BAD_REQUEST",
    3: "UNAUTHORIZED_SENDER",
    4: "INVALID_ATTACHMENT_TYPE",
    5: "INVALID_HEADER_NAME",
    6: "INVALID_CONTENT_ID",
  }

  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  ErrorCode_Name = classmethod(ErrorCode_Name)

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  def Equals(self, x):
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n

  def ByteSizePartial(self):
    n = 0
    return n

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # No fields: skip every tag in the wire stream.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.MailServiceError'
class MailAttachment(ProtocolBuffer.ProtocolMessage):
  # Generated protobuf message (App Engine proto1 runtime) describing a mail
  # attachment: required filename (tag 1) and data (tag 2), optional
  # contentid (tag 3). Hand edits are limited to comments.
  has_filename_ = 0
  filename_ = ""
  has_data_ = 0
  data_ = ""
  has_contentid_ = 0
  contentid_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def filename(self): return self.filename_

  def set_filename(self, x):
    self.has_filename_ = 1
    self.filename_ = x

  def clear_filename(self):
    if self.has_filename_:
      self.has_filename_ = 0
      self.filename_ = ""

  def has_filename(self): return self.has_filename_

  def data(self): return self.data_

  def set_data(self, x):
    self.has_data_ = 1
    self.data_ = x

  def clear_data(self):
    if self.has_data_:
      self.has_data_ = 0
      self.data_ = ""

  def has_data(self): return self.has_data_

  def contentid(self): return self.contentid_

  def set_contentid(self, x):
    self.has_contentid_ = 1
    self.contentid_ = x

  def clear_contentid(self):
    if self.has_contentid_:
      self.has_contentid_ = 0
      self.contentid_ = ""

  def has_contentid(self): return self.has_contentid_

  def MergeFrom(self, x):
    # Copy all set fields from another MailAttachment into self.
    assert x is not self
    if (x.has_filename()): self.set_filename(x.filename())
    if (x.has_data()): self.set_data(x.data())
    if (x.has_contentid()): self.set_contentid(x.contentid())

  def Equals(self, x):
    if x is self: return 1
    if self.has_filename_ != x.has_filename_: return 0
    if self.has_filename_ and self.filename_ != x.filename_: return 0
    if self.has_data_ != x.has_data_: return 0
    if self.has_data_ and self.data_ != x.data_: return 0
    if self.has_contentid_ != x.has_contentid_: return 0
    if self.has_contentid_ and self.contentid_ != x.contentid_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Both required fields (filename, data) must be set.
    initialized = 1
    if (not self.has_filename_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: filename not set.')
    if (not self.has_data_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: data not set.')
    return initialized

  def ByteSize(self):
    # +2 accounts for the two required fields' tag bytes.
    n = 0
    n += self.lengthString(len(self.filename_))
    n += self.lengthString(len(self.data_))
    if (self.has_contentid_): n += 1 + self.lengthString(len(self.contentid_))
    return n + 2

  def ByteSizePartial(self):
    n = 0
    if (self.has_filename_):
      n += 1
      n += self.lengthString(len(self.filename_))
    if (self.has_data_):
      n += 1
      n += self.lengthString(len(self.data_))
    if (self.has_contentid_): n += 1 + self.lengthString(len(self.contentid_))
    return n

  def Clear(self):
    self.clear_filename()
    self.clear_data()
    self.clear_contentid()

  def OutputUnchecked(self, out):
    # Wire tags: 10 = field 1 string, 18 = field 2 string, 26 = field 3 string.
    out.putVarInt32(10)
    out.putPrefixedString(self.filename_)
    out.putVarInt32(18)
    out.putPrefixedString(self.data_)
    if (self.has_contentid_):
      out.putVarInt32(26)
      out.putPrefixedString(self.contentid_)

  def OutputPartial(self, out):
    if (self.has_filename_):
      out.putVarInt32(10)
      out.putPrefixedString(self.filename_)
    if (self.has_data_):
      out.putVarInt32(18)
      out.putPrefixedString(self.data_)
    if (self.has_contentid_):
      out.putVarInt32(26)
      out.putPrefixedString(self.contentid_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_filename(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_data(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_contentid(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_filename_: res+=prefix+("FileName: %s\n" % self.DebugFormatString(self.filename_))
    if self.has_data_: res+=prefix+("Data: %s\n" % self.DebugFormatString(self.data_))
    if self.has_contentid_: res+=prefix+("ContentID: %s\n" % self.DebugFormatString(self.contentid_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kFileName = 1
  kData = 2
  kContentID = 3

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "FileName",
    2: "Data",
    3: "ContentID",
  }, 3)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.MailAttachment'
class MailHeader(ProtocolBuffer.ProtocolMessage):
has_name_ = 0
name_ = ""
has_value_ = 0
value_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def value(self): return self.value_
def set_v |
Ranjandas/firewallmanager | clusters/views.py | Python | gpl-3.0 | 223 | 0.008969 | from django.shortcuts import render
# Create y | our views here.
from django.http import HttpResponse
from django | .views.generic import ListView, DetailView, CreateView
class ClusterListView(ListView):
    # Generic Django list view over Cluster objects.
    # NOTE(review): `Cluster` is never imported in this module, so importing
    # this file raises NameError; add `from .models import Cluster` (exact
    # module path to be confirmed against the app's models).
    model = Cluster
|
artsy/docker-images | redis-migrate/migrate.py | Python | mit | 3,144 | 0.003499 | #!/usr/bin/env python
import os
import argparse
from urlparse import urlparse
import redis
from termcolor import cprint
# Behaviour flags read from the environment; any non-empty string enables
# the flag (DEBUG: verbose logging, DRY_RUN: no writes, CLEAN_UP: delete
# source keys after a successful restore).
DEBUG = os.environ.get("DEBUG")
DRY_RUN = os.environ.get("DRY_RUN")
CLEAN_UP = os.environ.get("CLEAN_UP")
# Normalised to a real bool because it is passed to redis RESTORE's
# `replace=` keyword.
if os.environ.get("REPLACE_DST_KEYS"):
    REPLACE_DST_KEYS = True
else:
    REPLACE_DST_KEYS = False
def connect_redis(conn_dict):
    """Build a StrictRedis client from a dict with 'host', 'port' and 'db'."""
    params = {key: conn_dict[key] for key in ('host', 'port', 'db')}
    return redis.StrictRedis(**params)
def conn_string_type(string):
    """argparse type: parse a redis://<host>:<port>/<db> URL.

    Returns {'host': str, 'port': int, 'db': int}; port defaults to 6379
    and db to 0 when omitted. Raises argparse.ArgumentTypeError on any
    malformed value.
    """
    expected = 'redis://<host>:<port>/<db>'
    parsed = urlparse(string)
    if parsed.scheme != "redis":
        raise argparse.ArgumentTypeError('incorrect format, should be: %s' % expected)
    port = parsed.port if parsed.port else "6379"
    db = parsed.path.strip("/") if parsed.path else "0"
    try:
        port = int(port)
        db = int(db)
    except ValueError:
        raise argparse.ArgumentTypeError('incorrect format, should be: %s' % expected)
    return {'host': parsed.hostname,
            'port': port,
            'db': db}
def migrate_redis(source, destination):
    """Copy every key from *source* to *destination* via DUMP/RESTORE.

    source/destination are {'host','port','db'} dicts (see
    conn_string_type). Honours the module flags: DRY_RUN (log only),
    DEBUG (per-key logging), CLEAN_UP (delete source key after a
    successful restore) and REPLACE_DST_KEYS (overwrite existing
    destination keys). Keys that fail to restore are counted and skipped.
    """
    if DRY_RUN:
        output_color = 'yellow'
        log_suffix = ' << DRY_RUN >>'
    else:
        output_color = 'green'
        log_suffix = ''
    cprint("Migrating %s:%s/%s to %s:%s/%s...%s" % (source['host'], source['port'], source['db'], destination['host'], destination['port'], destination['db'], log_suffix), output_color)
    src = connect_redis(source)
    dst = connect_redis(destination)
    keys = src.keys('*')
    errors = 0
    for key in keys:
        ttl = src.ttl(key)
        # we handle TTL command returning -1 (no expire) or -2 (no key)
        if ttl < 0:
            ttl = 0
        if DEBUG or DRY_RUN:
            cprint("Dumping key: %s with TTL %ss%s" % (key, ttl, log_suffix), output_color)
        value = src.dump(key)
        if not DRY_RUN:
            if DEBUG:
                cprint("Restoring key: %s with TTL %sms" % (key, ttl * 1000), output_color)
            try:
                # TTL command returns the key's ttl value in seconds but restore expects it in milliseconds!
                dst.restore(key, ttl * 1000, value, replace=REPLACE_DST_KEYS)
            except (redis.exceptions.ResponseError, redis.exceptions.DataError):
                cprint("! Failed to restore key: %s" % key, 'red')
                errors += 1
                continue # Don't delete the key in src if it failed to restore - move on to the next iteration
        if CLEAN_UP:
            if DEBUG:
                cprint("Deleting source key: %s" % key, output_color)
            src.delete(key)
    if not DRY_RUN:
        cprint("Migrated %d keys" % (len(keys) - errors), output_color)
def run():
    """CLI entry point: parse the source/destination redis URLs and migrate."""
    parser = argparse.ArgumentParser()
    for positional in ('source', 'destination'):
        parser.add_argument(positional, type=conn_string_type)
    args = parser.parse_args()
    migrate_redis(args.source, args.destination)


if __name__ == '__main__':
    run()
|
ptphp/PyLib | src/dev/case/timethread.py | Python | apache-2.0 | 2,356 | 0.014856 | # -*- coding:utf-8-*-
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from time import *
import sys
class Windows(QDialog):
    """Stopwatch dialog: Start/Stop buttons plus a label updated once a
    second by the Timer worker thread's updateTime() signal (PyQt4)."""

    def __init__(self, parent=None):
        super(Windows, self).__init__(parent)
        self.startButton = QPushButton("Start")
        self.stopButton = QPushButton("Stop")
        self.stopButton.setEnabled(False)
        self.statusLable = QLabel("Please click \"start\"")
        self.statusLable.setFrameStyle(QFrame.StyledPanel |
                                       QFrame.Plain)
        topLayout = QHBoxLayout()
        topLayout.addWidget(self.startButton)
        topLayout.addWidget(self.stopButton)
        layout = QVBoxLayout()
        layout.addLayout(topLayout)
        layout.addWidget(self.statusLable)
        self.timer = Timer()
        # Old-style PyQt4 signal wiring.
        self.connect(self.startButton, SIGNAL("clicked()")
                     , self.start)
        self.connect(self.stopButton, SIGNAL("clicked()")
                     , self.stop)
        self.connect(self.timer, SIGNAL("updateTime()")
                     , self.updateTime)
        self.setLayout(layout)
        self.setWindowTitle("Timer")
        self.setWindowFlags(Qt.WindowMinimizeButtonHint)

    def updateTime(self):
        # Slot for the worker's updateTime() signal: show and advance the
        # elapsed-seconds counter.
        self.statusLable.setText("Time: %s s" % QString.number(self.sec))
        self.sec += 1

    def start(self):
        # Reset the counter and hand control to the worker thread.
        self.sec = 0
        self.startButton.setEnabled(False)
        self.stopButton.setEnabled(True)
        self.timer.start()

    def stop(self):
        self.timer.stop()
        self.stopButton.setEnabled(False)
        self.startButton.setEnabled(True)
        self.statusLable.setText("Timer stoped.")
class Timer(QThread):
    """Worker thread that emits updateTime() once a second until stopped.

    The `stoped` flag is shared between the GUI thread and run(); writes
    are guarded by a QMutex.
    """

    def __init__(self, parent=None):
        super(Timer, self).__init__(parent)
        self.stoped = False
        self.mutex = QMutex()  # guards self.stoped across threads

    def run(self):
        with QMutexLocker(self.mutex):
            self.stoped = False
        while True:
            if self.stoped:
                return
            self.emit(SIGNAL("updateTime()"))
            sleep(1)

    def stop(self):
        """Request the worker loop to exit (takes effect within one tick)."""
        with QMutexLocker(self.mutex):
            self.stoped = True

    def isStoped(self):
        # Fix: the original read `sellf.mutex` (typo), raising NameError
        # whenever this accessor was called.
        with QMutexLocker(self.mutex):
            return self.stoped
# Application bootstrap: build the dialog and enter the Qt event loop.
# NOTE(review): no `if __name__ == "__main__"` guard -- this runs on import.
app = QApplication(sys.argv)
windows = Windows()
windows.show()
app.exec_()
ar4s/django | tests/forms_tests/widget_tests/test_timeinput.py | Python | bsd-3-clause | 1,641 | 0.002438 | from datetime import time
from django.forms import TimeInput
from django.utils import translation
from .base import WidgetTest
class TimeInputTest(WidgetTest):
    """Rendering tests for the TimeInput form widget."""
    widget = TimeInput()

    def test_render_none(self):
        # A None value renders with no value attribute at all.
        self.check_html(self.widget, 'time', None, html='<input type="text" name="time">')

    def test_render_value(self):
        """
        The microseconds are trimmed on display, by default.
        """
        t = time(12, 51, 34, 482548)
        self.assertEqual(str(t), '12:51:34.482548')
        self.check_html(self.widget, 'time', t, html='<input type="text" name="time" value="12:51:34">')
        self.check_html(self.widget, 'time', time(12, 51, 34), html=(
            '<input type="text" name="time" value="12:51:34">'
        ))
        self.check_html(self.widget, 'time', time(12, 51), html=(
            '<input type="text" name="time" value="12:51:00">'
        ))

    def test_string(self):
        """Initializing from a string value."""
        self.check_html(self.widget, 'time', '13:12:11', html=(
            '<input type="text" name="time" value="13:12:11">'
        ))

    def test_format(self):
        """
        Use 'format' to change the way a value is displayed.
        """
        t = time(12, 51, 34, 482548)
        widget = TimeInput(format='%H:%M', attrs={'type': 'time'})
        self.check_html(widget, 'time', t, html='<input type="time" name="time" value="12:51">')

    @translation.override('de-at')
    def test_l10n(self):
        # Localization must not change the ISO rendering of the value.
        t = time(12, 51, 34, 482548)
        self.check_html(self.widget, 'time', t, html='<input type="text" name="time" value="12:51:34">')
|
shashank-sharma/mythical-learning | scrapper/apps.py | Python | mit | 91 | 0 | from django.apps import AppConfig |
class ScrapperConfig(AppConfig):
    # Django application configuration for the `scrapper` app.
    name = 'scrapper'
jasonwee/asus-rt-n14uhp-mrtg | src/lesson_runtime_features/os_strerror.py | Python | apache-2.0 | 208 | 0 | import errno
import os
# Print the symbolic name and OS error message for a few sample errnos.
for code in (errno.ENOENT, errno.EINTR, errno.EBUSY):
    label = errno.errorcode[code]
    print('[{num:>2}] {name:<6}: {msg}'.format(
        name=label, num=code, msg=os.strerror(code)))
|
jeremiah-c-leary/vhdl-style-guide | vsg/tests/process/test_rule_031.py | Python | gpl-3.0 | 1,200 | 0.004167 |
import os
import unittest
from vsg.rules import process
from vsg import vhdlFile
from vsg.tests import utils
# Module-level fixtures shared by the tests below: the parsed input VHDL
# file and the expected post-fix file contents (with a leading '' entry so
# indices line up with 1-based line numbers).
sTestDir = os.path.dirname(__file__)

lFile, eError =vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_031_test_input.vhd'))

lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_031_test_input.fixed.vhd'), lExpected)
class test_process_rule(unittest.TestCase):
    """Tests for process rule 031 (analysis violations and automatic fix)."""

    def setUp(self):
        self.oFile = vhdlFile.vhdlFile(lFile)
        self.assertIsNone(eError)

    def test_rule_031(self):
        oRule = process.rule_031()
        self.assertTrue(oRule)
        self.assertEqual(oRule.name, 'process')
        self.assertEqual(oRule.identifier, '031')
        # Local name shadows the module-level lExpected inside this test only.
        lExpected = [11, 12, 13, 14, 32, 34, 35, 37, 38, 65, 88, 103, 104, 118]
        oRule.analyze(self.oFile)
        self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))

    def test_fix_rule_031(self):
        oRule = process.rule_031()
        oRule.fix(self.oFile)
        lActual = self.oFile.get_lines()
        # Compares against the module-level lExpected (the fixed-file text).
        self.assertEqual(lExpected, lActual)
        oRule.analyze(self.oFile)
        self.assertEqual(oRule.violations, [])
taigaio/taiga-back | tests/integration/test_userstories.py | Python | agpl-3.0 | 71,594 | 0.003534 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
import csv
import pytz
from datetime import datetime, timedelta
from urllib.parse import quote
from unittest import mock
from django.urls import reverse
from taiga.base.utils import json
from taiga.permissions.choices import MEMBERS_PERMISSIONS, ANON_PERMISSIONS
from taiga.projects.occ import OCCResourceMixin
from taiga.projects.userstories import services, models
from .. import factories as f
import pytest
pytestmark = pytest.mark.django_db(transaction=True)
def create_uss_fixtures():
    """Build a project with 3 users/roles, 4 statuses, 3 epics and 10 user
    stories covering the owner/assignee/tag/epic combinations summarised in
    the table below. Returns a dict of all created objects."""
    data = {}
    data["project"] = f.ProjectFactory.create()
    project = data["project"]
    data["users"] = [f.UserFactory.create(is_superuser=True) for i in range(0, 3)]
    data["roles"] = [f.RoleFactory.create() for i in range(0, 3)]
    user_roles = zip(data["users"], data["roles"])
    # Add membership fixtures
    [f.MembershipFactory.create(user=user, project=project, role=role) for (user, role) in user_roles]
    data["statuses"] = [f.UserStoryStatusFactory.create(project=project) for i in range(0, 4)]
    data["epics"] = [f.EpicFactory.create(project=project) for i in range(0, 3)]
    data["tags"] = ["test1test2test3", "test1", "test2", "test3"]
    # -------------------------------------------------------------------------------
    # | US | Status  | Owner | Assigned To | Assigned Users | Tags           | Epic        |
    # |----|---------|-------|-------------|----------------|----------------|-------------|
    # | 0  | status3 | user2 | None        | None           | tag1           | epic0       |
    # | 1  | status3 | user1 | None        | user1          | tag2           | None        |
    # | 2  | status1 | user3 | None        | None           | tag1 tag2      | epic1       |
    # | 3  | status0 | user2 | None        | None           | tag3           | None        |
    # | 4  | status0 | user1 | user1       | None           | tag1 tag2 tag3 | epic0       |
    # | 5  | status2 | user3 | user1       | None           | tag3           | None        |
    # | 6  | status3 | user2 | user1       | None           | tag1 tag2      | epic0 epic2 |
    # | 7  | status0 | user1 | user2       | None           | tag3           | None        |
    # | 8  | status3 | user3 | user2       | None           | tag1           | epic2       |
    # | 9  | status1 | user2 | user3       | user1          | tag0           | None        |
    # -------------------------------------------------------------------------------
    (user1, user2, user3, ) = data["users"]
    (status0, status1, status2, status3 ) = data["statuses"]
    (epic0, epic1, epic2) = data["epics"]
    (tag0, tag1, tag2, tag3, ) = data["tags"]
    us0 = f.UserStoryFactory.create(project=project, owner=user2, assigned_to=None,
                                    status=status3, tags=[tag1], milestone=None)
    f.RelatedUserStory.create(user_story=us0, epic=epic0)
    us1 = f.UserStoryFactory.create(project=project, owner=user1, assigned_to=None,
                                    status=status3, tags=[tag2], assigned_users=[user1])
    us2 = f.UserStoryFactory.create(project=project, owner=user3, assigned_to=None,
                                    status=status1, tags=[tag1, tag2], milestone=None)
    f.RelatedUserStory.create(user_story=us2, epic=epic1)
    us3 = f.UserStoryFactory.create(project=project, owner=user2, assigned_to=None,
                                    status=status0, tags=[tag3])
    us4 = f.UserStoryFactory.create(project=project, owner=user1, assigned_to=user1,
                                    status=status0, tags=[tag1, tag2, tag3], milestone=None)
    f.RelatedUserStory.create(user_story=us4, epic=epic0)
    us5 = f.UserStoryFactory.create(project=project, owner=user3, assigned_to=user1,
                                    status=status2, tags=[tag3])
    us6 = f.UserStoryFactory.create(project=project, owner=user2, assigned_to=user1,
                                    status=status3, tags=[tag1, tag2], milestone=None)
    f.RelatedUserStory.create(user_story=us6, epic=epic0)
    f.RelatedUserStory.create(user_story=us6, epic=epic2)
    us7 = f.UserStoryFactory.create(project=project, owner=user1, assigned_to=user2,
                                    status=status0, tags=[tag3])
    us8 = f.UserStoryFactory.create(project=project, owner=user3, assigned_to=user2,
                                    status=status3, tags=[tag1], milestone=None)
    f.RelatedUserStory.create(user_story=us8, epic=epic2)
    us9 = f.UserStoryFactory.create(project=project, owner=user2, assigned_to=user3,
                                    status=status1, tags=[tag0], assigned_users=[user1])
    data["userstories"] = [us0, us1, us2, us3, us4, us5, us6, us7, us8, us9]
    return data
def test_get_userstories_from_bulk():
    # One user story is built per non-empty line of bulk text.
    data = "User Story #1\nUser Story #2\n"
    userstories = services.get_userstories_from_bulk(data)
    assert len(userstories) == 2
    assert userstories[0].subject == "User Story #1"
    assert userstories[1].subject == "User Story #2"
def test_create_userstories_in_bulk():
    # The service must persist the parsed stories through db.save_in_bulk.
    data = "User Story #1\nUser Story #2\n"
    project = f.ProjectFactory.create()
    with mock.patch("taiga.projects.userstories.services.db") as db:
        userstories = services.create_userstories_in_bulk(data, project=project)
        db.save_in_bulk.assert_called_once_with(userstories, None, None)
def test_update_userstories_order_in_bulk():
    # Swapping backlog_order values must be forwarded as a single bulk
    # attribute update keyed by user-story id.
    project = f.ProjectFactory.create()
    us1 = f.UserStoryFactory.create(project=project, backlog_order=1)
    us2 = f.UserStoryFactory.create(project=project, backlog_order=2)
    data = [{"us_id": us1.id, "order": 2}, {"us_id": us2.id, "order": 1}]
    with mock.patch("taiga.projects.userstories.services.db") as db:
        services.update_userstories_order_in_bulk(data, "backlog_order", project)
        db.update_attr_in_bulk_for_ids.assert_called_once_with({us2.id: 1, us1.id: 2},
                                                               "backlog_order",
                                                               models.UserStory)
def test_create_userstory_with_assign_to(client):
    """POSTing a story with assigned_to stores and returns the assignment."""
    owner = f.UserFactory.create()
    watcher = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=owner)
    f.MembershipFactory.create(project=project, user=owner, is_admin=True)
    f.MembershipFactory.create(project=project, user=watcher, is_admin=True)
    url = reverse("userstories-list")
    payload = {
        "subject": "Test user story",
        "project": project.id,
        "assigned_to": owner.id,
    }
    client.login(owner)
    response = client.json.post(url, json.dumps(payload))
    assert response.status_code == 201
    assert response.data["assigned_to"] == owner.id
def test_create_userstory_with_assigned_users(client):
user = f.UserFactory.create()
user_watcher = f.UserFactory.create()
project = f.ProjectFactory.create(owner=user)
f.MembershipFactory.create(project=project, user=user, is_admin=True)
f.MembershipFactory.create(project=project, user=user_watcher,
is_admin=True)
url = reverse("userstories-list")
data = {"subject": "Test user story", "project": project.id,
|
balazssimon/ml-playground | udemy/lazyprogrammer/ab-testing-python/chisquare_exercise.py | Python | apache-2.0 | 448 | 0 | import numpy as np
import pandas as pd
from scipy import stats
# A/B test exercise: compare click behaviour of two advertisements with a
# chi-square test of independence on a 2x2 contingency table.
data = pd.read_csv("advertisement_clicks.csv")
# Split rows by which advertisement was shown.
X1 = data[data['advertisement_id'] == 'A']
X2 = data[data['advertisement_id'] == 'B']
# Counts of no-click (action == 0) and click (action == 1) for each ad.
A0 = X1[X1['action'] == 0].shape[0]
A1 = X1[X1['action'] == 1].shape[0]
B0 = X2[X2['action'] == 0].shape[0]
B1 = X2[X2['action'] == 1].shape[0]
# Contingency table: rows = ads (A, B), columns = (no click, click).
T = [[A0, A1], [B0, B1]]
# correction=False disables Yates' continuity correction so the statistic
# matches the plain chi-square formula.
# NOTE(review): chi2/p are computed but never printed in this chunk —
# confirm nothing below was truncated.
chi2, p, dof, ex = stats.chi2_contingency(T, correction=False)
|
class Solution(object):
    def solveNQueens(self, n):
        """
        Return all distinct solutions to the n-queens puzzle.

        Each solution is a list of n strings; row i contains a single 'Q'
        in the column where that row's queen sits, '.' elsewhere.

        :type n: int
        :rtype: List[List[str]]
        """
        def is_valid(row_a, row_b):
            # Queens clash when they share a column or either diagonal.
            # rows[r] is the column of the queen placed in row r; the two
            # diagonal tests use the (row - col) and (row + col) invariants.
            # (Fixes the original's misspelled helper name "is_valied".)
            return (rows[row_a] != rows[row_b] and
                    row_a - rows[row_a] != row_b - rows[row_b] and
                    row_a + rows[row_a] != row_b + rows[row_b])

        def add_answer():
            # Render the column indices in `rows` as a board of strings.
            result.append(["." * col + "Q" + "." * (n - col - 1)
                           for col in rows])

        def search(cur):
            # Place a queen in row `cur`, trying every column and
            # backtracking on conflict with any previously placed row.
            if cur == n:
                add_answer()
                return
            for col in range(n):
                rows[cur] = col
                if all(is_valid(cur, prev) for prev in range(cur)):
                    search(cur + 1)

        result = []
        rows = [0] * n
        search(0)
        return result
# Parenthesized print: valid under both Python 2 (single-arg) and Python 3,
# whereas the original `print x` statement is Python-2-only.
print(Solution().solveNQueens(4))
|
chirilo/remo | remo/dashboard/tests/test_forms.py | Python | bsd-3-clause | 2,108 | 0 | from django.contrib.auth.models import User
from django.core import mail
from django.test.client import RequestFactory
from mock import ANY, patch
from nose.tools import eq_, ok_
from test_utils import TestCase
from remo.dashboard.forms import EmailRepsForm
from remo.profiles.tests import FunctionalAreaFactory, UserFactory
class EmailRepsFormsTest(TestCase):
    """Tests for EmailRepsForm, which mass-mails Reps of a functional area."""

    def setUp(self):
        # One functional area shared by the tests below.
        self.functional_area = FunctionalAreaFactory.create()

    def test_form_tampered_functional_area(self):
        """Test form with tampered data in functional area field."""
        # A value that is not a valid FunctionalArea choice must be rejected
        # with exactly one validation error on that field.
        data = {'subject': 'Test email subject',
                'body': None,
                'functional_area': 'Non existing functional area'}
        form = EmailRepsForm(data=data)
        ok_(not form.is_valid())
        eq_(len(form.errors['functional_area']), 1)

    @patch('remo.dashboard.forms.messages.success')
    def test_send_mail(self, fake_messages):
        """Test EmailRepsForm email sending functionality."""
        data = {'subject': 'Test email subject',
                'body': 'Test email body',
                'functional_area': self.functional_area.id}
        form = EmailRepsForm(data=data)
        ok_(form.is_valid())
        area = self.functional_area
        # 20 Reps in the area; each should receive one message.
        UserFactory.create_batch(20, userprofile__functional_areas=[area])
        factory = RequestFactory()
        request = factory.request()
        request.user = UserFactory.create()
        # NOTE(review): this filters __name against a FunctionalArea object
        # rather than area.name — relies on its string coercion; confirm.
        reps = User.objects.filter(userprofile__functional_areas__name=area)
        form.send_email(request, reps)
        eq_(len(mail.outbox), 20)

        def format_name(user):
            # "First Last <email>" as produced by the form's recipient list.
            return '%s %s <%s>' % (user.first_name, user.last_name, user.email)

        recipients = map(format_name, reps)
        receivers = []
        for i in range(0, len(mail.outbox)):
            eq_(mail.outbox[i].subject, data['subject'])
            eq_(mail.outbox[i].body, data['body'])
            receivers.append(mail.outbox[i].to[0])
        # Same recipient set, order-insensitive.
        eq_(set(receivers), set(recipients))
        fake_messages.assert_called_with(ANY, 'Email sent successfully.')
|
mwleeds/android-malware-analysis | parse_ssdeep.py | Python | gpl-3.0 | 3,067 | 0.006195 | #!/usr/bin/python3
"""
This script reads the CSV output from ssdeep in the malicious_apk
and benign_apk folders and writes similarity scores for each sample
and classifications to a JSON file for later analysis
The output data format is as follows:
{"features": ["similarity_limit_0", "similarity_limit_0.2", ...],
"apps": {"999eca2457729e371355aea5faa38e14.apk": {"vector": [0,0,0,1], "malicious": [0,1]}, ...}}
"""
import os
import json
import glob
import random
import numpy
import ssdeep
__author__='mwleeds'
def main():
    """Read ssdeep CSV output for benign and malicious APKs, compute each
    sample's mean fuzzy-hash similarity against 1000 random same-class
    samples, one-hot bucket the score, and dump the vectors plus class
    labels to app_hash_vectors.json.
    """
    all_hashes = {'malicious': [], 'benign': []}
    # app name -> one-hot class label: [1, 0] malicious, [0, 1] benign
    app_malicious_map = {}
    similarity_buckets = ['similarity_limit_0', 'similarity_limit_0.2',
                          'similarity_limit_0.4', 'similarity_limit_0.6',
                          'similarity_limit_0.8', 'similarity_limit_1.0']
    root_dir = os.getcwd()
    for i, directory in enumerate(['benign_apk', 'malicious_apk']):
        os.chdir(directory)
        with open(directory.split('_')[0] + '_apk_ssdeep.csv') as hashes:
            for j, line in enumerate(hashes):
                if j == 0:
                    continue  # skip the ssdeep CSV header line
                b64hash = line.split(',')[0]
                # Last CSV column is the file path; keep the basename and
                # strip the trailing quote + newline to get the APK name.
                app_name = line.split(',')[-1].split('/')[-1][:-2]
                app_malicious_map[app_name] = [1, 0] if i else [0, 1]
                all_hashes['malicious' if i else 'benign'].append((app_name, b64hash))
        os.chdir(root_dir)
    all_apps = {}  # app name -> {'vector': ..., 'malicious': ...}
    num_zero = {}
    num_each = {}
    for category in all_hashes:
        num_zero[category] = 0
        num_each[category] = 0
        for app_and_hash in all_hashes[category]:
            # Mean similarity against 1000 random same-class samples
            # (sampled with replacement; may include the sample itself).
            similarity_scores = []
            this_hash = app_and_hash[1]
            for _ in range(1000):
                other_hash = random.choice(all_hashes[category])[1]
                similarity_scores.append(ssdeep.compare(this_hash, other_hash))
            score = numpy.mean(similarity_scores)
            num_each[category] += 1
            if score == 0:
                num_zero[category] += 1
            # One-hot bucket vector: exactly one bucket ends up set.
            # NOTE(review): ssdeep.compare returns 0-100, so any nonzero
            # mean above 1 collapses into the last bucket — confirm intended.
            bit_vector = []
            last_limit = -0.01
            for limit in similarity_buckets:
                float_limit = float(limit.split('_')[-1])
                if last_limit < score <= float_limit:
                    bit_vector.append(1)
                else:
                    bit_vector.append(0)
                last_limit = float_limit
            if not any(bit_vector):  # score > 1 falls into the last bucket
                bit_vector[-1] = 1
            all_apps[app_and_hash[0]] = {'vector': bit_vector,
                                         'malicious': app_malicious_map[app_and_hash[0]]}
    with open('app_hash_vectors.json', 'w') as outfile:
        json.dump({'features': similarity_buckets, 'apps': all_apps}, outfile)
    # BUG FIX: the final placeholder previously repeated num_zero['benign'];
    # the message "{} of {} benign apps" requires the benign sample total.
    print('{} of {} malicious apps and {} of {} benign apps had zero similarity found'.format(
        num_zero['malicious'], num_each['malicious'],
        num_zero['benign'], num_each['benign']))
    print('Wrote data on ' + str(len(all_apps)) + ' apps to a file.')

if __name__ == '__main__':
    main()
|
kyleconroy/quivr | flickr.py | Python | mit | 645 | 0.003101 | import os
import hashlib
import requests
import json
# Endpoint for all Flickr REST calls.
FLICKR_BASE = "http://api.flickr.com/services/rest/"

def api(method, **kwargs):
    """Call a Flickr REST method and return the decoded JSON response.

    Fills in the standard request fields from the environment, then signs
    the request with the MD5 of the app secret followed by every key/value
    pair concatenated in sorted key order (Flickr's signing scheme).
    """
    kwargs["method"] = "flickr.{}".format(method)
    kwargs["format"] = "json"
    kwargs["nojsoncallback"] = "1"
    kwargs["api_key"] = os.environ["FLICKR_KEY"]
    kwargs["auth_token"] = os.environ["FLICKR_AUTH_TOKEN"]
    # Signature validation: secret + sorted "keyvalue" concatenation.
    sorted_pairs = sorted(kwargs.items())
    signature_base = ''.join(str(key) + str(value) for key, value in sorted_pairs)
    kwargs["api_sig"] = hashlib.md5(
        os.environ["FLICKR_SECRET"] + signature_base).hexdigest()
    response = requests.post(FLICKR_BASE, params=kwargs)
    return json.loads(response.content)
|
bvernoux/micropython | tools/mpremote/mpremote/main.py | Python | mit | 15,680 | 0.00102 | """
MicroPython Remote - Interaction and automation tool for MicroPython
MIT license; Copyright (c) 2019-2021 Damien P. George
This program provides a set of utilities to interact with and automate a
MicroPython device over a serial connection. Commands supported are:
mpremote -- auto-detect, connect and enter REPL
mpremote <device-shortcut> -- connect to given device
mpremote connect <device> -- connect to given device
mpremote disconnect -- disconnect current device
mpremote mount <local-dir> -- mount local directory on device
mpremote eval <string> -- evaluate and print the string
mpremote exec <string> -- execute the string
mpremote run <script> -- run the given local script
mpremote fs <command> <args...> -- execute filesystem commands on the device
mpremote repl -- enter REPL
"""
import os, sys
import serial.tools.list_ports
from . import pyboardextended as pyboard
from .console import Console, ConsolePosix
# Program name, used for the config directory and usage messages.
_PROG = "mpremote"

# Built-in command aliases.  A key may carry "name=default" argument specs
# (e.g. "reset t_ms=100"); values are either a command string (split on
# whitespace) or a pre-split list whose elements must not be re-split.
_BUILTIN_COMMAND_EXPANSIONS = {
    # Device connection shortcuts.
    "devs": "connect list",
    "a0": "connect /dev/ttyACM0",
    "a1": "connect /dev/ttyACM1",
    "a2": "connect /dev/ttyACM2",
    "a3": "connect /dev/ttyACM3",
    "u0": "connect /dev/ttyUSB0",
    "u1": "connect /dev/ttyUSB1",
    "u2": "connect /dev/ttyUSB2",
    "u3": "connect /dev/ttyUSB3",
    "c0": "connect COM0",
    "c1": "connect COM1",
    "c2": "connect COM2",
    "c3": "connect COM3",
    # Filesystem shortcuts.
    "cat": "fs cat",
    "ls": "fs ls",
    "cp": "fs cp",
    "rm": "fs rm",
    "mkdir": "fs mkdir",
    "rmdir": "fs rmdir",
    # "df" runs a script on the device that prints per-mount usage stats.
    "df": [
        "exec",
        "import uos\nprint('mount \\tsize \\tused \\tavail \\tuse%')\nfor _m in [''] + uos.listdir('/'):\n _s = uos.stat('/' + _m)\n if not _s[0] & 1 << 14: continue\n _s = uos.statvfs(_m)\n if _s[0]:\n _size = _s[0] * _s[2]; _free = _s[0] * _s[3]; print(_m, _size, _size - _free, _free, int(100 * (_size - _free) / _size), sep='\\t')",
    ],
    # Other shortcuts.  "--no-follow" because the device goes away mid-command.
    "reset t_ms=100": [
        "exec",
        "--no-follow",
        "import utime, umachine; utime.sleep_ms(t_ms); umachine.reset()",
    ],
    "bootloader t_ms=100": [
        "exec",
        "--no-follow",
        "import utime, umachine; utime.sleep_ms(t_ms); umachine.bootloader()",
    ],
    "setrtc": [
        "exec",
        "import machine; machine.RTC().datetime((2020, 1, 1, 0, 10, 0, 0, 0))",
    ],
}
def load_user_config():
    """Load the user's mpremote config.py (if any) and return a config object.

    The returned object always has a ``commands`` dict attribute (user
    command expansions); any other names defined by config.py become
    attributes of the object.
    """
    # Create empty config object.
    # __build_class__ builds an anonymous class on the fly; instantiating it
    # yields a plain attribute container without a class statement.
    config = __build_class__(lambda: None, "Config")()
    config.commands = {}
    # Get config file name: $XDG_CONFIG_HOME, falling back to ~/.config.
    path = os.getenv("XDG_CONFIG_HOME")
    if path is None:
        path = os.getenv("HOME")
        if path is None:
            return config
        path = os.path.join(path, ".config")
    path = os.path.join(path, _PROG)
    config_file = os.path.join(path, "config.py")
    # Check if config file exists.
    if not os.path.exists(config_file):
        return config
    # Exec the config file in its directory, so relative paths it uses
    # resolve against the config directory rather than the caller's cwd.
    with open(config_file) as f:
        config_data = f.read()
    prev_cwd = os.getcwd()
    os.chdir(path)
    exec(config_data, config.__dict__)
    os.chdir(prev_cwd)
    return config
def prepare_command_expansions(config):
    """Build the global command-expansion table from built-ins plus config.

    Each entry maps a command name to ``(arg_specs, expansion_words)`` where
    ``arg_specs`` is a tuple of ``(name,)`` or ``(name, default)`` pairs
    parsed from the "cmd name=default" key syntax.
    """
    global _command_expansions
    _command_expansions = {}
    # User-configured commands are processed last, overriding built-ins of
    # the same name.
    for command_set in (_BUILTIN_COMMAND_EXPANSIONS, config.commands):
        for cmd, sub in command_set.items():
            cmd = cmd.split()
            if len(cmd) == 1:
                args = ()
            else:
                args = tuple(c.split("=") for c in cmd[1:])
            if isinstance(sub, str):
                sub = sub.split()
            _command_expansions[cmd[0]] = (args, sub)
def do_command_expansion(args):
    """Expand a leading command alias in *args*, modifying the list in place.

    Repeatedly replaces a recognized alias at the front of the argument list
    with its expansion, collecting any ``name=value`` argument assignments
    into a leading ``exec`` command that sets them on the device first.
    Exits via usage_error() on a missing or unexpected argument.
    """
    def usage_error(cmd, exp_args, msg):
        print(f"Command {cmd} {msg}; signature is:")
        print("    ", cmd, " ".join("=".join(a) for a in exp_args))
        sys.exit(1)

    last_arg_idx = len(args)
    pre = []
    while args and args[0] in _command_expansions:
        cmd = args.pop(0)
        exp_args, exp_sub = _command_expansions[cmd]
        for exp_arg in exp_args:
            exp_arg_name = exp_arg[0]
            if args and "=" not in args[0]:
                # Argument given without a name.
                value = args.pop(0)
            elif args and args[0].startswith(exp_arg_name + "="):
                # Argument given with correct name.
                value = args.pop(0).split("=", 1)[1]
            else:
                # No argument given, or argument given with a different name.
                if len(exp_arg) == 1:
                    # Required argument (it has no default).
                    usage_error(cmd, exp_args, f"missing argument {exp_arg_name}")
                else:
                    # Optional argument with a default.
                    value = exp_arg[1]
            pre.append(f"{exp_arg_name}={value}")
        args[0:0] = exp_sub
        last_arg_idx = len(exp_sub)
    if last_arg_idx < len(args) and "=" in args[last_arg_idx]:
        # Extra unknown arguments given.
        arg = args[last_arg_idx].split("=", 1)[0]
        usage_error(cmd, exp_args, f"given unexpected argument {arg}")
        sys.exit(1)
    # Insert expansion with optional setting of arguments.
    if pre:
        args[0:0] = ["exec", ";".join(pre)]
def do_connect(args):
    """Handle the "connect" command; return a PyboardExtended or exit.

    Accepted device specs: "list" (print attached ports, return None),
    "auto" (first openable port), "id:<serial>" (match by serial number),
    or a port name, optionally prefixed with "port:".
    """
    dev = args.pop(0)
    try:
        if dev == "list":
            # List attached devices.
            for p in sorted(serial.tools.list_ports.comports()):
                print(
                    "{} {} {:04x}:{:04x} {} {}".format(
                        p.device,
                        p.serial_number,
                        # vid/pid may be None for some ports; print 0 then.
                        p.vid if isinstance(p.vid, int) else 0,
                        p.pid if isinstance(p.pid, int) else 0,
                        p.manufacturer,
                        p.product,
                    )
                )
            return None
        elif dev == "auto":
            # Auto-detect and auto-connect to the first available device.
            for p in sorted(serial.tools.list_ports.comports()):
                try:
                    return pyboard.PyboardExtended(p.device, baudrate=115200)
                except pyboard.PyboardError as er:
                    # "failed to access" means busy/permission — try the next
                    # port; any other error is fatal.
                    if not er.args[0].startswith("failed to access"):
                        raise er
            raise pyboard.PyboardError("no device found")
        elif dev.startswith("id:"):
            # Search for a device with the given serial number.
            serial_number = dev[len("id:") :]
            dev = None
            for p in serial.tools.list_ports.comports():
                if p.serial_number == serial_number:
                    return pyboard.PyboardExtended(p.device, baudrate=115200)
            raise pyboard.PyboardError("no device with serial number {}".format(serial_number))
        else:
            # Connect to the given device.
            if dev.startswith("port:"):
                dev = dev[len("port:") :]
            return pyboard.PyboardExtended(dev, baudrate=115200)
    except pyboard.PyboardError as er:
        msg = er.args[0]
        if msg.startswith("failed to access"):
            msg += " (it may be in use by another program)"
        print(msg)
        sys.exit(1)
def do_disconnect(pyb):
    """Unmount any local mount, leave raw REPL, and close the connection.

    Best-effort: I/O errors during shutdown are deliberately swallowed.
    """
    try:
        if pyb.mounted:
            # Unmounting requires raw REPL mode.
            if not pyb.in_raw_repl:
                pyb.enter_raw_repl(soft_reset=False)
            pyb.umount_local()
        if pyb.in_raw_repl:
            pyb.exit_raw_repl()
    except OSError:
        # Ignore any OSError exceptions when shutting down, eg:
        # - pyboard.filesystem_command will close the connection if it had an error
        # - umounting will fail if serial port disappeared
        pass
    pyb.close()
def do_filesystem(pyb, args):
def _list_recursive(files, path):
if os.path.isdir(path):
for entry in os.listdir(path):
_list_recursive(files, os.path.join(path, entry))
else:
files.append(os.path.split(path))
|
tarikgwa/nfd | newfies/frontend/forms.py | Python | mpl-2.0 | 2,873 | 0.001044 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django import forms
from django.utils.translation import ugettext_lazy as _
from dialer_campaign.models import Campaign
from frontend.constants import SEARCH_TYPE
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML
from crispy_forms.bootstrap import FormActions
class LoginForm(forms.Form):
    """Client Login Form"""
    # Placeholders stand in for the hidden labels (form_show_labels=False).
    user = forms.CharField(max_length=30, label=_('username'), required=True)
    user.widget.attrs['placeholder'] = _('Username')
    password = forms.CharField(max_length=30, label=_('password'), required=True, widget=forms.PasswordInput())
    password.widget.attrs['placeholder'] = _('Password')

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        # crispy-forms inline layout: username + password side by side,
        # followed by a login button and a "forgot password" link.
        self.helper = FormHelper()
        self.helper.form_action = '/login/'
        self.helper.form_show_labels = False
        self.helper.form_class = 'form-inline well'
        self.helper.layout = Layout(
            Div(
                Div('user', css_class='col-xs-3'),
                Div('password', css_class='col-xs-3'),
            ),
            FormActions(
                Submit('submit', 'Login'),
                HTML('<a class="btn btn-warning" href="/password_reset/">%s?</a>' % _('Forgot password')),
            ),
        )
class DashboardForm(forms.Form):
    """Dashboard Form"""
    # Campaign choices are filled per-user in __init__.
    campaign = forms.ChoiceField(label=_('campaign'), required=False)
    search_type = forms.ChoiceField(label=_('type'), required=False, choices=list(SEARCH_TYPE),
                                    initial=SEARCH_TYPE.D_Last_24_hours)

    def __init__(self, user, *args, **kwargs):
        super(DashboardForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_show_labels = False
        self.helper.form_class = 'well form-inline text-right'
        self.helper.layout = Layout(
            Div(
                Div('campaign', css_class='form-group'),
                Div('search_type', css_class='form-group'),
                Div(Submit('submit', _('Search')), css_class='form-group'),
            ),
        )
        # To get user's running campaign list
        if user:
            campaign_choices = [(0, _('Select campaign'))]
            for cp in Campaign.objects.filter(user=user).order_by('-id'):
                # NOTE(review): unicode() is Python-2 only; revisit on a
                # Python 3 migration.
                campaign_choices.append((cp.id, unicode(cp.name)))
            self.fields['campaign'].choices = campaign_choices
|
TangXT/GreatCatMOOC | common/lib/xmodule/xmodule/modulestore/split_mongo/caching_descriptor_system.py | Python | agpl-3.0 | 6,524 | 0.003679 | import sys
import logging
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.x_module import XModuleDescriptor
from xmodule.modulestore.locator import BlockUsageLocator, LocalId
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import exc_info_to_str
from xblock.runtime import DbModel
from ..exceptions import ItemNotFoundError
from .split_mongo_kvs import SplitMongoKVS
from xblock.fields import ScopeIds
log = logging.getLogger(__name__)
class CachingDescriptorSystem(MakoDescriptorSystem):
    """
    A system that has a cache of a course version's json that it will use to load modules
    from, with a backup of calling to the underlying modulestore for more data.

    Computes the settings (nee 'metadata') inheritance upon creation.
    """
    def __init__(self, modulestore, course_entry, default_class, module_data, lazy, **kwargs):
        """
        Computes the settings inheritance and sets up the cache.

        modulestore: the module store that can be used to retrieve additional
        modules

        course_entry: the originally fetched enveloped course_structure w/ branch and package_id info.
        Callers to _load_item provide an override but that function ignores the provided structure and
        only looks at the branch and package_id

        module_data: a dict mapping Location -> json that was cached from the
        underlying modulestore
        """
        super(CachingDescriptorSystem, self).__init__(load_item=self._load_item, **kwargs)
        self.modulestore = modulestore
        self.course_entry = course_entry
        # When lazy, definitions are fetched on first access instead of eagerly.
        self.lazy = lazy
        self.module_data = module_data
        # Compute inheritance
        modulestore.inherit_settings(
            course_entry['structure'].get('blocks', {}),
            course_entry['structure'].get('blocks', {}).get(course_entry['structure'].get('root'))
        )
        self.default_class = default_class
        # Cache of in-memory (LocalId) modules created through this system.
        self.local_modules = {}

    def _load_item(self, block_id, course_entry_override=None):
        """Load the block, consulting the local cache, the cached json, and
        finally the modulestore; raises ItemNotFoundError if absent."""
        # In-memory (LocalId) blocks exist only in this system's local cache.
        if isinstance(block_id, BlockUsageLocator) and isinstance(block_id.block_id, LocalId):
            try:
                return self.local_modules[block_id]
            except KeyError:
                raise ItemNotFoundError
        json_data = self.module_data.get(block_id)
        if json_data is None:
            # deeper than initial descendant fetch or doesn't exist
            self.modulestore.cache_items(self, [block_id], lazy=self.lazy)
            json_data = self.module_data.get(block_id)
            if json_data is None:
                raise ItemNotFoundError(block_id)
        class_ = XModuleDescriptor.load_class(
            json_data.get('category'),
            self.default_class
        )
        return self.xblock_from_json(class_, block_id, json_data, course_entry_override)

    # xblock's runtime does not always pass enough contextual information to figure out
    # which named container (course x branch) or which parent is requesting an item. Because split allows
    # a many:1 mapping from named containers to structures and because item's identities encode
    # context as well as unique identity, this function must sometimes infer whether the access is
    # within an unspecified named container. In most cases, course_entry_override will give the
    # explicit context; however, runtime.get_block(), e.g., does not. HOWEVER, there are simple heuristics
    # which will work 99.999% of the time: a runtime is thread & even context specific. The likelihood that
    # the thread is working with more than one named container pointing to the same specific structure is
    # low; thus, the course_entry is most likely correct. If the thread is looking at > 1 named container
    # pointing to the same structure, the access is likely to be chunky enough that the last known container
    # is the intended one when not given a course_entry_override; thus, the caching of the last branch/package_id.
    def xblock_from_json(self, class_, block_id, json_data, course_entry_override=None):
        """Instantiate an XBlock of class_ from its cached json, falling back
        to an ErrorDescriptor if construction fails."""
        if course_entry_override is None:
            course_entry_override = self.course_entry
        else:
            # most recent retrieval is most likely the right one for next caller (see comment above fn)
            self.course_entry['branch'] = course_entry_override['branch']
            self.course_entry['package_id'] = course_entry_override['package_id']
        # most likely a lazy loader or the id directly
        definition = json_data.get('definition', {})
        definition_id = self.modulestore.definition_locator(definition)
        # If no usage id is provided, generate an in-memory id
        if block_id is None:
            block_id = LocalId()
        block_locator = BlockUsageLocator(
            version_guid=course_entry_override['structure']['_id'],
            block_id=block_id,
            package_id=course_entry_override.get('package_id'),
            branch=course_entry_override.get('branch')
        )
        kvs = SplitMongoKVS(
            definition,
            json_data.get('fields', {}),
            json_data.get('_inherited_settings'),
        )
        field_data = DbModel(kvs)
        try:
            module = self.construct_xblock_from_class(
                class_,
                ScopeIds(None, json_data.get('category'), definition_id, block_locator),
                field_data,
            )
        except Exception:
            log.warning("Failed to load descriptor", exc_info=True)
            return ErrorDescriptor.from_json(
                json_data,
                self,
                BlockUsageLocator(
                    version_guid=course_entry_override['structure']['_id'],
                    block_id=block_id
                ),
                error_msg=exc_info_to_str(sys.exc_info())
            )
        edit_info = json_data.get('edit_info', {})
        module.edited_by = edit_info.get('edited_by')
        module.edited_on = edit_info.get('edited_on')
        module.previous_version = edit_info.get('previous_version')
        module.update_version = edit_info.get('update_version')
        module.definition_locator = self.modulestore.definition_locator(definition)
        # decache any pending field settings
        module.save()
        # If this is an in-memory block, store it in this system
        if isinstance(block_locator.block_id, LocalId):
            self.local_modules[block_locator] = module
        return module
|
poldracklab/niworkflows | niworkflows/tests/test_registration.py | Python | bsd-3-clause | 7,017 | 0.001995 | # -*- coding: utf-8 -*-
""" Registration tests """
import os
from shutil import copy
import pytest
from tempfile import TemporaryDirectory
from nipype.pipeline import engine as pe
from ..interfaces.reportlets.registration import (
FLIRTRPT,
SpatialNormalizationRPT,
ANTSRegistrationRPT,
BBRegisterRPT,
MRICoregRPT,
ApplyXFMRPT,
SimpleBeforeAfterRPT,
)
from .conftest import _run_interface_mock, datadir, has_fsl, has_freesurfer
def _smoke_test_report(report_interface, artifact_name):
    """Run the interface inside a throwaway Node and check its report exists.

    If SAVE_CIRCLE_ARTIFACTS is set, the report is also copied there under
    *artifact_name* so CI can expose it.
    """
    with TemporaryDirectory() as tmpdir:
        node = pe.Node(report_interface, name="smoke_test", base_dir=tmpdir)
        out_report = node.run().outputs.out_report
        artifact_dir = os.getenv("SAVE_CIRCLE_ARTIFACTS", False)
        if artifact_dir:
            copy(out_report, os.path.join(artifact_dir, artifact_name))
        assert os.path.isfile(out_report), "Report does not exist"
@pytest.mark.skipif(not has_fsl, reason="No FSL")
def test_FLIRTRPT(reference, moving):
    """ the FLIRT report capable test """
    # Runs a real FLIRT registration and checks an SVG report is produced.
    flirt_rpt = FLIRTRPT(generate_report=True, in_file=moving, reference=reference)
    _smoke_test_report(flirt_rpt, "testFLIRT.svg")
@pytest.mark.skipif(not has_freesurfer, reason="No FreeSurfer")
def test_MRICoregRPT(monkeypatch, reference, moving, nthreads):
    """ the MRICoreg report capable test """

    def _agg(objekt, runtime):
        # Fake aggregate_outputs: point outputs at pre-baked fixture files so
        # the report can be built without actually running mri_coreg.
        outputs = objekt.output_spec()
        outputs.out_lta_file = os.path.join(datadir, "testMRICoregRPT-out_lta_file.lta")
        outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
        return outputs

    # Patch the _run_interface method
    monkeypatch.setattr(MRICoregRPT, "_run_interface", _run_interface_mock)
    monkeypatch.setattr(MRICoregRPT, "aggregate_outputs", _agg)
    mri_coreg_rpt = MRICoregRPT(
        generate_report=True,
        source_file=moving,
        reference_file=reference,
        num_threads=nthreads,
    )
    _smoke_test_report(mri_coreg_rpt, "testMRICoreg.svg")
@pytest.mark.skipif(not has_fsl, reason="No FSL")
def test_ApplyXFMRPT(reference, moving):
    """ the ApplyXFM report capable test """
    # First run FLIRT (no report) just to obtain a transform matrix.
    flirt_rpt = FLIRTRPT(generate_report=False, in_file=moving, reference=reference)
    applyxfm_rpt = ApplyXFMRPT(
        generate_report=True,
        in_file=moving,
        in_matrix_file=flirt_rpt.run().outputs.out_matrix_file,
        reference=reference,
        apply_xfm=True,
    )
    _smoke_test_report(applyxfm_rpt, "testApplyXFM.svg")
@pytest.mark.skipif(not has_fsl, reason="No FSL")
def test_SimpleBeforeAfterRPT(reference, moving):
    """ the SimpleBeforeAfterRPT report capable test """
    # Register moving->reference, then build a before/after comparison report.
    flirt_rpt = FLIRTRPT(generate_report=False, in_file=moving, reference=reference)
    ba_rpt = SimpleBeforeAfterRPT(
        generate_report=True, before=reference, after=flirt_rpt.run().outputs.out_file
    )
    _smoke_test_report(ba_rpt, "test_SimpleBeforeAfterRPT.svg")
@pytest.mark.skipif(not has_fsl, reason="No FSL")
def test_FLIRTRPT_w_BBR(reference, reference_mask, moving):
    """ test FLIRTRPT with input `wm_seg` set.
    For the sake of testing ONLY, `wm_seg` is set to the filename of a brain mask """
    flirt_rpt = FLIRTRPT(
        generate_report=True, in_file=moving, reference=reference, wm_seg=reference_mask
    )
    _smoke_test_report(flirt_rpt, "testFLIRTRPTBBR.svg")
@pytest.mark.skipif(not has_freesurfer, reason="No FreeSurfer")
def test_BBRegisterRPT(monkeypatch, moving):
    """ the BBRegister report capable test """

    def _agg(objekt, runtime):
        # Fake aggregate_outputs so no real bbregister run is required.
        outputs = objekt.output_spec()
        outputs.out_lta_file = os.path.join(
            datadir, "testBBRegisterRPT-out_lta_file.lta"
        )
        outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
        return outputs

    # Patch the _run_interface method
    monkeypatch.setattr(BBRegisterRPT, "_run_interface", _run_interface_mock)
    monkeypatch.setattr(BBRegisterRPT, "aggregate_outputs", _agg)
    subject_id = "fsaverage"
    bbregister_rpt = BBRegisterRPT(
        generate_report=True,
        contrast_type="t1",
        init="fsl",
        source_file=moving,
        subject_id=subject_id,
        registered_file=True,
    )
    _smoke_test_report(bbregister_rpt, "testBBRegister.svg")
def test_SpatialNormalizationRPT(monkeypatch, moving):
    """ the SpatialNormalizationRPT report capable test """

    def _agg(objekt, runtime):
        # Fake aggregate_outputs: reuse a pre-computed warped image fixture.
        outputs = objekt.output_spec()
        outputs.warped_image = os.path.join(
            datadir, "testSpatialNormalizationRPTMovingWarpedImage.nii.gz"
        )
        outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
        return outputs

    # Patch the _run_interface method
    monkeypatch.setattr(
        SpatialNormalizationRPT, "_run_interface", _run_interface_mock
    )
    monkeypatch.setattr(SpatialNormalizationRPT, "aggregate_outputs", _agg)
    ants_rpt = SpatialNormalizationRPT(
        generate_report=True, moving_image=moving, flavor="testing"
    )
    _smoke_test_report(ants_rpt, "testSpatialNormalizationRPT.svg")
def test_SpatialNormalizationRPT_masked(monkeypatch, moving, reference_mask):
    """ the SpatialNormalizationRPT report capable test with masking """

    def _agg(objekt, runtime):
        # Fake aggregate_outputs: reuse a pre-computed warped image fixture.
        outputs = objekt.output_spec()
        outputs.warped_image = os.path.join(
            datadir, "testSpatialNormalizationRPTMovingWarpedImage.nii.gz"
        )
        outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
        return outputs

    # Patch the _run_interface method
    monkeypatch.setattr(
        SpatialNormalizationRPT, "_run_interface", _run_interface_mock
    )
    monkeypatch.setattr(SpatialNormalizationRPT, "aggregate_outputs", _agg)
    ants_rpt = SpatialNormalizationRPT(
        generate_report=True,
        moving_image=moving,
        reference_mask=reference_mask,
        flavor="testing",
    )
    _smoke_test_report(ants_rpt, "testSpatialNormalizationRPT_masked.svg")
def test_ANTSRegistrationRPT(monkeypatch, reference, moving):
    """ the SpatialNormalizationRPT report capable test """
    import pkg_resources as pkgr

    def _agg(objekt, runtime):
        # Fake aggregate_outputs: reuse a pre-computed warped image fixture.
        outputs = objekt.output_spec()
        outputs.warped_image = os.path.join(
            datadir, "testANTSRegistrationRPT-warped_image.nii.gz"
        )
        outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report)
        return outputs

    # Patch the _run_interface method
    monkeypatch.setattr(ANTSRegistrationRPT, "_run_interface", _run_interface_mock)
    monkeypatch.setattr(ANTSRegistrationRPT, "aggregate_outputs", _agg)
    ants_rpt = ANTSRegistrationRPT(
        generate_report=True,
        moving_image=moving,
        fixed_image=reference,
        # Lightweight registration settings shipped with the package.
        from_file=pkgr.resource_filename(
            "niworkflows.data", "t1w-mni_registration_testing_000.json"
        ),
    )
    _smoke_test_report(ants_rpt, "testANTSRegistrationRPT.svg")
|
eddiedb6/pdb | PDBConst.py | Python | mit | 245 | 0 | # Schema
DB = "d | b"
Name = "name"
Tables = "tables"
Table = "table"
Columns = "columns"
Column = "column"
Attributes = "attributes"
Initials = "initials"
Initial = "initial"
InitialValue = "initialvalu | e"
Value = "value"
PrimaryKey = "primarykey"
|
squirrelo/qiita | qiita_db/investigation.py | Python | bsd-3-clause | 1,984 | 0 | from __future__ import division
"""
Objects for dealing with Qiita studies
This module provides the implementation of the Investigation class.
Classes
-------
- `Investigation` -- A Qiita investigation class
"""
# - | ----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -------------------------------------------- | ---------------------------------
import qiita_db as qdb
# Keys that must be present in the `info` dict when creating an investigation.
REQUIRED_KEYS = {"name", "description", "contact_person"}
class Investigation(qdb.base.QiitaStatusObject):
    """
    Study object to access to the Qiita Study information

    Attributes
    ----------
    name: str
        name of the investigation
    description: str
        description of what the investigation is investigating
    contact_person: StudyPerson object
    studies: list of Study Objects
        all studies that are part of the investigation

    Methods
    -------
    add_study
        Adds a study to the investigation
    """
    # NOTE(review): every accessor below is an unimplemented placeholder;
    # this class currently only pins down the public interface.
    _table = "investigation"

    @classmethod
    def create(cls, owner, info, investigation=None):
        """Creates a new investigation on the database"""
        raise NotImplementedError()

    @classmethod
    def delete(cls, id_):
        """Deletes an investigation on the database"""
        raise NotImplementedError()

    @property
    def name(self):
        raise NotImplementedError()

    @name.setter
    def name(self, value):
        raise NotImplementedError()

    @property
    def description(self):
        raise NotImplementedError()

    @description.setter
    def description(self, value):
        raise NotImplementedError()

    @property
    def contact_person(self):
        raise NotImplementedError()

    @contact_person.setter
    def contact_person(self, value):
        raise NotImplementedError()
|
Lorquas/subscription-manager | src/subscription_manager/gui/messageWindow.py | Python | gpl-2.0 | 3,262 | 0.001839 | from __future__ import print_function, division, absolute_import
#
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from subscription_manager.ga import GObject as ga_GObject
from subscription_manager.ga import Gtk as ga_Gtk
# wrap a long line...
# wrap a long line...
def wrap_line(line, max_line_size=100):
    """Greedily wrap *line* at roughly max_line_size characters.

    Lines shorter than the limit are returned untouched; otherwise the line
    is rebuilt word by word (collapsing runs of whitespace) and a break is
    inserted whenever the accumulated segment has grown past the limit.
    """
    if len(line) < max_line_size:
        return line
    segments = []
    current = ""
    for word in line.split():
        if not current:
            current = word
        elif len(current) > max_line_size:
            # current segment already overflows: flush it, start a new one
            segments.append(current)
            current = word
        else:
            current = current + " " + word
    if current:
        segments.append(current)
    return '\n'.join(segments)
# wrap an entire piece of text
def wrap_text(txt):
return '\n'.join(map(wrap_line, txt.split('\n')))
class MessageWindow(ga_GObject.GObject):
    """Base wrapper around a Gtk.MessageDialog that emits a boolean
    'response' signal (True for OK/YES, False otherwise).

    Subclasses supply the STYLE and BUTTONS class attributes.
    """
    __gsignals__ = {
        'response': (ga_GObject.SignalFlags.RUN_LAST, None,
                     (ga_GObject.TYPE_BOOLEAN,))
    }

    def __init__(self, text, parent=None, title=None):
        ga_GObject.GObject.__init__(self)
        self.rc = None
        # this seems to be wordwrapping text passed to
        # it, which is making for ugly error messages
        self.dialog = ga_Gtk.MessageDialog(parent, 0, self.STYLE, self.BUTTONS)
        if title:
            self.dialog.set_title(title)
        # escape product strings see rh bz#633438
        self.dialog.set_markup(text)
        self.dialog.set_default_response(0)
        self.dialog.set_position(ga_Gtk.WindowPosition.CENTER_ON_PARENT)
        self.dialog.show_all()
        self.dialog.set_icon_name('subscription-manager')
        self.dialog.set_modal(True)
        # this seems spurious, but without it, a ref to this obj gets "lost"
        ga_GObject.add_emission_hook(self, 'response', self.noop_hook)
        self.dialog.connect("response", self._on_response_event)

    def _on_response_event(self, dialog, response):
        # True for OK/YES, False for anything else (NO, CANCEL, close).
        rc = response in [ga_Gtk.ResponseType.OK, ga_Gtk.ResponseType.YES]
        self.emit('response', rc)
        self.hide()

    def hide(self):
        self.dialog.hide()

    def noop_hook(self, dummy1=None, dummy2=None):
        # Intentionally empty: exists only so the emission hook keeps a
        # live reference to this object.
        pass
class ErrorDialog(MessageWindow):
    """Modal error message with a single OK button."""
    BUTTONS = ga_Gtk.ButtonsType.OK
    STYLE = ga_Gtk.MessageType.ERROR
class OkDialog(MessageWindow):
    """Modal informational message with a single OK button."""
    BUTTONS = ga_Gtk.ButtonsType.OK
    STYLE = ga_Gtk.MessageType.INFO
class InfoDialog(MessageWindow):
    """Modal informational message; identical styling to OkDialog."""
    BUTTONS = ga_Gtk.ButtonsType.OK
    STYLE = ga_Gtk.MessageType.INFO
class YesNoDialog(MessageWindow):
    """Modal question dialog with Yes/No buttons ('response' is True on Yes)."""
    BUTTONS = ga_Gtk.ButtonsType.YES_NO
    STYLE = ga_Gtk.MessageType.QUESTION
class ContinueDialog(MessageWindow):
    """Modal warning with OK/Cancel buttons ('response' is True on OK)."""
    BUTTONS = ga_Gtk.ButtonsType.OK_CANCEL
    STYLE = ga_Gtk.MessageType.WARNING
|
class Solution:
    """LeetCode 33: search a target in a rotated sorted array in O(log n)."""

    def searchRot(self, nums):
        """Return the index of the rotation pivot (smallest element).

        Returns 0 when the array is not rotated. Assumes distinct values,
        matching the problem statement.
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if mid == len(nums) - 1:
                # reached the last element without finding a drop: unrotated
                return 0
            if nums[mid] > nums[mid + 1]:
                # the drop between mid and mid+1 marks the pivot
                return mid + 1
            if nums[mid] < nums[mid + 1]:
                if nums[lo] < nums[mid]:
                    lo = mid + 1
                elif nums[lo] > nums[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
        return 0

    def search(self, nums, target):
        """Return the index of *target* in the rotated array, or -1."""
        size = len(nums)
        if size == 1:
            return 0 if nums[0] == target else -1
        if size == 0:
            return -1
        pivot = self.searchRot(nums)
        # Binary search over a "virtual" un-rotated view of the array:
        # virtual index mid maps to real index (mid + pivot) % size.
        lo, hi = 0, size - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            real = (mid + pivot) % size
            if nums[real] == target:
                return real
            if nums[real] > target:
                hi = mid - 1
            else:
                lo = mid + 1
        return -1
# ACME v1 resource names used when talking to the CA's API.
API_NEW_REG = 'new-reg'
API_NEW_AUTHZ = 'new-authz'
API_NEW_CERT = 'new-cert'
# HTTP header / link-relation names used by the ACME protocol.
LOCATION = 'Location'
TOS = 'terms-of-service'
REPLAY_NONCE = 'Replay-Nonce'
# Supported ACME challenge type identifiers.
CHALLENGE_METHOD_HTTP_01 = 'http-01'
CHALLENGE_METHOD_DNS_01 = 'dns-01'
CHALLENGE_METHOD_TLS_SNI_01 = 'tls-sni-01'
|
nsanthony/super-fortnight | wwk/py/people/people_class.py | Python | apache-2.0 | 583 | 0.012007 | #! /home/nsanthony/miniconda3/bin/python
import inventory.inventory_class as inv
import weapons.weapon_class as wp
class people:
    """A person or creature: name, health, description, equipment and bag."""
    # NOTE(review): these methods are defined without ``self``; they work only
    # when invoked on the class itself (Python 3), e.g. ``people.name()``.
    # Calling them on an instance raises TypeError -- confirm intended usage.
    def name():
        # default: empty display name
        n = ''
        return n
    def health():
        # default: zero hit points
        hp = 0
        return hp
    def descript():
        d = 'Description of the person or creature'
        return d
    def equiped():
        # default loadout: empty inventory holding a default weapon, no armor
        e = inv.inventory()
        e.weapon = wp.weapon()
        e.armor = 0
        return e
    def bag():
        # carried items, keyed by item (empty by default)
        b = {}
        return b
    def hostile():
        # 0 = not hostile
        h = 0
        return h
azavea/blend | docs/conf.py | Python | mit | 7,745 | 0.007489 | # -*- coding: utf-8 -*-
#
# Blend documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 24 14:11:43 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions enabled for the Blend docs (API autodoc + source links).
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Blend'
copyright = u'2012, Azavea'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Blenddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Blend.tex', u'Blend Documentation',
   u'Justin Walgran', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'blend', u'Blend Documentation',
     [u'Justin Walgran'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Blend', u'Blend Documentation',
   u'Justin Walgran', 'Blend', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
mazvv/travelcrm | travelcrm/forms/dismissals.py | Python | gpl-3.0 | 2,514 | 0 | # -*-coding: utf-8 -*-
import colander
from . import (
SelectInteger,
ResourceSchema,
BaseForm,
BaseSearchForm,
BaseAssignForm,
Date
)
from ..resources.dismissals import DismissalsResource
from ..models.dismissal import Dismissal
from ..models.employee import Employee
from ..models.note import Note
from ..models.task import Task
from ..lib.qb.dismissals import DismissalsQueryBuilder
from ..lib.utils.common_utils import translate as _
from ..lib.utils.security_utils import get_auth_employee
from ..lib.bl.employees import (
get_employee_position,
is_employee_currently_dismissed
)
@colander.deferred
def employee_validator(node, kw):
    """Deferred colander validator for the dismissal's employee field.

    Rejects employees that are already dismissed or hold no position.
    """
    request = kw.get('request')

    def check(inner_node, value):
        # Look up the candidate employee and verify it may be dismissed.
        employee = Employee.get(value)
        if is_employee_currently_dismissed(employee):
            raise colander.Invalid(
                inner_node,
                _(u'Employee is dismissed already.')
            )
        if not get_employee_position(employee):
            raise colander.Invalid(
                inner_node,
                _(u'Can\'t dismiss employee without position.')
            )

    return check
class _DismissalSchema(ResourceSchema):
    """Colander schema validating dismissal create/update input."""
    # dismissal date
    date = colander.SchemaNode(
        Date(),
    )
    # employee being dismissed; must not be dismissed already and must
    # currently hold a position (see employee_validator)
    employee_id = colander.SchemaNode(
        SelectInteger(Employee),
        validator=employee_validator,
    )
class DismissalForm(BaseForm):
    """Validated create/update form for Dismissal resources."""

    _schema = _DismissalSchema

    def submit(self, dismissal=None):
        """Populate *dismissal* from the validated controls and return it.

        When no dismissal is given, a new one is created with a fresh
        resource owned by the authenticated employee; otherwise the existing
        resource's notes and tasks are cleared before being refilled.
        """
        controls = self._controls
        if not dismissal:
            creator = get_auth_employee(self.request)
            dismissal = Dismissal(
                resource=DismissalsResource.create_resource(creator)
            )
        else:
            dismissal.resource.notes = []
            dismissal.resource.tasks = []
        dismissal.date = controls.get('date')
        dismissal.employee_id = controls.get('employee_id')
        for note_id in controls.get('note_id'):
            dismissal.resource.notes.append(Note.get(note_id))
        for task_id in controls.get('task_id'):
            dismissal.resource.tasks.append(Task.get(task_id))
        return dismissal
class DismissalSearchForm(BaseSearchForm):
    """Search form backed by the dismissals query builder."""
    _qb = DismissalsQueryBuilder
class DismissalAssignForm(BaseAssignForm):
    """Bulk-assign a maintainer to the selected dismissals."""

    def submit(self, ids):
        """Set the chosen maintainer on every dismissal in *ids*."""
        for dismissal_id in ids:
            target = Dismissal.get(dismissal_id)
            target.resource.maintainer_id = self._controls.get(
                'maintainer_id'
            )
|
ankush-me/SynthText | text_utils.py | Python | apache-2.0 | 23,512 | 0.01008 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import os.path as osp
import random, os
import cv2
import cPickle as cp
import scipy.si | gnal as ssig
import scipy.stats as sstat
import pygame, pygame.locals
from pygame import freetype
#import Image
from PIL import Image
import math
from common import *
def sample_weighted(p_dict):
    """Sample a value from p_dict, where each key is its own probability.

    Keys must be numeric and sum to 1. Fix: ``dict.keys()`` returns a view
    on Python 3, which ``np.random.choice`` cannot index -- materialize it
    as a list first (also fine on Python 2).
    """
    probs = list(p_dict.keys())
    return p_dict[np.random.choice(probs, p=probs)]
def move_bb(bbs, t):
    """
    Translate the bounding-boxes in by t_x,t_y.
    BB : 2x4xn
    T : 2-long np.array

    Broadcasts the 2-vector offset over the last two axes.
    """
    offset = np.reshape(t, (2, 1, 1))
    return bbs + offset
def crop_safe(arr, rect, bbs=[], pad=0):
    """
    ARR : arr to crop
    RECT: (x,y,w,h) : area to crop to
    BBS : nx4 xywh format bounding-boxes (numpy array; shifted IN PLACE)
    PAD : number of pixels to pad the rect on every side

    Does safe cropping (clamped to the array bounds). Returns the cropped
    array, and -- when bounding-boxes were given -- the boxes adjusted into
    the cropped coordinate frame.

    Fix: ``xrange`` is Python-2-only; ``range`` behaves identically here and
    works on both major versions. (The mutable ``bbs=[]`` default is never
    mutated: the in-place shift only happens when a non-empty array is
    passed in.)
    """
    rect = np.array(rect)
    # grow the rect by `pad` on each side
    rect[:2] -= pad
    rect[2:] += 2*pad
    # clamp the crop window to the array bounds
    v0 = [max(0, rect[0]), max(0, rect[1])]
    v1 = [min(arr.shape[0], rect[0]+rect[2]), min(arr.shape[1], rect[1]+rect[3])]
    arr = arr[v0[0]:v1[0], v0[1]:v1[1], ...]
    if len(bbs) > 0:
        # shift box origins into the cropped frame (mutates the caller's array)
        for i in range(len(bbs)):
            bbs[i, 0] -= v0[0]
            bbs[i, 1] -= v0[1]
        return arr, bbs
    else:
        return arr
class BaselineState(object):
    """Random parabolic baseline y = a*x^2 used for curved text rendering."""

    # [mean magnitude, std-dev] of the curvature coefficient
    a = [0.50, 0.05]

    def curve(self, a):
        """Return the parabola y(x) = a*x^2 for a fixed coefficient."""
        return lambda x: a * x * x

    def differential(self, a):
        """Return the derivative y'(x) = 2*a*x for a fixed coefficient."""
        return lambda x: 2 * a * x

    def get_sample(self):
        """
        Returns the functions for the curve and differential for a and b
        """
        # random sign, then a gaussian perturbation around +/- a[0]
        sign = -1 if np.random.rand() < 0.5 else 1.0
        coeff = self.a[1] * np.random.randn() + sign * self.a[0]
        return {
            'curve': self.curve(coeff),
            'diff': self.differential(coeff),
        }
class RenderFont(object):
"""
Outputs a rasterized font sample.
Output is a binary mask matrix cropped closesly with the font.
Also, outputs ground-truth bounding boxes and text string
"""
    def __init__(self, data_dir='data'):
        """Set up text sampling, font sampling and placement parameters."""
        # distribution over the type of text:
        # whether to get a single word, paragraph or a line:
        # NOTE(review): this dict literal repeats the key 0.0, so the 'WORD'
        # entry is silently overwritten by 'LINE' -- confirm intended weights.
        self.p_text = {0.0 : 'WORD',
                       0.0 : 'LINE',
                       1.0 : 'PARA'}
        ## TEXT PLACEMENT PARAMETERS:
        # shrink factor applied per retry when text does not fit
        self.f_shrink = 0.90
        self.max_shrink_trials = 5 # 0.9^5 ~= 0.6
        # the minimum number of characters that should fit in a mask
        # to define the maximum font height.
        self.min_nchar = 2
        self.min_font_h = 16 #px : 0.6*12 ~ 7px <= actual minimum height
        self.max_font_h = 120 #px
        self.p_flat = 0.10
        # probability of rendering along a curved baseline:
        self.p_curved = 1.0
        self.baselinestate = BaselineState()
        # text-source : gets english text:
        self.text_source = TextSource(min_nchar=self.min_nchar,
                                      fn=osp.join(data_dir,'newsgroup/newsgroup.txt'))
        # get font-state object:
        self.font_state = FontState(data_dir)
        pygame.init()
    def render_multiline(self,font,text):
        """
        renders multiline TEXT on the pygame surface SURF with the
        font style FONT.
        A new line in text is denoted by \n, no other characters are
        escaped. Other forms of white-spaces should be converted to space.
        returns the updated surface, words and the character bounding boxes.
        """
        # get the number of lines
        lines = text.split('\n')
        lengths = [len(l) for l in lines]
        # font parameters:
        line_spacing = font.get_sized_height() + 1
        # initialize the surface to proper size (sized from the longest line):
        line_bounds = font.get_rect(lines[np.argmax(lengths)])
        fsize = (round(2.0*line_bounds.width), round(1.25*line_spacing*len(lines)))
        surf = pygame.Surface(fsize, pygame.locals.SRCALPHA, 32)
        bbs = []
        # width of 'O' is used as the advance for whitespace characters
        space = font.get_rect('O')
        x, y = 0, 0
        for l in lines:
            x = 0 # carriage-return
            y += line_spacing # line-feed
            for ch in l: # render each character
                if ch.isspace(): # just shift
                    x += space.width
                else:
                    # render the character
                    ch_bounds = font.render_to(surf, (x,y), ch)
                    # convert the freetype-relative rect into surface coords
                    ch_bounds.x = x + ch_bounds.x
                    ch_bounds.y = y - ch_bounds.y
                    x += ch_bounds.width
                    bbs.append(np.array(ch_bounds))
        # get the union of characters for cropping:
        r0 = pygame.Rect(bbs[0])
        rect_union = r0.unionall(bbs)
        # get the words:
        words = ' '.join(text.split())
        # crop the surface to fit the text (bbs adjusted in place by crop_safe):
        bbs = np.array(bbs)
        surf_arr, bbs = crop_safe(pygame.surfarray.pixels_alpha(surf), rect_union, bbs, pad=5)
        surf_arr = surf_arr.swapaxes(0,1)
        #self.visualize_bb(surf_arr,bbs)
        return surf_arr, words, bbs
def render_curved(self, font, word_text):
"""
use curved baseline for rendering word
"""
wl = len(word_text)
isword = len(word_text.split())==1
# do curved iff, the length of the word <= 10
if not isword or wl > 10 or np.random.rand() > self.p_curved:
return self.render_multiline(font, word_text)
# create the surface:
lspace = font.get_sized_height() + 1
lbound = font.get_rect(word_text)
fsize = (round(2.0*lbound.width), round(3*lspace))
surf = pygame.Surface(fsize, pygame.locals.SRCALPHA, 32)
# baseline state
mid_idx = wl//2
BS = self.baselinestate.get_sample()
curve = [BS['curve'](i-mid_idx) for i in xrange(wl)]
curve[mid_idx] = -np.sum(curve) / (wl-1)
rots = [-int(math.degrees(math.atan(BS['diff'](i-mid_idx)/(font.size/2)))) for i in xrange(wl)]
bbs = []
# place middle char
rect = font.get_rect(word_text[mid_idx])
rect.centerx = surf.get_rect().centerx
rect.centery = surf.get_rect().centery + rect.height
rect.centery += curve[mid_idx]
ch_bounds = font.render_to(surf, rect, word_text[mid_idx], rotation=rots[mid_idx])
ch_bounds.x = rect.x + ch_bounds.x
ch_bounds.y = rect.y - ch_bounds.y
mid_ch_bb = np.array(ch_bounds)
# render chars to the left and right:
last_rect = rect
ch_idx = []
for i in xrange(wl):
#skip the middle character
if i==mid_idx:
bbs.append(mid_ch_bb)
ch_idx.append(i)
continue
if i < mid_idx: #left-chars
i = mid_idx-1-i
elif i==mid_idx+1: #right-chars begin
last_rect = rect
ch_idx.append(i)
ch = word_text[i]
newrect = font.get_rect(ch)
newrect.y = last_rect.y
if i > mid_idx:
newrect.topleft = (last_rect.topright[0]+2, newrect.topleft[1])
else:
newrect.topright = (last_rect.topleft[0]-2, newrect.topleft[1])
newrect.centery = max(newrect.height, min(fsize[1] - newrect.height, newrect.centery + curve[i]))
try:
bbrect = font.render_to(surf, newrect, ch, rotation=rots[i])
except ValueError:
bbrect = font.render_to(surf, newrect, ch)
bbrect.x = newrect.x + bbrect.x
bbrect.y = newrect.y - bbrect.y
bbs.append(np.array(bbrect))
last_rect = newrect
# correct the bounding-box order:
bbs_sequence_order = [None for i in ch_idx]
for idx,i in enumerate(ch_idx):
bbs_sequence_order[i] = bbs[idx]
bbs = bbs_sequence_order
# get the union of characters for cropping:
r0 = pygame.Rect(bbs[0])
rect_union = r0.unionall(bbs)
# crop the surface to fit the text:
bbs = np.array(bbs)
surf_arr, bbs = crop_safe(pygame.surfarray.pixels_alpha(surf), rect_union, bbs, pad=5)
surf_arr = sur |
chewse/djangorestframework-dynamic-fields | test_dynamicfields.py | Python | mit | 218 | 0.004587 | # -*- coding: utf-8 -*-
import uni | ttest
import mock
class DynamicFieldsMixinTestCase(unittest.TestCase):
"""Test functionality of the DynamicFieldsMixin class.""" |
def test_restrict_dynamic_fields(self):
|
leewujung/ooi_sonar | during_incubator/nmf_seq_20170228.py | Python | apache-2.0 | 6,547 | 0.017718 | #!/usr/local/bin/python
'''
NMF decomposition sequentially over multiple time chunks
'''
import os, sys, glob, re
import datetime
import numpy as np
from calendar import monthrange
import h5py
sys.path.insert(0,'/home/wu-jung/code_git/mi-instrument/')
from concat_raw import get_num_days_pings, get_data_from_h5
from echogram_decomp import find_nearest_time_idx
import matplotlib.pyplot as plt
from modest_image import imshow
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Set default colormap
plt.rcParams['image.cmap'] = 'jet'
# Get info of all files and set path
data_path = '/media/wu-jung/My Passport/OOI/ooi_all_data_h5'
fname_form = '*.h5'
fname_all = glob.glob(os.path.join(data_path,fname_form))
save_path = '/home/wu-jung/internal_2tb/ooi_sonar/figs/plot_echogram_month'
# Regex splitting h5 filenames of the form <SITE_CODE>_<YYYYMM>.h5
H5_FILENAME_MATCHER = re.compile('(?P<SITE_CODE>\S*)_(?P<YearMonth>\S*)\.\S*')
SITE_CODE = 'CE04OSPS'
days_win = 10 # number of days in sliding window
d_start_fmt = '20151225' # start date
fmt = '%Y%m%d'
d_start = datetime.datetime.strptime(d_start_fmt,fmt)
days = [d_start+datetime.timedelta(days=x) for x in range(days_win)]
all_hr = range(24) # list of all hour: 0-23
all_minutes = range(1,21) # list of all minutes: 1-10
every_ping = [xday+datetime.timedelta(hours=xhr,minutes=xmin)\
for xday in days for xhr in all_hr for xmin in all_min] # datetime object for all pings wanted
pings_per_day = len(all_hr)*len(all_minutes) # number of pings per day
cnt = 0 # counter for location in unpacked array
y_want = np.unique(np.asarray([every_ping[x].year for x in range(len(every_ping))]))
for y in y_want: # loop through all years included
ping_sel_y = [x for x in every_ping if x.year==y] # all pings from year y
m_want = np.unique(np.asarray([ping_sel_y[x].month for x in range(len(ping_sel_y))]))
for m in m_want:
# y,m are the filename
ym_fname = datetime.datetime.strftime(datetime.date(y,m,1),'%Y%m');
ping_sel_m = [x for x in ping_sel_y if x.month==m] # all pings from month m
# Open h5 file
f = h5py.File(os.path.join(data_path,SITE_CODE+'_'+ym_fname+'.h5'),'r')
# Get f['data_times'] idx for every hour in all days in the month
all_idx = [find_nearest_time_idx(f['data_times'],t) for t in ping_sel_m]
all_idx = np.array(all_idx) # to allow numpy operation
# Inititalize mtx if not exist
if not 'ping_times' in locals():
ping_times = np.empty(len(every_ping)) # pinging time
Sv_tmp = f['Sv'][:,:,0] # Sv array
Sv_mtx = np.empty((Sv_tmp.shape[0],Sv_tmp.shape[1],len(every_ping)))
Sv_mtx[:] = np.nan
bin_size = f['bin_size'][0] # size of each depth bin
# Fill in array
notnanidx = np.int_(all_idx[~np.isnan(all_idx)])
ping_times[~np.isnan(all_idx)+cnt] = f['data_times'][notnanidx.tolist()]
Sv_mtx[:,:,~np.isnan(all_idx)+cnt] = f['Sv'][:,:,notnanidx.tolist()]
# Increment counter
cnt = cnt+len(all_idx)
# Per-month echogram plotting (Python 2 script: `print` statement below).
# NOTE(review): the slice [28:-1] skips the first 28 files AND the last
# file -- confirm the last file is intentionally excluded.
for fname in fname_all[28:-1]:
    print 'Processing '+fname+' ...'
    # Open h5 file
    f = h5py.File(fname,'r')
    # Get month and day range
    file_datetime = H5_FILENAME_MATCHER.match(os.path.basename(fname)).group('YearMonth')
    ym = datetime.datetime.strptime(file_datetime,'%Y%m')
    year = ym.year
    month = ym.month
    _,daynum = monthrange(year,month)
    # Save fig name
    save_fname = os.path.basename(fname).split('.')[0]
    # Get datetime object for on the hour every hour in all days in the month
    all_day = range(1,daynum+1) # list of all days
    all_hr = range(24) # list of all hour: 0-23
    all_minutes = range(1,21) # list of all minutes: 1-20
    every_ping = [datetime.datetime(year,month,day,hr,minutes,0) \
                  for day in all_day for hr in all_hr for minutes in all_minutes]
    pings_per_day = len(all_hr)*len(all_minutes)
    # Get f['data_times'] idx for every hour in all days in the month
    all_idx = [find_nearest_time_idx(f['data_times'],hr) for hr in every_ping]
    all_idx = np.array(all_idx) # to allow numpy operation
    # Extract timing and Sv data (NaN slots mark pings with no nearby sample)
    notnanidx = np.int_(all_idx[~np.isnan(all_idx)])
    data_times = np.empty(all_idx.shape) # initialize empty array
    data_times[~np.isnan(all_idx)] = f['data_times'][notnanidx.tolist()]
    Sv_tmp = f['Sv'][:,:,0]
    Sv_mtx = np.empty((Sv_tmp.shape[0],Sv_tmp.shape[1],all_idx.shape[0]))
    Sv_mtx[:] = np.nan
    Sv_mtx[:,:,~np.isnan(all_idx)] = f['Sv'][:,:,notnanidx.tolist()]
    bin_size = f['bin_size'][0] # size of each depth bin
    f.close()
    # Get plotting params
    depth_bin_num = Sv_mtx.shape[1] # number of depth bins
    max_depth = np.round(Sv_mtx.shape[1]*bin_size)
    day_tick = np.arange(daynum)*pings_per_day
    day_label = [str(x) for x in np.arange(daynum)]
    depth_tick = np.linspace(0,depth_bin_num,5)
    depth_label = ['%d' % d for d in np.linspace(0,max_depth,5)]
    plot_params = dict(zip(['year','month','depth_label','day_label','depth_tick','day_tick'],\
                           [year,month,depth_label,day_label,depth_tick,day_tick]))
    # Plot and save figure: one panel per frequency channel (38/120/200 kHz)
    fig,ax = plt.subplots(3,sharex=True,sharey=True)
    for ff in range(Sv_mtx.shape[0]):
        im = imshow(ax[ff],Sv_mtx[ff,:,:],aspect='auto',vmax=-30,vmin=-80)
        ax[ff].set_yticks(plot_params['depth_tick'])
        ax[ff].set_yticklabels(plot_params['depth_label'],fontsize=16)
        ax[ff].set_xticks(plot_params['day_tick'])
        ax[ff].set_xticklabels(plot_params['day_label'],fontsize=16)
        ax[ff].set_ylabel('Depth (m)',fontsize=20)
        if ff==0:
            ax[ff].set_title(save_fname+', 38 kHz',fontsize=20)
        elif ff==1:
            ax[ff].set_title('120 kHz',fontsize=20)
        elif ff==2:
            ax[ff].set_title('200 kHz',fontsize=20)
        divider = make_axes_locatable(ax[ff])
        cax = divider.append_axes("right", size="1%", pad=0.05)
        cbar = plt.colorbar(im,cax=cax)
        cbar.ax.tick_params(labelsize=14)
    ax[2].set_xlabel('Day',fontsize=20)
    fig.set_figwidth(24)
    fig.set_figheight(8)
    plt.savefig(os.path.join(save_path,save_fname),dpi=300)
    plt.close(fig)
numerodix/killdupes | setup.py | Python | gpl-2.0 | 1,048 | 0 | from setuptools import find_packages
from setuptools import setup
import killdupes
setup(
    name='killdupes',
    version=killdupes.__version__,
    description='Kill duplicate files, finding partial files as well',
    author='Martin Matusiak',
    author_email='numerodix@gmail.com',
    url='https://github.com/numerodix/killdupes',
    packages=find_packages('.'),
    package_dir={'': '.'},
    # don't install as zipped egg
    zip_safe=False,
    # installed as a runnable command-line script
    scripts=[
        'killdupes/killdupes.py',
    ],
    classifiers=[
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
|
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/backends/windowing.py | Python | gpl-2.0 | 790 | 0.002532 | """
MS Windows-specific helper for the TkAgg backend.
With rcParams['tk.window_focus'] default of False, it is
effectively disabled.
It uses a tiny C++ extension module to access MS Win functions.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import rcParams
try:
    # Only load the real Win32 helpers when the user opted in via rcParams;
    # otherwise fall through to the no-op stubs below.
    if not rcParams['tk.window_focus']:
        raise ImportError
    from matplotlib._windowing import GetForegroundWindow, SetForegroundWindow
except ImportError:
    # No-op fallbacks (non-Windows platforms, or the feature is disabled).
    def GetForegroundWindow():
        return 0
    def SetForegroundWindow(hwnd):
        pass
class FocusManager:
    """Remember the current foreground window on creation and restore it
    when this object is destroyed (via __del__)."""
    def __init__(self):
        self._shellWindow = GetForegroundWindow()
    def __del__(self):
        SetForegroundWindow(self._shellWindow)
|
nephila/djangocms_twitter | djangocms_twitter/south_migrations/0004_rename_tables.py | Python | bsd-3-clause | 4,301 | 0.007673 | # -*- coding: utf-8 -*-
from distutils.version import LooseVersion
import cms
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Renames the plugin tables from the legacy cmsplugin_* naming scheme to
    # djangocms_twitter_* for django CMS >= 3. The rename is guarded by an
    # introspection check so re-running the migration is a no-op.
    def forwards(self, orm):
        if LooseVersion(cms.__version__) >= LooseVersion('3'):
            if 'cmsplugin_twittersearch' in db.connection.introspection.table_names():
                db.rename_table(u'cmsplugin_twittersearch', u'djangocms_twitter_twittersearch')
                db.rename_table(u'cmsplugin_twitterrecententries', u'djangocms_twitter_twitterrecententries')
    def backwards(self, orm):
        # Mirror of forwards(): restore the legacy cmsplugin_* table names.
        if LooseVersion(cms.__version__) >= LooseVersion('3'):
            if 'djangocms_twitter_twittersearch' in db.connection.introspection.table_names():
                db.rename_table(u'djangocms_twitter_twittersearch', u'cmsplugin_twittersearch')
                db.rename_table(u'djangocms_twitter_twitterrecententries', u'cmsplugin_twitterrecententries')
    # Frozen ORM model definitions (South boilerplate; data, not executed code).
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        u'djangocms_twitter.twitterrecententries': {
            'Meta': {'object_name': 'TwitterRecentEntries', '_ormbases': ['cms.CMSPlugin']},
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}),
            'link_hint': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
            'twitter_id': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '75'})
        },
        u'djangocms_twitter.twittersearch': {
            'Meta': {'object_name': 'TwitterSearch', '_ormbases': ['cms.CMSPlugin']},
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}),
            'query': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
            'twitter_id': ('django.db.models.fields.CharField', [], {'max_length': '75'})
        }
    }
    complete_apps = ['djangocms_twitter']
|
janeen666/mi-instrument | mi/dataset/driver/adcps_jln/stc/adcps_jln_stc_recovered_driver.py | Python | bsd-2-clause | 1,609 | 0.002486 | #!/usr/local/bin/python2.7
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
import os
from mi.core.log import get_logger
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.adcps_jln_stc import AdcpsJlnStcParser, \
AdcpsJlnStcMetadataRecoveredDataParticle, \
AdcpsJlnStcInstrumentRecoveredDataParticle, \
AdcpsJlnStcParticleClassKey
from mi.core.versioning import version
log = get_logger( | )
@version("0.0.5")
def parse(unused, source_file_path, particle_data_handler):
    """Parse a recovered adcps_jln_stc data file and publish its particles.

    :param unused: unused first argument kept for the dataset driver API
    :param source_file_path: path of the raw instrument file to parse
    :param particle_data_handler: sink that accumulates parsed particles
    :return: the same particle_data_handler, populated
    """
    parser_config = {
        DataSetDriverConfigKeys.PARTICLE_MODULE:
            'mi.dataset.parser.adcps_jln_stc',
        DataSetDriverConfigKeys.PARTICLE_CLASS: None,
        DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
            AdcpsJlnStcParticleClassKey.METADATA_PARTICLE_CLASS:
                AdcpsJlnStcMetadataRecoveredDataParticle,
            AdcpsJlnStcParticleClassKey.INSTRUMENT_PARTICLE_CLASS:
                AdcpsJlnStcInstrumentRecoveredDataParticle,
        },
    }
    log.debug("My ADCPS JLN STC Config: %s", parser_config)

    def exception_callback(exception):
        # Any parser-level error marks the whole capture as failed.
        log.debug("ERROR: %r", exception)
        particle_data_handler.setParticleDataCaptureFailure()

    with open(source_file_path, 'rb') as file_handle:
        stream_parser = AdcpsJlnStcParser(parser_config,
                                          file_handle,
                                          exception_callback)
        DataSetDriver(stream_parser, particle_data_handler).processFileStream()

    return particle_data_handler
|
tuxology/bcc | src/cc/frontends/p4/compiler/ebpfDeparser.py | Python | apache-2.0 | 6,681 | 0.000299 | # Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from collections import defaultdict, OrderedDict
from compilationException import CompilationException
from p4_hlir.hlir import parse_call, p4_field, p4_parse_value_set, \
P4_DEFAULT, p4_parse_state, p4_table, \
p4_conditional_node, p4_parser_exception, \
p4_header_instance, P4_NEXT
import ebpfProgram
import ebpfInstance
import ebpfType
import ebpfStructType
from topoSorting import Graph
from programSerializer import ProgramSerializer
def produce_parser_topo_sorting(hlir):
    """Topologically sort all header instances reachable from the parser.

    Walks every parse path starting at the "start" state, recording an edge
    from each extracted header to the next one, then returns the headers in
    a dependency-respecting order (used to emit the deparser).
    """
    # This function is copied from the P4 behavioral model implementation
    header_graph = Graph()

    def walk_rec(hlir, parse_state, prev_hdr_node, tag_stacks_index):
        assert(isinstance(parse_state, p4_parse_state))
        for call in parse_state.call_sequence:
            call_type = call[0]
            if call_type == parse_call.extract:
                hdr = call[1]
                # Virtual headers are stack references ("hdr[next]"); resolve
                # them to the concrete indexed instance for this path.
                if hdr.virtual:
                    base_name = hdr.base_name
                    current_index = tag_stacks_index[base_name]
                    # Stack overflow on this path: stop following it.
                    if current_index > hdr.max_index:
                        return
                    tag_stacks_index[base_name] += 1
                    name = base_name + "[%d]" % current_index
                    hdr = hlir.p4_header_instances[name]

                if hdr not in header_graph:
                    header_graph.add_node(hdr)
                hdr_node = header_graph.get_node(hdr)

                if prev_hdr_node:
                    prev_hdr_node.add_edge_to(hdr_node)
                else:
                    header_graph.root = hdr
                prev_hdr_node = hdr_node

        for branch_case, next_state in parse_state.branch_to.items():
            if not next_state:
                continue
            if not isinstance(next_state, p4_parse_state):
                continue
            # Copy the stack indices so sibling branches don't interfere.
            walk_rec(hlir, next_state, prev_hdr_node, tag_stacks_index.copy())

    start_state = hlir.p4_parse_states["start"]
    walk_rec(hlir, start_state, None, defaultdict(int))

    header_topo_sorting = header_graph.produce_topo_sorting()

    return header_topo_sorting
class EbpfDeparser(object):
    """Emits the eBPF deparser: writes all valid headers back to the packet.

    Headers are emitted in the topological order implied by the parser, so
    the reconstructed packet matches the original wire layout.
    """

    def __init__(self, hlir):
        header_topo_sorting = produce_parser_topo_sorting(hlir)
        # Header instance names, in emission (wire) order.
        self.headerOrder = [hdr.name for hdr in header_topo_sorting]

    def serialize(self, serializer, program):
        """Emit the full deparser block: reset the bit offset, then emit
        every known header (each guarded by its validity flag)."""
        assert isinstance(serializer, ProgramSerializer)
        assert isinstance(program, ebpfProgram.EbpfProgram)
        serializer.emitIndent()
        serializer.blockStart()
        serializer.emitIndent()
        serializer.appendLine("/* Deparser */")
        serializer.emitIndent()
        serializer.appendFormat("{0} = 0;", program.offsetVariableName)
        serializer.newline()
        for h in self.headerOrder:
            header = program.getHeaderInstance(h)
            self.serializeHeaderEmit(header, serializer, program)
        serializer.blockEnd(True)

    def serializeHeaderEmit(self, header, serializer, program):
        """Emit one header instance (all its fields) if it is valid.

        Resolves header-stack elements to the right index expression and
        threads the bit alignment through the fields.
        """
        assert isinstance(header, ebpfInstance.EbpfHeader)
        assert isinstance(serializer, ProgramSerializer)
        assert isinstance(program, ebpfProgram.EbpfProgram)
        p4header = header.hlirInstance
        assert isinstance(p4header, p4_header_instance)
        serializer.emitIndent()
        serializer.appendFormat("if ({0}.{1}.valid) ",
                                program.headerStructName, header.name)
        serializer.blockStart()
        if ebpfProgram.EbpfProgram.isArrayElementInstance(p4header):
            # Header-stack element: index is either a literal or the stack's
            # runtime index variable (P4_NEXT).
            ebpfStack = program.getStackInstance(p4header.base_name)
            assert isinstance(ebpfStack, ebpfInstance.EbpfHeaderStack)

            if isinstance(p4header.index, int):
                index = "[" + str(p4header.index) + "]"
            elif p4header.index is P4_NEXT:
                index = "[" + ebpfStack.indexVar + "]"
            else:
                raise CompilationException(
                    True, "Unexpected index for array {0}",
                    p4header.index)
            basetype = ebpfStack.basetype
        else:
            ebpfHeader = program.getHeaderInstance(p4header.name)
            basetype = ebpfHeader.type
            index = ""

        # alignment tracks the bit position within the current byte (0-7);
        # fields are packed, so it advances by each field's width mod 8.
        alignment = 0
        for field in basetype.fields:
            assert isinstance(field, ebpfStructType.EbpfField)
            self.serializeFieldEmit(serializer, p4header.base_name,
                                    index, field, alignment, program)
            alignment += field.widthInBits()
            alignment = alignment % 8
        serializer.blockEnd(True)

    def serializeFieldEmit(self, serializer, name, index,
                           field, alignment, program):
        """Emit the packet-store call(s) for a single field and advance the
        running packet offset by the field's width."""
        assert isinstance(index, str)
        assert isinstance(name, str)
        assert isinstance(field, ebpfStructType.EbpfField)
        assert isinstance(serializer, ProgramSerializer)
        assert isinstance(alignment, int)
        assert isinstance(program, ebpfProgram.EbpfProgram)

        # The synthetic "valid" field is bookkeeping only; never emitted.
        if field.name == "valid":
            return

        fieldToEmit = (program.headerStructName + "." + name +
                       index + "." + field.name)
        width = field.widthInBits()
        if width <= 32:
            store = self.generatePacketStore(fieldToEmit, 0, alignment,
                                             width, program)
            serializer.emitIndent()
            serializer.appendLine(store)
        else:
            # Destination is bigger than 4 bytes and
            # represented as a byte array.
            b = (width + 7) / 8  # Python 2 integer division: byte count
            for i in range(0, b):
                serializer.emitIndent()
                store = self.generatePacketStore(fieldToEmit + "["+str(i)+"]",
                                                 i,
                                                 alignment,
                                                 8, program)
                serializer.appendLine(store)

        serializer.emitIndent()
        serializer.appendFormat("{0} += {1};",
                                program.offsetVariableName, width)
        serializer.newline()

    def generatePacketStore(self, value, offset, alignment, width, program):
        """Return one bpf_dins_pkt(...) call storing *width* bits of *value*
        at (current offset / 8 + offset) bytes, *alignment* bits in."""
        assert width > 0
        assert alignment < 8
        assert isinstance(width, int)
        assert isinstance(alignment, int)

        return "bpf_dins_pkt({0}, {1} / 8 + {2}, {3}, {4}, {5});".format(
            program.packetName,
            program.offsetVariableName,
            offset,
            alignment,
            width,
            value
        )
|
rsnakamura/oldape | tests/testunits/testcommands/testifconfig/test_ifconfig_linux.py | Python | apache-2.0 | 1,971 | 0.008118 | from StringIO import StringIO
from mock import MagicMock
from apetools.commands import ifconfig
from apetools.commons import enumerations
from apetools.connections.localconnection import OutputError
def assert_equal(expected, actual):
    """Assert that *actual* equals *expected*, naming both in the failure."""
    message = "Expected: {0} Actual: {1}".format(expected, actual)
    assert expected == actual, message
def testlinux():
    """IfconfigCommand extracts IP and MAC from desktop-Linux ifconfig output."""
    conn = MagicMock()
    conn.ifconfig.return_value = OutputError(StringIO(ifconfig_linux), "")
    command = ifconfig.IfconfigCommand(conn, interface='eth0')
    assert_equal("192.168.10.50", command.ip_address)
    # Each property re-runs ifconfig, so re-arm the mock with a fresh stream.
    conn.ifconfig.return_value = OutputError(StringIO(ifconfig_linux), "")
    assert_equal("00:26:2d:29:a1:8e", command.mac_address)
    conn.ifconfig.assert_called_with("eth0")
def testandroid():
    """IfconfigCommand handles Android's terse ifconfig format (no MAC)."""
    conn = MagicMock()
    conn.ifconfig.return_value = OutputError(StringIO(ifconfig_android), "")
    command = ifconfig.IfconfigCommand(conn, "wlan0",
                                       enumerations.OperatingSystem.android)
    assert_equal("192.168.20.153", command.ip_address)
    # Android's ifconfig output never reports the MAC address.
    assert_equal(ifconfig.MAC_UNAVAILABLE, command.mac_address)
ifconfig_linux = '''
eth0 Link encap:Ethernet HWaddr 00:26:2d:29:a1:8e
inet addr:192.168.10.50 Bcast:192.168.10.255 Mask:255.255.255.0
inet6 addr: fe80::226:2dff:fe29:a18e/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:715528 errors:0 dropped:0 overruns:0 frame:0
TX packets:282106 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:39291 | 9899 (392.9 MB) TX bytes:22980409 (22.9 MB)
| Interrupt:18
'''
ifconfig_android = """
wlan0: ip 192.168.20.153 mask 255.255.255.0 flags [up broadcast running multicast]
"""
if __name__ == "__main__":
    # Removed leftover `import pudb; pudb.set_trace()` debugger breakpoint:
    # it is a third-party dependency and halts any direct run of this module.
    testlinux()
    testandroid()
jefflyn/buddha | src/pci/chapter6/docclass.py | Python | artistic-2.0 | 5,785 | 0.038375 | from pysqlite2 import dbapi2 as sqlite
import re
import math
def getwords(doc):
    """Tokenize *doc* into its set of distinct lowercase words.

    Words are runs of word characters between 3 and 19 characters long.
    Returns a dict mapping each word to 1 (a set-as-dict, as expected by
    the classifier code below).
    """
    # \W+ rather than \W*: a pattern that can match the empty string makes
    # re.split split between every character on Python >= 3.7 (and is
    # deprecated before that); \W+ yields the intended word tokens.
    splitter = re.compile('\\W+')
    # Debug `print doc` removed: it spammed stdout on every call.
    # Split the words by non-alpha characters
    words = [s.lower() for s in splitter.split(doc)
             if len(s) > 2 and len(s) < 20]

    # Return the unique set of words only
    return dict([(w, 1) for w in words])
class classifier:
    """Base trainable classifier keeping feature/category counts in SQLite.

    Counts live in two tables: fc(feature, category, count) and
    cc(category, count).  Call setdb() before training or querying.
    """

    def __init__(self, getfeatures, filename=None):
        # Counts of feature/category combinations (legacy in-memory store,
        # kept for interface compatibility; the DB is authoritative).
        self.fc = {}
        # Counts of documents in each category (legacy, see above).
        self.cc = {}
        # Callable mapping an item (document) to a {feature: 1} dict.
        self.getfeatures = getfeatures

    def setdb(self, dbfile):
        """Open (or create) the SQLite backing store for the counters."""
        self.con = sqlite.connect(dbfile)
        self.con.execute('create table if not exists fc(feature,category,count)')
        self.con.execute('create table if not exists cc(category,count)')

    def incf(self, f, cat):
        """Increment the count for feature *f* in category *cat*."""
        count = self.fcount(f, cat)
        # Parameterized queries: the previous "'%s'" string interpolation
        # broke on any feature containing a quote and was SQL-injectable.
        if count == 0:
            self.con.execute("insert into fc values (?,?,1)", (f, cat))
        else:
            self.con.execute(
                "update fc set count=? where feature=? and category=?",
                (count + 1, f, cat))

    def fcount(self, f, cat):
        """Return how many times feature *f* was seen in category *cat*."""
        res = self.con.execute(
            "select count from fc where feature=? and category=?",
            (f, cat)).fetchone()
        if res is None:
            return 0
        return float(res[0])

    def incc(self, cat):
        """Increment the document count for category *cat*."""
        count = self.catcount(cat)
        if count == 0:
            self.con.execute("insert into cc values (?,1)", (cat,))
        else:
            self.con.execute("update cc set count=? where category=?",
                             (count + 1, cat))

    def catcount(self, cat):
        """Return the number of documents trained into category *cat*."""
        res = self.con.execute("select count from cc where category=?",
                               (cat,)).fetchone()
        if res is None:
            return 0
        return float(res[0])

    def categories(self):
        """Return the list of all known category names."""
        cur = self.con.execute('select category from cc')
        return [d[0] for d in cur]

    def totalcount(self):
        """Return the total number of trained documents."""
        res = self.con.execute('select sum(count) from cc').fetchone()
        # sum() over an empty table yields a (None,) row: treat as zero
        # instead of returning None (which crashed downstream division).
        if res is None or res[0] is None:
            return 0
        return res[0]

    def train(self, item, cat):
        """Train on *item* as an example of category *cat* and commit."""
        features = self.getfeatures(item)
        # Increment the count for every feature with this category
        for f in features:
            self.incf(f, cat)

        # Increment the count for this category
        self.incc(cat)
        self.con.commit()

    def fprob(self, f, cat):
        """P(feature | category), the raw conditional frequency."""
        if self.catcount(cat) == 0:
            return 0
        # The total number of times this feature appeared in this
        # category divided by the total number of items in this category
        return self.fcount(f, cat) / self.catcount(cat)

    def weightedprob(self, f, cat, prf, weight=1.0, ap=0.5):
        """Weighted average of prf(f, cat) and an assumed prior *ap*.

        *weight* controls how strongly the prior is held; features seen
        more often move the estimate toward the observed probability.
        """
        # Calculate current probability
        basicprob = prf(f, cat)

        # Count the number of times this feature has appeared in
        # all categories
        totals = sum([self.fcount(f, c) for c in self.categories()])

        # Calculate the weighted average
        bp = ((weight * ap) + (totals * basicprob)) / (weight + totals)
        return bp
class naivebayes(classifier):
    """Naive Bayes classifier built on the counting base class."""

    def __init__(self, getfeatures):
        classifier.__init__(self, getfeatures)
        # Per-category decision thresholds (see setthreshold/getthreshold).
        self.thresholds = {}

    def docprob(self, item, cat):
        """P(document | category): product of the weighted feature probs."""
        features = self.getfeatures(item)

        # Multiply the probabilities of all the features together
        p = 1
        for f in features:
            p *= self.weightedprob(f, cat, self.fprob)
        return p

    def prob(self, item, cat):
        """Unnormalized P(category | document) = P(doc|cat) * P(cat)."""
        catprob = self.catcount(cat) / self.totalcount()
        docprob = self.docprob(item, cat)
        return docprob * catprob

    def setthreshold(self, cat, t):
        self.thresholds[cat] = t

    def getthreshold(self, cat):
        if cat not in self.thresholds:
            return 1.0
        return self.thresholds[cat]

    def classify(self, item, default=None):
        """Return the winning category, or *default* if no category wins
        convincingly (or at all)."""
        probs = {}
        # Find the category with the highest probability.  top/best start
        # empty: previously `best` was left unbound when every probability
        # was 0, raising NameError below.
        top = 0.0
        best = None
        for cat in self.categories():
            probs[cat] = self.prob(item, cat)
            if probs[cat] > top:
                top = probs[cat]
                best = cat
        if best is None:
            # Nothing matched (all probabilities zero / no categories).
            return default

        # Make sure the probability exceeds threshold*next best
        for cat in probs:
            if cat == best:
                continue
            if probs[cat] * self.getthreshold(best) > probs[best]:
                return default
        return best
class fisherclassifier(classifier):
    """Fisher-method classifier: combines per-feature category probabilities
    with an inverse chi-square test."""

    def cprob(self, f, cat):
        """P(category | feature), normalized across all categories."""
        # The frequency of this feature in this category
        clf = self.fprob(f, cat)
        if clf == 0:
            return 0

        # The frequency of this feature in all the categories
        freqsum = sum([self.fprob(f, c) for c in self.categories()])

        # The probability is the frequency in this category divided by
        # the overall frequency
        p = clf / (freqsum)

        return p

    def fisherprob(self, item, cat):
        """Fisher probability that *item* belongs to *cat*."""
        # Multiply all the probabilities together
        p = 1
        features = self.getfeatures(item)
        for f in features:
            p *= (self.weightedprob(f, cat, self.cprob))

        # Take the natural log and multiply by -2
        fscore = -2 * math.log(p)

        # Use the inverse chi2 function to get a probability
        return self.invchi2(fscore, len(features) * 2)

    def invchi2(self, chi, df):
        """Inverse chi-square: P(chi-square >= *chi*) with *df* degrees of
        freedom (df must be even; series expansion of the CDF tail)."""
        m = chi / 2.0
        sum = term = math.exp(-m)
        for i in range(1, df // 2):
            term *= m / i
            sum += term
        return min(sum, 1.0)

    def __init__(self, getfeatures):
        classifier.__init__(self, getfeatures)
        # Per-category minimum fisherprob required to win (default 0).
        self.minimums = {}

    def setminimum(self, cat, min):
        self.minimums[cat] = min

    def getminimum(self, cat):
        if cat not in self.minimums:
            return 0
        return self.minimums[cat]

    def classify(self, item, default=None):
        """Return the category with the best fisherprob above its minimum,
        or *default* when none qualifies."""
        # Loop through looking for the best result
        best = default
        max = 0.0
        for c in self.categories():
            p = self.fisherprob(item, c)
            # Make sure it exceeds its minimum
            if p > self.getminimum(c) and p > max:
                best = c
                max = p
        return best
def sampletrain(cl):
    """Feed a small canned corpus of good/bad documents into classifier *cl*."""
    corpus = [
        ('Nobody owns the water.', 'good'),
        ('the quick rabbit jumps fences', 'good'),
        ('buy pharmaceuticals now', 'bad'),
        ('make quick money at the online casino', 'bad'),
        ('the quick brown fox jumps', 'good'),
    ]
    for document, category in corpus:
        cl.train(document, category)
|
walteryang47/ovirt-engine | packaging/setup/plugins/ovirt-engine-common/ovirt-engine/db/connection.py | Python | apache-2.0 | 5,588 | 0 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Connection plugin."""
import gettext
from otopi import constants as otopicons
from otopi import plugin, util
from ovirt_engine import configfile
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine_common import database
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
    """Engine database connection plugin.

    Declares the engine-DB environment keys, and at setup time tries to
    connect using the credentials already stored in the service config
    (when present) so an existing database is reused.
    """

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)

    @plugin.event(
        stage=plugin.Stages.STAGE_BOOT,
    )
    def _boot(self):
        # Make sure the DB password never appears in the setup log.
        self.environment[
            otopicons.CoreEnv.LOG_FILTER_KEYS
        ].append(
            oenginecons.EngineDBEnv.PASSWORD
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Seed every engine-DB environment key with a None/default value so
        # later stages can rely on the keys existing.
        self.environment.setdefault(
            oenginecons.EngineDBEnv.HOST,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.PORT,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.SECURED,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.SECURED_HOST_VALIDATION,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.USER,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.PASSWORD,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.DATABASE,
            None
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.DUMPER,
            oenginecons.Defaults.DEFAULT_DB_DUMPER
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.FILTER,
            oenginecons.Defaults.DEFAULT_DB_FILTER
        )
        self.environment.setdefault(
            oenginecons.EngineDBEnv.RESTORE_JOBS,
            oenginecons.Defaults.DEFAULT_DB_RESTORE_JOBS
        )

        self.environment[oenginecons.EngineDBEnv.CONNECTION] = None
        self.environment[oenginecons.EngineDBEnv.STATEMENT] = None
        self.environment[oenginecons.EngineDBEnv.NEW_DATABASE] = True

    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
        name=oengcommcons.Stages.DB_CONNECTION_SETUP,
    )
    def _setup(self):
        """Try connecting with credentials from the engine service config.

        On success the credentials are copied into the setup environment;
        on failure this raises, unless we are removing the setup, in which
        case a warning is enough.
        """
        dbovirtutils = database.OvirtUtils(
            plugin=self,
            dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
        )
        dbovirtutils.detectCommands()

        config = configfile.ConfigFile([
            oenginecons.FileLocations.OVIRT_ENGINE_SERVICE_CONFIG_DEFAULTS,
            oenginecons.FileLocations.OVIRT_ENGINE_SERVICE_CONFIG
        ])
        # A stored password is the marker that the engine was configured
        # against a database before.
        if config.get('ENGINE_DB_PASSWORD'):
            try:
                dbenv = {}
                for e, k in (
                    (oenginecons.EngineDBEnv.HOST, 'ENGINE_DB_HOST'),
                    (oenginecons.EngineDBEnv.PORT, 'ENGINE_DB_PORT'),
                    (oenginecons.EngineDBEnv.USER, 'ENGINE_DB_USER'),
                    (oenginecons.EngineDBEnv.PASSWORD, 'ENGINE_DB_PASSWORD'),
                    (oenginecons.EngineDBEnv.DATABASE, 'ENGINE_DB_DATABASE'),
                ):
                    dbenv[e] = config.get(k)
                for e, k in (
                    (oenginecons.EngineDBEnv.SECURED, 'ENGINE_DB_SECURED'),
                    (
                        oenginecons.EngineDBEnv.SECURED_HOST_VALIDATION,
                        'ENGINE_DB_SECURED_VALIDATION'
                    )
                ):
                    dbenv[e] = config.getboolean(k)

                dbovirtutils.tryDatabaseConnect(dbenv)
                self.environment.update(dbenv)
                self.environment[
                    oenginecons.EngineDBEnv.NEW_DATABASE
                ] = dbovirtutils.isNewDatabase()
            except RuntimeError as e:
                self.logger.debug(
                    'Existing credential use failed',
                    exc_info=True,
                )
                # NOTE(review): the format string only uses user/host/port;
                # the `database` kwarg below is currently unused.
                msg = _(
                    'Cannot connect to Engine database using existing '
                    'credentials: {user}@{host}:{port}'
                ).format(
                    host=dbenv[oenginecons.EngineDBEnv.HOST],
                    port=dbenv[oenginecons.EngineDBEnv.PORT],
                    database=dbenv[oenginecons.EngineDBEnv.DATABASE],
                    user=dbenv[oenginecons.EngineDBEnv.USER],
                )
                if self.environment[
                    osetupcons.CoreEnv.ACTION
                ] == osetupcons.Const.ACTION_REMOVE:
                    self.logger.warning(msg)
                else:
                    raise RuntimeError(msg)
# vim: expandtab tabstop=4 shiftwidth=4
|
Tan0/ironic | ironic/tests/drivers/ilo/test_inspect.py | Python | apache-2.0 | 19,077 | 0 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Management Interface used by iLO modules."""
import mock
from oslo_config import cfg
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as conductor_utils
from ironic.db import api as dbapi
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import inspect as ilo_inspect
from ironic.drivers.modules.ilo import power as ilo_power
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
class IloInspectTestCase(db_base.DbTestCase):
    def setUp(self):
        """Register the fake_ilo driver and create a test node using it."""
        super(IloInspectTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake_ilo")
        self.node = obj_utils.create_test_node(
            self.context, driver='fake_ilo', driver_info=INFO_DICT)
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
properties = ilo_common.REQUIRED_PROPERTIES.copy()
self.assertEqual(properties,
task.driver.inspect.get_properties())
    @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate(self, driver_info_mock):
        """validate() simply delegates to ilo_common.parse_driver_info."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.inspect.validate(task)
            driver_info_mock.assert_called_once_with(task.node)
    @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_inspect, '_create_ports_if_not_exist',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_inspect_essential_ok(self, get_ilo_object_mock,
                                  power_mock,
                                  get_essential_mock,
                                  create_port_mock,
                                  get_capabilities_mock):
        """Happy path: node already powered on, essential properties and
        ports are collected and stored on the node."""
        ilo_object_mock = get_ilo_object_mock.return_value
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        capabilities = ''
        result = {'properties': properties, 'macs': macs}
        get_essential_mock.return_value = result
        get_capabilities_mock.return_value = capabilities
        # Powered on: no power-state change should be triggered.
        power_mock.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.inspect.inspect_hardware(task)
            self.assertEqual(properties, task.node.properties)
            power_mock.assert_called_once_with(mock.ANY, task)
            get_essential_mock.assert_called_once_with(task.node,
                                                       ilo_object_mock)
            get_capabilities_mock.assert_called_once_with(task.node,
                                                          ilo_object_mock)
            create_port_mock.assert_called_once_with(task.node, macs)
    @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_inspect, '_create_ports_if_not_exist',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(conductor_utils, 'node_power_action', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_inspect_essential_ok_power_off(self, get_ilo_object_mock,
                                            power_mock,
                                            set_power_mock,
                                            get_essential_mock,
                                            create_port_mock,
                                            get_capabilities_mock):
        """Same as the happy path, but the node starts powered off, so
        inspection must power it on first."""
        ilo_object_mock = get_ilo_object_mock.return_value
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        capabilities = ''
        result = {'properties': properties, 'macs': macs}
        get_essential_mock.return_value = result
        get_capabilities_mock.return_value = capabilities
        # Powered off: inspect_hardware must request POWER_ON.
        power_mock.return_value = states.POWER_OFF
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.inspect.inspect_hardware(task)
            self.assertEqual(properties, task.node.properties)
            power_mock.assert_called_once_with(mock.ANY, task)
            set_power_mock.assert_any_call(task, states.POWER_ON)
            get_essential_mock.assert_called_once_with(task.node,
                                                       ilo_object_mock)
            get_capabilities_mock.assert_called_once_with(task.node,
                                                          ilo_object_mock)
            create_port_mock.assert_called_once_with(task.node, macs)
@mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
autospec=True)
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist',
spec_set=True, autospec=True)
@mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_inspect_essential_capabilities_ok(self, get_ilo_object_mock,
power_mock,
get_essential_mock,
create_port_mock,
get_capabilities_mock):
ilo_object_mock = get_ilo_object_mock.return_value
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
capability_str = 'BootMode:uefi'
capabilities = {'BootMode': 'uefi'}
result = {'properties': properties, 'macs': macs}
get_essential_mock.return_value = result
get_capabilities_mock.return_value = capabilities
power_mock.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node |
kansanmuisti/datavaalit | web/utils/http.py | Python | agpl-3.0 | 1,709 | 0.00117 | # -*- coding: utf-8 -*-
import os
import urllib2
import hashlib
from urllib2 import HTTPError
class HttpFetcher(object):
    """Small HTTP GET helper with an optional on-disk response cache.

    Responses are cached under ``cache_dir/<prefix>/<sha1-of-url>`` once
    :meth:`set_cache_dir` has been called; otherwise every call re-fetches.
    """

    # Cache root directory; None disables caching entirely.
    cache_dir = None

    def set_cache_dir(self, dir):
        """Enable caching under *dir*."""
        self.cache_dir = dir

    def _create_path_for_file(self, fname):
        """Ensure the parent directory of *fname* exists."""
        dirname = os.path.dirname(fname)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

    def get_fname(self, url, prefix):
        """Return the cache-file path for *url*, or None if caching is off."""
        if not self.cache_dir:
            return None
        # encode() is required on Python 3 (sha1 takes bytes) and is a
        # harmless no-op for ASCII str on Python 2.
        hash = hashlib.sha1(url.replace('/', '-').encode('utf-8')).hexdigest()
        fname = '%s/%s/%s' % (self.cache_dir, prefix, hash)
        return fname

    def nuke_cache(self, url, prefix):
        """Remove the cached response for *url*, if one exists."""
        fname = self.get_fname(url, prefix)
        if not fname:
            return
        # Guard against URLs that were never cached: unlinking a missing
        # file raised OSError before.
        if os.path.exists(fname):
            os.unlink(fname)

    def open_url(self, url, prefix, error_ok=False, return_url=False, force_load=False):
        """Fetch *url* (or serve it from cache) and return the body.

        :param error_ok: return None instead of raising on URL errors
        :param return_url: also return the final (post-redirect) URL
        :param force_load: bypass the cache and re-fetch (the cache entry
            is refreshed); previously this parameter was silently ignored
        """
        final_url = None
        fname = None
        if self.cache_dir:
            fname = self.get_fname(url, prefix)
        if not fname or force_load or not os.access(fname, os.R_OK):
            opener = urllib2.build_opener(urllib2.HTTPHandler)
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
            try:
                f = opener.open(url)
            except urllib2.URLError:
                if error_ok:
                    return None
                raise
            s = f.read()
            final_url = f.geturl()
            if fname:
                self._create_path_for_file(fname)
                # Binary mode: responses may be non-text, and text mode
                # would mangle them on Windows.
                outf = open(fname, 'wb')
                outf.write(s)
                outf.close()
        else:
            f = open(fname, 'rb')
            s = f.read()
            f.close()
        if return_url:
            return s, final_url
        return s
|
littlejo/Libreosteo | libreosteoweb/migrations/0019_auto_20150420_1821.py | Python | gpl-3.0 | 460 | 0.002174 | # -*- coding: utf-8 -*-
from __future__ import unicode_ | literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: relax RegularDoctor.phone to an
    optional CharField(max_length=100)."""

    dependencies = [
        ('libreosteoweb', '0018_auto_20150420_1232'),
    ]

    operations = [
        migrations.AlterField(
            model_name='regulardoctor',
            name='phone',
            field=models.CharField(max_length=100, null=True, verbose_name='Phone', blank=True),
        ),
    ]
|
Alexsays/Kindle-Sync | KindleSearchDialog.py | Python | gpl-3.0 | 3,422 | 0.002046 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'KindleSearchDialog.ui'
#
# Created: Thu Apr 3 16:24:23 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_SearchDialog(QtGui.QDialog):
    """Modal "searching for Kindle" dialog (generated by pyuic4, then
    hand-edited): shows a plug/unplug illustration with a close button."""

    # Emitted when the user clicks the small close button.
    closeSignal = QtCore.pyqtSignal()

    def __init__(self, parent=None, flags=QtCore.Qt.Dialog):
        super(Ui_SearchDialog, self).__init__(parent, flags)
        self.setupUi(self)

    def setupUi(self, Dialog):
        """Build the fixed-size (280x320) dialog widgets."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(280, 320)
        Dialog.setMinimumSize(QtCore.QSize(280, 320))
        Dialog.setMaximumSize(QtCore.QSize(280, 320))
        Dialog.setWindowModality(QtCore.Qt.WindowModal)
        # Only a close button in the title bar; no resize/minimize.
        flags = QtCore.Qt.Dialog | QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.CustomizeWindowHint
        Dialog.setWindowFlags(flags)
        self.image_label = QtGui.QLabel(Dialog)
        self.image_label.setGeometry(QtCore.QRect(0, 0, 280, 320))
        self.image_label.setText(_fromUtf8(""))
        self.image_label.setObjectName(_fromUtf8("image_label"))
        self.text_label = QtGui.QLabel(Dialog)
        self.text_label.setGeometry(QtCore.QRect(11, 270, 261, 41))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Futura"))
        font.setPointSize(24)
        self.text_label.setFont(font)
        self.text_label.setAlignment(QtCore.Qt.AlignCenter)
        self.text_label.setObjectName(_fromUtf8("text_label"))
        self.pushButton = QtGui.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(0, 300, 21, 21))
        self.pushButton.setText(_fromUtf8(""))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("images/close-button.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton.setIcon(icon)
        self.pushButton.setIconSize(QtCore.QSize(21, 21))
        self.pushButton.setStyleSheet("QPushButton, QPushButton:disabled, QPushButton:focus:pressed { border:transparent; background-color:white; }")
        self.pushButton.setFlat(True)
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        # Hidden until a device status is known (see setStatus).
        self.pushButton.setVisible(False)
        self.pushButton.clicked.connect(self.closeSignal.emit)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def closeClicked(self):
        # NOTE(review): debug print; appears unused (the button connects
        # directly to closeSignal.emit in setupUi).
        print "clicked"
        self.closeSignal.emit()

    def setStatus(self, status):
        """Switch the illustration/text: truthy = device connected (ask the
        user to unplug), falsy = ask the user to plug it in."""
        self.pushButton.setVisible(True)
        if status:
            self.image_label.setPixmap(QtGui.QPixmap(_fromUtf8("images/unplug-usb.png")))
            self.text_label.setText(_fromUtf8("Desconecta el kindle"))
        else:
            self.image_label.setPixmap(QtGui.QPixmap(_fromUtf8("images/plugin-usb.png")))
            self.text_label.setText(_fromUtf8("Conecta el kindle"))

    def retranslateUi(self, Dialog):
        Dialog.setWindowTitle(_translate("Dialog", "Buscando Kindle...", None))
|
MicrosoftGenomics/FaST-LMM | fastlmm/pyplink/__init__.py | Python | apache-2.0 | 49 | 0 | from .snpset import *
from | .snpreader import *
| |
rwl/PyCIM | CIM14/IEC61970/Protection/SynchrocheckRelay.py | Python | mit | 2,666 | 0.004501 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, includi | ng without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
| # furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Protection.ProtectionEquipment import ProtectionEquipment
class SynchrocheckRelay(ProtectionEquipment):
    """A device that operates when two AC circuits are within the desired limits of frequency, phase angle, and voltage, to permit or to cause the paralleling of these two circuits. Used to prevent the paralleling of non-synchronous topological islands.
    """

    def __init__(self, maxFreqDiff=0.0, maxVoltDiff=0.0, maxAngleDiff=0.0, *args, **kw_args):
        """Initialises a new 'SynchrocheckRelay' instance.

        @param maxFreqDiff: The maximum allowable frequency difference across the open device
        @param maxVoltDiff: The maximum allowable difference voltage across the open device
        @param maxAngleDiff: The maximum allowable voltage vector phase angle difference across the open device
        """
        # Units are not specified here — presumably Hz, V and degrees per
        # the CIM profile; confirm against the Frequency/Voltage/AngleDegrees
        # datatype definitions before relying on them.

        #: The maximum allowable frequency difference across the open device
        self.maxFreqDiff = maxFreqDiff

        #: The maximum allowable difference voltage across the open device
        self.maxVoltDiff = maxVoltDiff

        #: The maximum allowable voltage vector phase angle difference across the open device
        self.maxAngleDiff = maxAngleDiff

        super(SynchrocheckRelay, self).__init__(*args, **kw_args)

    # Serialization metadata used by the CIM framework (kept exactly as
    # generated): attribute names, their types, defaults and reference lists.
    _attrs = ["maxFreqDiff", "maxVoltDiff", "maxAngleDiff"]
    _attr_types = {"maxFreqDiff": float, "maxVoltDiff": float, "maxAngleDiff": float}
    _defaults = {"maxFreqDiff": 0.0, "maxVoltDiff": 0.0, "maxAngleDiff": 0.0}
    _enums = {}
    _refs = []
    _many_refs = []
|
offlinehacker/flumotion | flumotion/component/encoders/vp8/wizard_gtk.py | Python | gpl-2.0 | 4,507 | 0.000666 | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import gettext
import os
from zope.interface import implements
from flumotion.admin.assistant.interfaces import IEncoderPlugin
from flumotion.admin.assistant.models import VideoEncoder
from flumotion.common.fraction import fractionAsFloat
from flumotion.admin.gtk.basesteps import VideoEncoderStep
__version__ = "$Rev$"
_ = gettext.gettext
class VP8VideoEncoder(VideoEncoder):
    """
    Configuration model for the VP8 video encoder component.

    @ivar framerate: number of frames per second; to be set by view
    @type framerate: float
    """
    componentType = 'vp8-encoder'

    def __init__(self):
        super(VP8VideoEncoder, self).__init__()
        # Default to bitrate-driven encoding rather than constant quality.
        self.has_quality = False
        self.has_bitrate = True
        self.framerate = 25.0

        props = self.properties
        props.keyframe_delta = 2.0
        props.bitrate = 400
        props.quality = 16

    def getProperties(self):
        properties = super(VP8VideoEncoder, self).getProperties()

        # Exactly one rate-control mode may be active; strip the property
        # that belongs to the unused mode.
        if self.has_bitrate:
            del properties.quality
            # scale to the unit the component expects (presumably
            # kbit/s -> bit/s -- confirm against the component)
            properties.bitrate *= 1000
        elif self.has_quality:
            del properties.bitrate
        else:
            raise AssertionError

        # convert the human-friendly delta (seconds) into a frame count
        properties.keyframe_maxdistance = int(
            properties.keyframe_delta * self.framerate)
        del properties.keyframe_delta
        self.debug('keyframe_maxdistance: %r',
                   properties.keyframe_maxdistance)
        return properties
class VP8Step(VideoEncoderStep):
    # Assistant wizard page that edits the VP8VideoEncoder model above.
    name = 'VP8Encoder'
    title = _('VP8 Encoder')
    sidebarName = _('VP8')
    gladeFile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'wizard.glade')
    componentType = 'vp8'
    docSection = 'help-configuration-assistant-encoder-vp8'
    docAnchor = ''
    docVersion = 'local'

    # WizardStep

    def setup(self):
        # Declare the data types of the glade-bound widgets, then proxy
        # them onto the model and its properties bag.
        self.bitrate.data_type = int
        self.quality.data_type = int
        self.keyframe_delta.data_type = float
        self.has_quality.data_type = bool
        self.has_bitrate.data_type = bool

        self.add_proxy(self.model,
                       ['has_quality', 'has_bitrate'])

        self.add_proxy(self.model.properties,
                       ['bitrate', 'quality', 'keyframe_delta'])

        # we specify keyframe_delta in seconds, but vp8 expects
        # a number of frames, so we need the framerate and calculate
        # we need to go through the Step (which is the view) because models
        # don't have references to other models
        producer = self.wizard.getScenario().getVideoProducer(self.wizard)
        self.model.framerate = fractionAsFloat(producer.getFramerate())
        self.debug('Framerate of video producer: %r' % self.model.framerate)
        step = 1 / self.model.framerate
        page = 1.0
        self.keyframe_delta.set_increments(step, page)

    def workerChanged(self, worker):
        # Re-check that the selected worker has the vp8enc GStreamer element.
        self.model.worker = worker
        self.wizard.requireElements(worker, 'vp8enc')

    # Callbacks

    def on_radiobutton_toggled(self, button):
        # This is bound to both radiobutton_bitrate and radiobutton_quality
        self.bitrate.set_sensitive(self.has_bitrate.get_active())
        self.quality.set_sensitive(self.has_quality.get_active())
        self.model.has_bitrate = self.has_bitrate.get_active()
        self.model.has_quality = self.has_quality.get_active()
class VP8WizardPlugin(object):
    """Assistant plugin that exposes the VP8 encoder configuration step."""
    implements(IEncoderPlugin)

    def __init__(self, wizard):
        self.model = VP8VideoEncoder()
        self.wizard = wizard

    def getConversionStep(self):
        """Return the wizard page used to configure this encoder."""
        return VP8Step(self.wizard, self.model)
|
tomasbelusky/gataca | src/resources/VcfCreator.py | Python | gpl-3.0 | 2,975 | 0.011104 | #!/usr/bin/python2.7
# -*- encoding: utf-8 -*-
__author__ = "Tomáš Beluský"
__date__ = "09.03. 2013"
import types
class VcfCreator:
    """
    Builds VCF (Variant Call Format) output.

    Meta-information (headers, contigs, INFO and ALT definitions) is
    collected first via the add* methods and emitted in one go by
    writeHeader(); data lines are then appended with writeRecord().
    """

    def __init__(self, filename, output):
        """
        Remember the reference file name and prepare the output target.
        *output* may be an already opened (Python 2) file object or a path.
        """
        self.__filename = filename
        self.__outputName = output
        self.__headers = []
        self.__contigs = []
        self.__infos = []
        self.__alts = []

        if type(self.__outputName) == types.FileType:
            self.__output = self.__outputName
        else:
            self.__output = open(self.__outputName, 'w')

    def addHeader(self, key, value):
        """Queue a generic ##key=value header line."""
        self.__headers.append((key, value))

    def __addContigAttribute(self, attributes, key, attribute, result, addApostrophe=False):
        """
        Append one contig attribute (optionally quoted) to the growing
        comma-separated *result* string; missing keys are skipped.
        """
        if key not in attributes:
            return result
        separator = "," if result else ""
        if addApostrophe:
            return result + separator + "%s=\"%s\"" % (attribute, attributes[key])
        return result + separator + "%s=%s" % (attribute, attributes[key])

    def addContig(self, contig):
        """
        Queue a reference contig described by a SAM-header-style dict
        (SN/LN/AS/M5/SP/UR keys).
        """
        spec = (('SN', 'ID', False),
                ('LN', 'length', False),
                ('AS', 'assembly', False),
                ('M5', 'md5', False),
                ('SP', 'species', True),
                ('UR', 'URL', False))
        result = ""
        for key, attribute, quoted in spec:
            result = self.__addContigAttribute(contig, key, attribute, result, quoted)
        self.__contigs.append(result)

    def addInfo(self, iid, number, itype, description):
        """Queue an ##INFO definition."""
        self.__infos.append((iid, number, itype, description))

    def addAlt(self, aid, description):
        """Queue an ##ALT definition."""
        self.__alts.append((aid, description))

    def writeHeader(self):
        """
        Emit all meta-information lines followed by the mandatory
        #CHROM column header.
        """
        out = self.__output.write
        out("##fileformat=VCFv4.1\n")

        for key, value in self.__headers:  # generic headers
            out("##%s=%s\n" % (key, value))

        out("##reference=%s\n" % self.__filename)

        for contig in self.__contigs:  # reference contigs
            out("##contig=<%s>\n" % contig)

        for iid, number, itype, description in self.__infos:  # INFO fields
            out("##INFO=<ID=%s,Number=%s,Type=%s,Description=\"%s\">\n"
                % (iid, number, itype, description))

        for aid, description in self.__alts:  # ALT alleles
            out("##ALT=<ID=%s,Description=\"%s\">\n" % (aid, description))

        out("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")

    def writeRecord(self, record):
        """Write a single data line; blank records are silently dropped."""
        if record.strip():
            self.__output.write("%s\n" % record)

    def close(self):
        """Close the output stream only if this object opened it."""
        if type(self.__outputName) != types.FileType:
            self.__output.close()
|
siddhuwarrier/lockindicator-applet | src/typeutils/Exceptions.py | Python | gpl-3.0 | 2,038 | 0.008342 | # Copyri | ght (c) 2010 Siddhu Warrier (http://siddhuwarrier.homelinux.org,
# siddhuwarrier AT gmail DOT com).
#
# This file is part of the typeutils package.
# The utils package is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from errno import errorcode
import errno
##@defgroup Exceptions User-defined Exception classes
# @brief User-defined exceptions as Python's built-in exceptions are too few.
#
# This package contains several user-defined exception classes to supplement
# Python's all-too-few built-in exceptions.
# @author Siddhu Warrier (siddhuwarrier@gmail.com)
# @date 10/01/2010
__all__ = ['FileError'] #to prevent inadvertent imports
## @brief Class for FileErrors.
#
# @ingroup Exceptions
# The different kinds of file errors are identified using
# the enumerators which use the errno.h numbers
# @param[in] errcode Error code which should be in the list of error codes in errno module.
# @param[in] strerror Human-readable string describing error.
# @author Siddhu Warrier (siddhuwarrier@gmail.com)
# @date 10/01/2010
class FileError(Exception):
    ##@brief Constructor for FileError.
    # Validates the error code against errno's table, then lets the
    # Exception base class build the conventional (errno, strerror)
    # args tuple instead of mutating self.args by hand.
    # @param[in] errcode Error code; must be a key of errno.errorcode.
    # @param[in] strerror Human-readable string describing error.
    def __init__(self, errcode, strerror):
        # Idiomatic dict membership test; no need to materialise .keys().
        if errcode not in errorcode:
            raise Exception(errno.EINVAL, "FileError: Invalid Error Code")
        # Exception.__init__ stores its arguments in self.args for us,
        # preserving the original (errno, strerror) contract.
        super(FileError, self).__init__(errcode, "FileError: %s" % strerror)
|
dsweet04/rekall | rekall-core/rekall/plugins/linux/check_syscall.py | Python | gpl-2.0 | 6,461 | 0.000929 | # Rekall Memory Forensics
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This file is part of Rekall Memory Forensics.
#
# Rekall Memory Forensics is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General Public
# License.
#
# Rekall Memory Forensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Rekall Memory Forensics. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
from rekall.plugins.linux import common
from rekall.plugins.tools import dynamic_profiles
class CheckSyscall(common.LinuxPlugin):
    """Checks if the system call table has been altered."""

    __name = "check_syscall"

    # Output schema: one divider row per table, then one row per slot.
    table_header = [
        dict(name="divider", type="Divider"),
        dict(name="table", hidden=True),
        dict(name="index", style="address"),
        dict(name="address", style="address"),
        dict(name="symbol", width=80)
    ]

    def Find_sys_call_tables(self):
        """Calculates the size of the syscall table.
        Here we need the symbol __NR_syscall_max. We derive it from
        disassembling the following system calls:
        - system_call_fastpath function:
          http://lxr.linux.no/linux+v3.12/arch/x86/kernel/entry_64.S#L620
        system_call_fastpath:
        #if __SYSCALL_MASK == ~0
        cmpq $__NR_syscall_max,%rax
        #else
        andl $__SYSCALL_MASK,%eax
        cmpl $__NR_syscall_max,%eax
        #endif
        - ret_from_sys_call function (with a small rewind):
        http://lxr.linux.no/linux+v2.6.26/arch/x86/kernel/entry_64.S#L249
        249        cmpq $__NR_syscall_max,%rax
        250        ja badsys
        251        movq %r10,%rcx
        252        call *sys_call_table(,%rax,8)  # XXX:    rip relative
        253        movq %rax,RAX-ARGOFFSET(%rsp)
        254 /*
        255  * Syscall return path ending with SYSRET (fast path)
        256  * Has incomplete stack frame and undefined top of stack.
        257  */
        258 ret_from_sys_call:
        259        movl $_TIF_ALLWORK_MASK,%edi
        260        /* edi: flagmask */
        - sysenter_do_call
        Linux> dis "linux!sysenter_do_call"
        Address      Rel             Op Codes             Instruction                Comment
        ------- ---------- -------------------- ------------------------------ -------
        ------ linux!sysenter_do_call ------: 0xc12c834d
        0xc12c834d        0x0 3d5d010000           CMP EAX, 0x15d
        0xc12c8352        0x5 0f8397baffff         JAE 0xc12c3def    linux!syscall_badsys
        """
        rules = [
            # Look for a comparison of the register (EAX) with a fixed value.
            {'mnemonic': 'CMP', 'operands': [
                {'type': 'REG'}, {'type': 'IMM', 'target': "$value"}]},

            # Immediately followed by a branch to linux!badsys,
            # linux!ia32_badsys etc.
            {'comment': '~.+badsys'}
        ]
        func = None
        tables = set()
        for func_name, table_name in [
                # http://lxr.free-electrons.com/source/arch/x86_64/kernel/entry.S?v=2.4.37
                ("system_call", "sys_call_table"),

                # http://lxr.free-electrons.com/source/arch/x86/kernel/entry_64.S?v=3.16
                ("system_call_fastpath", "sys_call_table"),

                # http://lxr.free-electrons.com/source/arch/x86/ia32/ia32entry.S?v=3.14
                ("ia32_sysenter_target", "ia32_sys_call_table"),
                ("sysenter_auditsys", "ia32_sys_call_table"),

                # http://lxr.free-electrons.com/source/arch/x86/kernel/entry_32.S?v=3.3
                ("sysenter_do_call", "sys_call_table")]:
            # Each table's size is derived at most once.
            if table_name in tables:
                continue

            # This table does not exist in this profile dont bother looking for
            # its size.
            if self.profile.get_constant(table_name) == None:
                continue

            func = self.profile.get_constant_object(
                func_name, target="Function")
            if func == None:
                continue

            # Disassemble the entry stub and pull the immediate operand of
            # the CMP (that is __NR_syscall_max).
            matcher = dynamic_profiles.DisassembleMatcher(
                name="sys_call_table_size",
                mode=func.mode, rules=rules, session=self.session)

            result = matcher.MatchFunction(func)
            if result:
                tables.add(table_name)
                # __NR_syscall_max is the highest index; size is max + 1.
                yield table_name, result["$value"] + 1

        # Fallback. Note this underestimates the size quite a bit.
        if func == None:
            table_size = len([x for x in self.profile.constants
                              if x.startswith("__syscall_meta__")]) or 0x300
            yield "ia32_sys_call_table", table_size
            yield "sys_call_table", table_size

    def collect(self):
        """
        This works by walking the system call table
        and verifies that each is a symbol in the kernel
        """
        for table_name, table_size in self.Find_sys_call_tables():
            # The syscall table is simply an array of pointers to functions.
            table = self.profile.get_constant_object(
                table_name,
                target="Array",
                target_args=dict(
                    count=table_size,
                    target="Pointer",
                    target_args=dict(
                        target="Function"
                    )
                )
            )

            yield dict(divider="Table %s" % table_name)

            resolver = self.session.address_resolver
            for i, entry in enumerate(table):
                sym_name = resolver.format_address(entry.deref())[:2]
                # An unresolvable target is suspicious: highlight it.
                yield dict(
                    table=table_name, index=i,
                    address=entry,
                    symbol=sym_name or "Unknown",
                    highlight=None if sym_name else "important")
dz0ny/vaultier | vaultier/libs/version/context.py | Python | bsd-3-clause | 1,091 | 0 |
class Manager(object):
    """Holds the user and enablement state consulted when versions are stored."""

    _user = None
    _enabled = True
    _user_required = True

    def set_user_required(self, user_required):
        self._user_required = user_required

    def get_user_required(self):
        return self._user_required

    def set_user(self, user):
        self._user = user

    def get_user(self):
        # Unless explicitly waived, a real authenticated user must be set.
        if self._user_required and (not self._user or
                                    self._user.is_anonymous()):
            raise Exception('To store version valid user is required on '
                            'version_context_manager')
        return self._user

    def set_enabled(self, enabled):
        self._enabled = enabled

    def get_enabled(self):
        return self._enabled
# Process-wide shared instance; other modules import and mutate this.
version_context_manager = Manager()
class VersionContextAwareApiViewMixin(object):
    # Mixin for API views: publishes the request's user to the global
    # version_context_manager before the view logic runs.
    def initialize_request(self, request, *args, **kargs):
        request = super(VersionContextAwareApiViewMixin, self) \
            .initialize_request(request, *args, **kargs)
        version_context_manager.set_user(request.user)
        return request
JulyKikuAkita/PythonPrac | cs15211/ValidateStackSequences.py | Python | apache-2.0 | 2,782 | 0.001797 | __source__ = 'https://leetcode.com/problems/validate-stack-sequences/'
# Time: O(N)
# Space: O(N)
#
# Description: Leetcode # 946. Validate Stack Sequences
#
# Given two sequences pushed and popped with distinct values,
# return true if and only if this could have been the result of a sequence of push and pop operations
# on an initially empty stack.
#
# Example 1:
#
# Input: pushed = [1,2,3,4,5], popped = [4,5,3,2,1]
# Output: true
# Explanation: We might do the following sequence:
# push(1), push(2), push(3), push(4), pop() -> 4,
# push(5), pop() -> 5, pop() -> 3, pop() -> 2, pop() -> 1
# Example 2:
#
# Input: pushed = [1,2,3,4,5], popped = [4,3,5,1,2]
# Output: false
# Explanation: 1 cannot be popped before 2.
#
#
# Note:
#
# 0 <= pushed.length == popped.length <= 1000
# 0 <= pushed[i], popped[i] < 1000
# pushed is a permutation of popped.
# pushed and popped have distinct values.
#
import unittest
# 28ms 88.40%
class Solution(object):
    def validateStackSequences(self, pushed, popped):
        """
        Greedily simulate the stack: push each value, then pop while the
        top matches the next expected popped value. The sequences are
        consistent iff everything expected got popped.

        :type pushed: List[int]
        :type popped: List[int]
        :rtype: bool
        """
        simulated = []
        expect = 0
        total = len(popped)
        for value in pushed:
            simulated.append(value)
            while simulated and expect < total and simulated[-1] == popped[expect]:
                simulated.pop()
                expect += 1
        return expect == total
class TestMethods(unittest.TestCase):
    # Placeholder smoke test so the module runs cleanly under unittest.main().
    def test_Local(self):
        self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/validate-stack-sequences/solution/
Approach 1: Greedy
Complexity Analysis
Time Complexity: O(N), where N is the length of pushed and popped.
Space Complexity: O(N)).
# 9ms 86.82%
class Solution {
public boolean validateStackSequences(int[] pushed, int[] popped) {
int N = pushed.length;
Stack<Integer> stack = new Stack();
int j = 0;
for (int x : pushed) {
stack.push(x);
while (!stack.isEmpty() && j < N && stack.peek() == popped[j]) {
stack.pop();
j++;
}
}
return j == N;
}
}
# use array
# 5ms 99.91%
class Solution {
public boolean validateStackSequences(int[] pushed, int[] popped) {
int[] stack = new int[pushed.length];
        int index = 0, pIndex = 0;
for (int i = 0; i < pushed.length; i++) {
stack[index++] = pushed[i];
while (index != 0 && stack[index - 1] == popped[pIndex]) {
index--;
pIndex++;
}
}
while (index != 0 && stack[index - 1] == popped[pIndex]) {
index--;
pIndex++;
}
return index == 0 && pIndex == popped.length;
}
}
''' |
nelsyeung/half-metals-analysis | lib/nmod.py | Python | mit | 5,429 | 0.004237 | """ Useful and shared functions """
import os
import math
import numpy as np
from scipy.interpolate import interp1d
def chunks(l, size):
    """Split *l* into consecutive slices of at most *size* items each."""
    return [l[start:start + size] for start in range(0, len(l), size)]
def findLine(filename, s):
    """Return the first line of *filename* containing *s*, or '' if none does."""
    with open(filename, "r") as handle:
        for line in handle:
            if s in line:
                return line
    return ''
def findMean(l):
    """Return the arithmetic mean of *l* (accurate float summation)."""
    total = math.fsum(l)
    return total / len(l)
def replaceAll(text, reps):
    """Replace every occurrence of each key of *reps* with its value.

    Keys and values are coerced to str before substitution.
    """
    for old, new in reps.items():
        text = text.replace(str(old), str(new))
    return text
def ntabulate(matrix):
    """Render *matrix* as rows of space-separated, column-aligned cells."""
    cells = [[str(cell) for cell in row] for row in matrix]
    widths = [max(len(c) for c in column) for column in zip(*cells)]
    fmt = ' ' . join('{{:{}}}'.format(w) for w in widths)
    return '\n' . join(fmt.format(*row) for row in cells)
def float2str(prec, val):
    """Format *val* with exactly *prec* digits after the decimal point."""
    return format(val, '.{}f'.format(prec))
def nexit():
    """ Standard exit program function """
    # Announce, then abort; SystemExit propagates up to terminate cleanly.
    print('Exiting program...')
    raise SystemExit
def seconds2str(s):
    """Render a duration in seconds as 'Hh Mm Ss'."""
    secs = int(s % 60)
    mins = int(s / 60) % 60
    hrs = int(s / 3600)
    return '%dh %dm %ds' % (hrs, mins, secs)
def modFile(new, tmp, reps):
    """Copy template *tmp* to *new*, applying replaceAll() to each line."""
    # The destination is opened first, matching the original behaviour
    # of creating *new* before the template is read.
    with open(new, 'w+') as fnew, open(tmp, 'r') as ftmp:
        for line in ftmp:
            fnew.write(replaceAll(line, reps))
def getDOS(filePath, spin):
    """ Store into text file and return DOS data """
    baseDir = os.path.dirname(os.path.abspath(filePath))
    filename = os.path.basename(filePath).split('.')[0]
    outFile = os.path.join(baseDir, filename + '_' + spin + '.txt')
    dos = []
    record = False
    # Data sets use grace-style markers ('@target', '&'); presumably the
    # input is an xmgrace .agr file -- confirm. Spin-up lives in G0.S0,
    # spin-down in G1.S0.
    if spin == 'up':
        dataStart = '@target G0.S0'
    elif spin == 'down':
        dataStart = '@target G1.S0'
    else:
        print('Incorrect spin.')
        nexit()
    with open(filePath) as f:
        for l in f:
            line = l.rstrip()
            if line == dataStart:
                record = True
                continue
            if line == '&':  # '&' terminates a data set
                record = False
                continue
            if record and not '@' in line:
                x = float(line.split()[0])
                y = float(line.split()[1])
                dos.append([x, y])
    # Rewrite the per-spin text dump from scratch.
    if os.path.isfile(outFile) is True:
        os.remove(outFile)
    with open(outFile, 'a+') as f:
        for x, y in dos:
            f.write(str(x) + ' ' + str(y) + '\n')
    return dos
def getBSF3D(filePath, spin, numSites):
    """ Store into text file and return 3D BSF data """
    baseDir = os.path.dirname(os.path.abspath(filePath))
    bsfnum = os.path.basename(filePath).split('_')[-2]
    if bsfnum.isdigit() == True:
        outFile = os.path.join(baseDir, bsfnum + '_bsf3d_' + spin + '.txt')
    else:
        outFile = os.path.join(baseDir, 'bsf3d_' + spin + '.txt')
    raw = []
    hashCount = 0 # For determining when to start reading raw data.
    # Get raw data first: payload starts after the third '###' marker line.
    with open(filePath) as f:
        for l in f:
            line = l.rstrip()
            if '###' in line:
                hashCount += 1
                continue
            if hashCount == 3:
                x = float(line.split()[0])
                y = float(line.split()[1])
                raw.append([x, y])
    # Generate plotable data from raw
    numUseful = (numSites - 1) * 2
    # NOTE(review): under Python 3 this is float division and range(nk)
    # below would raise; the module appears to target Python 2.
    nk = len(raw) / numUseful
    nk2 = nk * nk
    bsf = [[] for i in range(nk)]
    if spin == 'up':
        sign = -1
    elif spin == 'down':
        sign = 1
    for i in range(nk2):
        n = math.floor(i / nk)
        j = i + (nk2 * numUseful - 2)
        # NOTE(review): raw items are [x, y] pairs, so float(raw[i]) raises
        # TypeError -- probably raw[i][1] / raw[j][1] was intended; confirm
        # against real data before relying on this function.
        bsf[n].append(float(raw[i]) + sign * float(raw[j]))
    if os.path.isfile(outFile) is True:
        os.remove(outFile)
    np.savetxt(outFile, bsf)
    return bsf
def getBSF2D(filePath, spin, numSites):
    """ Store into text file and return single strip of BSF data """
    baseDir = os.path.dirname(os.path.abspath(filePath))
    bsfnum = os.path.basename(filePath).split('_')[-2]
    if bsfnum.isdigit() == True:
        outFile = os.path.join(baseDir, bsfnum + '_bsf2d_' + spin + '.txt')
    else:
        outFile = os.path.join(baseDir, 'bsf2d_' + spin + '.txt')
    # Take the first row of the 3D data as the 2D strip, with a normalised
    # x coordinate. NOTE(review): inherits the suspected defects flagged
    # in getBSF3D; i / nk is also integer division under Python 2.
    bsf3D = getBSF3D(filePath, spin, numSites)
    bsf = []
    nk = len(bsf3D)
    for i in range(nk):
        bsf.append([ i / nk, bsf3D[0][i]])
    if os.path.isfile(outFile) is True:
        os.remove(outFile)
    with open(outFile, 'a+') as f:
        for x, y in bsf:
            f.write(str(x) + ' ' + str(y) + '\n')
    return bsf
def getInterp1d(data):
    """Build a scipy linear interpolator from a list of [x, y] pairs."""
    xs = [pair[0] for pair in data]
    ys = [pair[1] for pair in data]
    return interp1d(xs, ys)
def normalise(inp):
    """Linearly rescale *inp* so its minimum maps to 0 and its maximum to 1."""
    xmin = min(inp)
    xmax = max(inp)
    return [(value - xmin) / (xmax - xmin) for value in inp]
|
heromod/migrid | mig/wwwuser/pickle_file_sizes.py | Python | gpl-2.0 | 1,433 | 0.000698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# pickle_file_sizes - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import os
import pickle
from os.path import join, getsize
# Walk the web user tree and record (path relative to base, size in bytes)
# for every regular file, then pickle the resulting list to disk.
list = []  # NOTE(review): shadows the builtin list(); rename if this grows
base = '/home/mig/mig/wwwuser'
for (root, dirs, files) in os.walk(base):
    for name in files:
        path = join(root, name)
        path_no_base = path.replace(base, '')
        list.append((path_no_base, getsize(path)))
        # print path + " " + str(getsize(path))
output = open('filesizes.pkl', 'wb')
# Pickle the list using protocol 0 (the default).
pickle.dump(list, output)
output.close()
# print list
|
hansonrobotics/chatbot | src/chatbot/server/config.py | Python | mit | 1,300 | 0.002308 | import os
DEFAULT_CHARACTER_PATH = os.path.join(
os.path.dirname(os.path.realpath | (__file__)), 'characters')
CHARACT | ER_PATH = os.environ.get('HR_CHARACTER_PATH', DEFAULT_CHARACTER_PATH)
RESET_SESSION_BY_HELLO = False
SESSION_REMOVE_TIMEOUT = 3600 # Timeout seconds for a session to be removed
CHATBOT_LOG_DIR = os.environ.get('CHATBOT_LOG_DIR') or os.path.expanduser('~/.hr/chatbot')
SERVER_LOG_DIR = os.environ.get('SERVER_LOG_DIR') or os.path.expanduser('~/.hr/log/chatbot')
HISTORY_DIR = os.path.join(CHATBOT_LOG_DIR, 'history')
TEST_HISTORY_DIR = os.path.join(CHATBOT_LOG_DIR, 'test/history')
CS_HOST = os.environ.get('CS_HOST') or 'localhost'
CS_PORT = os.environ.get('CS_PORT') or '1024'
CS_BOT = os.environ.get('CS_BOT') or 'rose'
HR_CHATBOT_AUTHKEY = os.environ.get('HR_CHATBOT_AUTHKEY', 'AAAAB3NzaC')
config = {}
config['DEFAULT_CHARACTER_PATH'] = DEFAULT_CHARACTER_PATH
config['CHARACTER_PATH'] = CHARACTER_PATH
config['RESET_SESSION_BY_HELLO'] = RESET_SESSION_BY_HELLO
config['SESSION_REMOVE_TIMEOUT'] = SESSION_REMOVE_TIMEOUT
config['CHATBOT_LOG_DIR'] = CHATBOT_LOG_DIR
config['SERVER_LOG_DIR'] = SERVER_LOG_DIR
config['HISTORY_DIR'] = HISTORY_DIR
config['CS_HOST'] = CS_HOST
config['CS_PORT'] = CS_PORT
config['CS_BOT'] = CS_BOT
config['HR_CHATBOT_AUTHKEY'] = HR_CHATBOT_AUTHKEY
|
ioram7/keystone-federado-pgid2013 | build/sqlalchemy/test/aaa_profiling/test_memusage.py | Python | apache-2.0 | 19,051 | 0.004567 | from test.lib.testing import eq_
from sqlalchemy.orm import mapper, relationship, create_session, \
clear_mappers, sessionmaker, class_mapper
from sqlalchemy.orm.mapper import _mapper_registry
from sqlalchemy.orm.session import _sessions
import operator
from test.lib import testing, engines
from sqlalchemy import MetaData, Integer, String, ForeignKey, \
PickleType, create_engine, Unicode
from test.lib.schema import Table, Column
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.processors import to_decimal_processor_factory, \
to_unicode_processor_factory
from test.lib.util import gc_collect
from sqlalchemy.util.compat import decimal
import gc
import weakref
from test.lib import fixtures
class A(fixtures.ComparableEntity):
    # Simple mapped entity used throughout the memory-profiling tests.
    pass
class B(fixtures.ComparableEntity):
    # Companion entity; related to A via the "bs" relationship in the tests.
    pass
def profile_memory(func):
    # run the test 50 times. if length of gc.get_objects()
    # keeps growing, assert false
    def profile(*args):
        gc_collect()
        samples = [0 for x in range(0, 50)]
        for x in range(0, 50):
            func(*args)
            gc_collect()
            samples[x] = len(gc.get_objects())

        print "sample gc sizes:", samples

        assert len(_sessions) == 0

        # "flatline": the object count was identical across the final
        # samples, i.e. allocation stabilised.
        for x in samples[-4:]:
            if x != samples[-5]:
                flatline = False
                break
        else:
            flatline = True

        # object count is bigger than when it started
        if not flatline and samples[-1] > samples[0]:
            for x in samples[1:-2]:
                # see if a spike bigger than the endpoint exists
                if x > samples[-1]:
                    break
            else:
                assert False, repr(samples) + " " + repr(flatline)
    return profile
def assert_no_mappers():
    # After clearing, no mapper may survive a garbage-collection pass.
    clear_mappers()
    gc_collect()
    assert len(_mapper_registry) == 0
class EnsureZeroed(fixtures.ORMTest):
    # Start every test with empty session and mapper registries so leak
    # counts are not polluted by previous tests.
    def setup(self):
        _sessions.clear()
        _mapper_registry.clear()
class MemUsageTest(EnsureZeroed):
__requires__ = 'cpython',
# ensure a pure growing test trips the assertion
    @testing.fails_if(lambda: True)
    def test_fixture(self):
        # Sanity check of the harness: a deliberately leaking body must
        # trip profile_memory's assertion (hence fails_if above).
        class Foo(object):
            pass
        x = []
        @profile_memory
        def go():
            # keep replacing the tail so six fresh objects survive each call
            x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()]
        go()
    def test_session(self):
        # Full mapped CRUD cycle repeated under profile_memory: the
        # object count must flatline across iterations.
        metadata = MetaData(testing.db)

        table1 = Table("mytable", metadata,
            Column('col1', Integer, primary_key=True,
              test_needs_autoincrement=True),
            Column('col2', String(30)))

        table2 = Table("mytable2", metadata,
            Column('col1', Integer, primary_key=True,
              test_needs_autoincrement=True),
            Column('col2', String(30)),
            Column('col3', Integer, ForeignKey("mytable.col1")))

        metadata.create_all()

        m1 = mapper(A, table1, properties={
            "bs":relationship(B, cascade="all, delete",
                                    order_by=table2.c.col1)},
                order_by=table1.c.col1)
        m2 = mapper(B, table2)

        m3 = mapper(A, table1, non_primary=True)

        @profile_memory
        def go():
            sess = create_session()
            a1 = A(col2="a1")
            a2 = A(col2="a2")
            a3 = A(col2="a3")
            a1.bs.append(B(col2="b1"))
            a1.bs.append(B(col2="b2"))
            a3.bs.append(B(col2="b3"))
            for x in [a1,a2,a3]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).all()
            eq_(
                [
                    A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
                    A(col2="a2", bs=[]),
                    A(col2="a3", bs=[B(col2="b3")])
                ],
                alist)

            for a in alist:
                sess.delete(a)
            sess.flush()
        go()

        metadata.drop_all()
        del m1, m2, m3
        assert_no_mappers()
    @testing.crashes('sqlite', ':memory: connection not suitable here')
    def test_orm_many_engines(self):
        # Same CRUD cycle, but each iteration creates and disposes its own
        # engine: neither engines, pools nor compiled caches may accumulate.
        metadata = MetaData(testing.db)

        table1 = Table("mytable", metadata,
            Column('col1', Integer, primary_key=True,
              test_needs_autoincrement=True),
            Column('col2', String(30)))

        table2 = Table("mytable2", metadata,
            Column('col1', Integer, primary_key=True,
              test_needs_autoincrement=True),
            Column('col2', String(30)),
            Column('col3', Integer, ForeignKey("mytable.col1")))

        metadata.create_all()

        m1 = mapper(A, table1, properties={
            "bs":relationship(B, cascade="all, delete",
                                    order_by=table2.c.col1)},
                order_by=table1.c.col1,
                _compiled_cache_size=10
                )
        m2 = mapper(B, table2,
                _compiled_cache_size=10
                )

        m3 = mapper(A, table1, non_primary=True)

        @profile_memory
        def go():
            engine = engines.testing_engine(
                                options={'logging_name':'FOO',
                                        'pool_logging_name':'BAR',
                                        'use_reaper':False}
                                    )
            sess = create_session(bind=engine)

            a1 = A(col2="a1")
            a2 = A(col2="a2")
            a3 = A(col2="a3")
            a1.bs.append(B(col2="b1"))
            a1.bs.append(B(col2="b2"))
            a3.bs.append(B(col2="b3"))
            for x in [a1,a2,a3]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).all()
            eq_(
                [
                    A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
                    A(col2="a2", bs=[]),
                    A(col2="a3", bs=[B(col2="b3")])
                ],
                alist)

            for a in alist:
                sess.delete(a)
            sess.flush()
            sess.close()
            engine.dispose()
        go()

        metadata.drop_all()
        del m1, m2, m3
        assert_no_mappers()
    def test_ad_hoc_types(self):
        """test storage of bind processors, result processors
        in dialect-wide registry."""

        from sqlalchemy.dialects import mysql, postgresql, sqlite
        from sqlalchemy import types

        eng = engines.testing_engine()
        # A representative spread of generic and dialect-specific types;
        # extra tuple elements are constructor arguments.
        for args in (
            (types.Integer, ),
            (types.String, ),
            (types.PickleType, ),
            (types.Enum, 'a', 'b', 'c'),
            (sqlite.DATETIME, ),
            (postgresql.ENUM, 'a', 'b', 'c'),
            (types.Interval, ),
            (postgresql.INTERVAL, ),
            (mysql.VARCHAR, ),
        ):
            @profile_memory
            def go():
                # Exercise the cached processor lookups; throwaway type
                # instances must not pin entries in the dialect registry.
                type_ = args[0](*args[1:])
                bp = type_._cached_bind_processor(eng.dialect)
                rp = type_._cached_result_processor(eng.dialect, 0)
            go()

        assert not eng.dialect._type_memos
def test_many_updates(self):
metadata = MetaData(testing.db)
wide_table = Table('t', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
*[Column('col%d' % i, Integer) for i in range(10)]
)
class Wide(object):
pass
mapper(Wide, wide_table, _compiled_cache_size=10)
metadata.create_all()
session = create_session()
w1 = Wide()
session.add(w1)
session.flush()
session.close()
del session
counter = [1]
@profile_memory
def go():
session = create_session()
w1 = session.query(Wide).first()
x = counter[0]
dec = 10
while dec > 0:
# trying to count in binary here,
# works enough to trip the test case
if pow(2, dec) < x:
|
MitchellChu/torndsession | torndsession/sessionhandler.py | Python | mit | 964 | 0.003112 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright @ 2014 Mitchell Chu
from __future__ import (absolute_import, division, print_function,
with_statement)
import tornado.web
import torndsession.session
class SessionBaseHandler(tornado.web.RequestHandler, torndsession.session.SessionMixin):
    """
    Tornado request handler with automatic session persistence.

    Generally, a user must persist the session object manually when
    force_persistence is False; handlers inheriting from this class only
    need to add/update/delete session values, and the session is saved
    automatically once the request has finished.
    """

    def prepare(self):
        """
        Overwrite tornado.web.RequestHandler prepare.
        """
        pass

    def on_finish(self):
        """
        Overwrite tornado.web.RequestHandler on_finish.
        """
        # try to save session
        self.session.flush()
|
cloudaice/simple-data | github/libs/client.py | Python | mit | 3,814 | 0.001049 | #-*-coding: utf-8-*-
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError, HTTPRequest
from tornado.options import options
from functools import wraps
from tornado import escape
import tornado.ioloop
import base64
import time
import datetime
import json
from math import exp
# Use the libcurl-backed client implementation for all fetches.
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
# Logistic (sigmoid) curve saturating at 2**10 = 1024, with its midpoint
# at x = 2**7 = 128 and slope scale 2**5 = 32.
formula = lambda x: 2 ** 10 / (1 + pow(exp(1), -(x - 2 ** 7) / 2 ** 5))
def loop_call(delta=60 * 1000):
    """Decorator: run the wrapped function, then reschedule it on the
    IOLoop every *delta* milliseconds (fire-and-forget; unlike
    sync_loop_call below, the result is not awaited).

    @param delta: repeat interval in milliseconds.
    """
    def wrap_loop(func):
        @wraps(func)
        def wrap_func(*args, **kwargs):
            func(*args, **kwargs)
            # BUG FIX: was datetime.timeelta (typo), which raised
            # AttributeError the first time a decorated function ran.
            tornado.ioloop.IOLoop.instance().add_timeout(
                datetime.timedelta(milliseconds=delta),
                wrap_func)
        return wrap_func
    return wrap_loop
def sync_loop_call(delta=60 * 1000):
    """
    Wait for func down then process add_timeout: the wrapped coroutine is
    awaited (with logging and error swallowing) before the next run is
    scheduled *delta* milliseconds later.
    """
    def wrap_loop(func):
        @wraps(func)
        @gen.coroutine
        def wrap_func(*args, **kwargs):
            options.logger.info("function %r start at %d" %
                                (func.__name__, int(time.time())))
            try:
                yield func(*args, **kwargs)
            except Exception, e:
                # Errors are logged and suppressed so the loop keeps going.
                options.logger.error("function %r error: %s" %
                                     (func.__name__, e))
            options.logger.info("function %r end at %d" %
                                (func.__name__, int(time.time())))
            tornado.ioloop.IOLoop.instance().add_timeout(
                datetime.timedelta(milliseconds=delta),
                wrap_func)
        return wrap_func
    return wrap_loop
class TornadoDataRequest(HTTPRequest):
    # HTTPRequest pre-filled with basic-auth credentials and a custom
    # User-Agent taken from the global tornado options.
    def __init__(self, url, **kwargs):
        super(TornadoDataRequest, self).__init__(url, **kwargs)
        self.auth_username = options.username
        self.auth_password = options.password
        self.user_agent = "Tornado-data"
@gen.coroutine
def GetPage(url):
    """Fetch *url* with a GET request.

    Resolves (via ``gen.Return``) to the HTTPResponse, or to the HTTPError
    itself when the fetch fails, so callers can inspect ``.code`` either way.
    """
    client = AsyncHTTPClient()
    request = TornadoDataRequest(url, method='GET')
    try:
        response = yield client.fetch(request)
    # `except HTTPError, e` is Python-2-only syntax; `as` parses everywhere.
    except HTTPError as e:
        response = e
    raise gen.Return(response)
@gen.coroutine
def PutPage(url, body):
    """Send *body* to *url* with a PUT request (same contract as GetPage)."""
    client = AsyncHTTPClient()
    request = TornadoDataRequest(url, method='PUT', body=body)
    try:
        response = yield client.fetch(request)
    # `except HTTPError, e` is Python-2-only syntax; `as` parses everywhere.
    except HTTPError as e:
        response = e
    raise gen.Return(response)
@gen.coroutine
def PatchPage(url, body):
    """Send *body* to *url* with a PATCH request (same contract as GetPage)."""
    # Consistency fix: GetPage/PutPage instantiate AsyncHTTPClient() directly,
    # which honours the AsyncHTTPClient.configure(...) call made at import
    # time; configurable_default()() bypassed that configuration.
    client = AsyncHTTPClient()
    request = TornadoDataRequest(url, method="PATCH", body=body)
    try:
        response = yield client.fetch(request)
    # `except HTTPError, e` is Python-2-only syntax; `as` parses everywhere.
    except HTTPError as e:
        response = e
    raise gen.Return(response)
@gen.coroutine
def commit(url, message, data):
    """Create a GitHub "update file" commit at *url*.

    The current file is fetched first to obtain its blob SHA; on success the
    new JSON content is PUT back with *message* as the commit message.  The
    final HTTP response (or the failed GET response) is the result.
    """
    current = yield GetPage(url)
    if current.code != 200:
        raise gen.Return(current)
    sha = escape.json_decode(current.body)["sha"]
    payload = json.dumps({
        "message": message,
        "content": base64.b64encode(json.dumps(data)),
        "committer": {"name": "cloudaice", "email": "cloudaice@163.com"},
        "sha": sha
    })
    result = yield PutPage(url, payload)
    raise gen.Return(result)
@gen.coroutine
def update_file(gist_url, filename, data):
    """PATCH a gist at *gist_url*, replacing *filename* with *data* as JSON.

    Resolves to the HTTP response (or HTTPError) from the PATCH call.
    """
    try:
        body = json.dumps({
            "description": "update file at utctime %s" %
            datetime.datetime.utcfromtimestamp(time.time()),
            "files": {
                filename: {
                    "content": json.dumps(data, indent=4, separators=(',', ': '))
                }
            }
        })
    # Bug fix: the exception was swallowed here (py2 `except Exception, e`
    # syntax too), after which the undefined `body` raised a confusing
    # NameError below.  Log and re-raise the real error instead.
    except Exception as e:
        options.logger.error("Error: %s" % e)
        raise
    resp = yield PatchPage(gist_url, body)
    raise gen.Return(resp)
|
HewlettPackard/oneview-ansible | test/test_oneview_sas_logical_interconnect_group_facts.py | Python | apache-2.0 | 2,046 | 0.000489 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2019) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
from hpe_test_utils import OneViewBaseFactsTest
from oneview_module_loader import SasLogicalInterconnectGroupFactsModule
# Error message used by negative-path tests elsewhere in the suite.
ERROR_MSG = 'Fake message error'

# Module params: no name -> the module should return every SAS LIG.
PARAMS_GET_ALL = dict(
    config='config.json',
    name=None
)

# Module params: a name -> the module should filter to that single SAS LIG.
PARAMS_GET_BY_NAME = dict(
    config='config.json',
    name="SAS LIG 2"
)

# Canned OneView payload returned by the mocked resource client.
SAS_LIGS = [{"name": "SAS LIG 1"}, {"name": "SAS LIG 2"}]
@pytest.mark.resource(TestSasLogicalInterconnectGroupFactsModule='sas_logical_interconnect_groups')
class TestSasLogicalInterconnectGroupFactsModule(OneViewBaseFactsTest):
    """Unit tests for the SAS logical interconnect group facts module."""

    def test_should_get_all(self):
        """Without a name, the module reports every group as facts."""
        self.resource.get_all.return_value = SAS_LIGS
        self.mock_ansible_module.params = PARAMS_GET_ALL

        SasLogicalInterconnectGroupFactsModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(sas_logical_interconnect_groups=SAS_LIGS))

    def test_should_get_by_name(self):
        """With a name, the module reports only the matching group."""
        expected = [SAS_LIGS[1]]
        self.resource.get_by.return_value = expected
        self.mock_ansible_module.params = PARAMS_GET_BY_NAME

        SasLogicalInterconnectGroupFactsModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(sas_logical_interconnect_groups=expected))
if __name__ == '__main__':
pytest.main([__file__])
|
UQ-UQx/edx-platform_lti | common/test/acceptance/tests/discussion/test_cohorts.py | Python | agpl-3.0 | 5,771 | 0.002599 | """
Tests related to the cohorting feature.
"""
from uuid import uuid4
from .helpers import BaseDiscussionMixin
from .helpers import CohortTestMixin
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...fixtures.course import (CourseFixture, XBlockFixtureDesc)
from ...pages.lms.discussion import (DiscussionTabSingleThreadPage, InlineDiscussionThreadPage, InlineDiscussionPage)
from ...pages.lms.courseware import CoursewarePage
from nose.plugins.attrib import attr
class NonCohortedDiscussionTestMixin(BaseDiscussionMixin):
    """
    Mixin for tests of discussion in non-cohorted courses.
    """
    def setup_cohorts(self):
        """
        No cohorts are desired for this mixin.
        """
        pass

    def test_non_cohort_visibility_label(self):
        """A thread in a non-cohorted course is labelled visible to everyone."""
        self.setup_thread(1)
        # assertEquals is a deprecated unittest alias; assertEqual is the
        # supported spelling and behaves identically.
        self.assertEqual(self.thread_page.get_group_visibility_label(), "This post is visible to everyone.")
class CohortedDiscussionTestMixin(BaseDiscussionMixin, CohortTestMixin):
    """
    Mixin for tests of discussion in cohorted courses.
    """
    def setup_cohorts(self):
        """
        Sets up the course to use cohorting with a single defined cohort group.
        """
        self.setup_cohort_config(self.course_fixture)
        self.cohort_1_name = "Cohort Group 1"
        self.cohort_1_id = self.add_manual_cohort(self.course_fixture, self.cohort_1_name)

    def test_cohort_visibility_label(self):
        """A cohorted thread is labelled with its cohort; disabling cohorts reverts the label."""
        # Must be moderator to view content in a cohort other than your own
        AutoAuthPage(self.browser, course_id=self.course_id, roles="Moderator").visit()
        self.thread_id = self.setup_thread(1, group_id=self.cohort_1_id)
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(
            self.thread_page.get_group_visibility_label(),
            "This post is visible only to {}.".format(self.cohort_1_name)
        )

        # Disable cohorts and verify that the post now shows as visible to everyone.
        self.disable_cohorting(self.course_fixture)
        self.refresh_thread_page(self.thread_id)
        self.assertEqual(self.thread_page.get_group_visibility_label(), "This post is visible to everyone.")
class DiscussionTabSingleThreadTest(UniqueCourseTest):
    """
    Tests for the discussion page displaying a single thread.

    Combined with one of the cohort mixins below, which provide
    setup_cohorts() and the actual test methods.
    """

    def setUp(self):
        super(DiscussionTabSingleThreadTest, self).setUp()
        self.discussion_id = "test_discussion_{}".format(uuid4().hex)

        # Create a course to register for
        self.course_fixture = CourseFixture(**self.course_info).install()
        # Provided by the mixin (cohorted or non-cohorted variant).
        self.setup_cohorts()

        # Log in as an auto-generated user registered for the course.
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def setup_thread_page(self, thread_id):
        # Open the discussion-tab page that displays the given thread.
        self.thread_page = DiscussionTabSingleThreadPage(self.browser, self.course_id, thread_id)  # pylint: disable=attribute-defined-outside-init
        self.thread_page.visit()

    # pylint: disable=unused-argument
    def refresh_thread_page(self, thread_id):
        # The tab page re-renders the same thread on refresh, so the id is unused.
        self.browser.refresh()
        self.thread_page.wait_for_page()
@attr('shard_1')
class CohortedDiscussionTabSingleThreadTest(DiscussionTabSingleThreadTest, CohortedDiscussionTestMixin):
    """
    Tests for the discussion page displaying a single cohorted thread.
    """
    # Actual test method(s) defined in CohortedDiscussionTestMixin.
    pass
@attr('shard_1')
class NonCohortedDiscussionTabSingleThreadTest(DiscussionTabSingleThreadTest, NonCohortedDiscussionTestMixin):
    """
    Tests for the discussion page displaying a single non-cohorted thread.
    """
    # Actual test method(s) defined in NonCohortedDiscussionTestMixin.
    pass
class InlineDiscussionTest(UniqueCourseTest):
    """
    Tests for inline discussions.

    Combined with one of the cohort mixins, which supply setup_cohorts()
    and the test methods.
    """

    def setUp(self):
        super(InlineDiscussionTest, self).setUp()
        self.discussion_id = "test_discussion_{}".format(uuid4().hex)

        # Build a course containing a single inline discussion component.
        discussion_block = XBlockFixtureDesc(
            "discussion",
            "Test Discussion",
            metadata={"discussion_id": self.discussion_id}
        )
        unit = XBlockFixtureDesc("vertical", "Test Unit").add_children(discussion_block)
        subsection = XBlockFixtureDesc("sequential", "Test Subsection").add_children(unit)
        section = XBlockFixtureDesc("chapter", "Test Section").add_children(subsection)
        self.course_fixture = CourseFixture(**self.course_info).add_children(section).install()

        self.setup_cohorts()

        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()

    def setup_thread_page(self, thread_id):
        CoursewarePage(self.browser, self.course_id).visit()
        self.show_thread(thread_id)

    def show_thread(self, thread_id):
        # Expand the inline discussion and open the single expected thread.
        inline_discussion = InlineDiscussionPage(self.browser, self.discussion_id)
        inline_discussion.expand_discussion()
        self.assertEqual(inline_discussion.get_num_displayed_threads(), 1)
        self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id)  # pylint: disable=attribute-defined-outside-init
        self.thread_page.expand()

    def refresh_thread_page(self, thread_id):
        self.browser.refresh()
        self.show_thread(thread_id)
@attr('shard_1')
class CohortedInlineDiscussionTest(InlineDiscussionTest, CohortedDiscussionTestMixin):
    """
    Tests for cohorted inline discussions.
    """
    # Actual test method(s) defined in CohortedDiscussionTestMixin.
    pass
@attr('shard_1')
class NonCohortedInlineDiscussionTest(InlineDiscussionTest, NonCohortedDiscussionTestMixin):
    """
    Tests for non-cohorted inline discussions.
    """
    # Actual test method(s) defined in NonCohortedDiscussionTestMixin.
    pass
|
egineering-llc/egat | examples/config_example/test2.py | Python | mit | 188 | 0.015957 | from e | gat.testset import UnorderedTestSet
class Test2(UnorderedTestSet):
    """Example egat test set; steps in an UnorderedTestSet may run in any order."""

    def testStep1(self):
        # Placeholder step.
        pass

    def testStep2(self):
        # Placeholder step.
        pass

    def testStep3(self):
        # Placeholder step.
        pass
|
consbio/seedsource-core | seedsource_core/django/seedsource/tasks/write_tif.py | Python | bsd-3-clause | 2,684 | 0.001863 | import zipfile
from stat import S_IRUSR, S_IRGRP, S_IWUSR
import netCDF4
import numpy as np
import os.path
import rasterio
import tempfile
from django.conf import settings
from io import BytesIO
from django.template.loader import render_to_string
from django.utils.translation import activate
from ncdjango.geoprocessing.params import StringParameter
from ncdjango.geoprocessing.workflow import Task
from ncdjango.models import Service, Variable, SERVICE_DATA_ROOT
from rasterio.transform import Affine
class WriteTIF(Task):
    """ncdjango geoprocessing task that exports a service's variable as a
    zipped GeoTIFF plus a localized README."""
    name = 'write_tif'

    inputs = [
        StringParameter('service_id'),
        StringParameter('language_code', required=False)
    ]

    outputs = [StringParameter('filename')]

    def execute(self, service_id, language_code=None):
        """Render the service's variable to ``SST Results/results.tif`` inside
        a zip under DATASET_DOWNLOAD_DIR and return the zip's base filename.

        :param service_id: name of the ncdjango Service to export.
        :param language_code: locale for the README; defaults to
            settings.LANGUAGE_CODE.
        """
        if language_code is None:
            language_code = settings.LANGUAGE_CODE

        # Activate the locale so the README template renders translated text.
        activate(language_code)

        svc = Service.objects.get(name=service_id)
        var = Variable.objects.get(service_id=svc.id)
        data_path = svc.data_path

        with netCDF4.Dataset(os.path.join(SERVICE_DATA_ROOT, data_path), 'r') as nc:
            data = nc.variables[var.name][:].astype('uint8')

        height, width = data.shape
        ex = var.full_extent
        x_step = (ex.xmax - ex.xmin) / width
        y_step = (ex.ymax - ex.ymin) / height
        # GDAL-style geotransform: origin at the top-left corner, north-up
        # (hence the negative y step).
        transform = Affine.from_gdal(ex.xmin, x_step, 0, ex.ymax, 0, -y_step)

        dtype = np.uint8
        nodata = 128

        if not settings.DATASET_DOWNLOAD_DIR.exists():
            settings.DATASET_DOWNLOAD_DIR.mkdir()

        # mkstemp gives a unique name; only the path is needed, so close the fd.
        fd, filename = tempfile.mkstemp(dir=str(settings.DATASET_DOWNLOAD_DIR), suffix='.zip')
        os.close(fd)
        # Owner read/write, group read.
        os.chmod(filename, S_IRUSR | S_IWUSR | S_IRGRP)

        with zipfile.ZipFile(filename, mode='w') as zf:
            # Write the GeoTIFF into memory first, then into the zip.
            tif_data = BytesIO()
            with rasterio.Env(GDAL_TIFF_INTERNAL_MASK=True):
                opts = dict(
                    driver='GTiff', height=height, width=width, crs=var.projection, transform=transform, count=1,
                    dtype=dtype, nodata=nodata
                )
                with rasterio.open(tif_data, 'w', **opts) as dst:
                    dst.write(np.array(data, dtype=dtype), 1)
                    # NOTE(review): assumes `data` is a numpy masked array
                    # (mask derived from netCDF fill values) — confirm; the
                    # inverted mask marks valid pixels for rasterio.
                    dst.write_mask(np.logical_not(data.mask))

            zf.writestr('SST Results/results.tif', tif_data.getvalue(), compress_type=zipfile.ZIP_DEFLATED)
            zf.writestr(
                'SST Results/README.txt',
                render_to_string('txt/download_readme.txt', {'language_code': language_code}),
                compress_type=zipfile.ZIP_DEFLATED
            )

        return os.path.basename(filename)
|
matusvalo/python-easyldap | easyldap/tools.py | Python | bsd-3-clause | 2,067 | 0.002903 | from .libldap.structures import LDAPMod
def is_iterable(obj):
    """Return True if *obj* is iterable but is not a string/bytes object.

    LDAP attribute values may be supplied singly or as a collection;
    str/bytes count as single values even though they iterate.
    """
    # Fix: `Iterable` moved to collections.abc in Python 3.3 and the old
    # `collections.Iterable` alias was removed in Python 3.10.
    from collections.abc import Iterable
    return not isinstance(obj, (str, bytes)) and isinstance(obj, Iterable)
def is_ascii(s):
    """Return True when every character/byte of *s* is within 7-bit ASCII."""
    # Iterating bytes yields ints directly; text needs ord() first.
    values = s if isinstance(s, bytes) else (ord(ch) for ch in s)
    return all(v < 128 for v in values)
def ldap_decode(s):
    """Best-effort conversion of *s* to str (None passes through unchanged).

    bytes are decoded as UTF-8; any other object is tried via str() and then
    via bytes(...).decode().  Raises ValueError when nothing works.
    """
    if s is None:
        return None
    if isinstance(s, str):
        return str(s)
    elif isinstance(s, bytes):
        return s.decode('utf8')
    else:
        # Fix: the bare `except:` clauses also swallowed KeyboardInterrupt
        # and SystemExit; narrow them to Exception.
        try:
            return str(s)
        except Exception:
            pass
        try:
            return bytes(s).decode('utf8')
        except Exception:
            pass
        # Fix: the message previously said "decode to bytes", but this
        # function produces str.
        raise ValueError('Cannot decode to str')
def ldap_encode(s):
    """Best-effort conversion of *s* to bytes (None passes through unchanged).

    str is encoded as UTF-8; any other object is tried via bytes() and then
    via str(...).encode().  Raises ValueError when nothing works.

    NOTE(review): for an int n, ``bytes(n)`` yields n zero bytes on Python 3,
    so ldap_encode(5) == b'\\x00' * 5 rather than b'5' — confirm intended.
    """
    if s is None:
        return None
    if isinstance(s, str):
        return s.encode('utf8')
    elif isinstance(s, bytes):
        return bytes(s)
    else:
        # Fix: the bare `except:` clauses also swallowed KeyboardInterrupt
        # and SystemExit; narrow them to Exception.
        try:
            return bytes(s)
        except Exception:
            pass
        try:
            return str(s).encode('utf8')
        except Exception:
            pass
        raise ValueError('Cannot encode to bytes')
def build_binary_ldapmod(battr_name, op, vals):
    """Build an LDAPMod carrying binary values.

    *vals* may be a single value or a collection; each value is encoded to
    bytes before being handed to LDAPMod.create_binary.
    """
    operation = op | LDAPMod.LDAP_MOD_BVALUES
    name = ldap_encode(battr_name)
    if is_iterable(vals):
        encoded = map(ldap_encode, vals)
    else:
        encoded = ldap_encode(vals)
    return LDAPMod.create_binary(operation, name, values=encoded)
def build_ascii_ldapmod(attr_name, op, vals):
    """Build an LDAPMod carrying textual values.

    *vals* may be a single value or a collection; each value is encoded to
    bytes before being handed to LDAPMod.create_string.
    """
    name = ldap_encode(attr_name)
    if is_iterable(vals):
        encoded = map(ldap_encode, vals)
    else:
        encoded = ldap_encode(vals)
    return LDAPMod.create_string(op, name, values=encoded)
|
ducksboard/libsaas | doc/conf.py | Python | mit | 8,086 | 0.007049 | # -*- coding: utf-8 -*-
#
# libsaas documentation build configuration file, created by
# sphinx-quickstart on Sun May 20 14:45:58 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../libsaas/services'))
# Ensure the output directory for generated API docs exists, then run the
# generator script.  Fix: reading the script through a context manager closes
# the file handle, which the bare open() inside compile() leaked.
if not os.path.isdir('generated'):
    os.mkdir('generated')
with open('generate_doc.py') as generator_source:
    exec(compile(generator_source.read(), 'generate_doc.py', 'exec'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']
autodoc_default_flags = ['no-members', 'private-members', 'no-show-inheritance']
autodoc_member_order = 'bysource'
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'libsaas'
copyright = '2012, Ducksboard'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sou | rcel | ink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libsaasdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libsaas.tex', 'libsaas Documentation',
'Ducksboard', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libsaas', 'libsaas Documentation',
['Ducksboard'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'libsaas', 'libsaas Documentation',
'Ducksboard', 'libsaas', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
matham/kivy | kivy/tests/pyinstaller/simple_widget/project/widget.py | Python | mit | 253 | 0.003953 | from kivy.uix.widget import Widget
class MyWidget(Widget):
    """Widget whose x position always mirrors its y position."""

    def __init__(self, **kwargs):
        super(MyWidget, self).__init__(**kwargs)

        def sync_x(*args):
            # Keep x equal to y whenever y changes.
            self.x = self.y

        self.fbind('y', sync_x)
        sync_x()
|
b1oki/time_clicker_automatization | main.py | Python | gpl-3.0 | 5,645 | 0.004076 | # -*- coding: utf-8 -*-
"""Time Clickers Auto upgrade & Auto-Clicker Tool.
It makes the increment game look like the idle game.
Made by b1oki & hakonw
"""
import sys
import time
# dis_pylint: disable=locally-disabled, no-name-in-module, no-member
from win32gui import GetWindowText, GetForegroundWindow, GetWindowRect
import win32api
import win32con
__author__ = 'b1oki, hakonw'
# Title of the game window to match against the foreground window.
GAMENAME = 'Time Clickers'

# Polling delays in seconds for the various loop states.
SLEEPS_DELAY_NOT_PLAY = 0.3
SLEEPS_DELAY_AFTER_CLICK = 0.5
SLEEPS_DELAY_BETWEEN_STEPS = 0.002
# Main-loop iterations between bursts of upgrade/skill key presses.
COUNTER_LIMIT_BETWEEN_UPGRADES = 1000
COUNTER_INITIAL = 0
# upgrade keys (Windows virtual-key codes for A/S/D/F/G/H/C)
KEY_A = 0x41 # Pulse Pistol
KEY_S = 0x53 # Flak Cannon
KEY_D = 0x44 # Spread Rifle
KEY_F = 0x46 # Rocket Launcher
KEY_G = 0x47 # Particle Ball
KEY_H = 0x48 # Weapon Cubes
KEY_C = 0x43 # Active Abilities
# skills keys
# NOTE(review): every skill key below is 0x10, the VK_SHIFT code — these
# look like placeholders rather than the real '1'..'0' key codes; confirm.
KEY_1 = 0x10 # Automatic Fire
KEY_2 = 0x10 # Spread Shots
KEY_3 = 0x10 # Team Work
KEY_4 = 0x10 # Augmented Aim
KEY_5 = 0x10 # Overcharged
KEY_6 = 0x10 # Gold Rush
KEY_7 = 0x10
KEY_8 = 0x10
KEY_9 = 0x10
KEY_0 = 0x10
# first of all update the expensive stuff
UPGRADES_KEYS = (KEY_C, KEY_H, KEY_G, KEY_F, KEY_D, KEY_S, KEY_A)
# first activate all, then reset skills, after try activate again
SKILLS_KEYS = (KEY_7, win32con.VK_SPACE, KEY_0, KEY_7, win32con.VK_SPACE)
def print_oneline(string, one_line_string_flag=False):
    """Write *string* followed by a carriage return so the next write
    overwrites it in place; returns True ("one-line mode is active")."""
    out = sys.stdout
    out.write(string)
    # Rewind to column 0 so the following message overwrites this one.
    out.write('\r')
    out.flush()
    return True
def print_oneline_clear():
    """Finish the current overwritable line by emitting a bare newline."""
    sys.stdout.write('\n')
def print_newline(string, one_line_string_flag=False):
    """Print *string* on its own line and return False (one-line mode off).

    If one-line mode was active, a newline is emitted first so the
    overwritable line is preserved instead of being clobbered.
    """
    if one_line_string_flag:
        print_oneline_clear()
        one_line_string_flag = False
    # Fix: `print string` is Python-2-only statement syntax; the call form
    # with a single argument behaves identically on Python 2 and 3.
    print(string)
    return one_line_string_flag
def win_check(win_name):
    """Return True when *win_name* is the focused window and the mouse
    cursor lies inside the central click field of that window.

    __contributer__ = 'hakonw'
    """
    hwnd = GetForegroundWindow()
    if GetWindowText(hwnd) != win_name:
        # Sleep briefly while the game is unfocused to reduce CPU load.
        time.sleep(SLEEPS_DELAY_NOT_PLAY)
        return False
    left, top, right, bottom = GetWindowRect(hwnd)
    width = right - left
    height = bottom - top
    # Click field: trim 25% from each side, 16% from the top (title/UI
    # area) and 5% from the bottom of the window.
    x_lo = left + 0.25 * width
    x_hi = right - 0.25 * width
    y_lo = top + 0.16 * height
    y_hi = bottom - 0.05 * height
    cursor_x, cursor_y = win32api.GetCursorPos()
    # True only when the cursor is inside the field.
    return x_lo < cursor_x < x_hi and y_lo < cursor_y < y_hi
def mouse_click(pos_x, pos_y):
    """ Just click mouse in this point """
    # A click is a button-down immediately followed by a button-up event.
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, pos_x, pos_y, 0, 0)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, pos_x, pos_y, 0, 0)
def key_push(key_code):
    """ Press button by hex code """
    # Key press followed by key release for the given virtual-key code.
    win32api.keybd_event(key_code, 0, win32con.KEYEVENTF_EXTENDEDKEY, 0)
    win32api.keybd_event(key_code, 0, win32con.KEYEVENTF_KEYUP, 0)
def run():
    """Main loop: poll hotkeys, auto-click while enabled and focused, and
    periodically press the upgrade/skill keys.

    INSERT exits the loop; CTRL toggles clicking on/off.
    """
    # Whether the last console output was an overwritable one-line message.
    one_line_string_flag = False
    # Whether auto-clicking is currently enabled (toggled with CTRL).
    is_click = False
    in_focus = False
    focus_last_state = in_focus
    counter = COUNTER_INITIAL
    one_line_string_flag = print_newline(
        'Running Time Clickers Auto upgrade & Auto-Clicker', one_line_string_flag)
    one_line_string_flag = print_newline(
        'Use "CTRL" to turn it on/off', one_line_string_flag)
    one_line_string_flag = print_newline(
        'Use "INSERT" to shut down the script', one_line_string_flag)
    one_line_string_flag = print_newline(
        'Made by b1oki & hakonw', one_line_string_flag)
    one_line_string_flag = print_newline(
        '--- HINT: Just activate Idle mode for more effective farm ---', one_line_string_flag)
    one_line_string_flag = print_newline(
        '--- Press "E" some times and your pistol will target on the cubes by himself ---',
        one_line_string_flag)
    while True:
        # INSERT: leave the loop and shut the script down.
        if win32api.GetAsyncKeyState(win32con.VK_INSERT):
            one_line_string_flag = print_newline('Exit', one_line_string_flag)
            break
        # CTRL: toggle auto-clicking; sleep so one press isn't read twice.
        if win32api.GetAsyncKeyState(win32con.VK_CONTROL):
            one_line_string_flag = print_newline(
                'Script off' if is_click else 'Script on', one_line_string_flag)
            is_click = not is_click
            time.sleep(SLEEPS_DELAY_AFTER_CLICK)
        in_focus = win_check(GAMENAME)
        # Report focus transitions only when the state actually changes.
        if in_focus != focus_last_state:
            focus_last_state = in_focus
            # space in "Get focus" for equal length
            one_line_string_flag = print_oneline(
                'Get game window ' if in_focus else 'Lost game window', one_line_string_flag)
        if is_click and in_focus:
            pos_x, pos_y = win32api.GetCursorPos()
            mouse_click(pos_x, pos_y)
            # Every COUNTER_LIMIT_BETWEEN_UPGRADES clicks, press the
            # upgrade and skill keys in one burst.
            if counter > COUNTER_LIMIT_BETWEEN_UPGRADES:
                counter = COUNTER_INITIAL
                # upgrade first
                for key_code in UPGRADES_KEYS:
                    key_push(key_code)
                # after activate skills
                for key_code in SKILLS_KEYS:
                    key_push(key_code)
            counter += 1
            time.sleep(SLEEPS_DELAY_BETWEEN_STEPS)
if __name__ == '__main__':
    try:
        run()
    except KeyboardInterrupt:
        pass  # if user press Ctrl + C in command line
    # Fix: bare exit() is the site-module interactive helper and is not
    # guaranteed to exist (e.g. under -S or when frozen); sys.exit is the
    # supported way to terminate (sys is imported at module top).
    sys.exit(0)
|
mikron-ia/dice-roller | src/roller.py | Python | mit | 260 | 0.023077 | __author__ = "Mikron"
__version__ = "0.0-dev"
from roll import Roll
# Dice pool: two six-sided dice and one twenty-sided die.
dice = [6, 6, 20]

if __name__ == "__main__":
    roller = Roll(dice)
    for result in roller.get_rolls():
        print("Roll: {}\n".format(result))
charles-g-young/Table2NetCDF | gov/noaa/gmd/table_2_netcdf/Util.py | Python | apache-2.0 | 428 | 0.011682 | '''
Utilities
Created on Mar 3, 2017
@author: cyoung
'''
class Util:
    """Assorted helpers for the table-to-NetCDF conversion."""

    def getClass(self, kls):
        """Instantiate and return an object of the class named by the fully
        qualified dotted name *kls* (e.g. ``"pkg.module.ClassName"``)."""
        module_path, _, class_name = kls.rpartition('.')
        module = __import__(module_path, fromlist=[class_name])
        return getattr(module, class_name)()
|
anhstudios/swganh | data/scripts/templates/object/draft_schematic/bio_engineer/creature/shared_creature_shear_mite.py | Python | mit | 470 | 0.046809 | #### NOTICE: T | HIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the intangible draft-schematic template for the shear mite
    creature (autogenerated; see the notice at the top of this file)."""
    result = Intangible()
    result.template = "object/draft_schematic/bio_engineer/creature/shared_creature_shear_mite.iff"
    # -1: this template declares no attribute table of its own.
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Part.py | Python | gpl-2.0 | 19,486 | 0.052397 | #._cv_part guppy.heapy.Part
class Format(object):
    """Rendering helper shared by the partition-table formats below.

    Wraps a table implementation (``impl``) and knows how to turn its rows
    into wrapped text lines and pretty-print them.
    """
    __slots__ = 'impl', 'mod'

    def __init__(self, impl):
        self.impl = impl
        self.mod = impl.mod

    def get_formatted_row(self, row):
        """Format one row: statistics columns followed by the (possibly
        multi-line) kind name, wrapped so continuation lines align."""
        fr = self.get_stat_data(row)
        rows = []
        rs = row.name.split('\n')
        subsequent_indent = len(fr)*' '
        rows.extend(self.mod.wrap(
            fr+rs[0],
            width=self.mod.line_length,
            subsequent_indent=subsequent_indent))
        for r in rs[1:]:
            rows.extend(self.mod.wrap(
                r,
                width=self.mod.line_length,
                initial_indent=subsequent_indent,
                subsequent_indent=subsequent_indent))
        return '\n'.join(rows)

    def get_more_index(self, idx=None):
        # Step the paging index forward by one screenful (10 rows).
        if idx is None:
            idx = 0
        idx += 10
        return idx

    def get_row_header(self):
        """Return the wrapped column-header line ('' for an empty table)."""
        impl = self.impl
        if not (impl.count or impl.size):
            return ''
        sh = self.get_stat_header()
        return self.mod.fill(
            sh + self.impl.kindheader,
            width=self.mod.line_length,
            subsequent_indent=' '*len(sh))

    def load_statrow_csk(self, r):
        # Parse a '<count> <size> <kind>' row, accumulating cumulative size.
        impl = self.impl
        count, size, kind = r.split(' ', 2)
        count = int(count)
        size = int(size)
        impl.cum_size += size
        return StatRow(count, size, kind, impl.cur_index, impl.cum_size)

    def load_statrow_sk(self, r):
        # Parse a '<size> <kind>' row; the count is implicitly 1.
        impl = self.impl
        size, kind = r.split(' ', 1)
        size = int(size)
        impl.cum_size += size
        return StatRow(1, size, kind, impl.cur_index, impl.cum_size)

    def ppob(self, ob, idx=None):
        """Pretty-print up to 10 rows starting at *idx* to file-like *ob*.

        A negative *idx* counts from the end of the table.  When more rows
        remain, a '<N more rows ...>' hint is appended.
        """
        # `print >>ob, x` statements were rewritten as ob.write(x + '\n'),
        # which is behaviourally identical for these single string arguments
        # and also parses on Python 3.
        impl = self.impl
        if idx is None:
            label = self.get_label()
            if label is not None:
                ob.write(label + '\n')
            idx = 0
        if idx < 0:
            # Bug fix: this line read the undefined name `startindex` and
            # raised NameError whenever a negative index was passed.
            idx = impl.numrows + idx
        it = impl.get_rows(idx)
        ob.write(self.get_row_header() + '\n')
        numrows = 0
        for row in it:
            form = self.get_formatted_row(row)
            ob.write(form + '\n')
            numrows += 1
            if numrows >= 10:
                nummore = impl.numrows - 1 - row.index
                if nummore > 1:
                    ob.write(
                        "<%d more rows. Type e.g. '_.more' to view.>\n" % nummore)
                break
class SetFormat(Format):
    """Format for a partition of a set: count, size and cumulative columns."""
    __slots__ = ()

    def get_label(self):
        impl = self.impl
        plural = 's' if impl.count != 1 else ''
        return 'Partition of a set of %d object%s. Total size = %d bytes.'%(
            impl.count, plural, impl.size)

    def get_rowdata(self, row):
        return '%d %d %s'%(row.count, row.size, row.name)

    def get_stat_header(self):
        return (
            ' Index Count % Size % Cumulative % ')

    def get_stat_data(self, row):
        impl = self.impl
        # Percentages are rounded to whole numbers relative to the totals.
        def pct(part, whole):
            return int('%.0f'%(part * 100.0/whole))
        return '%6d %6d %3d %8d %3d %9d %3d ' % (
            row.index,
            row.count, pct(row.count, impl.count),
            row.size, pct(row.size, impl.size),
            row.cumulsize, pct(row.cumulsize, impl.size),
        )

    def load_statrow(self, r):
        # Rows carry explicit count, size and kind.
        return self.load_statrow_csk(r)
class IdFormat(Format):
    """Format for a homogeneous set: size and cumulative-size columns only."""
    __slots__ = ()

    def get_label(self):
        impl = self.impl
        if impl.count != 1:
            s = 's'
        else:
            s = ''
        # Bug fix: an unreachable `return part` statement followed this
        # return and referenced an undefined name; it has been removed.
        return (
            'Set of %d %s object%s. Total size = %d bytes.'%(
            impl.count, impl.kindname, s, impl.size))

    def get_rowdata(self, row):
        return '%d %s'%(row.size, row.name)

    def get_stat_header(self):
        return (
            ' Index Size % Cumulative % ')

    def get_stat_data(self, row):
        impl = self.impl
        format = '%6d %8d %5.1f %9d %5.1f '
        fr = format % (
            row.index,
            row.size, (row.size * 100.0/impl.size),
            row.cumulsize, row.cumulsize * 100.0/impl.size,
            )
        return fr

    def load_statrow(self, r):
        # Rows carry only size and kind; the count is implicitly 1.
        return self.load_statrow_sk(r)
class DiffFormat(Format):
    """Format for the result of a set-difference (A-B) statistic."""
    __slots__ = ()

    def _percent_of_b(self, size):
        # A percentage is undefined when the B set is empty.
        if self.impl.b_size != 0:
            return '%9.3g'%(size*100.0/self.impl.b_size,)
        return ' (n.a.)'

    def get_label(self):
        impl = self.impl
        parts = [
            'Summary of difference operation (A-B).\n',
            ' Count Size\n',
            ' A %6d %8d\n' % (impl.count+impl.b_count, impl.size+impl.b_size),
            ' B %6d %8d\n' % (impl.b_count, impl.b_size),
            ' A-B %6d %8d = %s %% of B\n' % (impl.count, impl.size, self._percent_of_b(impl.size)),
        ]
        label = ''.join(parts)
        if impl.count or impl.size:
            label += '\nDifferences by kind, largest absolute size diffs first.'
        return label

    def get_rowdata(self, row):
        return '%d %d %s'%(row.count, row.size, row.name)

    def get_stat_header(self):
        return (
            ' Index Count Size Cumulative % of B ')

    def get_stat_data(self, row):
        return '%6d %6d %8d %9d %s ' % (
            row.index,
            row.count,
            row.size,
            row.cumulsize,
            self._percent_of_b(row.cumulsize),
        )

    def load_statrow(self, r):
        # Rows carry explicit count, size and kind.
        return self.load_statrow_csk(r)
class StatRow(object):
    """One row of a statistics table: (count, size, name) plus its position
    in the table (index) and the running cumulative size."""
    __slots__ = 'count', 'size', 'name', 'index', 'cumulsize'

    def __init__(self, count, size, name, index=None, cumulsize=None):
        # Assign each slot from the positionally matching argument.
        for slot, value in zip(self.__slots__,
                               (count, size, name, index, cumulsize)):
            setattr(self, slot, value)
class PartRow(StatRow):
    """StatRow extended with the originating partition set and its kind."""
    __slots__ = 'set', 'kind'

    def __init__(self, count, size, name, index, cumulsize, set, kind):
        # Populate the inherited slots first, then the two extra ones.
        StatRow.__init__(self, count, size, name, index, cumulsize)
        self.set = set
        self.kind = kind
class Stat:
def __init__(self, mod, get_trows, firstheader=''):
    """Parse a stored statistic: header lines first, data rows lazily.

    `get_trows` returns an iterable of text rows; header rows look like
    '.name: value' and are consumed here, while data rows ('.r: ...') are
    left on the iterator for get_next/get_row to pull on demand.
    """
    self.mod = mod
    self._hiding_tag_ = mod._hiding_tag_
    self.get_trows = get_trows
    self.firstheader = firstheader
    self.it = iter(get_trows())
    self.cur_index = 0
    self.cum_size = 0
    self.rows = []
    r = self.get_next()
    # Consume header rows until the first data row ('.r:') appears.
    while r and not r.startswith('.r:'):
        name = r[1:r.index(':')]
        value = r[r.index(':')+1:].strip()
        try:
            value = int(value)
        except ValueError:
            pass  # non-numeric header values are kept as strings
        # Headers become attributes: .count, .size, .format, .timemade, ...
        setattr(self, name, value)
        r = self.get_next()
    # '.format:' named a class; replace the string attribute with an
    # instance of that class (looked up on the owning module).
    self.format_name = self.format
    self.format_class = getattr(self.mod, self.format)
    self.format = self.format_class(self)
    self.timemade = float(self.timemade)
def __getitem__(self, idx):
    """Select rows by index or slice and return a new Stat for them.

    The selection is re-serialized to header + '.r:' text rows and loaded
    through self.mod.load, so the result is a full Stat object.
    NOTE: this is Python 2 code (`long`, comma-form raise).
    """
    if isinstance(idx, (int, long)):
        if idx < 0:
            # Support negative indices, counting from the end.
            idx = self.numrows + idx
        if not (0 <= idx < self.numrows):
            raise IndexError, 'Stat index out of range.'
        rows = [self.get_row(idx)]
    elif isinstance(idx, slice):
        start, stop, step = idx.indices(self.numrows)
        rows = [self.get_row(idx) for idx in range(start, stop, step)]
    else:
        raise IndexError, 'Stat indices must be integers or slices.'
    # Recompute the aggregate header fields for the selected rows only.
    count = 0
    size = 0
    for r in rows:
        count += r.count
        size += r.size
    trows = [
        '.loader: _load_stat',
        '.format: %s'%self.format_name,
        '.timemade: %f'%self.timemade,
        '.count: %d'%count,
        '.size: %d'%size,
        '.kindheader: %s'%self.kindheader,
        '.kindname: %s'%self.kindname,
        '.numrows: %d'%len(rows),
        ]
    # Difference statistics additionally carry B's totals.
    if getattr(self, 'b_count', None) is not None:
        trows.append('.b_count: %d'%self.b_count)
        trows.append('.b_size: %d'%self.b_size)
    for r in rows:
        trows.append('.r: %s'%self.format.get_rowdata(r))
    return self.mod.load(trows)
def __len__(self):
    # Number of data rows, as declared by the '.numrows' header field.
    return self.numrows
def __repr__(self):
    """Pretty-print the whole statistic into a string via ppob."""
    ob = self.mod.output_buffer()
    self.ppob(ob)
    return self.firstheader + ob.getvalue().rstrip()
def __sub__(self, other):
if not isinstance(other, Stat):
raise TypeError, 'Can only take difference with other Stat instance.'
if self.kindheader != other.kindheader:
raise ValueError, 'Mismatching table kind header, %r vs %r.'%(
self.kindheader, other.kindheader)
rows = []
otab = {}
stab = {}
for r in other.get_rows():
o = otab.get(r.name)
if o:
otab[r.name] = StatRow(r.count+o.count, r.size+o.size, r.name, o.index, None)
else:
otab[r.name] = r
for r in self.get_rows():
o = otab.get(r.name)
if o:
del otab[r.name]
count = r.count - o.count
size = r.size - o.size
else:
count = r.count
size = r.size
if count == 0 and size == 0:
continue
sr = stab.get(r.name)
if sr:
sr.count += count
sr.size += size
else:
sr = StatRow(count, size, r.name)
stab[sr.name] = sr
rows.append(sr)
rs = otab.values()
rs.sort(lambda x,y:cmp(x.index, y.index)) # Preserve orig. order
for r in rs:
sr = StatRow(-r.count, -r.size, r.name)
assert sr.name not in stab
rows.append(sr)
rows.sort(lambda x,y:cmp(abs(y.size), abs(x.size)))
cumulcount = 0
cumulsize = 0
for r in rows:
|
Aloomaio/googleads-python-lib | examples/adwords/v201809/error_handling/handle_partial_failures.py | Python | apache-2.0 | 4,130 | 0.006295 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example demonstrates how to handle partial failures.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
  """Adds four broad-match keywords to an ad group with partial failure on.

  Two of the keywords contain characters that AdWords rejects, so with
  partial failure enabled the valid keywords are created while the invalid
  ones come back in result['partialFailureErrors'].

  Args:
    client: an initialized AdWordsClient instance.
    ad_group_id: the ID of the ad group the keywords are added to.
  """
  # Enable partial failure so valid operations succeed even if others fail.
  client.partial_failure = True

  # Initialize appropriate service.
  ad_group_criterion_service = client.GetService(
      'AdGroupCriterionService', version='v201809')

  # Construct keyword ad group criteria objects. The keywords with escaped
  # special characters are intentionally invalid.
  keywords = [
      {
          'xsi_type': 'BiddableAdGroupCriterion',
          'adGroupId': ad_group_id,
          'criterion': {
              'xsi_type': 'Keyword',
              'matchType': 'BROAD',
              'text': 'mars cruise'
          }
      },
      {
          'xsi_type': 'BiddableAdGroupCriterion',
          'adGroupId': ad_group_id,
          'criterion': {
              'xsi_type': 'Keyword',
              'matchType': 'BROAD',
              'text': r'inv\@lid cruise'
          }
      },
      {
          'xsi_type': 'BiddableAdGroupCriterion',
          'adGroupId': ad_group_id,
          'criterion': {
              'xsi_type': 'Keyword',
              'matchType': 'BROAD',
              'text': 'venus cruise'
          }
      },
      {
          'xsi_type': 'BiddableAdGroupCriterion',
          'adGroupId': ad_group_id,
          'criterion': {
              'xsi_type': 'Keyword',
              'matchType': 'BROAD',
              'text': r'b\(a\)d keyword cruise'
          }
      }
  ]

  # Construct one ADD operation per keyword.
  operations = [{'operator': 'ADD', 'operand': keyword}
                for keyword in keywords]

  result = ad_group_criterion_service.mutate(operations)

  # Display the successfully added criteria.
  for criterion in result['value']:
    if criterion['AdGroupCriterion.Type'] == 'BiddableAdGroupCriterion':
      print('Added keyword ad group criterion with ad group id "%s", '
            'criterion id "%s", text "%s", and match type "%s" was '
            'added.'
            % (criterion['adGroupId'], criterion['criterion']['id'],
               criterion['criterion']['text'],
               criterion['criterion']['matchType']))

  # Map each partial-failure error back to the operation that caused it via
  # the first field path element ('operations[index]').
  for error in result['partialFailureErrors']:
    field_path_elements = error['fieldPathElements']
    first_field_path_element = None
    if field_path_elements:
      first_field_path_element = field_path_elements[0]
    if (first_field_path_element and
        first_field_path_element['field'] == 'operations' and
        'index' in first_field_path_element):
      operation_index = first_field_path_element['index']
      adgroup_criterion = operations[operation_index]['operand']
      print('Ad group criterion with ad group ID %s and keyword "%s" '
            'triggered a failure for the following reason: %s.'
            % (adgroup_criterion['adGroupId'],
               adgroup_criterion['criterion']['text'],
               error['errorString']))
    else:
      print('A failure has occurred for the following reasons: %s\n'
            % error['errorString'])
if __name__ == '__main__':
  # Initialize client object. LoadFromStorage reads credentials from the
  # default googleads.yaml (see the module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client, AD_GROUP_ID)
|
drnextgis/QGIS | python/ext-libs/nose2/plugins/coverage.py | Python | gpl-2.0 | 2,893 | 0.000346 | """
Use this plugin to activate coverage report.
To install this plugin, you need to activate ``coverage-plugin``
with extra requirements :
::
$ pip install nose2[coverage-plugin]
Next, you can enable coverage reporting with :
::
$ nose2 --with-coverage
Or with this lines in ``unittest.cfg`` :
::
[coverage]
always-on = True
"""
from nose2.events import Plugin
class Coverage(Plugin):
    """nose2 plugin that measures and reports test coverage via cov_core."""

    configSection = 'coverage'
    commandLineSwitch = ('C', 'with-coverage', 'Turn on coverage reporting')

    def __init__(self):
        """Get our config and add our command line arguments."""
        self.conSource = self.config.as_list('coverage', [])
        self.conReport = self.config.as_list('coverage-report', [])
        self.conConfig = self.config.as_str('coverage-config', '').strip()
        group = self.session.pluginargs
        group.add_argument(
            '--coverage', action='append', default=[], metavar='PATH',
            dest='coverage_source',
            help='Measure coverage for filesystem path (multi-allowed)'
        )
        group.add_argument(
            '--coverage-report', action='append', default=[], metavar='TYPE',
            choices=['term', 'term-missing', 'annotate', 'html', 'xml'],
            dest='coverage_report',
            help='Generate selected reports, available types:'
                 ' term, term-missing, annotate, html, xml (multi-allowed)'
        )
        group.add_argument(
            '--coverage-config', action='store', default='', metavar='FILE',
            dest='coverage_config',
            help='Config file for coverage, default: .coveragerc'
        )

    def handleArgs(self, event):
        """Get our options in order command line, config file, hard coded."""
        self.covSource = (event.args.coverage_source or
                          self.conSource or ['.'])
        self.covReport = (event.args.coverage_report or
                          self.conReport or ['term'])
        self.covConfig = (event.args.coverage_config or
                          self.conConfig or '.coveragerc')

    def startTestRun(self, event):
        """Only called if active so start coverage."""
        self.covController = None
        try:
            import cov_core
        except ImportError:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only a missing module is expected.
            print('Warning: you need to install [coverage-plugin] '
                  'extra requirements to use this plugin')
            return
        self.covController = cov_core.Central(self.covSource,
                                              self.covReport,
                                              self.covConfig)
        self.covController.start()

    def afterSummaryReport(self, event):
        """Only called if active so stop coverage and produce reports."""
        if self.covController:
            self.covController.finish()
            self.covController.summary(event.stream)
|
pedrospdc/pinger | setup.py | Python | mit | 939 | 0.001065 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
from setuptools import setup, find_packages
if setuptools.__version__ < '0.7':
raise RuntimeError("setuptools must be newer than 0.7")
version = "0.1.3"
setup(
name="pinger",
version=version,
author="Pedro Palhares (pedrospdc)",
author_email="pedrospdc@gmail.com",
description="Website monitoring tool",
url="https://github.com/pedrospdc/pinger",
download_url="https://github.com/pedrospdc/pinger/tarball/{}".format(version),
packages=find_packages(),
zip_safe=False,
license="MIT",
classifiers=[
" | Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Lang | uage :: Python",
],
install_requires=[
"requests>=2.4.3",
"peewee>=2.4.0"
],
scripts=["bin/pinger"],
)
|
alfredodeza/pytest | src/_pytest/skipping.py | Python | mit | 6,499 | 0.000308 | """ support for skip/xfail functions and markers. """
from _pytest.config import hookimpl
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.store import StoreKey
skipped_by_mark_key = StoreKey[bool]()
evalxfail_key = StoreKey[MarkEvaluator]()
unexpectedsuccess_key = StoreKey[str]()
def pytest_addoption(parser):
    """Register the --runxfail CLI flag and the xfail_strict ini option."""
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="report the results of xfail tests as if they were not marked",
    )
    parser.addini(
        "xfail_strict",
        "default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )
def pytest_configure(config):
    """Register skip/skipif/xfail marker docs; neuter pytest.xfail under --runxfail."""
    if config.option.runxfail:
        # yay a hack
        import pytest

        # Replace pytest.xfail with a no-op for the session and restore the
        # original at config cleanup time.
        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        # Keep the exception type reachable so `except pytest.xfail.Exception`
        # in user code still works.
        nop.Exception = xfail.Exception
        setattr(pytest, "xfail", nop)
    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. see "
        "https://docs.pytest.org/en/latest/skipping.html",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
        "mark the test function as an expected failure if eval(condition) "
        "has a True value. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/latest/skipping.html",
    )
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """Evaluate skip/skipif/xfail marks before the test runs."""
    # Check if skip or skipif are specified as pytest marks
    item._store[skipped_by_mark_key] = False
    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._store[skipped_by_mark_key] = True
        skip(eval_skipif.getexplanation())
    for skip_info in item.iter_markers(name="skip"):
        item._store[skipped_by_mark_key] = True
        # A skip mark's reason may be given as a kwarg, a positional arg,
        # or omitted entirely.
        if "reason" in skip_info.kwargs:
            skip(skip_info.kwargs["reason"])
        elif skip_info.args:
            skip(skip_info.args[0])
        else:
            skip("unconditional skip")
    # Stash the xfail evaluator for the call/report phases.
    item._store[evalxfail_key] = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
    """Around the test call: enforce xfail(run=False) and strict XPASS."""
    check_xfail_no_run(pyfuncitem)
    outcome = yield
    passed = outcome.excinfo is None
    if passed:
        # A test that passed despite an xfail mark may need to be failed
        # when strict mode is in effect.
        check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item):
    """check xfail(run=False)"""
    # --runxfail disables all xfail special-casing.
    if item.config.option.runxfail:
        return
    evalxfail = item._store[evalxfail_key]
    if not evalxfail.istrue():
        return
    if not evalxfail.get("run", True):
        xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem):
    """check xfail(strict=True) for the given PASSING test"""
    evalxfail = pyfuncitem._store[evalxfail_key]
    if evalxfail.istrue():
        # `strict` defaults to the xfail_strict ini value when not given
        # explicitly on the marker.
        strict_default = pyfuncitem.config.getini("xfail_strict")
        is_strict_xfail = evalxfail.get("strict", strict_default)
        if is_strict_xfail:
            # Remove the evaluator so pytest_runtest_makereport does not
            # process this item's xfail mark a second time.
            del pyfuncitem._store[evalxfail_key]
            explanation = evalxfail.getexplanation()
            fail("[XPASS(strict)] " + explanation, pytrace=False)
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Rewrite the test report for xfail/xpass/unexpected-success outcomes."""
    outcome = yield
    rep = outcome.get_result()
    evalxfail = item._store.get(evalxfail_key, None)
    # unittest special case, see setting of unexpectedsuccess_key
    if unexpectedsuccess_key in item._store and rep.when == "call":
        reason = item._store[unexpectedsuccess_key]
        if reason:
            rep.longrepr = "Unexpected success: {}".format(reason)
        else:
            rep.longrepr = "Unexpected success"
        rep.outcome = "failed"
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
        # The test body called pytest.xfail() imperatively.
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
        if call.excinfo:
            # Failed as expected — unless it raised something outside the
            # marker's `raises=` list, which counts as a real failure.
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            # Passed despite the xfail mark: XPASS, failed if strict.
            strict_default = item.config.getini("xfail_strict")
            is_strict_xfail = evalxfail.get("strict", strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif (
        item._store.get(skipped_by_mark_key, True)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        _, _, reason = rep.longrepr
        filename, line = item.location[:2]
        rep.longrepr = filename, line + 1, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
    """Map reports carrying ``wasxfail`` onto the xfailed/xpassed categories."""
    if not hasattr(report, "wasxfail"):
        return None
    if report.skipped:
        return "xfailed", "x", "XFAIL"
    if report.passed:
        return "xpassed", "X", "XPASS"
    return None
|
arKtelix/plugin.program.1.search | resources/lib/infodialog.py | Python | gpl-2.0 | 6,445 | 0.045151 | import sys, re
import xbmc, xbmcgui
import contextmenu
__language__ = sys.modules[ "__main__" ].__language__
__cwd__ = sys.modules[ "__main__" ].__cwd__
# Action ids that dismiss the dialog and the 'show info' action id —
# presumably Kodi/xbmcgui action codes (back, escape, etc.); verify against
# the xbmcgui action constants.
CANCEL_DIALOG = ( 9, 10, 92, 216, 247, 257, 275, 61467, 61448, )
ACTION_SHOW_INFO = ( 11, )
class GUI( xbmcgui.WindowXMLDialog ):
    """Info dialog for a search result item.

    Shows the listitem's details and, depending on `content` (movies,
    tvshows, seasons, episodes, musicvideos, artists, albums, songs,
    actors), exposes up to three action buttons (ids 191/192/193). The
    chosen action is stored in `self.action` by _close_dialog for the
    caller to read after doModal().
    """
    def __init__( self, *args, **kwargs ):
        xbmcgui.WindowXMLDialog.__init__( self )
        self.listitem = kwargs[ "listitem" ]
        self.content = kwargs[ "content" ]
        self.selected_source = None
    def onInit( self ):
        # Called by Kodi once the window is loaded: reset and populate.
        self._hide_controls()
        self._show_info()
    def _hide_controls( self ):
        """Hide every content panel and button; _show_info re-enables some."""
        self.getControl( 110 ).setVisible( False )
        self.getControl( 120 ).setVisible( False )
        self.getControl( 130 ).setVisible( False )
        self.getControl( 140 ).setVisible( False )
        self.getControl( 150 ).setVisible( False )
        self.getControl( 160 ).setVisible( False )
        self.getControl( 170 ).setVisible( False )
        self.getControl( 180 ).setVisible( False )
        self.getControl( 191 ).setVisible( False )
        self.getControl( 192 ).setVisible( False )
        self.getControl( 193 ).setVisible( False )
    def _show_info( self ):
        """Show the panel and buttons appropriate for self.content."""
        self.getControl( 100 ).addItem( self.listitem )
        if self.content == 'movies':
            self.getControl( 192 ).setLabel( xbmc.getLocalizedString(208) )
            self.getControl( 193 ).setLabel( xbmc.getLocalizedString(20410) )
            self.getControl( 110 ).setVisible( True )
            self.getControl( 191 ).setVisible( True )
            self.getControl( 192 ).setVisible( True )
            # Trailer button only when the item actually has a trailer.
            if self.listitem.getProperty('trailer'):
                self.getControl( 193 ).setVisible( True )
        elif self.content == 'tvshows':
            self.getControl( 192 ).setLabel( xbmc.getLocalizedString(1024) )
            self.getControl( 120 ).setVisible( True )
            self.getControl( 191 ).setVisible( True )
            self.getControl( 192 ).setVisible( True )
        elif self.content == 'seasons':
            self.getControl( 192 ).setLabel( xbmc.getLocalizedString(1024) )
            self.getControl( 130 ).setVisible( True )
            self.getControl( 191 ).setVisible( True )
            self.getControl( 192 ).setVisible( True )
        elif self.content == 'episodes':
            self.getControl( 192 ).setLabel( xbmc.getLocalizedString(208) )
            self.getControl( 140 ).setVisible( True )
            self.getControl( 191 ).setVisible( True )
            self.getControl( 192 ).setVisible( True )
        elif self.content == 'musicvideos':
            self.getControl( 192 ).setLabel( xbmc.getLocalizedString(208) )
            self.getControl( 150 ).setVisible( True )
            self.getControl( 191 ).setVisible( True )
            self.getControl( 192 ).setVisible( True )
        elif self.content == 'artists':
            self.getControl( 192 ).setLabel( xbmc.getLocalizedString(1024) )
            self.getControl( 160 ).setVisible( True )
            self.getControl( 191 ).setVisible( True )
            self.getControl( 192 ).setVisible( True )
        elif self.content == 'albums':
            self.getControl( 192 ).setLabel( xbmc.getLocalizedString(208) )
            self.getControl( 193 ).setLabel( xbmc.getLocalizedString(1024) )
            self.getControl( 170 ).setVisible( True )
            self.getControl( 191 ).setVisible( True )
            self.getControl( 192 ).setVisible( True )
            self.getControl( 193 ).setVisible( True )
        elif self.content == 'songs':
            self.getControl( 192 ).setLabel( xbmc.getLocalizedString(208) )
            self.getControl( 180 ).setVisible( True )
            self.getControl( 191 ).setVisible( True )
            self.getControl( 192 ).setVisible( True )
        elif self.content == 'actors':
            self.getControl( 192 ).setLabel( xbmc.getLocalizedString(208) )
            self.getControl( 193 ).setLabel( xbmc.getLocalizedString(20410) )
            self.getControl( 110 ).setVisible( True )
            self.getControl( 191 ).setVisible( True )
            self.getControl( 192 ).setVisible( True )
            if self.listitem.getProperty('trailer'):
                self.getControl( 193 ).setVisible( True )
        self.setFocus( self.getControl( 191 ) )
    def _close_dialog( self, action=None ):
        # Record the requested action for the caller and dismiss the window.
        self.action = action
        self.close()
    def onClick( self, controlId ):
        # 191 = close, 192 = primary action, 193 = secondary action,
        # 194 = choose a source from the context menu, then act as 192.
        if controlId == 191:
            self._close_dialog()
        elif controlId == 192:
            if self.content == 'movies':
                self._close_dialog( 'play_movie' )
            elif self.content == 'tvshows':
                self._close_dialog( 'browse_tvshow' )
            elif self.content == 'seasons':
                self._close_dialog( 'browse_season' )
            elif self.content == 'episodes':
                self._close_dialog( 'play_episode' )
            elif self.content == 'musicvideos':
                self._close_dialog( 'play_musicvideo' )
            elif self.content == 'artists':
                self._close_dialog( 'browse_artist' )
            elif self.content == 'albums':
                self._close_dialog( 'play_album' )
            elif self.content == 'songs':
                self._close_dialog( 'play_song' )
            if self.content == 'actors':
                self._close_dialog( 'play_movie_actors' )
        elif controlId == 193:
            if self.content == 'movies':
                self._close_dialog( 'play_trailer' )
            if self.content == 'albums':
                self._close_dialog( 'browse_album' )
            if self.content == 'actors':
                self._close_dialog( 'play_trailer_actors' )
        elif controlId == 194:
            sources = self.listitem.getProperty('source_names').split(',')
            print '-> sources: %s' % sources
            context_menu = contextmenu.GUI( "script-globalsearch-contextmenu.xml" , __cwd__, "Default", labels=sources )
            context_menu.doModal()
            if context_menu.selection is not None:
                self.selected_source = context_menu.selection
                self.onClick( 192 )
            del context_menu
    def onFocus( self, controlId ):
        pass
    def onAction( self, action ):
        # Close on cancel/back-style actions and on the 'show info' toggle.
        if ( action.getId() in CANCEL_DIALOG ) or ( action.getId() in ACTION_SHOW_INFO ):
            self._close_dialog()
|
choderalab/Ensembler2 | MSMSeeder/__init__.py | Python | gpl-2.0 | 33 | 0 | from MS | MSeed | er.attic import core
|
jazztpt/edx-platform | lms/djangoapps/discussion_api/tests/test_views.py | Python | agpl-3.0 | 34,440 | 0.001249 | """
Tests for Discussion API views
"""
from datetime import datetime
import json
from urlparse import urlparse
import ddt
import httpretty
import mock
from pytz import UTC
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
from discussion_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_comment,
make_minimal_cs_thread,
)
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class DiscussionAPIViewTestMixin(CommentsServiceMockMixin, UrlResetMixin):
    """
    Mixin for common code in tests of Discussion API views. This includes
    creation of common structures (e.g. a course, user, and enrollment), logging
    in the test client, utility functions, and a test case for unauthenticated
    requests. Subclasses must set self.url in their setUp methods.
    """
    client_class = APIClient

    @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(DiscussionAPIViewTestMixin, self).setUp()
        self.maxDiff = None  # pylint: disable=invalid-name
        # A minimal x/y/z course with a single non-courseware topic.
        self.course = CourseFactory.create(
            org="x",
            course="y",
            run="z",
            start=datetime.now(UTC),
            discussion_topics={"Test Topic": {"id": "test_topic"}}
        )
        self.password = "password"
        self.user = UserFactory.create(password=self.password)
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
        self.client.login(username=self.user.username, password=self.password)

    def assert_response_correct(self, response, expected_status, expected_content):
        """
        Assert that the response has the given status code and parsed content
        """
        self.assertEqual(response.status_code, expected_status)
        parsed_content = json.loads(response.content)
        self.assertEqual(parsed_content, expected_content)

    def test_not_authenticated(self):
        # Requests without a session must be rejected with a 401.
        self.client.logout()
        response = self.client.get(self.url)
        self.assert_response_correct(
            response,
            401,
            {"developer_message": "Authentication credentials were not provided."}
        )
class CourseViewTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase):
    """Tests for CourseView"""
    def setUp(self):
        super(CourseViewTest, self).setUp()
        self.url = reverse("discussion_course", kwargs={"course_id": unicode(self.course.id)})

    def test_404(self):
        # An unknown course id yields a plain 404 payload.
        response = self.client.get(
            reverse("course_topics", kwargs={"course_id": "non/existent/course"})
        )
        self.assert_response_correct(
            response,
            404,
            {"developer_message": "Not found."}
        )

    def test_get_success(self):
        # The course metadata payload includes the related discussion URLs.
        response = self.client.get(self.url)
        self.assert_response_correct(
            response,
            200,
            {
                "id": unicode(self.course.id),
                "blackouts": [],
                "thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz",
                "following_thread_list_url": (
                    "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&following=True"
                ),
                "topics_url": "http://testserver/api/discussion/v1/course_topics/x/y/z",
            }
        )
class CourseTopicsViewTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase):
    """Tests for CourseTopicsView"""
    def setUp(self):
        super(CourseTopicsViewTest, self).setUp()
        self.url = reverse("course_topics", kwargs={"course_id": unicode(self.course.id)})

    def test_404(self):
        # An unknown course id yields a plain 404 payload.
        response = self.client.get(
            reverse("course_topics", kwargs={"course_id": "non/existent/course"})
        )
        self.assert_response_correct(
            response,
            404,
            {"developer_message": "Not found."}
        )

    def test_get_success(self):
        # The single configured topic comes back as a non-courseware topic.
        response = self.client.get(self.url)
        self.assert_response_correct(
            response,
            200,
            {
                "courseware_topics": [],
                "non_courseware_topics": [{
                    "id": "test_topic",
                    "name": "Test Topic",
                    "children": [],
                    "thread_list_url":
                        "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&topic_id=test_topic",
                }],
            }
        )
@ddt.ddt
@httpretty.activate
class ThreadViewSetListTest(DiscussionAPIViewTestMixin, ModuleStoreTestCase):
"""Tests for ThreadViewSet list"""
def setUp(self):
    super(ThreadViewSetListTest, self).setUp()
    # A second user acts as the thread author, distinct from self.user.
    self.author = UserFactory.create()
    self.url = reverse("thread-list")
def test_course_id_missing(self):
    # Omitting the required course_id query parameter is a field error.
    response = self.client.get(self.url)
    self.assert_response_correct(
        response,
        400,
        {"field_errors": {"course_id": {"developer_message": "This field is required."}}}
    )
def test_404(self):
    # An unknown course id yields a plain 404 payload.
    response = self.client.get(self.url, {"course_id": unicode("non/existent/course")})
    self.assert_response_correct(
        response,
        404,
        {"developer_message": "Not found."}
    )
def test_basic(self):
    """One comments-service thread is translated into the API shape."""
    self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
    # Raw thread as the comments service would return it.
    source_threads = [{
        "type": "thread",
        "id": "test_thread",
        "course_id": unicode(self.course.id),
        "commentable_id": "test_topic",
        "group_id": None,
        "user_id": str(self.author.id),
        "username": self.author.username,
        "anonymous": False,
        "anonymous_to_peers": False,
        "created_at": "2015-04-28T00:00:00Z",
        "updated_at": "2015-04-28T11:11:11Z",
        "thread_type": "discussion",
        "title": "Test Title",
        "body": "Test body",
        "pinned": False,
        "closed": False,
        "abuse_flaggers": [],
        "votes": {"up_count": 4},
        "comments_count": 5,
        "unread_comments_count": 3,
        "read": False,
        "endorsed": False
    }]
    # The same thread in the Discussion API serialization.
    expected_threads = [{
        "id": "test_thread",
        "course_id": unicode(self.course.id),
        "topic_id": "test_topic",
        "group_id": None,
        "group_name": None,
        "author": self.author.username,
        "author_label": None,
        "created_at": "2015-04-28T00:00:00Z",
        "updated_at": "2015-04-28T11:11:11Z",
        "type": "discussion",
        "title": "Test Title",
        "raw_body": "Test body",
        "rendered_body": "<p>Test body</p>",
        "pinned": False,
        "closed": False,
        "following": False,
        "abuse_flagged": False,
        "voted": True,
        "vote_count": 4,
        "comment_count": 5,
        "unread_comment_count": 3,
        "comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
        "endorsed_comment_list_url": None,
        "non_endorsed_comment_list_url": None,
        "editable_fields": ["abuse_flagged", "following", "voted"],
        "read": False,
        "has_endorsed": False
    }]
    # Two pages registered so the response carries a `next` link.
    self.register_get_threads_response(source_threads, page=1, num_pages=2)
    response = self.client.get(self.url, {"course_id": unicode(self.course.id)})
    self.assert_response_correct(
        response,
        200,
        {
            "results": expected_threads,
            "next": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&page=2",
            "previous": None,
            "text_search_rewrite": None,
        }
    )
s |
petroniocandido/pyFTS | pyFTS/partitioners/partitioner.py | Python | gpl-3.0 | 12,188 | 0.003774 | from pyFTS.common import FuzzySet, Membership
import numpy as np
from scipy.spatial import KDTree
import matplotlib.pylab as plt
import logging
class Partitioner(object):
"""
Universe of Discourse partitioner. Split data on several fuzzy sets
"""
def __init__(self, **kwargs):
"""
Universe of Discourse partitioner scheme. Split data on several fuzzy sets
"""
self.name = kwargs.get('name',"")
"""partitioner name"""
self.partitions = kwargs.get('npart', 10)
"""The number of universe of discourse partitions, i.e., the number of fuzzy sets that will be created"""
self.sets = {}
self.membership_function = kwargs.get('func', Membership.trimf)
"""Fuzzy membership function (pyFTS.common.Membership)"""
self.setnames = kwargs.get('names', None)
"""list of partitions names. If None is given the partitions will be auto named with prefix"""
self.prefix = kwargs.get('prefix', 'A')
"""prefix of auto generated partition names"""
self.transformation = kwargs.get('transformation', None)
"""data transformation to be applied on data"""
self.indexer = kwargs.get('indexer', None)
self.variable = kwargs.get('variable', None)
"""In a multivariate context, the variable that contains this partitioner"""
self.type = kwargs.get('type', 'common')
"""The type of fuzzy sets that are generated by this partitioner"""
self.ordered_sets = None
"""A ordered list of the fuzzy sets names, sorted by their middle point"""
self.kdtree = None
"""A spatial index to help in fuzzyfication"""
self.margin = kwargs.get("margin", 0.1)
"""The upper and lower exceeding margins for the known UoD. The default value is .1"""
self.lower_margin = kwargs.get("lower_margin", self.margin)
"""Specific lower exceeding margins for the known UoD. The default value is the self.margin parameter"""
self.upper_margin = kwargs.get("lower_margin", self.margin)
"""Specific upper exceeding margins for the known UoD. The default value is the self.margin parameter"""
if kwargs.get('preprocess',True):
data = kwargs.get('data',[None])
if self.indexer is not None:
ndata = self.indexer.get_data(data)
else:
ndata = data
if self.transformation is not None:
ndata = self.transformation.apply(ndata)
else:
ndata = data
if self.indexer is not None:
ndata = self.indexer.get_data(ndata)
_min = np.nanmin(ndata)
if _min == -np.inf:
ndata[ndata == -np.inf] = 0
_min = np.nanmin(ndata)
self.min = float(_min * (1 + self.lower_margin) if _min < 0 else _min * (1 - self.lower_margin))
_max = np.nanmax(ndata)
self.max = float(_max * (1 + self.upper_margin) if _max > 0 else _max * (1 - self.upper_margin))
self.sets = self.build(ndata)
self.partitions = len(self.sets)
if self.ordered_sets is None and self.setnames is not None:
self.ordered_sets = self.setnames[:len(self.sets)]
else:
self.ordered_sets = FuzzySet.set_ordered(self.sets)
del(ndata)
def extractor(self, x):
    """Extract a single primitive type from an structured instance.

    Identity by default; subclasses may override for structured inputs.
    """
    return x
def build(self, data):
    """
    Perform the partitioning of the Universe of Discourse

    Abstract hook: concrete partitioners override this and return the
    dict of fuzzy sets; the base implementation does nothing.

    :param data: training data
    :return:
    """
    pass
def get_name(self, counter):
"""
Find the name of the fuzzy set given its counter id.
:param counter: The number of the fuzzy set
:return: String
"""
return self.prefix + str(counter) if self.setnames is None else self.setnames[counter]
def lower_set(self):
"""
Return the fuzzy set on lower bound of the universe of discourse.
:return: Fuzzy Set
"""
return self.sets[self.ordered_sets[0]]
def upper_set(self):
"""
Return the fuzzy set on upper bound of the universe of discourse.
:return: Fuzzy Set
"""
return self.sets[self.ordered_sets[-1]]
def build_index(self):
    """Build the KDTree spatial index over (lower, centroid, upper) points.

    Used by `search` to find the fuzzy sets nearest to an input value.
    """
    points = []
    #self.index = {}
    for ct, key in enumerate(self.ordered_sets):
        fset = self.sets[key]
        points.append([fset.lower, fset.centroid, fset.upper])
        #self.index[ct] = fset.name
    import sys
    # KDTree construction can recurse deeply for many sets; temporarily
    # raise the recursion limit and restore it afterwards.
    sys.setrecursionlimit(100000)
    self.kdtree = KDTree(points)
    sys.setrecursionlimit(1000)
    def fuzzyfy(self, data, **kwargs):
        """
        Fuzzyfy the input data according to this partitioner fuzzy sets.

        :param data: input value to be fuzzyfied
        :keyword alpha_cut: the minimal membership value to be considered on fuzzyfication (only for mode='sets')
        :keyword method: the fuzzyfication method (fuzzy: all fuzzy memberships, maximum: only the maximum membership)
        :keyword mode: the fuzzyfication mode (sets: return the fuzzy sets names, vector: return a vector with the membership
        values for all fuzzy sets, both: return a list with tuples (fuzzy set, membership value) )

        :returns a list with the fuzzyfied values, depending on the mode
        """
        # Sequences are handled by recursing element-wise.
        if isinstance(data, (tuple, list, np.ndarray)):
            ret = []
            for inst in data:
                mv = self.fuzzyfy(inst, **kwargs)
                ret.append(mv)
            return ret

        alpha_cut = kwargs.get('alpha_cut', 0.)
        mode = kwargs.get('mode', 'sets')
        method = kwargs.get('method', 'fuzzy')

        # Only the sets near `data` (per the KD-tree search) can have
        # nonzero membership; all other entries of `mv` stay 0.
        nearest = self.search(data, type='index')
        mv = np.zeros(self.partitions)

        for ix in nearest:
            tmp = self[ix].membership(data)
            # Memberships below the alpha cut are zeroed out.
            mv[ix] = tmp if tmp >= alpha_cut else 0.

        ix = np.ravel(np.argwhere(mv > 0.))

        # No set matched: `data` is outside the known UoD, so snap it to
        # the boundary set chosen by check_bounds.
        if ix.size == 0:
            mv[self.check_bounds(data)] = 1.

        if method == 'fuzzy' and mode == 'vector':
            return mv
        elif method == 'fuzzy' and mode == 'sets':
            try:
                ix = np.ravel(np.argwhere(mv > 0.))
                sets = [self.ordered_sets[i] for i in ix if i < self.partitions]
                return sets
            except Exception as ex:
                # NOTE(review): any failure here is silently mapped to
                # None; callers must handle a None result.
                return None
        elif method == 'maximum' and mode == 'sets':
            mx = max(mv)
            ix = np.ravel(np.argwhere(mv == mx))
            return self.ordered_sets[ix[0]]
        elif mode == 'both':
            ix = np.ravel(np.argwhere(mv > 0.))
            sets = [(self.ordered_sets[i], mv[i]) for i in ix]
            return sets
        # NOTE(review): any other method/mode combination falls through
        # and implicitly returns None.
def defuzzyfy(self, values, mode='both'):
if not isinstance(values, list):
values = [values]
num = []
den = []
for val in values:
fset = val[0]
mv = val[1]
if mode == 'both':
num.append( self.sets[fset].centroid * mv )
den.append(mv)
elif mode == 'sets':
num.append(self.sets[fset].centroid)
elif mode == 'vector':
num.append(self.sets[self.ordered_sets[fset]].centroid * mv)
den.append(mv)
else:
raise Exception('Unknown deffuzyfication mode')
if mode in ('both','vector'):
return np.nansum(num) / np.nansum(den)
else:
return np.nanmean(num)
def check_bounds(self, data):
"""
Check if the input data is outside the known Universe of Discourse and, if it is, round it to the closest
fuzzy set.
:param data: input data to be verified
:return: the index of the closest fuzzy set when data is outside de universe of discourse or None if
the data is inside the UoD.
"""
if data < self.min:
return 0
elif data > self. |
InstitutoPascal/campuswebpro | controllers/notas.py | Python | agpl-3.0 | 4,034 | 0.006696 | # controlador de ejemplo para carga inicial / consulta de notas por alumno
def index():
    """Search a student by DNI (national id number) and redirect to the
    grade-loading page for that student (web2py controller action)."""
    # Build a table-less search form with a single DNI field.
    form = SQLFORM.factory(
        Field("dni", "integer"),
    )
    if form.accepts(request.vars, session):
        # Look the student up by the submitted DNI.
        q = db.alumnos.dni == form.vars.dni
        alumno = db(q).select().first()
        if alumno:
            # Found: redirect to the grade-loading action for this student.
            redirect(URL(f=cargar, vars={'alumnoid': alumno.alumnoid}))
        else:
            response.flash = "Alumno no encontrado"
    response.view = "generic.html"  # TODO: make a real view
    # BUG FIX: the key was "form " (trailing space), which made the form
    # unreachable as `form` in the view.
    return {"form": form}
def cargar():
    """Grade-loading page: render one editable final-grade input per
    subject for the selected student, and persist submitted values
    (update existing grade rows, insert missing ones)."""
    # get the parameter passed as a URL variable
    alumnoid = request.vars['alumnoid']
    # look up the student
    alumno = db.alumnos[alumnoid]
    # get the career / study plan
    #carreraid = alumno.carreraid ...
    # look up the subjects:
    q = db.materias.materiaid>0 # TODO: filter by career/study plan
    orden = db.materias.materiaid # TODO: improve the ordering (e.g. by course)
    materias = db(q).select(orderby=orden)
    # fetch the grades
    q = db.notas.alumnoid == alumnoid
    q &= db.notas.calificacionid == 5 # filter final grades only
    #q &= db.notas.periodoid == ... # TODO: filter other fields
    notas = db(q).select()
    # build a dict to handle them more easily {materiaid: nota}
    notas = dict([(nota.materiaid, nota.nota) for nota in notas])
    # table header:
    filas = [TR(
        TH("ID"),
        TH("Materia"),
        TH("Nota"),
        )]
    # iterate the subjects building the table
    for materia in materias:
        filas.extend([
            TR(
                TD(materia.materiaid),
                TD(materia.nombre),
                # one input per subject, named "nota.<materiaid>" so the
                # save loop below can recover the subject id
                TD(INPUT(requires=IS_EMPTY_OR(IS_INT_IN_RANGE(0,11)),
                         _name='nota.%s' % (materia.materiaid),
                         _value=notas.get((materia.materiaid), "..."),
                         _size="3", _style="width: 30px;")
                   )
                )])
    filas.append(TR(TD(INPUT(_type="submit"), _colspan=3,
                       _style="text-align: center;")))
    # assemble the form
    form = FORM(TABLE(filas, _class="compacta", _width="100%",
                      _cellpadding="0", _cellspacing="0",
                      _style="padding: 0; margin: 0;"),
                _style="padding: 0; margin: 0;")
    # validate the form:
    if form.accepts(request.vars, session):
        # iterate the submitted fields and save:
        for var in form.vars.keys():
            if "." in var:
                # split the field name (see INPUT above)
                n, materiaid = var.split(".")
                # value entered for this field
                val = form.vars[var]
                # look up the current record to update it (if it exists)
                q = db.notas.materiaid == materiaid
                q &= db.notas.alumnoid == alumnoid
                q &= db.notas.calificacionid == 5
                # update the record (returns 0 rows if it does not exist)
                actualizados = db(q).update(nota=val)
                if not actualizados:
                    # insert the record since it does not exist yet
                    db.notas.insert(
                        alumnoid=alumnoid,
                        materiaid=materiaid,
                        calificacionid=5, # final grade
                        nota=val,
                        # TODO: book and page number, other data
                        )
        # message for the user and redirect to the listing
        session.flash = "Datos aceptados!"
        redirect(URL(f="index"))
    elif form.errors:
        response.flash = 'revise los errores!'
    response.view = "generic.html" # TODO: make a real view
    return dict(form=form)
|
andir/ganeti | lib/storage/gluster.py | Python | bsd-2-clause | 14,117 | 0.005454 | #
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Gluster storage class.
This class is very similar to FileStorage, given that Gluster when mounted
behaves essentially like a regular file system. Unlike RBD, there are no
special provisions for block device abstractions (yet).
"""
import logging
import os
import socket
from ganeti import utils
from ganeti import errors
from ganeti import netutils
from ganeti import constants
from ganeti import ssconf
from ganeti.utils import io
from ganeti.storage import base
from ganeti.storage.filestorage import FileDeviceHelper
class GlusterVolume(object):
"""This class represents a Gluster volume.
Volumes are uniquely identified by:
- their IP address
- their port
- the volume name itself
Two GlusterVolume objects x, y with same IP address, port and volume name
are considered equal.
"""
  def __init__(self, server_addr, port, volume, _run_cmd=utils.RunCmd,
               _mount_point=None):
    """Creates a Gluster volume object.

    @type server_addr: str
    @param server_addr: The address to connect to
    @type port: int
    @param port: The port to connect to (Gluster standard is 24007)
    @type volume: str
    @param volume: The gluster volume to use for storage.
    @param _run_cmd: command runner (overridable for tests)
    @param _mount_point: mount point override (only used by tests)

    """
    self.server_addr = server_addr
    # Resolve the hostname eagerly; a bad address fails here, at
    # construction time, rather than at first use.
    server_ip = netutils.Hostname.GetIP(self.server_addr)
    self._server_ip = server_ip
    port = netutils.ValidatePortNumber(port)
    self._port = port
    self._volume = volume
    if _mount_point: # tests
      self.mount_point = _mount_point
    else:
      self.mount_point = ssconf.SimpleStore().GetGlusterStorageDir()

    self._run_cmd = _run_cmd
  @property
  def server_ip(self):
    """Resolved IP address of the Gluster server (read-only)."""
    return self._server_ip
  @property
  def port(self):
    """Validated TCP port of the Gluster server (read-only)."""
    return self._port
  @property
  def volume(self):
    """Name of the Gluster volume (read-only)."""
    return self._volume
def __eq__(self, other):
return (self.server_ip, self.port, self.volume) == \
(other.server_ip, other.port, other.volume)
def __repr__(self):
return """GlusterVolume("{ip}", {port}, "{volume}")""" \
.format(ip=self.server_ip, port=self.port, volume=self.volume)
def __hash__(self):
return (self.server_ip, self.port, self.volume).__hash__()
  def _IsMounted(self):
    """Checks if we are mounted or not.

    @rtype: bool
    @return: True if this volume is mounted.

    """
    # Guard against a missing mount point directory before asking the OS.
    if not os.path.exists(self.mount_point):
      return False

    return os.path.ismount(self.mount_point)
  def _GuessMountFailReasons(self):
    """Try and give reasons why the mount might've failed.

    Best-effort diagnostics only: checks the mount point, a known
    mount.glusterfs parser pitfall, and basic TCP reachability of the
    Gluster server.

    @rtype: str
    @return: A semicolon-separated list of problems found with the current setup
             suitable for display to the user.

    """
    reasons = []

    # Does the mount point exist?
    if not os.path.exists(self.mount_point):
      reasons.append("%r: does not exist" % self.mount_point)

    # Okay, it exists, but is it a directory?
    elif not os.path.isdir(self.mount_point):
      reasons.append("%r: not a directory" % self.mount_point)

    # If, for some unfortunate reason, this folder exists before mounting:
    #
    #   /var/run/ganeti/gluster/gv0/10.0.0.1:30000:gv0/
    #   '--------- cwd ------------'
    #
    # and you _are_ trying to mount the gluster volume gv0 on 10.0.0.1:30000,
    # then the mount.glusterfs command parser gets confused and this command:
    #
    #   mount -t glusterfs 10.0.0.1:30000:gv0 /var/run/ganeti/gluster/gv0
    #                      '-- remote end --' '------ mountpoint -------'
    #
    # gets parsed instead like this:
    #
    #   mount -t glusterfs 10.0.0.1:30000:gv0 /var/run/ganeti/gluster/gv0
    #                      '-- mountpoint --' '----- syntax error ------'
    #
    # and if there _is_ a gluster server running locally at the default remote
    # end, localhost:24007, then this is not a network error and therefore... no
    # usage message gets printed out. All you get is a Byson parser error in the
    # gluster log files about an unexpected token in line 1, "". (That's stdin.)
    #
    # Not that we rely on that output in any way whatsoever...

    parser_confusing = io.PathJoin(self.mount_point,
                                   self._GetFUSEMountString())
    if os.path.exists(parser_confusing):
      reasons.append("%r: please delete, rename or move." % parser_confusing)

    # Let's try something else: can we connect to the server?
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
      sock.connect((self.server_ip, self.port))
      sock.close()
    except socket.error as err:
      reasons.append("%s:%d: %s" % (self.server_ip, self.port, err.strerror))

    reasons.append("try running 'gluster volume info %s' on %s to ensure"
                   " it exists, it is started and it is using the tcp"
                   " transport" % (self.volume, self.server_ip))

    return "; ".join(reasons)
def _GetFUSEMountString(self):
"""Return the string FUSE needs to mount this volume.
@rtype: str
"""
return "-o server-port={port} {ip}:/{volume}" \
.format(port=self.port, ip=self.server_ip, volume=self.volume)
def GetKVMMountString(self, path):
"""Return the string KVM needs to use this volume.
@rtype: str
"""
ip = self.server_ip
if netutils.IPAddress.GetAddressFamily(ip) == socket.AF_INET6:
ip = "[%s]" % ip
return "gluster://{ip}:{port}/{volume}/{path}" \
.format(ip=ip, port=self.port, volume=self.volume, path=path)
def Mount(self):
"""Try and mount the volume. No-op if the volume is already mounted.
@raises BlockDeviceError: if the mount was unsuccessful
@rtype: context manager
@return: A simple context manager that lets you use this volume for
short lived operations like so::
with volume.mount():
# Do operations on volume
# Volume is now unmounted
"""
class _GlusterVolumeContextManager(object):
def __init__(self, volume):
self.volume = volume
def __enter__(self):
# We're already mounted.
return self
def __exit__(self, *exception_information):
self.volume.Unmount()
return False # do not swallow exceptions.
if self._IsMounted():
return _GlusterVolumeContextManager(self)
command = ["mount", |
"-t", "glusterfs",
self._GetFUSEMountString(),
self.mount_point]
io.Makedirs(self.mount_point)
self._run_cmd(" ".join(command),
# Why set cwd? Because it's an area we control. If,
# for some unfortunate reason, this folder exists:
# "/ | %s/" % _GetFUSEMountString()
# ...then the gluster parser gets confused and treats
# _GetFUSEMountString() as your mount point and
|
tchellomello/home-assistant | homeassistant/components/roku/config_flow.py | Python | apache-2.0 | 4,682 | 0.000854 | """Config flow for Roku."""
import logging
from typing import Any, Dict, Optional
from urllib.parse import urlparse
from rokuecp import Roku, RokuError
import voluptuous as vol
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_SERIAL,
)
from homeassistant.config_entries import CONN_CLASS_LOCAL_POLL, ConfigFlow
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN # pylint: disable=unused-import
DATA_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str})
ERROR_CANNOT_CONNECT = "cannot_connect"
ERROR_UNKNOWN = "unknown"
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: HomeAssistantType, data: Dict) -> Dict:
    """Validate the user input allows us to connect.

    Data has the keys from DATA_SCHEMA with values provided by the user.

    May raise rokuecp.RokuError when the device is unreachable; callers
    handle that case. Returns the device's friendly name and serial.
    """
    session = async_get_clientsession(hass)
    roku = Roku(data[CONF_HOST], session=session)
    device = await roku.update()

    return {
        "title": device.info.name,
        "serial_number": device.info.serial_number,
    }
class RokuConfigFlow(ConfigFlow, domain=DOMAIN):
    """Handle a Roku config flow (manual user entry and SSDP discovery)."""

    VERSION = 1
    CONNECTION_CLASS = CONN_CLASS_LOCAL_POLL

    def __init__(self):
        """Set up the instance."""
        # Host/name gathered during SSDP discovery, reused by later steps.
        self.discovery_info = {}

    @callback
    def _show_form(self, errors: Optional[Dict] = None) -> Dict[str, Any]:
        """Show the form to the user."""
        return self.async_show_form(
            step_id="user",
            data_schema=DATA_SCHEMA,
            errors=errors or {},
        )

    async def async_step_import(
        self, user_input: Optional[Dict] = None
    ) -> Dict[str, Any]:
        """Handle configuration by yaml file."""
        # YAML import reuses the interactive flow unchanged.
        return await self.async_step_user(user_input)

    async def async_step_user(
        self, user_input: Optional[Dict] = None
    ) -> Dict[str, Any]:
        """Handle a flow initialized by the user."""
        if not user_input:
            return self._show_form()

        errors = {}

        try:
            info = await validate_input(self.hass, user_input)
        except RokuError:
            # Connection problems are recoverable: re-show the form.
            _LOGGER.debug("Roku Error", exc_info=True)
            errors["base"] = ERROR_CANNOT_CONNECT
            return self._show_form(errors)
        except Exception:  # pylint: disable=broad-except
            # Anything unexpected aborts the flow entirely.
            _LOGGER.exception("Unknown error trying to connect")
            return self.async_abort(reason=ERROR_UNKNOWN)

        # The serial number uniquely identifies the device across flows.
        await self.async_set_unique_id(info["serial_number"])
        self._abort_if_unique_id_configured(updates={CONF_HOST: user_input[CONF_HOST]})

        return self.async_create_entry(title=info["title"], data=user_input)

    async def async_step_ssdp(
        self, discovery_info: Optional[Dict] = None
    ) -> Dict[str, Any]:
        """Handle a flow initialized by discovery."""
        host = urlparse(discovery_info[ATTR_SSDP_LOCATION]).hostname
        name = discovery_info[ATTR_UPNP_FRIENDLY_NAME]
        serial_number = discovery_info[ATTR_UPNP_SERIAL]

        await self.async_set_unique_id(serial_number)
        self._abort_if_unique_id_configured(updates={CONF_HOST: host})

        # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
        self.context.update({"title_placeholders": {"name": name}})
        self.discovery_info.update({CONF_HOST: host, CONF_NAME: name})

        try:
            await validate_input(self.hass, self.discovery_info)
        except RokuError:
            _LOGGER.debug("Roku Error", exc_info=True)
            return self.async_abort(reason=ERROR_CANNOT_CONNECT)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unknown error trying to connect")
            return self.async_abort(reason=ERROR_UNKNOWN)

        return await self.async_step_ssdp_confirm()

    async def async_step_ssdp_confirm(
        self, user_input: Optional[Dict] = None
    ) -> Dict[str, Any]:
        """Handle user-confirmation of discovered device."""
        # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
        if user_input is None:
            return self.async_show_form(
                step_id="ssdp_confirm",
                description_placeholders={"name": self.discovery_info[CONF_NAME]},
                errors={},
            )

        return self.async_create_entry(
            title=self.discovery_info[CONF_NAME],
            data=self.discovery_info,
        )
|
zacharyvoase/pathobject | src/pathobject.py | Python | unlicense | 15,069 | 0.001128 | # -*- coding: utf-8 -*-
"""pathobjec | t.py - A utility class for operating on pathnames."""
import codecs
import fnmatch
import glob
import hashlib
import os
import shutil
import sys
import warnings
__all__ = ["Path"]
__version__ = "0.0.1"
## Some functional utilities to save code later on.
def update_wrapper(wrapper, wra | pped):
"""Update a wrapper function to look like the wrapped function."""
for attr in ('__module__', '__name__', '__doc__'):
value = getattr(wrapped, attr, None)
if value:
setattr(wrapper, attr, value)
wrapper.__dict__.update(getattr(wrapped, '__dict__', {}))
return wrapper
def wrap(function, doc=None):
    """Wrap a basic `os.path` function to return `Path` instances."""
    def method(self, *args, **kwargs):
        result = function(self, *args, **kwargs)
        # Re-wrap the plain string result in the caller's Path subclass.
        return type(self)(result)
    method = update_wrapper(method, function)
    if doc:
        method.__doc__ = doc
    return method
def pmethod(name):
    """Return a proxy method to a named function on the current path module."""
    def proxy(self, *args, **kwargs):
        return getattr(self._path, name)(self, *args, **kwargs)
    return proxy
def defined_if(predicate):
    """
    Declare a method as only defined if `self` meets a given predicate.

        >>> class Test(object):
        ...     x = defined_if(lambda self: True)(lambda self: 1)
        ...     y = defined_if(lambda self: False)(lambda self: 1)
        >>> Test().x()
        1
        >>> Test().y # doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        AttributeError: <lambda> not defined for <pathobject.Test object at 0x...>
    """
    # Three layers: `decorator` receives the method, `wrapper` is the
    # property getter that checks the predicate per-instance, and
    # `function` is the bound-like callable handed back to the caller.
    def decorator(method):
        def wrapper(self):
            # Attribute access itself fails when the predicate is false,
            # so the method appears "not defined" on this instance.
            if not predicate(self):
                raise AttributeError("%s not defined for %r" % (method.__name__, self))
            def function(*args, **kwargs):
                return method(self, *args, **kwargs)
            return update_wrapper(function, method)
        return property(wrapper)
    return decorator
def normalize_line_endings(text, linesep=u'\n'):
    """
    Normalize a string's line endings to `linesep` (default <LF>).

    The provided string can be either a `str` or a `unicode`.

    Pass `linesep=''` to remove line endings entirely. This only makes sense
    when operating on a single line.
    """
    # Byte strings only know CR/LF conventions; unicode additionally has
    # NEL (U+0085) and LINE SEPARATOR (U+2028). Order matters: multi-char
    # sequences collapse first so they are not double-counted.
    if isinstance(text, str):
        replacements = (('\r\n', '\n'), ('\r', '\n'), ('\n', linesep))
    else:
        replacements = ((u'\r\n', u'\n'), (u'\r\x85', u'\n'), (u'\r', u'\n'),
                        (u'\x85', u'\n'), (u'\u2028', u'\n'),
                        (u'\n', linesep))
    for old, new in replacements:
        text = text.replace(old, new)
    return text
class Path(unicode):
"""A utility class for operating on pathnames."""
_path = os.path
    def __repr__(self):
        # e.g. Path(u'/usr') -- delegates to unicode.__repr__ for quoting.
        return "%s(%s)" % (type(self).__name__, unicode.__repr__(self))
__add__ = wrap(lambda self, other: unicode(self) + other)
__radd__ = wrap(lambda self, other: other + self)
__div__ = wrap(pmethod('join'), "Shortcut for `os.path.join()`.")
__truediv__ = __div__
    # @classmethod
    def cwd(cls):
        """Return the current working directory as a `Path`."""
        return cls(os.getcwdu())
    # classmethod applied by assignment (decorator syntax avoided; see the
    # commented-out line above).
    cwd = classmethod(cwd)
    # @classmethod
    def for_path_module(cls, pathmod, name=None):
        """
        Return a `Path` class for the given path module.

        This allows you to use `Path` to perform NT path manipulation on UNIX
        machines and vice versa.

        Example:

            >>> import ntpath
            >>> NTPath = Path.for_path_module(ntpath, name="NTPath")
            >>> NTPath(u'C:\\\\A\\\\B\\\\C').splitdrive()
            (NTPath(u'C:'), u'\\\\A\\\\B\\\\C')
        """
        if name is None:
            name = cls.__name__
        # Dynamically build a subclass whose `_path` is the given module.
        return type(name, (cls,), {'_path': pathmod})
    # classmethod applied by assignment (see `cwd` above).
    for_path_module = classmethod(for_path_module)
# Simple proxy methods or properties.
is_absolute = pmethod('isabs')
absolute = wrap(pmethod('abspath'))
normcase = wrap(pmethod('normcase'))
normalize = wrap(pmethod('normpath'))
realpath = wrap(pmethod('realpath'))
joinpath = wrap(pmethod('join'))
expanduser = wrap(pmethod('expanduser'))
expandvars = wrap(pmethod('expandvars'))
dirname = wrap(pmethod('dirname'))
basename = pmethod('basename')
parent = property(dirname, None, None,
"""Property synonym for `os.path.dirname()`.
Example:
>>> Path('/usr/local/lib/libpython.so').parent
Path(u'/usr/local/lib')
""")
name = property(basename, None, None,
"""Property synonym for `os.path.basename()`.
Example:
>>> Path('/usr/local/lib/libpython.so').name
u'libpython.so'
""")
ext = property(lambda self: self._path.splitext(self)[1], None, None,
"""Return the file extension (e.g. '.py').""")
drive = property(lambda self: self._path.splitdrive(self)[0], None, None,
"""Return the drive specifier (e.g. "C:").""")
    def splitpath(self):
        """
        Return `(p.parent, p.name)`.

        Example:

            >>> Path('/usr/local/lib/libpython.so').splitpath()
            (Path(u'/usr/local/lib'), u'libpython.so')
        """
        # Only the directory part is re-wrapped; the leaf stays a string.
        parent, child = self._path.split(self)
        return type(self)(parent), child
    def splitdrive(self):
        """
        Return `(p.drive, <the rest of p>)`.

        If there is no drive specifier, `p.drive` is empty (as is always the
        case on UNIX), so the result will just be `(Path(u''), u'')`.

        Example:

            >>> import ntpath
            >>> import posixpath
            >>> Path.for_path_module(ntpath)('C:\\\\A\\\\B\\\\C').splitdrive()
            (Path(u'C:'), u'\\\\A\\\\B\\\\C')
            >>> Path.for_path_module(posixpath)('/a/b/c').splitdrive()
            (Path(u''), u'/a/b/c')
        """
        drive, rel = self._path.splitdrive(unicode(self))
        return type(self)(drive), rel
def splitext(self):
"""
Return `(<base filename>, extension)`.
Splits the filename on the last `.` character, and returns both pieces.
The extension is prefixed with the `.`, so that the following holds:
>>> p = Path('/some/path/to/a/file.txt.gz')
>>> a, b = p.splitext()
>>> a + b == p
True
Example:
>>> Path('/home/zack/filename.tar.gz').splitext()
(Path(u'/home/zack/filename.tar'), u'.gz')
"""
filename, extension = self._path.splitext(self)
return type(self)(filename), extension
def stripext(self):
"""
Remove one file extension from the path.
Example:
>>> Path('/home/guido/python.tar.gz').stripext()
Path(u'/home/guido/python.tar')
"""
return self.splitext()[0]
    # @defined_if(lambda self: hasattr(self._path, 'splitunc'))
    def splitunc(self):
        # Only meaningful on path modules that support UNC paths (ntpath);
        # elsewhere attribute access raises AttributeError via defined_if.
        unc, rest = self._path.splitunc(self)
        return type(self)(unc), rest
    splitunc = defined_if(lambda self: hasattr(self._path, 'splitunc'))(splitunc)

    uncshare = property(lambda self: self.splitunc()[0], None, None,
        """The UNC mount point for this path. Empty for paths on local drives.""")
def splitall(self):
"""
Return a list of the path components in this path.
The first item in the list will be a `Path`. Its value will be either
`path.curdir`, `path.pardir`, empty, or the root directory of this path
(e.g. `'/'` or `'C:\\'`). The other items in the list will be strings.
By definition, `result[0].joinpath(*result[1:])` will yield the original
path.
>>> p = Path(u'/home/guido/python.tar.gz')
>>> parts = p.splitall()
>>> parts
[Path(u'/'), u'home', u'guido', u'python.tar.gz']
>>> parts[0].joinpath(*parts[1:])
Path(u'/home/guido/python.tar.gz')
"""
parts = []
location = self
while location not in (self._ |
sfstpala/hello | hello/__main__.py | Python | gpl-3.0 | 1,225 | 0 | # Copyright (c) 2014 Stefano Palazzo | <stefano.palazzo@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public Lic | ense as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
hello
Usage:
hello (--help | --version)
Options:
--help -h display this help message and exit
--version print version information and exit
'''
import sys
import docopt
import hello
def main(argv=sys.argv[1:]):
    """CLI entry point: parse command-line arguments with docopt.

    Returns 2 on usage errors; otherwise returns 0 or None (both map to
    a successful exit status via sys.exit below).

    NOTE(review): the default argv is evaluated once at import time --
    callers that mutate sys.argv afterwards should pass argv explicitly.
    """
    try:
        docopt.docopt(__doc__, argv=argv, version=hello.__version__)
    except docopt.DocoptExit as e:
        # Usage error: echo docopt's usage message and exit non-zero.
        print(str(e), file=sys.stderr)
        return 2
    except SystemExit as e:
        # --help / --version: docopt prints the text and raises SystemExit.
        return 0


if __name__ == "__main__":  # pragma: no cover
    sys.exit(main())
|
jseabold/statsmodels | statsmodels/tsa/statespace/dynamic_factor_mq.py | Python | bsd-3-clause | 186,145 | 0 | # -*- coding: utf-8 -*-
"""
Dynamic factor model.
Author: Chad Fulton
License: BSD-3
"""
from collections import OrderedDict
from warnings import warn
import numpy as np
import pandas as pd
from scipy.linalg import cho_factor, cho_solve, LinAlgError
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.validation import int_like
from statsmodels.tools.decorators import cache_readonly
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.multivariate.pca import PCA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace._quarterly_ar1 import QuarterlyAR1
from statsmodels.tsa.vector_ar.var_model import VAR
from statsmodels.tools.tools import Bunch
from statsmodels.tools.validation import string_like
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tsa.statespace import mlemodel, initialization
from statsmodels.tsa.statespace.tools import (
companion_matrix, is_invertible, constrain_stationary_univariate,
constrain_stationary_multivariate, unconstrain_stationary_univariate,
unconstrain_stationary_multivariate)
from statsmodels.tsa.statespace.kalman_smoother import (
SMOOTHER_STATE, SMOOTHER_STATE_COV, SMOOTHER_STATE_AUTOCOV)
from statsmodels.base.data import PandasData
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.summary import Summary
from statsmodels.iolib.tableformatting import fmt_params
class FactorBlock(dict):
    """
    Helper class for describing and indexing a block of factors.

    Parameters
    ----------
    factor_names : tuple of str
        Tuple of factor names in the block (in the order that they will appear
        in the state vector).
    factor_order : int
        Order of the vector autoregression governing the factor block dynamics.
    endog_factor_map : pd.DataFrame
        Mapping from endog variable names to factor names.
    state_offset : int
        Offset of this factor block in the state vector.
    has_endog_Q : bool
        Flag if the model contains quarterly data.

    Notes
    -----
    The goal of this class is, in particular, to make it easier to retrieve
    indexes of subsets of the state vector that are associated with a
    particular block of factors.

    - `factors_ix` is a matrix of indices, with rows corresponding to factors
      in the block and columns corresponding to lags
    - `factors` is vec(factors_ix) (i.e. it stacks columns, so that it is
      `factors_ix.ravel(order='F')`). Thinking about a VAR system, the first
      k*p elements correspond to the equation for the first variable. The next
      k*p elements correspond to the equation for the second variable, and so
      on. It contains all of the lags in the state vector, which is max(5, p)
    - `factors_ar` is the subset of `factors` that have nonzero coefficients,
      so it contains lags up to p.
    - `factors_L1` only contains the first lag of the factors
    - `factors_L1_5` contains the first - fifth lags of the factors

    """
    def __init__(self, factor_names, factor_order, endog_factor_map,
                 state_offset, k_endog_Q):
        self.factor_names = factor_names
        self.k_factors = len(self.factor_names)
        self.factor_order = factor_order
        # Restrict the map to this block's factors only.
        self.endog_factor_map = endog_factor_map.loc[:, factor_names]
        self.state_offset = state_offset
        self.k_endog_Q = k_endog_Q
        # With quarterly series the state must carry at least 5 monthly
        # lags (see `factors_L1_5` and the class Notes above).
        if self.k_endog_Q > 0:
            self._factor_order = max(5, self.factor_order)
        else:
            self._factor_order = self.factor_order
        self.k_states = self.k_factors * self._factor_order

        # Save items so the block can also be used as a plain dict.
        self['factors'] = self.factors
        self['factors_ar'] = self.factors_ar
        self['factors_ix'] = self.factors_ix
        self['factors_L1'] = self.factors_L1
        self['factors_L1_5'] = self.factors_L1_5

    @property
    def factors_ix(self):
        """Factor state index array, shaped (k_factors, lags)."""
        # i.e. the position in the state vector of the second lag of the third
        # factor is factors_ix[2, 1]
        # ravel(order='F') gives e.g (f0.L1, f1.L1, f0.L2, f1.L2, f0.L3, ...)
        # while
        # ravel(order='C') gives e.g (f0.L1, f0.L2, f0.L3, f1.L1, f1.L2, ...)
        o = self.state_offset
        return np.reshape(o + np.arange(self.k_factors * self._factor_order),
                          (self._factor_order, self.k_factors)).T

    @property
    def factors(self):
        """Factors and all lags in the state vector (max(5, p))."""
        # Note that this is equivalent to factors_ix with ravel(order='F')
        o = self.state_offset
        return np.s_[o:o + self.k_factors * self._factor_order]

    @property
    def factors_ar(self):
        """Factors and all lags used in the factor autoregression (p)."""
        o = self.state_offset
        return np.s_[o:o + self.k_factors * self.factor_order]

    @property
    def factors_L1(self):
        """Factors (first block / lag only)."""
        o = self.state_offset
        return np.s_[o:o + self.k_factors]

    @property
    def factors_L1_5(self):
        """Factors plus four lags."""
        o = self.state_offset
        return np.s_[o:o + self.k_factors * 5]
class DynamicFactorMQStates(dict):
"""
Helper class for describing and indexing the state vector.
Parameters
----------
k_endog_M : int
Number of monthly (or non-time-specific, if k_endog_Q=0) variables.
k_endog_Q : int
Number of quarterly variables.
endog_names : list
Names of the endogenous variables.
factors : int, list, or dict
Integer giving the number of (global) factors, a list with the names of
(global) factors, or a dictionary with:
- keys : names of endogenous variables
- values : lists of factor names.
If this is an integer, then the factor names will be 0, 1, ....
factor_orders : int or dict
Integer describing the order of the vector autoregression (VAR)
governing all factor block dynamics or dictionary with:
- keys : factor name or tuples of factor names in a block
- values : integer describing the VAR order for that factor block
If a dictionary, this defines the order of the factor blocks in the
state vector. Otherwise, factors are ordered so that factors that load
on more variables come first (and then alphabetically, to break ties).
factor_multiplicities : int or dict
This argument provides a convenient way to specify multiple factors
that load identically on variables. For example, one may want two
"global" factors (factors that load on all variables) that evolve
jointly according to a VAR. One could specify two global factors in the
`factors` argument and specify that they are in the same block in the
`factor_orders` argument, but it is easier to specify a single global
factor in the `factors` argument, and set the order in the
`factor_orders` argument, and then set the factor multiplicity to 2.
This argument must be an integer describing the factor multiplicity for
all factors or dictionary with:
- keys : factor name
- values : integer describing the factor multiplicity for the factors
in the given block
idiosyncratic_ar1 : bool
Whether or not to model the idiosyncratic component for each series as
an AR(1) process. If False, the idiosyncratic component is instead
modeled as white noise.
Attributes
----------
k_endog : int
Total number of endogenous variables.
k_states : int
Total number of state variables (those associated with the factors and
those associated with the idiosyncratic disturbances).
k_posdef : int
Total number of state disturbance terms (those associated with the
factors and those associated with the idiosyncratic disturbances).
k_endog_M : int
Number of monthly (or non-time-specific, if k_endog_Q=0) variables.
|
starwels/starwels | test/functional/feature_notifications.py | Python | mit | 4,003 | 0.003747 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Starwels developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.test_framework import StarwelsTestFramework
from test_framework.util import assert_equal, wait_until, connect_nodes_bi
class NotificationsTest(StarwelsTestFramework):
    """Exercise the -alertnotify, -blocknotify and -walletnotify callbacks."""

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True

    def setup_network(self):
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt")
        self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt")

        # -alertnotify and -blocknotify on node0, walletnotify on node1
        self.extra_args = [["-blockversion=2",
                            "-alertnotify=echo %%s >> %s" % self.alert_filename,
                            "-blocknotify=echo %%s >> %s" % self.block_filename],
                           ["-blockversion=211",
                            "-rescan",
                            "-walletnotify=echo %%s >> %s" % self.tx_filename]]
        super().setup_network()

    def _wait_for_hash_file(self, filename, line_count):
        # Every notification line is a 64-char hex hash plus a newline, so
        # the file must reach at least line_count * 65 bytes.  Wait at most
        # 10 seconds for the node to flush them all.
        wait_until(
            lambda: os.path.isfile(filename) and os.stat(filename).st_size >= (line_count * 65),
            timeout=10)

    def _read_sorted_lines(self, filename):
        # Return the file's lines sorted, for order-independent comparison.
        with open(filename, 'r') as fh:
            return sorted(fh.read().splitlines())

    def run_test(self):
        self.log.info("test -blocknotify")
        block_count = 10
        blocks = self.nodes[1].generate(block_count)

        self._wait_for_hash_file(self.block_filename, block_count)
        # File content should equal the generated block hashes.
        assert_equal(sorted(blocks), self._read_sorted_lines(self.block_filename))

        self.log.info("test -walletnotify")
        self._wait_for_hash_file(self.tx_filename, block_count)
        # File content should equal the generated transaction hashes.
        txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
        assert_equal(sorted(txids_rpc), self._read_sorted_lines(self.tx_filename))
        os.remove(self.tx_filename)

        self.log.info("test -walletnotify after rescan")
        # Restarting the node forces a rescan, which must re-emit the
        # wallet notifications for the same transactions.
        self.restart_node(1)
        connect_nodes_bi(self.nodes, 0, 1)
        self._wait_for_hash_file(self.tx_filename, block_count)
        txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
        assert_equal(sorted(txids_rpc), self._read_sorted_lines(self.tx_filename))

        # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st.
        self.log.info("test -alertnotify")
        self.nodes[1].generate(41)
        self.sync_all()

        # Give starwelsd 10 seconds to write the alert notification.
        wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10)
        with open(self.alert_filename, 'r', encoding='utf8') as fh:
            alert_text = fh.read()

        # Mining more up-version blocks should not produce further alerts.
        self.nodes[1].generate(2)
        self.sync_all()
        with open(self.alert_filename, 'r', encoding='utf8') as fh:
            alert_text2 = fh.read()

        self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
        assert_equal(alert_text, alert_text2)


if __name__ == '__main__':
    NotificationsTest().main()
|
qisanstudio/qstudio-launch | src/studio/launch/commands/contrib.py | Python | mit | 2,178 | 0.000459 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import os
import codecs
import shutil
from termcolor import colored
from jinja2 import Template
from studio.launch import ROOT_PATH
from jinja2 import Environment, FileSystemLoader
# Directory holding this project's Jinja2 templates, and a shared Jinja2
# environment that resolves template names relative to it.
JDIR = os.path.join(ROOT_PATH, 'jinja')
JENV = Environment(loader=FileSystemLoader(JDIR))
class cd(object):
    """Context manager that runs its body inside another working directory.

    On entry the process chdirs into the target directory; on exit the
    previous working directory is restored, even if the body raised.
    """

    def __init__(self, newPath):
        self.newPath = newPath
        self.savedPath = None  # filled in by __enter__

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)
def mkdirs(path):
    """Create *path* (and any missing parents), ignoring failures.

    Behaves like ``mkdir -p``: creating an already-existing directory is a
    no-op.  Fix: the original printed the "create directory" message before
    attempting the creation, so it also claimed success when the directory
    already existed or could not be created; now it only reports actual
    creations.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Directory already exists (or cannot be created) -- best effort,
        # matching the original's silent swallow of OSError.
        pass
    else:
        print(colored('create directory %s' % path, 'blue'))
def writefp(path, text):
    """Write *text* to *path* encoded as UTF-8, logging the created file."""
    # codecs.open in 'wb' mode writes the text through the utf-8 codec
    # without newline translation.
    with codecs.open(path, 'wb', 'utf-8') as handle:
        print(colored('create file %s' % path, 'white'))
        handle.write(text)
def build_structure(command, dist='.', tpl='default', **kwargs):
    """Instantiate the template tree for *command*/*tpl* under *dist*.

    Walks the template directory: every directory name is rendered through
    Jinja2 and re-created; every ``*.jinja2`` file is rendered with
    **kwargs and written without the suffix; any other file is copied
    verbatim.
    """
    with cd(dist):
        template_root = os.path.join(JDIR, command, tpl)
        for root, dirs, files in os.walk(template_root):
            # Path of *root* relative to the Jinja loader dir and to the
            # template root, respectively.
            reldir = os.path.relpath(root, start=JDIR)
            relcurdir = os.path.relpath(root, start=template_root)

            for dname in dirs:
                rendered_dir = Template(os.path.join(relcurdir, dname)).render(**kwargs)
                mkdirs(rendered_dir)

            for fname in files:
                is_template = fname.endswith('.jinja2')
                # Drop the '.jinja2' suffix from rendered files.
                target_name = fname[:-len('.jinja2')] if is_template else fname
                target_path = Template(os.path.join(relcurdir, target_name)).render(**kwargs)
                if is_template:
                    content = JENV.get_template(os.path.join(reldir, fname)).render(**kwargs)
                    writefp(target_path, content)
                else:
                    shutil.copyfile(os.path.join(JDIR, reldir, fname), target_path)


if __name__ == '__main__':
    build_structure('pypi', appname='daydayup')
|
odahoda/noisicaa | noisicaa/ui/graph/toolbox.py | Python | gpl-2.0 | 4,154 | 0.000722 | #!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import enum
import functools
import logging
import os.path
from typing import Any
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from noisicaa import constants
from noisicaa.ui import ui_base
logger = logging.getLogger(__name__)
class Tool(enum.Enum):
    """Editing tools offered by the graph toolbox."""

    SELECT = 'select'
    INSERT = 'insert'
class Toolbox(ui_base.ProjectMixin, QtWidgets.QWidget):
    """Vertical palette of graph-editing tools.

    Hosts one checkable action per ``Tool`` plus a "reset view" action,
    and re-emits user interaction as Qt signals.
    """

    # Emitted with the newly active Tool whenever the selection changes.
    toolChanged = QtCore.pyqtSignal(Tool)
    # Emitted when the user triggers the "Reset view" action.
    resetViewTriggered = QtCore.pyqtSignal()

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)

        icon_size = QtCore.QSize(32, 32)

        self.__select_tool_action = QtWidgets.QAction("Selection tool", self)
        self.__select_tool_action.setIcon(QtGui.QIcon(
            os.path.join(constants.DATA_DIR, 'icons', 'edit-select.svg')))
        self.__select_tool_action.setCheckable(True)
        self.__select_tool_action.triggered.connect(functools.partial(self.setTool, Tool.SELECT))
        self.__select_tool_button = self.__make_tool_button(self.__select_tool_action, icon_size)

        self.__insert_tool_action = QtWidgets.QAction("Insert tool", self)
        self.__insert_tool_action.setIcon(QtGui.QIcon(
            os.path.join(constants.DATA_DIR, 'icons', 'list-add.svg')))
        self.__insert_tool_action.setCheckable(True)
        self.__insert_tool_action.triggered.connect(functools.partial(self.setTool, Tool.INSERT))
        self.__insert_tool_button = self.__make_tool_button(self.__insert_tool_action, icon_size)

        self.__reset_view_action = QtWidgets.QAction("Reset view", self)
        self.__reset_view_action.setIcon(QtGui.QIcon(
            os.path.join(constants.DATA_DIR, 'icons', 'zoom-original.svg')))
        self.__reset_view_action.triggered.connect(self.resetViewTriggered.emit)
        self.__reset_view_button = self.__make_tool_button(self.__reset_view_action, icon_size)

        layout = QtWidgets.QVBoxLayout()
        layout.setContentsMargins(2, 2, 2, 2)
        layout.setSpacing(2)
        layout.addWidget(self.__select_tool_button)
        layout.addWidget(self.__insert_tool_button)
        layout.addSpacing(8)
        layout.addWidget(self.__reset_view_button)
        layout.addStretch(1)
        self.setLayout(layout)

        self.__tool_actions = {
            Tool.SELECT: self.__select_tool_action,
            Tool.INSERT: self.__insert_tool_action,
        }
        self.__current_tool = Tool.SELECT
        self.__sync_checkmarks()

    def __make_tool_button(self, action: QtWidgets.QAction,
                           icon_size: QtCore.QSize) -> QtWidgets.QToolButton:
        # All toolbox buttons share the same flat look and icon size.
        button = QtWidgets.QToolButton(self)
        button.setAutoRaise(True)
        button.setIconSize(icon_size)
        button.setDefaultAction(action)
        return button

    def __sync_checkmarks(self) -> None:
        # Check exactly the action belonging to the current tool.
        for candidate, action in self.__tool_actions.items():
            action.setChecked(candidate == self.__current_tool)

    def setTool(self, tool: Tool) -> None:
        """Make *tool* the active tool; emits ``toolChanged`` unless unchanged.

        Fix: the original's checkmark loop reused the name ``tool`` as its
        loop variable, shadowing this parameter; the loop now lives in a
        helper with a distinct name.
        """
        if tool == self.__current_tool:
            return

        self.__current_tool = tool
        self.__sync_checkmarks()
        self.toolChanged.emit(self.__current_tool)
|
matus-chochlik/various | atmost/presentation/tools/plot-link-actu-pred.py | Python | mit | 3,063 | 0.001959 | #!/usr/bin/python3 -B
# coding=utf8
# ------------------------------------------------------------------------------
import os
import sys
import math
import random
import matplotlib.pyplot as plt
import matplotlib.ticker as pltckr
import matplotlib.lines as pltlns
import numpy as np
from statistics import mean
from common import DictObject, PresArgParser
# ------------------------------------------------------------------------------
class ArgParser(PresArgParser):
    """Presentation argument parser that also accepts multiple input paths."""
    # --------------------------------------------------------------------------
    def __init__(self, **kw):
        super().__init__(**kw)
        self._add_multi_input_arg()
# ------------------------------------------------------------------------------
def make_argparser():
    """Build the command-line parser, named after this script file."""
    script_name = os.path.basename(__file__)
    return ArgParser(prog=script_name)
# ------------------------------------------------------------------------------
def do_plot(options):
    """Plot actual vs. predicted link-time memory use for each link target."""
    data = {}
    worst_error = 0.0
    for input_path in options.input_path:
        stats = DictObject.loadJson(input_path)
        for run in stats:
            for tgt in run.targets:
                try:
                    data[tgt.name]["predicted"].append(tgt.linked.predicted)
                    data[tgt.name]["actual"].append(tgt.linked.actual)
                    worst_error = max(worst_error, tgt.linked.error)
                except KeyError:
                    # First sample for this target only creates the empty
                    # series; its values are not recorded (original behavior).
                    data[tgt.name] = {"predicted": [], "actual": []}
                except AttributeError:
                    # Target without link information; skip it.
                    pass

    def prediction_gap(series):
        # Positive when the link actually used more memory than predicted.
        return mean(series["actual"]) - mean(series["predicted"])

    values = sorted(data.values(), key=prediction_gap)
    x = range(len(values))
    y = np.array([prediction_gap(d) for d in values])
    a = np.array([mean(d["actual"]) for d in values])
    e = np.array([worst_error for _ in values])
    z = np.array([0.0 for _ in values])

    fig, spls = plt.subplots(2, 1)
    options.initialize(plt, fig)

    avp = spls[0]
    avp.xaxis.set_major_locator(pltckr.NullLocator())
    avp.set_xlabel("Link targets")
    avp.set_ylabel("Actual - Predicted [GB]")
    avp.grid()

    act = spls[1]
    act.xaxis.set_major_locator(pltckr.NullLocator())
    act.set_xlabel("Link targets")
    act.set_ylabel("Actual [GB]")
    act.grid()

    # Red where the prediction error bound was exceeded, green elsewhere.
    avp.fill_between(
        x, y, e,
        where=(e <= y),
        interpolate=True,
        color="red",
        alpha=0.7
    )
    avp.fill_between(
        x, y, e,
        where=(y < e),
        interpolate=True,
        color="green",
        alpha=0.3
    )
    avp.plot(x, y, color="black")
    avp.scatter(x, y, color="black")
    avp.plot(x, e, color="red")
    avp.plot(x, z, color="black")

    act.plot(x, a, color="black")
    act.scatter(x, a, color="black")

    options.finalize(plt)
# ------------------------------------------------------------------------------
def main():
    """Script entry point: parse the options and render the plot."""
    parsed = make_argparser().make_options()
    do_plot(parsed)
    return 0
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    exit(main())
# ------------------------------------------------------------------------------
|
arkmaxim/grpc | test/distrib/python/distribtest.py | Python | bsd-3-clause | 1,732 | 0 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of condi | tions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products deri | ved from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import grpc

# This code doesn't do much but makes sure the native extension is loaded
# which is what we are testing here.
channel = grpc.insecure_channel('localhost:1000')
del channel

# Fix: the original used the Python 2 print statement ("print 'Success!'"),
# which is a SyntaxError on Python 3; the function form works on both.
print('Success!')
|
porduna/appcomposer | alembic/versions/435d360d3398_add_position_and_category_in_each_.py | Python | bsd-2-clause | 1,306 | 0.009188 | """Add position and category in each message, so as to order the XML
Revision ID: 435d360d3398
Revises: 2a68ba66c32b
Create Date: 2015-05-03 19:00:38.124617
"""
# revision identifiers, used by Alembic.
revision = '435d360d3398'
down_revision = '2a68ba66c32b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the 'category' and 'position' columns plus their indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    table = 'ActiveTranslationMessages'
    op.add_column(table, sa.Column('category', sa.Unicode(length=255), nullable=True))
    op.add_column(table, sa.Column('position', sa.Integer(), nullable=True))
    for column in ('category', 'position'):
        op.create_index(u'ix_ActiveTranslationMessages_%s' % column, table,
                        [column], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the 'category' and 'position' indexes and columns again."""
    ### commands auto generated by Alembic - please adjust! ###
    table = 'ActiveTranslationMessages'
    # Indexes first, then the columns, mirroring upgrade() in reverse.
    for index_name in (u'ix_ActiveTranslationMessages_position',
                       u'ix_ActiveTranslationMessages_category'):
        op.drop_index(index_name, table_name=table)
    for column in ('position', 'category'):
        op.drop_column(table, column)
    ### end Alembic commands ###
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.