repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Level-Up/python_log_io | log_io_handler/__init__.py | Python | gpl-3.0 | 1,702 | 0 | import logging
from emitter import Send
"""
log.io logging handler
======================
Handler for the python logging framework for log.io stateless TCP API
http://logio.org/
Usage:
::
from log_io_handler import LogIOHandler
Logger.addHandler(LogIOHandler)
#or dict_config
'handlers'
'log_io': {
'level': 'DEBUG',
'class': 'log_io_handler.LogIOHandler',
#optional configs
'logstream': 'EXAMPLE_STREAM',
'node': 'EXAMPLE_NODE',
'host': 'EXAMPLE_HOST',
'port': 28777,
}
logstream: name of log.io stream default (PythonStream)
node: name of log.io node default (PythonNode)
host: log.io server domain or ip address (default localhost)
port: log.io api port (default 28777)
"""
VERSION = '1.1'
AUTHOR = "Raymond McGinlay"
EMAIL = "raymond@thisislevelup.com"
URL = "www.thisislevelup.com"
class LogIOHandler(logging.Handler):
"""A log handler that transmits log entries to log.io server.
If the request is passed as the first argument to the log record,
request data will be provided in the report.
"""
def __init__(self, logstream='PythonStream',
node='PythonNode',
host='localh | ost',
port=28777):
logging.Handler.__init__(self)
self.logstream = logstream
self.node = node
self.h | ost = host
self.port = port
def emit(self, record):
message = self.format(record)
msg_string = "+log|%s|%s|info|%s\r\n" %\
(self.logstream, self.node, message)
Send.config_dict = {'host': self.host, 'port': self.port}
Send(msg_string)
|
awidegreen/tr064 | .ycm_extra_conf.py | Python | bsd-2-clause | 4,073 | 0.021606 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INC | LUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_co | re
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
#'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
'-std=c++11',
#'-stdlib=libc++',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem', '.',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
'-isystem', 'include',
'-isystem', 'contrib',
'-isystem', 'contrib/curlcpp/include',
'-isystem', 'lib',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def FlagsForFile( filename, **kwargs ):
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
MattWellie/FastqSimulator | read_condenser.py | Python | gpl-2.0 | 6,301 | 0.003968 | import os
__author__ = 'mwelland'
class Condenser:
"""
A class called from fqRunner.py, used to combine several FastQ files into a single file pair
For each gene and transcript, the modified contents are combined with the unmodified contents
For single transcripts this will mean each variant appears as a perfect heterozygote
For multiple transcripts the variant will be progressively diluted
- Transcript 1: modified dictionary has 100% of variant locations as the changed bases
default dictionary has 100% unmodified bases
combined this gives 50% modified at variant positions
This is written to the output file
- Transcript 2: Same again, with a default copy combined with a 100% variant copy
This will be added to the final output file along with transcript 1
assuming the variants are not made at the same positions, this will give
75% normal bases, 25% variant at each location
So far this has not been a problem, as the read depth is more than enough to pick these up
Transcripts will not always overlap as well, so the dilution of every site is a worst-case
It may be counter-productive to have mentioned this, but its too late now
"""
def __init__(self, gene_list, run_number):
"""
:param gene_list: A set object containing all genes processed
:param run_number: The numerical identifier of the run
"""
self.gene_list = gene_list
self.run_number = run_number
self.file_list = os.listdir('fastQs')
self.gene_dictionary = {}
def run(self):
"""
Control method for condenser, this is called from fqRunner
"""
for gene in self.gene_list:
# The length of the name is used in substring selection to find appropriate files
name_length = len(gene)
# identify all files in the fastQ folder relevant to the gene
# This includes R1 & 2s, as well as separating the modified and unmodified
self.populate_dict(gene, name_length)
r1_pairs = self.create_file_pairings(gene, name_length, 1)
r2_pairs = self.create_file_pairings(gene, name_length, 2)
self.combine_files(r1_pairs)
self.combine_files(r2_pairs)
self.erase_old_files()
def populate_dict(self, gene, name_length):
"""
The dictionary will contain the names of all files relating to the selected gene. This method uses the gene
name and the transcript numbers to identify the unchanged sequence and the accompanying changed transcripts.
This is done to allow each separate transcript to be paired with the unchanged sequence, so as to represent
a homozygous variant as well as improving the read depth for each area.
:param gene:
:param name_length:
:return:
"""
self.gene_dictionary = {1: {'ref': '', 'transcripts': []}, 2: {'ref': '', 'transcripts': []}}
read1s = []
read2s = []
fq_list = [name for name in self.file_list if name[:name_length] == gene]
# Separate read 1s from read 2s
for filename in fq_list:
first_part_of_name = filename.split('.')[0]
if first_part_of_name[-1] == '1':
read1s.append(filename)
elif first_part_of_name[-1] == '2':
read2s.append(filename)
# For each of Read 1 and 2, separate reference from altered
for filename in read1s:
transcript = filename[name_length:name_length+1]
if transcript == '0':
self.gene_dictionary[1]['ref'] = filename
else:
self.gene_dictionary[1]['transcripts'].append(filename)
for filename in read2s:
transcript = filename[name_length:name_length+1]
if transcript == '0':
self.gene_dictionary[2]['ref'] = filename
else:
self.gene_dictionary[2]['transcripts'].append(filename)
def create_file_pairings(self, gene, name_length, read):
"""
:param gene: gene name
:param name_length: length of gene name
:param read: 1 or 2
:return: a list of 3-element tuples
element 1: reference file name (unchanged seq)
element 2: changed file name
element 3: a name for the file once combined
"""
file_pairs = []
read_dict = self.gene_dictionary[read]
for filename in read_dict['transcripts']:
file_ | pairs.append([read_dict['ref'], filename, self.create_file_name(gene, name_length, filename, read)])
return file_pairs
@staticmethod
def create_file_name(gene, name_length, filename, read):
"""
Creates a new file name which combines the gene name, the transcript number and the read number
| """
transcript = filename[name_length:name_length+1]
filename = '%s_transcript%s_R%d.fq' % (gene, transcript, read)
return filename
@staticmethod
def combine_files(read_pairs):
"""
This will combine the created files in memory and write the output to a new file
the filename for the new file will be the one created in create_file_name
:param read_pairs:
:return:
"""
for triple_tuple in read_pairs:
outfile_name = triple_tuple[2]
with open(os.path.join('fastQs', outfile_name), 'w') as output_file:
reference_file_name = triple_tuple[0]
reference_file = open(os.path.join('fastQs', reference_file_name), "r")
contents = reference_file.readlines()
output_file.writelines(contents)
alt_file = triple_tuple[1]
contents = open(os.path.join('fastQs', alt_file), 'r').readlines()
output_file.writelines(contents)
def erase_old_files(self):
"""
Deletes all the old files which have now been combined
"""
for filename in self.file_list:
os.remove(os.path.join('fastQs', filename))
|
nanolearning/edx-platform | common/lib/xmodule/xmodule/modulestore/xml_importer.py | Python | agpl-3.0 | 34,363 | 0.001484 | import logging
import os
import mimetypes
from path import path
import json
from .xml import XMLModuleStore, ImportSystem, ParentTracker
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xmodule.x_module import XModuleDescriptor
from xmodule.modulestore.keys import UsageKey
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.contentstore.content import StaticContent
from .inheritance import own_metadata
from xmodule.errortracker import make_error_tracker
from .store_utilities import rewrite_nonportable_content_links
import xblock
from xmodule.tabs import CourseTabList
from xmodule.modulestore.exceptions import InvalidLocationError
log = logging.getLogger(__name__)
def import_static_content(
course_data_path, static_content_store,
target_course_id, subpath='static', verbose=False):
remap_dict = {}
# now import all static assets
static_dir = course_data_path / subpath
try:
with open(course_data_path / 'policies/assets.json') as f:
policy = json.load(f)
except (IOError, ValueError) as err:
# xml backed courses won't have this file, only exported courses;
# so, its absence is not really an exception.
policy = {}
verbose = True
mimetypes.add_type('application/octet-stream', '.sjson')
mimetypes.add_type('application/octet-stream', '.srt')
mimetypes_list = mimetypes.types_map.values()
for dirname, _, filenames in os.walk(static_dir):
for filename in filenames:
content_path = os.path.join(dirname, filename)
if filename.endswith('~'):
if verbose:
log.debug('skipping static content %s...', content_path)
continue
if verbose:
log.debug('importing static content %s...', content_path)
try:
with open(content_path, 'rb') as f:
data = f.read()
except IOError:
if filename.startswith('._'):
# OS X "companion files". See
# http://www.diigo.com/annotated/0c936fda5da4aa1159c189cea227e174
continue
# Not a 'hidden file', then re-raise exception
raise
# strip away leading path from the name
fullname_with_subpath = content_path.replace(static_dir, '')
if fullname_with_subpath.startswith('/'):
fullname_with_subpath = fullname_with_subpath[1:]
asset_key = StaticContent.compute_location(target_course_id, fullname_with_subpath)
policy_ele = policy.get(asset_key.path, {})
displayname = policy_ele.get('displayname', filename)
locked = policy_ele.get('locked', False)
mime_type = policy_ele.get('contentType')
# Check extracted contentType in list of all valid mimetypes
if not mime_type or mime_type not in mimetypes_list:
mime_type = mimetypes.guess_type(filename)[0] # Assign guessed mimetype
content = StaticContent(
asset_key, displayname, mime_type, data,
import_path=fullname_with_subpath, locked=locked
)
# first let's save a thumbnail so we can get back a thumbnail location
thumbnail_content, thumbnail_location = static_content_store.generate_thumbnail(content)
if thumbnail_content is not None:
content.thumbnail_location = thumbnail_location
# then commit the content
try:
static_content_store.save(content)
except Exception as err:
log.exception('Error importing {0}, error={1}'.format(
fullname_with_subpath, err
))
# store the remapping information which will be needed
# to subsitute in the module data
remap_dict[fullname_with_subpath] = asset_key
return remap_dict
def import_from_xml(
store, data_dir, course_dirs=None,
default_class='xmodule.raw_module.RawDescriptor',
load_error_modules=True, static_content_store=None,
target_course_id=None, verbose=False, draft_store=None,
do_import_static=True, create_new_course=False):
"""
Import the specified xml data_dir into the "store" modulestore,
using org and course as the location org and course.
course_dirs: If specified, the list of course_dirs to load. Otherwise, load
all course dirs
target_course_id is the CourseKey that all modules should be remapped to
after import off disk. We do this remapping as a post-processing step
because there's logic in the importing which expects a 'url_name' as an
identifier to where things are on disk
e.g. ../policies/<url_name>/policy.json as well as metadata keys in
the policy.json. so we need to keep the original url_name during import
:param do_import_static:
if False, then static files are not imported into the static content
store. This can be employed for courses which have substantial
unchanging static content, which is to inefficient to import every
time the course is loaded. Static content for some courses may also be
served directly by nginx, instead of going through django.
: create_new_course:
If True, then courses whose ids already exist in the store are not imported.
The check for existing courses is case-insensitive.
"""
xml_module_store = XMLModuleStore(
data_dir,
default_class=default_class,
course_dirs=course_dirs,
load_error_modules=load_error_modules,
xblock_mixins=store.xblock_mixins,
xblock_select=store.xblock_select,
)
# If we're going to remap the course_id, then we can only do that with
# a single course
if target_course_id: |
assert(len(xml_module_store.modules) == 1)
# NOTE: the XmlModuleStore does not implement get_items()
# which would be a preferable means to enumerate the entire collection
# of course modules. It will be left as a TBD to implement that
# method on XmlModuleStore.
course_items = []
for course_key in xm | l_module_store.modules.keys():
if target_course_id is not None:
dest_course_id = target_course_id
else:
dest_course_id = course_key
if create_new_course:
# this tests if exactly this course (ignoring case) exists; so, it checks the run
if store.has_course(dest_course_id, ignore_case=True):
log.debug(
"Skipping import of course with id, {0},"
"since it collides with an existing one".format(dest_course_id)
)
continue
else:
try:
store.create_course(dest_course_id.org, dest_course_id.offering)
except InvalidLocationError:
# course w/ same org and course exists and store is old mongo
log.debug(
"Skipping import of course with id, {0},"
"since it collides with an existing one".format(dest_course_id)
)
continue
try:
# turn off all write signalling while importing as this
# is a high volume operation on stores that need it
if hasattr(store, 'ignore_write_events_on_courses'):
store.ignore_write_events_on_courses.add(dest_course_id)
course_data_path = None
if verbose:
log.debug("Scanning {0} for course module...".format(course_key))
# Quick scan to get course module as we need some info from there.
# Also we need to make sure that the course module is committed
# first into the store
for module in xml_module_store.modules[course_key].itervalues():
if module.scope_ids.block_type == 'course':
course_data_path = path |
rcharp/Simple | dependencies/flask-user/flask_user/tests/tst_app.py | Python | bsd-2-clause | 7,602 | 0.00513 | import os
import datetime
from flask import Flask, render_template_string, request
from flask_babel import Babel
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import login_required, SQLAlchemyAdapter, UserManager, UserMixin
from flask_user import roles_required, confirm_email_required
app = Flask(__name__)
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
# Define the User data model. Make sure to add flask_user UserMixin!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
# User authentication information
username = db.Column(db.String(50), nullable=True, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# User email information
email = db.Column(db.String(255), nullable=True, unique=True)
confirmed_at = db.Column(db.DateTime())
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
roles = db.relationship('Role', secondary='user_roles',
backref=db.backref('users', lazy='dynamic'))
# Define UserEmail DataModel.
class UserEmail(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# User email information
email = db.Column(db.String(255), nullable=True, unique=True)
confirmed_at = db.Column(db.DateTime())
is_primary = db.Column(db.Boolean(), nullable=False, default=False)
# Relationship
user = db.relationship('User', uselist=False)
class UserInvitation(db.Model):
__tablename__ = 'user_invite'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), nullable=False)
# save the user of the invitee
invited_by_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# token used for registration page to identify user registering
token = db.Column(db.String(100), nullable=False, server_default='')
# Define the Role data model
class Role(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), unique=True)
# Define the UserRoles data model
class UserRoles(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
role_id = db.Column(db.Integer(), db.ForeignKey('role.id', ondelete='CASCADE'))
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///tst_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', 'email@example.com')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <noreply@example.com>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = os.getenv('MAIL_USE_SSL', True)
# Flask-User settings
USER_APP_NAME = " | AppName" # Used by email templates
USER_ENABLE_USERNAME = True
USER_ENABLE_EMAIL = True
US | ER_ENABLE_CONFIRM_EMAIL = True
USER_ENABLE_INVITATION = True
def init_app(app, test_config=None): # For automated tests
# Setup Flask and read config from ConfigClass defined above
app.config.from_object(__name__+'.ConfigClass')
# Load local_settings.py if file exists # For automated tests
try: app.config.from_object('local_settings')
except: pass
# Load optional test_config # For automated tests
if test_config:
app.config.update(test_config)
# Initialize Flask extensions
babel = Babel(app) # Initialize Flask-Babel
mail = Mail(app) # Initialize Flask-Mail
# Reset all the database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserInvitationClass=UserInvitation)
user_manager = UserManager(db_adapter, app)
# Create regular 'member' user
if not User.query.filter(User.username=='member').first():
user = User(username='member', email='member@example.com', active=True,
password=user_manager.hash_password('Password1'), confirmed_at=datetime.datetime.utcnow())
db.session.add(user)
db.session.commit()
# Create 'user007' user with 'secret' and 'agent' roles
if not User.query.filter(User.username=='user007').first():
user1 = User(username='user007', email='user007@example.com', active=True,
password=user_manager.hash_password('Password1'))
user1.roles.append(Role(name='secret'))
user1.roles.append(Role(name='agent'))
db.session.add(user1)
db.session.commit()
# The '/' page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Home Page{%endtrans%}</h2>
<p><a href="{{ url_for('user.login') }}">{%trans%}Sign in{%endtrans%}</a></p>
{% endblock %}
""")
# The '/profile' page requires a logged-in user
@app.route('/user/profile')
@login_required # Use of @login_required decorator
@confirm_email_required
def user_profile_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Profile Page{%endtrans%}</h2>
<p> {%trans%}Hello{%endtrans%}
{{ current_user.username or current_user.email }},</p>
<p> <a href="{{ url_for('user.change_username') }}">
{%trans%}Change username{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.change_password') }}">
{%trans%}Change password{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.invite') }}">
{%trans%}Invite User{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.logout') }}?next={{ url_for('user.login') }}">
{%trans%}Sign out{%endtrans%}</a></p>
{% endblock %}
""")
# The '/special' page requires a user that has the 'special' AND ('sauce' OR 'agent') role.
@app.route('/special')
@roles_required('secret', ['sauce', 'agent']) # Use of @roles_required decorator
def special_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Special Page{%endtrans%}</h2>
{% endblock %}
""")
# For testing only
app.db = db
app.UserEmailClass = UserEmail
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5555, debug=True)
|
seismology/mc_kernel | tests/create_gabor_filter_test.py | Python | gpl-3.0 | 1,397 | 0 |
# coding: utf-8
# In[1]:
import filtering
import numpy as np
import matplotlib.pyplot as plt
# Load input time trace
signal_in_temp = np.loadtxt('../tests/gaborinput.txt')
len_signal = 1000
dt = 0.1
t = np.array(range(0, len_signal)) * dt
signal_in = np.zeros((len_signal, 1))
signal_in[:, 0] = signal_in_temp
# Define filter bank parameters
pmax = (2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0)
tshift = (0.0, 2.5, 0.0, 2.5, 0.0, 2.5, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0)
sigmaIfc = (0.3, 0.3, 0.5, 0.5, 0.7, 0.7, 0.3, 0.3, 0.5, 0.5, 0.7, 0.7)
fig = plt.figure(figsize=(15, 10))
# Apply each filter to reference trace and plot
for ifilt in range(0, len(pmax)):
ax = fig.add_subplot(4, 3, ifilt + 1)
signal_out_ref = filtering.gaborfilter(signal_in, dt=dt,
pmax=pmax[ifilt],
| nscale=1, fmult=2,
sigmaIfc=sigmaIfc[ifilt],
npad=2048,
tshift=(tshift[ifilt], 0))
fnam = '../tests/gaborresponse_%02d.txt' % (ifilt + 1)
with open(fnam, 'w') as f_response:
for y in signal_out_ref[0][:, 0, 0]:
f_response.write('%e\n' % y)
ax.plot(t, signal_out_ref[0][0:len_signal | , 0, 0], 'r')
fig.savefig('Gaborfilter_reference_data.png', dpi=200)
|
ghisvail/vispy | vispy/plot/tests/test_plot.py | Python | bsd-3-clause | 624 | 0.001603 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import vispy.plot as vp
from vispy.testing import (assert_raises, requires_application,
run_tests_if | _main)
@requires_application()
def test_figure_creation():
"""Test creating a figure"""
with vp.Fig(show=False) as fig:
fig[0, 0:2]
fig[1:3, 0:2]
ax_right = fig[1:3, 2]
assert fig[1:3, 2] is ax_right
# collision
assert_raises(ValueError, fi | g.__getitem__, (slice(1, 3), 1))
run_tests_if_main()
|
red-hood/calendarserver | txdav/caldav/datastore/scheduling/imip/test/test_inbound.py | Python | apache-2.0 | 15,369 | 0.002212 | ##
# Copyright (c) 2008-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks, succeed
from twisted.internet import reactor
from twisted.python.modules import getModule
from twisted.trial import unittest
from twistedcaldav.config import ConfigDict
from twistedcaldav.ical import Component
from txdav.caldav.datastore.scheduling.imip.inbound import IMIPReplyWork
from txdav.caldav.datastore.scheduling.imip.inbound import MailReceiver
from txdav.caldav.datastore.scheduling.imip.inbound import MailRetriever
from txdav.caldav.datastore.scheduling.imip.inbound import injectMessage
from txdav.caldav.datastore.scheduling.imip.inbound import shouldDeleteAllMail
from txdav.caldav.datastore.scheduling.imip.inbound import IMAP4DownloadProtocol
from txdav.common.datastore.test.util import CommonCommonTests
from twext.enterprise.jobqueue import JobItem
import email
class InboundTests(CommonCommonTests, unittest.TestCase):
@inlineCallbacks
def setUp(self):
super(InboundTests, self).setUp()
yield self.buildStoreAndDirectory()
self.receiver = MailReceiver(self.store, self.directory)
self.retriever = MailRetriever(
self.store, self.directory,
ConfigDict({
"Type" : "pop",
"UseSSL" : False,
"Server" : "example.com",
"Port" : 123,
"Username" : "xyzzy",
})
)
def decorateTransaction(txn):
txn._mailRetriever = self.retriever
self.store.callWithNewTransactions(decorateTransaction)
module = getModule(__name__)
self.dataPath = module.filePath.sibling("data")
def dataFile(self, name):
"""
Get the contents of a given data file from the 'data/mail' test
fixtures directory.
"""
return self.dataPath.child(name).getContent()
def test_checkDSNFailure(self):
data = {
'good_reply' : (False, None, None),
'dsn_failure_no_original' : (True, 'failed', None),
'dsn_failure_no_ics' : (True, 'failed', None),
'dsn_failure_with_ics' : (True, 'failed', '''BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:REQUEST
PRODID:-//example Inc.//iCal 3.0//EN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:STANDARD
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VEVENT
UID:1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C
DTSTART;TZID=US/Pacific:20080812T094500
DTEND;TZID=US/Pacific:20080812T104500
ATTENDEE;CUTYPE=INDIVIDUAL;CN=User 01;PARTSTAT=ACCEPTED:mailto:user01@exam
ple.com
ATTENDEE;CUTYPE=INDIVIDUAL;RSVP=TRUE;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-A
CTION;CN=nonexistant@example.com:mailto:nonexistant@example.com
CREATED:20080812T191857Z
DTSTAMP:20080812T191932Z
ORGANIZER;CN=User 01:mailto:xyzzy+8e16b897-d544-4217-88e9-a363d08
46f6c@example.com
SEQUENCE:2
SUMMARY:New Event
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
'''),
}
for filename, expected in data.iteritems():
msg = email.message_from_string(self.dataFile(filename))
self.assertEquals(self.receiver.checkDSN(msg), expected)
@inlineCallbacks
def test_processDSN(self):
template = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:REQUEST
PRODID:-//example Inc.//iCal 3.0//EN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:DAYLIGHT
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
UID:1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C
DTSTART;TZID=US/Pacific:20080812T094500
DTEND;TZID=US/Pacific:20080812T104500
ATTENDEE;CUTYPE=INDIVIDUAL;CN=User 01;PARTSTAT=ACCEPTED:mailto:user01@exam
ple.com
ATTENDEE;CUTYPE=INDIVIDUAL;RSVP=TRUE;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-A
CTION;CN=nonexistant@example.com:mailto:nonexistant@example.com
CREATED:20080812T191857Z
DTSTAMP:20080812T191932Z
ORGANIZER;CN=User 01:mailto:xyzzy+%s@example.com
SEQUENCE:2
SUMMARY:New Event
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
"""
# Make sure an unknown token is not processed
calBody = template % "bogus_token"
self.assertEquals(
(yield self.receiver.processDSN(calBody, "xyzzy")),
MailReceiver.UNKNOWN_TOKEN
)
# Make sure a known token *is* processed
txn = self.store.newTransaction()
record = (yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:user02@example.com",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C"
))
yield txn.commit()
calBody = template % record.token
result = (yiel | d self.receiver.processDSN(calBody, "xyzzy"))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReply(self):
# Make sure an unknown token in an older email is deleted
msg = em | ail.message_from_string(self.dataFile('good_reply_past'))
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.UNKNOWN_TOKEN_OLD)
# Make sure an unknown token is not processed
msg = email.message_from_string(self.dataFile('good_reply_future'))
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.UNKNOWN_TOKEN)
# Make sure a known token *is* processed
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:xyzzy@example.com",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
    @inlineCallbacks
    def test_processReplyMissingOrganizer(self):
        """A reply whose iCalendar body lacks an ORGANIZER property must
        still be injected once its token is known."""
        msg = email.message_from_string(self.dataFile('reply_missing_organizer'))
        # stick the token in the database first
        txn = self.store.newTransaction()
        yield txn.imipCreateToken(
            "urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
            "mailto:xyzzy@example.com",
            "1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
            token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
        )
        yield txn.commit()
        result = (yield self.receiver.processReply(msg))
        self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
        # Let the queued injection job drain before the test finishes.
        yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReplyMissingAttendee(self):
msg = email.message_from_string(self.dataFile('reply_missing_attendee'))
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:xyzzy@example.com",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
|
OCA/website | website_legal_page/hooks.py | Python | agpl-3.0 | 4,340 | 0.00023 | # Copyright 2020 Tecnativa - Alexandre Díaz
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from copy import deepcopy
from xml.sax.saxutils import escape
from lxml import etree as ElementTree
from odoo import SUPERUSER_ID, api
def _merge_views(env, xmlids):
    """Fold legacy standalone legal views into the new single legal page.

    For every website that customized one of the views whose ``key`` is in
    *xmlids*, build a fresh copy of the ``website_legal_page.legal_page``
    template, replace its table of contents and section area with the old
    views' content, and publish the result as a new ``website.page``.

    :param env: Odoo environment used for all ORM access.
    :param xmlids: list of ``ir.ui.view`` keys to merge into the new page.
    """
    old_view_ids = env["ir.ui.view"].search(
        [("key", "in", xmlids), ("active", "=", True)]
    )
    # Get only the edited version of the views (if has it)
    old_view_ids_edited = old_view_ids.filtered("website_id")
    old_view_ids_edited_keys = old_view_ids_edited.mapped("key")
    views_to_discard = env["ir.ui.view"]
    # Drop the generic (website-less) view whenever a website-specific
    # edited copy of the same key exists: the edited copy wins.
    for old_view in old_view_ids:
        if not old_view.website_id and old_view.key in old_view_ids_edited_keys:
            views_to_discard |= old_view
    old_view_ids -= views_to_discard
    new_website_page = env.ref("website_legal_page.legal_page_page")
    new_view_id = env.ref("website_legal_page.legal_page")
    # 'Dolly' separator element
    separator = ElementTree.fromstring(
        "<div class='s_hr text-left pt32 pb32' data-name='Separator'>"
        + "<hr class='s_hr_1px s_hr_solid border-600 w-100 mx-auto'/></div>"
    )
    # Replace new content with the old one per website
    website_ids = old_view_ids.mapped("website_id")
    for website_id in website_ids:
        new_xml = ElementTree.fromstring(new_view_id.arch)
        table_content_list = new_xml.xpath("//div[@id='section_list']/ul")[0]
        sections_content = new_xml.xpath("//div[@id='section_content']")[0]
        has_views_edited = any(
            old_view_ids_edited.filtered(lambda x: x.website_id == website_id)
        )
        # Remove 'IS A SAMPLE' alert
        if has_views_edited:
            alert = new_xml.xpath(
                "//section[@data-name='Title']//div[@data-name='Alert']"
            )[0]
            # find("..") resolves to the alert's parent element.
            alert.find("..").remove(alert)
        # Remove unused content
        for child in table_content_list.getchildren():
            table_content_list.remove(child)
        for child in sections_content.getchildren():
            sections_content.remove(child)
        views_done = env["ir.ui.view"]
        for old_view_id in old_view_ids:
            if old_view_id.website_id != website_id:
                continue
            # Anchor id is the second half of the view key (module.name).
            anchor_name = old_view_id.key.split(".")[1]
            # Insert item in table content list
            list_item = ElementTree.fromstring(
                "<li><p><a href='#{}'>{}</a></p></li>".format(
                    anchor_name, escape(old_view_id.name)
                )
            )
            table_content_list.append(list_item)
            # Insert section content
            old_xml = ElementTree.fromstring(old_view_id.arch)
            old_content = old_xml.xpath("//div[@id='wrap']")[0]
            # Separator must be deep-copied: lxml append() would otherwise
            # move the shared element instead of duplicating it.
            sections_content.append(deepcopy(separator))
            sections_content.append(
                ElementTree.fromstring(
                    "<a class='legal_anchor' id='%s'/>" % anchor_name
                )
            )
            for children in old_content.getchildren():
                sections_content.append(children)
            views_done |= old_view_id
        old_view_ids -= views_done
        # Create a new page with the changes
        view_id = env["ir.ui.view"].create(
            {
                "arch": ElementTree.tostring(new_xml, encoding="unicode"),
                "website_id": website_id.id,
                "key": new_view_id.key,
                "name": new_view_id.name,
                "type": "qweb",
            }
        )
        env["website.page"].create(
            {
                "name": new_website_page.name,
                "url": new_website_page.url,
                "view_id": view_id.id,
                "is_published": True,
                "website_id": website_id.id,
                "website_indexed": True,
                "website_published": True,
            }
        )
def post_init_hook(cr, registry):
    """Post-install hook: when ``website_sale`` is installed, fold its old
    ``website_sale.terms`` view into the new merged legal page."""
    with api.Environment.manage():
        env = api.Environment(cr, SUPERUSER_ID, {})
        installed_count = env["ir.module.module"].search_count(
            [("name", "=", "website_sale"), ("state", "=", "installed")]
        )
        # Only merge when website_sale is actually present on this database.
        if installed_count > 0:
            _merge_views(env, ["website_sale.terms"])
|
andreasots/shodan | irc/__init__.py | Python | mit | 3,983 | 0.005021 | import asyncio
import irc.parser
import pyparsing
def unescape(value):
    r"""Unescape an IRCv3 message-tag value in a single pass.

    Escape sequences per the IRCv3 message-tags specification:
    ``\:`` -> ``;``, ``\s`` -> space, ``\\`` -> ``\``, ``\r`` -> CR,
    ``\n`` -> LF.  An invalid escape yields the escaped character itself
    and a lone trailing backslash is dropped.

    A single left-to-right pass is required: the previous implementation
    chained str.replace() calls, which corrupted values such as ``\\n``
    (an escaped backslash followed by a literal ``n``) into a newline.
    """
    escapes = {":": ";", "s": " ", "\\": "\\", "r": "\r", "n": "\n"}
    out = []
    chars = iter(value)
    for ch in chars:
        if ch == "\\":
            # Consume the escaped character; "" if the string ends here.
            follower = next(chars, "")
            out.append(escapes.get(follower, follower))
        else:
            out.append(ch)
    return "".join(out)
class Connection:
    """A single IRC client connection with automatic reconnection.

    Incoming lines are parsed with the ``irc.parser`` grammar and
    dispatched to *handler* coroutine methods named ``on_<command>``
    (e.g. ``on_privmsg``, ``on_connect``); CTCP-framed PRIVMSGs go to
    ``ctcp_<tag>`` handlers instead.
    """
    def __init__(self, host, port, handler, loop=None):
        self.host = host
        self.port = port
        self.handler = handler
        self.loop = loop or asyncio.get_event_loop()
        # Set once run() has opened the connection; command_* methods
        # must not be called before that.
        self.writer = None
    @asyncio.coroutine
    def run(self):
        """Connect and read forever, reconnecting with exponential back-off."""
        wait_time = 1
        while True:
            try:
                reader, self.writer = yield from asyncio.open_connection(self.host, self.port, loop=self.loop)
                yield from self.signal("connect", self)
                while not reader.at_eof():
                    line = yield from reader.readline()
                    # A line without the CRLF terminator is a partial read
                    # (connection dropped mid-line); skip it.
                    if not line.endswith(b"\r\n"):
                        continue
                    # Any complete line counts as progress: reset back-off.
                    wait_time = 1
                    try:
                        tags, source, command, params = irc.parser.message.parseString(line.decode("utf-8", "replace"))
                    except pyparsing.ParseException as e:
                        print("Parse error while parsing %r: %s" % (line, e))
                        continue
                    # IRCv3 tag values arrive escaped; decode them here.
                    tags = {tag: unescape(value) for tag, value in tags}
                    params = list(params)
                    # Normalize the parsed prefix: either a server name
                    # string or a (nick, user, host) tuple.  The parse
                    # result presumably supports dict-style access for the
                    # named grammar groups -- confirm against irc.parser.
                    if "server" in source:
                        source = source["server"]
                    elif "nick" in source:
                        source = source["nick"]
                        if len(source) == 1:
                            source = (source[0], None, None)
                        elif len(source) == 2:
                            # Two parts: nick and host (no user component).
                            source = (source[0], None, source[1])
                        else:
                            source = (source[0], source[1], source[2])
                    else:
                        # No prefix at all: attribute the message to the
                        # server we connected to.
                        source = self.host
                    command = command.lower()
                    if command == "privmsg" and params[1][0] == "\x01" and params[1][-1] == "\x01": # CTCP message
                        # Strip the \x01 framing; first word is the CTCP tag.
                        tag, param = params[1][1:-1].split(" ", 1)
                        yield from self.signal("ctcp_" + tag.lower(), self, tags, source, [params[0], param])
                    else:
                        yield from self.signal(command, self, tags, source, params)
            except IOError as e:
                # Connection failures fall through to the back-off sleep.
                pass
            yield from asyncio.sleep(wait_time)
            wait_time *= 2
    def disconnect(self):
        # Closing the writer tears down the transport; run() will reconnect.
        self.writer.close()
    @asyncio.coroutine
    def signal(self, name, *args, **kwargs):
        """Dispatch to the handler's ``on_<name>`` coroutine, if defined."""
        callback = getattr(self.handler, "on_" + name, None)
        if callback is not None:
            yield from callback(*args, **kwargs)
    #
    # IRC commands
    #
    @asyncio.coroutine
    def command_raw(self, command):
        # Append the protocol-mandated CRLF and flush to the socket.
        self.writer.write((command+"\r\n").encode("utf-8"))
        yield from self.writer.drain()
    @asyncio.coroutine
    def password(self, password):
        yield from self.command_raw("PASS " + password)
    @asyncio.coroutine
    def nick(self, nick):
        yield from self.command_raw("NICK " + nick)
    @asyncio.coroutine
    def join(self, target):
        yield from self.command_raw("JOIN " + target)
    @asyncio.coroutine
    def cap_req(self, cap):
        # Request an IRCv3 capability (e.g. message tags).
        yield from self.command_raw("CAP REQ :" + cap)
    @asyncio.coroutine
    def ping(self, server1, server2=None):
        yield from self.command_raw("PING " + server1 + (" " + server2 if server2 is not None else ""))
    @asyncio.coroutine
    def pong(self, server1, server2=None):
        yield from self.command_raw("PONG " + server1 + (" " + server2 if server2 is not None else ""))
    @asyncio.coroutine
    def privmsg(self, target, message):
        yield from self.command_raw("PRIVMSG " + target + " :" + message)
|
jmartinezchaine/OpenERP | openerp/addons/auth_openid/res_users.py | Python | agpl-3.0 | 4,045 | 0.003214 | #!/usr/bin/env python
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########################## | ####################################################
from openerp.osv import osv, fields
import openerp.exceptions
import tools
import utils
class res_users(osv.osv):
    """Extend res.users with OpenID credentials (OpenERP 7 / Python 2 ORM).

    ``openid_key`` acts as an alternative password checked directly
    against the database in login()/check().
    """
    _inherit = 'res.users'
    # TODO create helper fields for autofill openid_url and openid_email -> http://pad.openerp.com/web-openid
    _columns = {
        'openid_url': fields.char('OpenID URL', size=1024),
        'openid_email': fields.char('OpenID Email', size=256,
            help="Used for disambiguation in case of a shared OpenID URL"),
        'openid_key': fields.char('OpenID Key', size=utils.KEY_LENGTH,
            readonly=True),
    }
    def _check_openid_url_email(self, cr, uid, ids, context=None):
        # Constraint: among active users with an OpenID URL, each
        # (openid_url, openid_email) pair must match exactly one user.
        return all(self.search_count(cr, uid, [('active', '=', True), ('openid_url', '=', u.openid_url), ('openid_email', '=', u.openid_email)]) == 1 \
            for u in self.browse(cr, uid, ids, context) if u.active and u.openid_url)
    def _check_openid_url_email_msg(self, cr, uid, ids, context):
        # Message shown when the uniqueness constraint above is violated.
        return "There is already an active user with this OpenID Email for this OpenID URL"
    _constraints = [
        (_check_openid_url_email, lambda self, *a, **kw: self._check_openid_url_email_msg(*a, **kw), ['active', 'openid_url', 'openid_email']),
    ]
    def copy(self, cr, uid, rid, defaults=None, context=None):
        # Duplicated users must not inherit OpenID credentials: blank the
        # URL/email and drop the key.
        reset_fields = 'openid_url openid_email'.split()
        reset_values = dict.fromkeys(reset_fields, False)
        if defaults is None:
            defaults = reset_values
        else:
            defaults = dict(reset_values, **defaults)
        defaults['openid_key'] = False
        return super(res_users, self).copy(cr, uid, rid, defaults, context)
    def login(self, db, login, password):
        # Try the regular password login first; on failure, treat
        # *password* as a stored OpenID key.
        result = super(res_users, self).login(db, login, password)
        if result:
            return result
        else:
            with utils.cursor(db) as cr:
                # Update the last-login timestamp and fetch the user id in
                # a single statement (RETURNING).
                cr.execute("""UPDATE res_users
                                SET date=now() AT TIME ZONE 'UTC'
                                WHERE login=%s AND openid_key=%s AND active=%s RETURNING id""",
                           (tools.ustr(login), tools.ustr(password), True))
                res = cr.fetchone()
                cr.commit()
                return res[0] if res else False
    def check(self, db, uid, passwd):
        # Accept either the normal password or the user's OpenID key.
        try:
            return super(res_users, self).check(db, uid, passwd)
        except openerp.exceptions.AccessDenied:
            if not passwd:
                raise
            with utils.cursor(db) as cr:
                cr.execute('''SELECT COUNT(1)
                                FROM res_users
                                WHERE id=%s
                                AND openid_key=%s
                                AND active=%s''',
                           (int(uid), passwd, True))
                if not cr.fetchone()[0]:
                    raise
            # Cache the accepted credential like the base implementation does.
            self._uid_cache.setdefault(db, {})[uid] = passwd
# Instantiate to register the model with the ORM (OpenERP 6/7 convention).
res_users()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jessevig/bertviz | bertviz/transformers_neuron_view/tokenization_bert.py | Python | apache-2.0 | 20,131 | 0.00298 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-m | asking-vocab.txt",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.h | uggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
}
def load_vocab(vocab_file):
    """Read a one-token-per-line vocabulary file.

    Returns an OrderedDict mapping each token to its 0-based line index,
    preserving file order.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            vocab[line.rstrip('\n')] = index
    return vocab
def whitespace_tokenize(text):
    """Strip surrounding whitespace and split *text* on whitespace runs.

    Returns an empty list for empty or all-whitespace input.
    """
    stripped = text.strip()
    return stripped.split() if stripped else []
class BertTokenizer(PreTrainedTokenizer):
r"""
Constructs a BertTokenizer.
:class:`~pytorch_transformers.BertTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None,
                 unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
                 mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs):
        """Constructs a BertTokenizer.
        Args:
            **vocab_file**: Path to a one-wordpiece-per-line vocabulary file
            **do_lower_case**: (`optional`) boolean (default True)
                Whether to lower case the input
                Only has an effect when do_basic_tokenize=True
            **do_basic_tokenize**: (`optional`) boolean (default True)
                Whether to do basic tokenization before wordpiece.
            **never_split**: (`optional`) list of string
                List of tokens which will never be split during tokenization.
                Only has an effect when do_basic_tokenize=True
            **tokenize_chinese_chars**: (`optional`) boolean (default True)
                Whether to tokenize Chinese characters.
                This should likely be deactivated for Japanese:
                see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
        """
        # Register the special tokens with the base tokenizer class.
        super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token,
                                            pad_token=pad_token, cls_token=cls_token,
                                            mask_token=mask_token, **kwargs)
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        # token -> id mapping, in file order.
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping id -> token, preserving vocabulary order.
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        # The basic tokenizer is only built when the two-stage pipeline
        # (basic split, then wordpiece) is requested.
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
                                                  never_split=never_split,
                                                  tokenize_chinese_chars=tokenize_chinese_chars)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
    @property
    def vocab_size(self):
        # Number of entries in the loaded wordpiece vocabulary.
        return len(self.vocab)
    def _tokenize(self, text):
        """Split *text* into wordpiece tokens.

        Two-stage pipeline: optional basic (whitespace/punctuation/casing)
        tokenization first, then greedy wordpiece on each resulting token;
        otherwise wordpiece is applied to the raw text directly.
        """
        split_tokens = []
        if self.do_basic_tokenize:
            # Special tokens (e.g. [CLS]) must survive the basic pass intact.
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens
    def _convert_token_to_id(self, token):
        """ Converts a token (str/unicode) in an id using the vocab. """
        # Unknown tokens fall back to the id of the unk_token entry.
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (string/unicode) using the vocab."""
        # Ids outside the vocabulary map to the unknown-token string.
        return self.ids_to_tokens.get(index, self.unk_token)
def |
GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx/main/management/commands/cleanup_tokens.py | Python | apache-2.0 | 1,286 | 0.001555 | import logging
from django.core import management
from django.core.management.base import BaseCommand
from awx.main.models import OAuth2AccessToken
from oauth2_provider.models import RefreshToken
class Command(BaseCommand):
    """Delete expired OAuth2 access/refresh tokens and log how many were removed."""

    def init_logging(self):
        # Map the management-command verbosity (0..2) onto log levels;
        # any other value falls back to 0 (NOTSET).
        levels = {0: logging.ERROR, 1: logging.INFO, 2: logging.DEBUG, 3: 0}
        self.logger = logging.getLogger('awx.main.commands.cleanup_tokens')
        self.logger.setLevel(levels.get(self.verbosity, 0))
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(message)s'))
        self.logger.addHandler(handler)
        self.logger.propagate = False

    def execute(self, *args, **options):
        self.verbosity = int(options.get('verbosity', 1))
        self.init_logging()
        # Count before/after so we can report how many tokens cleartokens purged.
        access_before = OAuth2AccessToken.objects.all().count()
        refresh_before = RefreshToken.objects.all().count()
        management.call_command('cleartokens')
        self.logger.info("Expired OAuth 2 Access Tokens deleted: {}".format(access_before - OAuth2AccessToken.objects.all().count()))
        self.logger.info("Expired OAuth 2 Refresh Tokens deleted: {}".format(refresh_before - RefreshToken.objects.all().count()))
|
cdt15/lingam | tests/test_rcd.py | Python | mit | 9,689 | 0.001651 | import os
import numpy as np
import pandas as pd
from lingam.rcd import RCD
def test_fit_success():
    """Smoke test: RCD.fit() runs to completion on data with and without
    latent confounders, across the public hyper-parameter combinations."""
    # causal direction: x5 --> x0, x3 --> x1 --> x2, x4 <-- x6
    get_external_effect = lambda n: np.random.normal(0.0, 0.5, n) ** 3
    n_samples = 100
    x5 = get_external_effect(n_samples)
    x6 = get_external_effect(n_samples)
    x0 = 0.6*x5 + get_external_effect(n_samples)
    x3 = 0.5*x5 + get_external_effect(n_samples)
    x1 = 1.0*x0 + 1.0*x3 + get_external_effect(n_samples)
    x2 = 0.8*x1 - 0.6*x6 + get_external_effect(n_samples)
    x4 = 1.0*x1 - 0.5*x6 + get_external_effect(n_samples)
    # x5 and x6 are latent confounders
    X = pd.DataFrame(np.array([x0, x1, x2, x3, x4]).T,
                     columns=['x0', 'x1', 'x2', 'x3', 'x4'])
    # default
    model = RCD()
    model.fit(X)
    ans = model.ancestors_list_
    am = model.adjacency_matrix_
    p_values = model.get_error_independence_p_values(X)
    # max_explanatory_num=3
    model = RCD(max_explanatory_num=3)
    model.fit(X)
    # max_explanatory_num=1
    model = RCD(max_explanatory_num=1)
    model.fit(X)
    # cor_alpha=0.1
    model = RCD(cor_alpha=0.1)
    model.fit(X)
    # ind_alpha=0.1
    model = RCD(ind_alpha=0.1)
    model.fit(X)
    # shapiro_alpha=0.1
    model = RCD(shapiro_alpha=0.1)
    model.fit(X)
    # shapiro_alpha=0.0
    model = RCD(shapiro_alpha=0.0)
    model.fit(X)
    # MLHSICR=True
    model = RCD(MLHSICR=True)
    model.fit(X)
    # bw_method='scott'
    model = RCD(bw_method='scott')
    model.fit(X)
    # bw_method='silverman'
    model = RCD(bw_method='silverman')
    model.fit(X)
    # no latent confounders
    get_external_effect = lambda n: np.random.normal(0.0, 0.5, n) ** 3
    n_samples = 100
    x0 = get_external_effect(n_samples)
    x3 = get_external_effect(n_samples)
    x1 = 1.0*x0 + 1.0*x3 + get_external_effect(n_samples)
    x2 = 0.8*x1 + get_external_effect(n_samples)
    x4 = 1.0*x1 + get_external_effect(n_samples)
    X = pd.DataFrame(np.array([x0, x1, x2, x3, x4]).T, columns=['x0', 'x1', 'x2', 'x3', 'x4'])
    model = RCD()
    model.fit(X)
    p_values = model.get_error_independence_p_values(X)
    # causal direction: x3-->x0, x3-->x1, x0,x1-->x2
    get_external_effect = lambda n: np.random.normal(0.0, 0.5, n) ** 3
    n_samples = 100
    x3 = get_external_effect(n_samples)
    x0 = 0.5*x3 + get_external_effect(n_samples)
    x1 = 0.5*x3 + get_external_effect(n_samples)
    x2 = 1.0*x0 + 1.0*x1 + get_external_effect(n_samples)
    X = pd.DataFrame(np.array([x0, x1, x2]).T, columns=['x0', 'x1', 'x2'])
    model = RCD()
    model.fit(X)
    p_values = model.get_error_independence_p_values(X)
def _assert_fit_raises_value_error(X, **rcd_kwargs):
    """Assert that RCD(**rcd_kwargs).fit(X) raises ValueError."""
    try:
        model = RCD(**rcd_kwargs)
        model.fit(X)
    except ValueError:
        pass
    else:
        raise AssertionError


def test_fit_invalid_data():
    """fit() must reject malformed input data and invalid hyper-parameters.

    The original body repeated the same try/except/else/raise block nine
    times; the checks are now routed through one helper.
    """
    # Not array data
    _assert_fit_raises_value_error(1)
    # Include non-numeric data
    x0 = np.random.uniform(size=5)
    x1 = np.array(['X', 'Y', 'X', 'Y', 'X'])
    X = pd.DataFrame(np.array([x0, x1]).T, columns=['x0', 'x1'])
    _assert_fit_raises_value_error(X)
    # Include NaN values
    x0 = np.random.uniform(size=100)
    x1 = 2.0*x0 + np.random.uniform(size=100)
    X = pd.DataFrame(np.array([x0, x1]).T, columns=['x0', 'x1'])
    X.iloc[10, 0] = np.nan
    _assert_fit_raises_value_error(X)
    # Include infinite values
    x0 = np.random.uniform(size=100)
    x1 = 2.0*x0 + np.random.uniform(size=100)
    X = pd.DataFrame(np.array([x0, x1]).T, columns=['x0', 'x1'])
    X.iloc[10, 0] = np.inf
    _assert_fit_raises_value_error(X)
    # Invalid hyper-parameters on otherwise valid data
    x0 = np.random.uniform(size=100)
    x1 = 2.0*x0 + np.random.uniform(size=100)
    X = pd.DataFrame(np.array([x0, x1]).T, columns=['x0', 'x1'])
    _assert_fit_raises_value_error(X, max_explanatory_num=-1)
    _assert_fit_raises_value_error(X, cor_alpha=-1)
    _assert_fit_raises_value_error(X, ind_alpha=-1)
    _assert_fit_raises_value_error(X, shapiro_alpha=-1)
    _assert_fit_raises_value_error(X, bw_method='X')
def test_bootstrap_success():
    """Smoke test: bootstrap() runs and every result accessor is callable
    with its documented argument combinations."""
    get_external_effect = lambda n: np.random.normal(0.0, 0.5, n) ** 3
    n_samples = 100
    x5 = get_external_effect(n_samples)
    x6 = get_external_effect(n_samples)
    x0 = 0.6*x5 + get_external_effect(n_samples)
    x3 = 0.5*x5 + get_external_effect(n_samples)
    x1 = 1.0*x0 + 1.0*x3 + get_external_effect(n_samples)
    x2 = 0.8*x1 - 0.6*x6 + get_external_effect(n_samples)
    x4 = 1.0*x1 - 0.5*x6 + get_external_effect(n_samples)
    # x5 and x6 are latent confounders
    X = pd.DataFrame(np.array([x0, x1, x2, x3, x4]).T,
                     columns=['x0', 'x1', 'x2', 'x3', 'x4'])
    model = RCD()
    result = model.bootstrap(X, n_sampling=20)
    result.adjacency_matrices_
    result.total_effects_
    # No argument
    cdc = result.get_causal_direction_counts()
    # n_directions=2
    cdc = result.get_causal_direction_counts(n_directions=2)
    # min_causal_effect=0.2
    cdc = result.get_causal_direction_counts(min_causal_effect=0.2)
    # split_by_causal_effect_sign=True
    cdc = result.get_causal_direction_counts(split_by_causal_effect_sign=True)
    # No argument
    dagc = result.get_directed_acyclic_graph_counts()
    # n_dags=2
    dagc = result.get_directed_acyclic_graph_counts(n_dags=2)
    # min_causal_effect=0.6
    dagc = result.get_directed_acyclic_graph_counts(min_causal_effect=0.6)
    # split_by_causal_effect_sign=True
    dagc = result.get_directed_acyclic_graph_counts(split_by_causal_effect_sign=True)
    # get_probabilities
    probs = result.get_probabilities()
    # get_probabilities
    probs = result.get_probabilities(min_causal_effect=0.6)
    # get_total_causal_effects
    ce = result.get_total_causal_effects()
    # get_total_causal_effects
    ce = result.get_total_causal_effects(min_causal_effect=0.6)
    # MLHSICR=True
    model = RCD(MLHSICR=True)
    result = model.bootstrap(X, n_sampling=20)
    # no latent confounders
    get_external_effect = lambda n: np.random.normal(0.0, 0.5, n) ** 3
    n_samples = 100
    x0 = get_external_effect(n_samples)
    x3 = get_external_effect(n_samples)
    x1 = 1.0*x0 + 1.0*x3 + get_external_effect(n_samples)
    x2 = 0.8*x1 + get_external_effect(n_samples)
    x4 = 1.0*x1 + get_external_effect(n_samples)
    X = pd.DataFrame(np.array([x0, x1, x2, x3, x4]).T, columns=['x0', 'x1', 'x2', 'x3', 'x4'])
    model = RCD()
    result = model.bootstrap(X, n_sampling=20)
    # causal direction: x3-->x0, x3-->x1, x0,x1-->x2
    get_external_effect = lambda n: np.random.normal(0.0, 0.5, n) ** 3
    n_samples = 100
    x3 = get_external_effect(n_samples)
    x0 = 0.5*x3 + get_external_effect(n_samples)
    x1 = 0.5*x3 + get_external_effect(n_samples)
    x2 = 1.0*x0 + 1.0*x1 + get_external_effect(n_samples)
    X = pd.DataFrame(np.array([x0, x1, x2]).T, columns=['x0', 'x1', 'x2'])
    model = RCD()
    result = model.bootstrap(X, n_sampling=20)
def test_bootstrap_invalid(): |
get_external_effect = lambda n: np.random.normal(0.0, 0.5, n) ** 3
n_samples = 100
x5 = get_external_effect(n_samples)
x6 = get_external_effect(n_sam | ples)
x0 = 0.6*x5 + get_external_effect(n_samples)
x3 = 0.5*x5 + get_external_effect(n_samples)
x1 = 1.0*x0 + 1.0*x3 + get_external_effect(n_samples)
x2 = 0.8*x1 - 0.6*x6 + get_external_effe |
tscholak/smbkmeans | tfidf_smbkmeans.py | Python | mit | 5,558 | 0.002339 | # -*- coding: utf-8 -*-
import os
import sys
import inspect
cmd_folder = os.path.realpath(
os.path.abspath(
os.path.split(
inspect.getfile(
inspect.currentframe()
)
)[0]
)
)
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from smbkmeans import *
import pandas as pd
import numpy as np
import scipy.sparse as sp
import random
from bson.son impor | t SON
from pymongo import MongoClient
from monary import Monary
import bz2
try:
import cPickle as pickle
except:
import pickle
# Runtime configuration: MongoDB endpoint/collection plus the model grid
# (models_per_k repetitions for each cluster count on a log10 scale from
# ld_k_min to ld_k_max in k_steps steps) and the mini-batch size.
settings = {
    'mongo_host': 'server.local',
    'mongo_db_name': 'mydb',
    'mongo_port': 27017,
    'tfidf_collection': 'tfidf',
    'models_per_k': 25,
    'ld_k_min': 0.5,
    'ld_k_max': 2.5,
    'k_steps': 50,
    'batch_size': 1024
}
# Ids excluded from the aggregation pipeline ($nin filters below).
blacklist = {
    'consumers': [],
    'brands': [0],
    'companies': [10000],
    'categories': [0]
}
if __name__ == "__main__":
# establish PyMongo connection:
mongo_client = MongoClient(settings['mongo_host'],
settings['mongo_port'])
mongo_db = mongo_client[settings['mongo_db_name']]
# get collection:
tfidf_collection = mongo_db[settings['tfidf_collection']]
# find out who the consumers are
cursor = tfidf_collection.find(
{"consumer": {
"$nin": blacklist['consumers']
}}
).distinct('consumer')
consumers = np.array(cursor, dtype=np.int64)
n_consumers = len(consumers)
# find out how many items there are
cursor = tfidf_collection.find().distinct('item')
items = np.array(cursor, dtype=np.int64)
n_items = len(items)
# close PyMongo connection
mongo_client.close()
# set up Monary
monary_client = Monary(settings['mongo_host'],
settings['mongo_port'])
def get_consumer_mtx(consumer_batch):
'''Returns a sparse matrix with feature vectors for a consumer batch.'''
pipeline = [
{"$match": {
"consumer": {"$in": consumer_batch},
"brand": {"$nin": blacklist['brands']},
"company": {"$nin": blacklist['companies']},
"category": {"$nin": blacklist['categories']}
}},
{"$project": {
"_id": False,
"consumer": True,
"item": True,
"tfidf": "$purchasetfidf2"
}},
{"$sort": SON([("consumer", 1)])}
]
try:
# careful! Monary returns masked numpy arrays!
result = monary_client.aggregate(
settings['mongo_db_name'],
settings['tfidf_collection'],
pipeline,
["consumer", "item", "tfidf"],
["int64", "int64", "float64"])
except:
return sp.csr_matrix(shape=(len(consumer_batch), n_items),
dtype=np.float64)
# convert into CSR matrix
_, consumer_idcs = np.unique(result[0].data,
return_inverse=True)
mtx = sp.csr_matrix(
(result[2].data, (consumer_idcs,
result[1].data)),
shape=(len(consumer_batch), n_items),
dtype=np.float64)
# normalize each row (this step can't be moved into the database
# because of the item blacklist)
for row_idx in xrange(len(consumer_batch)):
row = mtx.data[mtx.indptr[row_idx]:mtx.indptr[row_idx + 1]]
row /= np.linalg.norm(row)
return mtx
def get_batch(batch_size=100, offset=0, random_pick=True):
if random_pick:
# pick batch_size examples randomly from the consumers in the
# collection
consumer_batch = random.sample(consumers, batch_size)
else:
# advance index by offset
consumer_batch = list(consumers)[offset:]
# get the next batch_size consumers from the collection
consumer_batch = consumer_batch[:batch_size]
# obtain sparse matrix filled with feature vectors from database
mtx = get_consumer_mtx(consumer_batch)
return mtx
# train the models
ns_clusters = np.unique(np.int64(np.floor(
10. ** np.linspace(settings['ld_k_min'],
settings['ld_k_max'],
settings['k_steps'],
endpoint=True))))
np.random.shuffle(ns_clusters)
ns_clusters = ns_clusters.tolist()
models = [SphericalMiniBatchKMeans(n_clusters=n_clusters,
n_init=10,
max_iter=1000,
batch_size=settings['batch_size'],
reassignment_ratio=.01,
max_no_improvement=10,
project_l=5.) for _ in xrange(settings['models_per_k']) for n_clusters in ns_clusters]
filename = cmd_folder + '/tfidf_smbkmeans__tfidf2.pkl.bz2'
for model in models:
_ = model.fit(n_samples=n_consumers,
get_batch=get_batch)
fp = bz2.BZ2File(filename, 'w')
pickle.dump(models, fp, pickle.HIGHEST_PROTOCOL)
fp.close()
|
dedalusj/PaperChase | backend/paperchase/helpers/favicon.py | Python | mit | 1,942 | 0 | import re
import requests
from urlparse import urlparse, urljoin
from bs4 import BeautifulSoup
class FaviconFetcher():
    """Locate the favicon for a URL.

    Tries, in order: ``/favicon.ico`` at the domain root, a
    ``<link rel="icon">`` in the domain's landing page, and finally a
    ``<link rel="icon">`` in the page at the URL itself.  Each probe
    returns the icon URL on HTTP 200, otherwise None.
    """

    def _htc(self, m):
        # Regex callback: turn a matched pair of hex digits into the
        # character they encode.
        return chr(int(m.group(1), 16))

    def _url_decode(self, url):
        # Percent-decode %XX sequences.  The character class must be the
        # hex digits [0-9a-fA-F]: the previous pattern used a-h/A-H, so
        # inputs such as "%gg" matched and then crashed
        # int(..., 16) with ValueError.  Non-hex sequences now pass
        # through unchanged.
        rex = re.compile('%([0-9a-fA-F][0-9a-fA-F])', re.M)
        return rex.sub(self._htc, url)

    def _extract_path(self, url):
        # Strip leading slashes, then percent-decode the remainder.
        return self._url_decode(url.lstrip("/"))

    def _extract_domain(self, url):
        # NOTE(review): the scheme is forced to plain http, so https-only
        # sites are probed over http.  Kept as-is to preserve behaviour.
        return "http://" + urlparse(self._extract_path(url))[1]

    def icon_at_root(self, domain):
        """Return <domain>/favicon.ico if the server answers 200, else None."""
        root_icon_path = domain + "/favicon.ico"
        r = requests.get(root_icon_path)
        if r.status_code == 200:
            return root_icon_path
        return None

    def icon_in_page(self, url):
        """Scrape the page at *url* for a favicon <link>; return its URL or None."""
        path = self._extract_path(url)
        r = requests.get(path)
        if r.status_code == 200:
            page_soup = BeautifulSoup(r.content)
            page_soup_icon = page_soup.find(
                "link", rel=re.compile("^(shortcut|icon|shortcut icon)$",
                                       re.IGNORECASE))
            if page_soup_icon:
                page_icon_href = page_soup_icon.get("href")
                if page_icon_href:
                    # The href may be relative; resolve against the page URL.
                    page_icon_path = urljoin(path, page_icon_href)
                else:
                    return None
                # Verify the icon actually exists before reporting it.
                page_path_favicon_result = requests.get(page_icon_path)
                if page_path_favicon_result.status_code == 200:
                    return page_icon_path
        return None

    def find_favicon(self, url):
        """Return the first icon URL found for *url*, or None."""
        domain = self._extract_domain(url)
        candidate_url = self.icon_at_root(domain)
        if candidate_url:
            return candidate_url
        candidate_url = self.icon_in_page(domain)
        if candidate_url:
            return candidate_url
        candidate_url = self.icon_in_page(url)
        if candidate_url:
            return candidate_url
        return None
|
minghuascode/pyj | pyjs/src/pyjs/lib_trans/pycompiler/astpprint.py | Python | apache-2.0 | 2,379 | 0.003363 | """Python AST pretty-printer.
Copyright(C) 2007, Martin Blais <blais@furius.ca>
This module exports a function that can be used to print a human-readable
version of the AST.
This code is downloaded verbatim from:
http://code.activestate.com/recipes/533146/
"""
__author__ = 'Martin Blais <blais@furius.ca>'
import sys
__all__ = ('printAst','getAststr')
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def getAststr(astmod, ast, indent='    ', initlevel=0):
    """Pretty-print an AST and return the result as a string.

    astmod is the compiler ast module (rec_node checks against its Node
    class), ast is the tree to dump, indent the per-level indent string
    and initlevel the starting nesting depth.

    (Docstring fixed: the previous one claimed output went to a stream.)
    """
    stream = StringIO()
    rec_node(astmod, ast, initlevel, indent, stream.write)
    stream.write('\n')
    # getvalue() returns the whole in-memory buffer directly; no need to
    # seek(0) and read().
    return stream.getvalue()
def printAst(astmod, ast, indent='    ', stream=sys.stdout, initlevel=0):
    """Pretty-print an AST to the given output stream (default: stdout).

    Writes the recursive dump produced by rec_node plus a trailing
    newline, then flushes so output appears immediately.
    """
    rec_node(astmod, ast, initlevel, indent, stream.write)
    stream.write('\n')
    stream.flush()
def rec_node(astmod, node, level, indent, write):
"Recurse through a node, pretty-printing it."
pfx = indent * level
if isinstance(node, astmod.Node):
write(pfx)
write(node.__class__.__name__)
| write('(')
i = 0
for child in node.getChildren():
if not isinstance(child, astmod.Node):
continue
if i != 0:
write(',')
write('\n')
rec_node(astmod, child, level+ | 1, indent, write)
i += 1
if i == 0:
# None of the children as nodes, simply join their repr on a single
# line.
res = []
for child in node.getChildren():
res.append(repr(child))
write(', '.join(res))
else:
write('\n')
write(pfx)
write(')')
else:
write(pfx)
write(repr(node))
def main():
    """Command-line driver: parse each named Python file and print its AST."""
    from compiler import ast
    import optparse
    parser = optparse.OptionParser(__doc__.strip())
    opts, args = parser.parse_args()
    if not args:
        parser.error("You need to specify the name of Python files to print out.")
    import compiler, traceback
    for fn in args:
        print('\n\n%s:\n' % fn)
        try:
            printAst(ast, compiler.parseFile(fn), initlevel=1)
        # NOTE(review): Python 2 except syntax — this module targets the
        # Python 2 'compiler' package, which no longer exists in Python 3.
        except SyntaxError, e:
            traceback.print_exc()
if __name__ == '__main__':
main()
|
LumPenPacK/NetworkExtractionFromImages | win_build/nefi2_win_amd64_msvc_2015/site-packages/networkx/algorithms/chordal/__init__.py | Python | bsd-2-clause | 56 | 0.017857 | from networkx.algorithms.chord | al.cho | rdal_alg import *
|
AdaHeads/Coverage_Tests | src/event_stack.py | Python | gpl-3.0 | 5,316 | 0.010534 |
import websocket
import logging
import json
import config
import threading
from time import sleep
from pprint import pformat
class TimeOutReached(Exception):
    """Raised by the polling helpers (WaitFor/WaitForOpen) on timeout."""
    pass
class EventListenerThread(threading.Thread):
log = None
ws = None
messageStack = []
ws_uri = None
authtoken = None
open = False
#messageStack = dict()
def __init__(self, uri, token):
self.log = self.log = logging.getLogger(self.__class__.__name__)
super(EventListenerThread, self).__init__()
self.ws_uri = uri
self.authtoken = token
self.open = False
self.flush()
    def flush(self):
        """Discard every event collected so far."""
        self.messageStack = []
def stack_contains (self, event_type, call_id=None, destination=None):
for item in self.messageStack:
if item['event'] == event_type:
if call_id is None:
if | destination is None:
return True
elif item['call']['destination'] == destination:
return True
elif item['call']['i | d'] == call_id:
if destination == None:
return True
elif item['call']['destination'] == destination:
return True
return False
def WaitForOpen (self, timeout=3.0):
RESOLUTION = 0.1
timeSlept = 0.0
while timeSlept < timeout:
timeSlept += RESOLUTION
if self.open:
return
sleep (RESOLUTION)
raise TimeOutReached ("Did not open websocket in a timely manner")
def WaitFor (self, event_type, call_id=None, timeout=10.0):
RESOLUTION = 0.1
timeSlept = 0.0
while timeSlept < timeout:
timeSlept += RESOLUTION
if self.stack_contains (event_type=event_type, call_id=call_id):
return
sleep (RESOLUTION)
raise TimeOutReached (event_type + ":" + str (call_id))
def getLatestEvent (self, event_type, call_id=None, destination=None):
for item in reversed (self.messageStack):
if item['event'] == event_type:
if call_id == None:
if destination == None:
return item['event']
elif item['call']['destination'] == destination:
return item['event']
elif item['call']['id'] == call_id:
if destination == None:
return item['event']
elif item['call']['destination'] == destination:
return item['event']
return False
    def Get_Latest_Event (self, Event_Type, Call_ID = None, Destination = None):
        """Return the newest stacked event dict matching the given filters.

        Scans ``self.messageStack`` newest-first; ``Call_ID`` and
        ``Destination`` are optional extra filters on the event's
        ``call`` sub-dict. Returns None when nothing matches.
        """
        try:
            for item in reversed (self.messageStack):
                if item['event'] == Event_Type:
                    if Call_ID is None:
                        if Destination is None:
                            return item
                        elif item['call']['destination'] == Destination:
                            return item
                    elif item['call']['id'] == Call_ID:
                        if Destination is None:
                            return item
                        elif item['call']['destination'] == Destination:
                            return item
        # Bare except logs the stack for debugging, then re-raises, so no
        # error is swallowed here (e.g. a malformed event missing 'call').
        except:
            self.log.critical ("Exception in Get_Latest_Event: messageStack = " + str (self.messageStack))
            raise
        self.log.info ("Didn't find a match on {Event_Type = " + Event_Type + " & Call_ID = " + str(Call_ID) + " & Destination = " + str(Destination) + "}")
        return None
    def dump_stack(self):
        """Return a pretty-printed string of every collected event."""
        return pformat(self.messageStack)

    def on_error(self, ws, error):
        # websocket-client error callback; just log it.
        self.log.error ("Unspecified error:" + str(error))

    def on_open (self, ws):
        # Mark the socket usable so WaitForOpen() can return.
        self.log.info ("Opened websocket")
        self.open = True

    def on_close(self, ws):
        self.log.info ("Closed websocket")
        self.open = False

    def on_message(self, ws, message):
        # Each received frame is expected to be a JSON event; it is parsed
        # and appended so the Wait*/get* helpers can search it.
        self.log.info(message)
        self.messageStack.append(json.loads(message))
def connect (self):
full_uri= self.ws_uri + "?token=" + self.authtoken
try:
self.ws = websocket.WebSocketApp (full_uri,
on_message = self.on_message,
on_error = self.on_error,
on_close = self.on_close)
self.ws.on_open = self.on_open
self.log.info("Websocket connected to " + full_uri)
except:
self.log.critical("Websocket could not connect to " + full_uri)
def run(self):
try:
self.log.info ("Starting websocket")
self.connect()
self.ws.run_forever()
except:
self.log.critical("Run in thread failed!")
    def stop(self):
        """Close the websocket if it is currently open (idempotent)."""
        if self.open:
            self.ws.close()
            self.open = False

    def __del__(self):
        # Best-effort cleanup so a dropped listener does not leak a socket.
        self.stop()
if __name__ == "__main__":
elt = EventListenerThread(uri=config.call_flow_events, token=config.authtoken)
elt.start()
elt.stop()
|
danielwikstrom/Verificacion | tests/context.py | Python | mit | 150 | 0.026667 | # -*- coding: utf-8 -*-
import sys
impor | t os
sys.path.insert(0, os.path.abspath(os.path.joi | n(os.path.dirname(__file__), '..'))) #WTF?
import sample
|
cuckoo5/soap | Soap_know/handler/base.py | Python | gpl-3.0 | 2,274 | 0.006157 | #coding=utf-8
import tornado
import session
import util.config as config
class BaseHandler(tornado.web.RequestHandler):
def __init__(self, *argc, **argkw):
print 'bbbbbbbbbbbbbbbbbbbbbbbb'
super(BaseHandler, self).__init__(*argc, **argkw)
self.session = session.Session(self.application.session_manager, self)
def get_current_user(self):
user = self.session.get("user")
# user = self.get_secure_cookie("user")
if user:
user_json = tornado.escape.json_decode(user)
print 'user = ', user_json
| return user_json
else:
return None
def result(self, code, msg, result):
| ret = self.format(code, msg, result)
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.write(ret)
def set_current_user(self, user):
# self.set_secure_cookie("user", tornado.escape.json_encode(user))
self.session["user"] = tornado.escape.json_encode(user)
self.session.save()
# # Allows us to get the previous URL
# def get_referring_url(self):
# try:
# _, _, referer, _, _, _ = urlparse.urlparse(self.request.headers.get('Referer'))
# if referer:
# return referer
# # Test code will throw this if there was no 'previous' page
# except AttributeError:
# pass
# return '/'
#
# def get_flash(self):
# flash = self.get_secure_cookie('flash')
# self.clear_cookie('flash')
# return flash
#
# def get_essentials(self):
# mp = {k: ''.join(v) for k, v in self.request.arguments.iteritems()}
# print mp
# pass
def write_error(self, status_code, **kwargs):
print 'write_error = ', status_code
if status_code == 404:
self.render('404.html', title=config.title)
elif status_code == 500:
self.render('500.html', title=config.title)
else:
super(BaseHandler, self).write_error(status_code, **kwargs)
def format(self, error_code=0, error_msg=None, result=None):
ret = {"errorCode":error_code, "errorMessage":error_msg, "result":result}
return tornado.escape.json_encode(ret) |
DemocracyClub/yournextrepresentative | ynr/apps/candidates/migrations/0030_merge.py | Python | agpl-3.0 | 234 | 0 | from dj | ango.db import migrations
class Migration(migrations.Migration):
dependencies = [
("candidates", "0029_add_ordering_to_fields_meta"),
("candidates", "0028_auto_20160411_1055"),
]
oper | ations = []
|
zijistark/zckTools | src/zck/token_codegen.py | Python | lgpl-3.0 | 1,078 | 0.029685 | #!/usr/bin/python3
import sys
from pathlib import Path
list_scope_path = Path("./list_scope_tokens.txt")
keyword_bit = 13
list_scope_bit = 14
def main():
if len(sys.argv) < 2:
print("Error: Must specify an argument of either 'tokens' or 'emitters'!", file=sys.stderr)
return 1
list_scopes = set()
with list_scope_path.open('r') as f:
for line in f:
line = line.strip()
if line.startswith('#') or len(line) == 0:
continue
list_scopes.add(line)
max_kw_len = max( len(kw) for kw in list_scopes )
if sys.argv[1] == 'tokens':
t_id = (1 << (keyword_bit - 1)) | (1 << (list_scope_bit-1))
for t in sorted(list_scopes):
print(' {:<{width}} = 0x{:4X};'.format(t.upper(), t_id, width=max_kw_len))
t_id += 1
elif sys.argv[1] == 'emitters':
for t in | sorted(list_scopes):
print(' {:<{width}} => T_{}(Lexeme);'.format('"' + t + '"', t.upper(), width = max_kw_len + 2))
else:
print("Error: Must specify an argument of either 'tokens' or | 'emitters'!", file=sys.stderr)
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
arider/riderml | riderml/regression/SGD_regressor.py | Python | mit | 1,474 | 0 | from ..base import model
from gradient_descent import stochastic_gradient_descent
from ..util import loss_functions
import numpy
from ..util.preprocessing import as_matrix
class SGD_regressor(model):
    def __init__(self,
                 function=loss_functions.linear,
                 dfunction=loss_functions.dlinear,
                 theta=None,
                 learning_rate=.01,
                 fit_intercept=True):
        """Configure the SGD regressor.

        function/dfunction are the model and its gradient, theta the
        initial weights (None lets gradient descent initialize them),
        learning_rate the SGD step size, and fit_intercept controls
        whether a leading all-ones column is prepended to the data.
        """
        self.theta = theta
        self.learning_rate = learning_rate
        self.function = function
        self.dfunction = dfunction
        self.fit_intercept = fit_intercept
def fit(self, x, y, iterations=1, shuffle=True, batch_size=.2):
if self.fit_intercept:
tmp = as_matrix(x)
| data = numpy.ones((tmp.shape[0], tmp.shape[1] + 1))
data[:, 1:] = tmp
else:
data = as_matrix(x)
self.theta = stochastic_gradient_descent(
self.function,
| self.dfunction,
data,
y,
self.theta,
iterations,
self.learning_rate,
shuffle=shuffle,
batch_size=batch_size)
    def predict(self, data):
        """Apply the fitted model to ``data`` and return predictions.

        When ``fit_intercept`` is enabled, a leading all-ones column is
        prepended (mirroring what ``fit`` does) before evaluating
        ``self.function(X, self.theta)``.
        """
        if self.fit_intercept:
            tmp = as_matrix(data)
            x = numpy.ones((tmp.shape[0], tmp.shape[1] + 1))
            x[:, 1:] = tmp
            return self.function(x, self.theta)
        else:
            return self.function(as_matrix(data), self.theta)
|
Tesora/tesora-horizon | openstack_dashboard/contrib/trove/content/database_configurations/panel.py | Python | apache-2.0 | 840 | 0 | # Copyright 2015 Tesora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, soft | ware
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class Configurations(horizon.Panel) | :
name = _("Configuration Groups")
slug = 'database_configurations'
permissions = ('openstack.services.database',)
|
SiLab-Bonn/pyBAR | pybar/scans/scan_threshold.py | Python | bsd-3-clause | 4,996 | 0.003803 | import logging
from pybar.analysis.analyze_raw_data import AnalyzeRawData
from pybar.fei4.register_utils import invert_pixel_mask
from pybar.fei4_run_base import Fei4RunBase
from pybar.fei4.register_utils import scan_loop
from pybar.run_manager import RunManager
class ThresholdScan(Fei4RunBase):
'''Standard Threshold Scan
Implementation of a standard threshold scan.
'''
_default_run_conf = {
"broadcast_commands": True,
"threaded_scan": True,
"mask_steps": 3, # mask steps, be carefull PlsrDAC injects different charge for different mask steps
"n_injections": 100, # number of injections per PlsrDAC step
"scan_parameters": [('PlsrDAC', [None, 100])], # the PlsrDAC range
"step_size": 1, # step size of the PlsrDAC during scan
"use_enable_mask": False, # if True, use Enable mask during scan, if False, all pixels will be enabled
"enable_shift_masks": ["Enable", "C_High", "C_Low"], # enable masks shifted during scan
"disable_shift_masks": [], # disable masks shifted during scan
"pulser_dac_correction": False # PlsrDAC correction for each double column
}
    def configure(self):
        """Prepare the front-end pixel registers for the threshold scan.

        Sets the C_Low/C_High injection-capacitor pixel registers to 1
        only when the corresponding mask name appears (case-insensitively)
        in ``enable_shift_masks``, to 0 otherwise, and writes them out in
        both cases so the front end is in a known state. All register
        commands are collected and sent in one batch.
        """
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        # C_Low: enable only if selected for shifting during the scan.
        if "C_Low".lower() in map(lambda x: x.lower(), self.enable_shift_masks):
            self.register.set_pixel_register_value('C_Low', 1)
            commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_Low'))
        else:
            self.register.set_pixel_register_value('C_Low', 0)
            commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_Low'))
        # C_High: same treatment as C_Low.
        if "C_High".lower() in map(lambda x: x.lower(), self.enable_shift_masks):
            self.register.set_pixel_register_value('C_High', 1)
            commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_High'))
        else:
            self.register.set_pixel_register_value('C_High', 0)
            commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_High'))
        commands.extend(self.register.get_commands("RunMode"))
        self.register_utils.send_commands(commands)
def scan(self):
scan_parameter_range = [0, (2 ** self.register.global_registers['PlsrDAC']['bitlength'])]
if self.scan_parameters.PlsrDAC[0]:
scan_parameter_range[0] = self.scan_parameters.PlsrDAC[0]
if self.scan_parameters.PlsrDAC[1]:
scan_parameter_range[1] = self.scan_parameters.PlsrDAC[1]
scan_parameter_range = range(scan_parameter_range[0], scan_parameter_range[1] + 1, self.step_size)
logging.info("Scanning %s from %d to %d", 'PlsrDAC', scan_parameter_range[0], scan_parameter_range[-1])
for scan_parameter_value in scan_parameter_range:
if self.stop_run.is_set():
break
commands = []
commands.extend(self.register.get_commands("ConfMode"))
self.register.set_global_register_value('PlsrDAC', scan_parameter_value)
commands.extend(self.regist | er.get_commands("WrRegister", name=['PlsrDAC']))
self. | register_utils.send_commands(commands)
with self.readout(PlsrDAC=scan_parameter_value):
cal_lvl1_command = self.register.get_commands("CAL")[0] + self.register.get_commands("zeros", length=40)[0] + self.register.get_commands("LV1")[0]
scan_loop(self, cal_lvl1_command, repeat_command=self.n_injections, use_delay=True, mask_steps=self.mask_steps, enable_mask_steps=None, enable_double_columns=None, same_mask_for_all_dc=True, fast_dc_loop=True, bol_function=None, eol_function=None, digital_injection=False, enable_shift_masks=self.enable_shift_masks, disable_shift_masks=self.disable_shift_masks, restore_shift_masks=False, mask=invert_pixel_mask(self.register.get_pixel_register_value('Enable')) if self.use_enable_mask else None, double_column_correction=self.pulser_dac_correction)
    def analyze(self):
        """Interpret the recorded raw data and produce threshold histograms.

        Configures the raw-data analyzer for fitted threshold histograms
        (no ToT histogram), runs the interpretation and plots the results.
        """
        with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
            analyze_raw_data.create_tot_hist = False
            analyze_raw_data.create_fitted_threshold_hists = True
            analyze_raw_data.create_threshold_mask = True
            # NOTE(review): hard-codes 100 instead of self.n_injections —
            # these diverge if the run configuration overrides n_injections;
            # confirm whether this should be self.n_injections.
            analyze_raw_data.n_injections = 100
            analyze_raw_data.interpreter.set_warning_output(False)  # so far the data structure in a threshold scan was always bad, too many warnings given
            analyze_raw_data.interpret_word_table()
            analyze_raw_data.interpreter.print_summary()
            analyze_raw_data.plot_histograms()
if __name__ == "__main__":
with RunManager('configuration.yaml') as runmngr:
runmngr.run_run(ThresholdScan)
|
syci/partner-contact | base_location/tests/test_base_location.py | Python | agpl-3.0 | 10,304 | 0.001262 | # Copyright 2015 Yannick Vaucher, Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import psycopg2
from odoo.exceptions import ValidationError
from odoo.tests import common, tagged
from odoo.tools.misc import mute_logger
@tagged("post_install", "-at_install")
class TestBaseLocation(common.SavepointCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
state_obj = cls.env["res.country.state"]
city_obj = cls.env["res.city"]
zip_obj = cls.env["res.city.zip"]
cls.partner_obj = cls.env["res.partner"]
cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
cls.state_vd = state_obj.create(
{"name": "Vaud", "code": "VD", "country_id": cls.env.ref("base.ch").id}
)
cls.env.ref("base.es").write({"enforce_cities": True})
cls.company = cls.env.ref("base.main_company")
cls.state_bcn = state_obj.create(
{"name": "Barcelona", "code": "08", "country_id": cls.env.ref("base.es").id}
)
cls.state_madrid = state_obj.create(
{"name": "Madrid", "code": "28", "country_id": cls.env.ref("base.es").id}
)
cls.city_bcn = city_obj.creat | e(
{
"name": "Barcelona",
"state_id": cls.state_bcn.id,
"country_id": cls.env.ref("base.es").id,
}
)
cls.city_madrid = city_obj.create(
{
"name": "Madrid",
"state_id": cls.state_madrid.id,
"country_id": cls.env.ref("base.es").id,
}
)
| cls.city_lausanne = city_obj.create(
{
"name": "Lausanne",
"state_id": cls.state_vd.id,
"country_id": cls.env.ref("base.ch").id,
}
)
cls.lausanne = zip_obj.create({"name": "666", "city_id": cls.city_lausanne.id})
cls.barcelona = zip_obj.create({"name": "444", "city_id": cls.city_bcn.id})
def test_onchange_partner_city_completion(self):
"""Test that partner data is filled accodingly"""
partner1 = self.partner_obj.new({"name": "Camptocamp"})
partner1.zip_id = self.barcelona
partner1._onchange_zip_id()
self.assertEqual(partner1.zip, self.barcelona.name)
self.assertEqual(partner1.city, self.barcelona.city_id.name)
self.assertEqual(partner1.state_id, self.barcelona.city_id.state_id)
self.assertEqual(partner1.country_id, self.barcelona.city_id.country_id)
def test_onchange_company_city_completion(self):
"""Test that company data is filled accodingly"""
company = self.env["res.company"].new({"name": "Test"})
company.zip_id = self.lausanne
company._onchange_zip_id()
self.assertEqual(company.zip, self.lausanne.name)
self.assertEqual(company.city, self.lausanne.city_id.name)
self.assertEqual(company.state_id, self.lausanne.city_id.state_id)
self.assertEqual(company.country_id, self.lausanne.city_id.country_id)
def test_company_address_fields(self):
"""Test if the partner address fields changes when
changing the ones from the company"""
company = self.env["res.company"].create({"name": "Test"})
self.assertTrue(company.partner_id)
company.partner_id.write(
{
"zip_id": self.lausanne.id,
"state_id": self.lausanne.city_id.state_id.id,
"country_id": self.lausanne.city_id.country_id.id,
"city_id": self.lausanne.city_id.id,
"city": self.lausanne.city_id.name,
"zip": self.lausanne.name,
}
)
company._compute_address()
self.assertEqual(company.zip_id, company.partner_id.zip_id)
self.assertEqual(company.city_id, company.partner_id.city_id)
def test_company_address_fields_inverse(self):
"""Test inverse fields from res.company"""
company = self.env["res.company"].create({"name": "Test"})
company.zip_id = self.barcelona.id
company._inverse_city_id()
company._inverse_zip_id()
self.assertEqual(company.zip_id, company.partner_id.zip_id)
self.assertEqual(company.city_id, company.partner_id.city_id)
def test_onchange_company_city_id_completion(self):
"""Test city auto-completion when changing zip in a company"""
company = self.env["res.company"].new({"name": "Test"})
company.zip_id = self.barcelona
company._onchange_zip_id()
self.assertEqual(company.city_id, self.barcelona.city_id)
def test_constrains_partner_01(self):
"""Test partner 1 constraints"""
with self.assertRaises(ValidationError):
self.partner_obj.create({"name": "P1", "zip_id": self.barcelona.id})
def test_writing_company(self):
self.company.zip_id = self.barcelona
def test_constrains_partner_country(self):
"""Test partner country constraints"""
partner = self.partner_obj.create(
{
"name": "P1",
"zip_id": self.barcelona.id,
"country_id": self.barcelona.city_id.country_id.id,
"state_id": self.barcelona.city_id.state_id.id,
"city_id": self.barcelona.city_id.id,
}
)
with self.assertRaises(ValidationError):
partner.country_id = self.ref("base.ch")
def test_constrains_partner_state(self):
"""Test partner state constraints"""
partner = self.partner_obj.create(
{
"name": "P1",
"zip_id": self.barcelona.id,
"country_id": self.barcelona.city_id.country_id.id,
"state_id": self.barcelona.city_id.state_id.id,
"city_id": self.barcelona.city_id.id,
}
)
with self.assertRaises(ValidationError):
partner.state_id = self.state_vd.id
def test_constrains_partner_city(self):
"""Test partner city constraints"""
partner = self.partner_obj.create(
{
"name": "P1",
"zip_id": self.barcelona.id,
"country_id": self.barcelona.city_id.country_id.id,
"state_id": self.barcelona.city_id.state_id.id,
"city_id": self.barcelona.city_id.id,
}
)
with self.assertRaises(ValidationError):
partner.city_id = self.city_lausanne
def test_partner_onchange_country(self):
"""Test partner onchange country_id"""
partner = self.partner_obj.new({"name": "TEST", "zip_id": self.lausanne.id})
partner.country_id = self.env.ref("base.es")
partner._onchange_country_id()
self.assertFalse(partner.zip_id)
def test_partner_onchange_city(self):
"""Test partner onchange city_id"""
partner = self.partner_obj.new({"name": "TEST", "zip_id": self.lausanne.id})
self.city_bcn.country_id.enforce_cities = False
partner.city_id = self.city_bcn
partner._onchange_city_id()
self.assertFalse(partner.zip_id)
partner.city_id = False
res = partner._onchange_city_id()
self.assertFalse(res["domain"]["zip_id"])
def test_partner_onchange_state(self):
"""Test partner onchange state_id"""
partner = self.partner_obj.new({"name": "TEST", "zip_id": self.lausanne.id})
partner.state_id = self.state_bcn
partner._onchange_state_id()
self.assertFalse(partner.zip_id)
self.assertEqual(partner.country_id, partner.state_id.country_id)
def test_company_onchange_state(self):
"""Test company onchange state_id"""
self.company.state_id = self.state_bcn
self.company._onchange_state_id()
self.assertEqual(self.company.country_id, self.company.state_id.country_id)
def test_display_name(self):
"""Test if the display_name is stored and computed properly"""
self.assertEqual(
self.lausanne.display_name,
"666 |
fstonezst/LightGBM | python-package/lightgbm/callback.py | Python | mit | 6,606 | 0.00106 | # coding: utf-8
# pylint: disable = invalid-name, W0105, C0301
from __future__ import absolute_import
import collections
from operator import gt, lt
from .compat import range_
class EarlyStopException(Exception):
    """Exception of early stopping.

    Parameters
    ----------
    best_iteration : int
        The iteration (as counted by the callback environment) at which
        the best score was reached.
    best_score : list
        The evaluation-result list recorded at that best iteration.
    """

    def __init__(self, best_iteration, best_score):
        super(EarlyStopException, self).__init__()
        self.best_iteration = best_iteration
        self.best_score = best_score
# Callback environment used by callbacks:
#   model                  -- the model being trained (must expose reset_parameter())
#   params                 -- dict of current training parameters
#   iteration              -- current iteration index
#   begin_iteration        -- index of the first boosting iteration
#   end_iteration          -- index one past the last boosting iteration
#   evaluation_result_list -- tuples of (data_name, eval_name, result,
#                             is_higher_better[, stdv]); see _format_eval_result
CallbackEnv = collections.namedtuple(
    "LightGBMCallbackEnv",
    ["model",
     "params",
     "iteration",
     "begin_iteration",
     "end_iteration",
     "evaluation_result_list"])
def _format_eval_result(value, show_stdv=True):
"""format metric string"""
if len(value) == 4:
return '%s\'s %s: %g' % (value[0], value[1], value[2])
elif len(value) == 5:
if show_stdv:
return '%s\'s %s: %g + %g' % (value[0], value[1], value[2], value[4])
else:
return '%s\'s %s: %g' % (value[0], value[1], value[2])
else:
raise ValueError("Wrong metric value")
def print_evaluation(period=1, show_stdv=True):
    """Create a callback that prints the evaluation results.

    Parameters
    ----------
    period : int
        Print every ``period`` iterations; a non-positive value disables
        printing entirely.
    show_stdv : bool, optional
        Whether to include the stdv term when it is available.

    Returns
    -------
    callback : function
        A callback that prints evaluation results periodically.
    """
    def callback(env):
        """internal function"""
        # Guard clauses replace the original single compound condition.
        if period <= 0:
            return
        if not env.evaluation_result_list:
            return
        if (env.iteration + 1) % period != 0:
            return
        formatted = [_format_eval_result(res, show_stdv) for res in env.evaluation_result_list]
        print('[%d]\t%s' % (env.iteration + 1, '\t'.join(formatted)))
    callback.order = 10
    return callback
def record_evaluation(eval_result):
    """Create a callback that records the evaluation history into ``eval_result``.

    Parameters
    ----------
    eval_result : dict
        Dictionary that is cleared up front and then filled, per dataset
        name, with ``{eval_name: [result, ...]}`` histories.

    Returns
    -------
    callback : function
        The requested callback function.

    Raises
    ------
    TypeError
        If ``eval_result`` is not a dict.
    """
    if not isinstance(eval_result, dict):
        raise TypeError('Eval_result should be a dictionary')
    eval_result.clear()

    def _prepare(env):
        """Lazily create one defaultdict(list) bucket per dataset name."""
        for data_name, _, _, _ in env.evaluation_result_list:
            eval_result.setdefault(data_name, collections.defaultdict(list))

    def callback(env):
        """internal function"""
        if not eval_result:
            _prepare(env)
        for data_name, eval_name, result, _ in env.evaluation_result_list:
            eval_result[data_name][eval_name].append(result)
    callback.order = 20
    return callback
def reset_parameter(**kwargs):
    """Create a callback that resets parameters after the first iteration.

    NOTE: the initial parameters still take effect on the first iteration.

    Parameters
    ----------
    **kwargs : value should be list or function
        Either a list giving the parameter value for each boosting round,
        or a function mapping the current round index to a value
        (e.g. yields learning-rate decay).

    Returns
    -------
    callback : function
        The requested callback function.
    """
    def callback(env):
        """internal function"""
        round_idx = env.iteration - env.begin_iteration
        changed = {}
        for key, spec in kwargs.items():
            if key in ['num_class', 'boosting_type', 'metric']:
                raise RuntimeError("cannot reset {} during training".format(repr(key)))
            if isinstance(spec, list):
                if len(spec) != env.end_iteration - env.begin_iteration:
                    raise ValueError("Length of list {} has to equal to 'num_boost_round'.".format(repr(key)))
                candidate = spec[round_idx]
            else:
                candidate = spec(round_idx)
            # Only push parameters whose value actually changed.
            if candidate != env.params.get(key, None):
                changed[key] = candidate
        if changed:
            env.model.reset_parameter(changed)
            env.params.update(changed)
    callback.before_iteration = True
    callback.order = 10
    return callback
def early_stopping(stopping_rounds, verbose=True):
"""Create a callback that activates early stopping.
Activates early stopping.
Requires at least one validation data and one metric
If there's more than one, will check all of them
Parameters
----------
stopping_rounds : int
The stopping rounds before the trend occur.
verbose : optional, bool
Whether to print message about early stopping information.
Returns
-------
callback : function
The requested callback function.
"""
best_score = []
best_iter = []
best_score_list = []
cmp_op = []
def init(env):
"""internal function"""
if not env.evaluation_result_list:
raise ValueError('For early stopping, at least one dataset and eval metric is required for evaluation')
if verbose:
msg = "Train until valid scores didn't improve in {} rounds."
print(msg.format(stopping_rounds))
for eval_ret in env.evaluation_result_list:
best_iter.append(0)
best_score_list.append(None)
if eval_ | ret[3]:
best_score.append(float('-inf'))
cmp_op.append(gt)
| else:
best_score.append(float('inf'))
cmp_op.append(lt)
def callback(env):
"""internal function"""
if not cmp_op:
init(env)
for i in range_(len(env.evaluation_result_list)):
score = env.evaluation_result_list[i][2]
if cmp_op[i](score, best_score[i]):
best_score[i] = score
best_iter[i] = env.iteration
best_score_list[i] = env.evaluation_result_list
elif env.iteration - best_iter[i] >= stopping_rounds:
if verbose:
print('Early stopping, best iteration is:\n[%d]\t%s' % (
best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]])))
raise EarlyStopException(best_iter[i], best_score_list[i])
callback.order = 30
return callback
|
glassesfactory/Shimehari | shimehari/core/manage/commands/create.py | Python | bsd-3-clause | 5,921 | 0.002141 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
===============================
Shimehari.core.manage.commands.create
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
アプリケーションを新たに作成する create コマンド
各コマンドモジュールは共通インターフェースとして
Command クラスを持ちます。
===============================
"""
import os
import sys
import errno
import shutil
from optparse import make_option
import shimehari
from shimehari.core.manage import CreatableCommand
from shimehari.core.helpers import importFromString
from shimehari.core.exceptions import CommandError
debugFormat = ('-' * 80 + '\\n' + '%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\\n' + '%(message)s\\n' + '-' * 80)
outputFormat = ('%(asctime)s %(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\\n' + '%(message)s\\n' + '-' * 80)
u"""
===============================
::pkg:: Shimehari.core.manage.commands.create
Command
~~~~~~~
コマンドの実装
===============================
"""
class Command(Creatab | leCommand):
name = 'create'
summary = 'Create Shimehari Application'
usage = "Usage: %prog APPLICATION_NAME [OPTIONS]"
option_list = CreatableCommand.option_list + (
make_option('--path', '-p', action='store', type='string', dest='path', help='target create path'),
make_option('--template', '-t', action='store', type='string', dest='template', help='using project tempalte')
)
def __init__(self):
super(Command, self).__init__()
| def handle(self, appDir='app', *args, **options):
try:
importFromString(appDir)
except ImportError:
pass
else:
raise CommandError('%s mou aru' % appDir)
path = options.get('path')
if path is None:
appRootDir = os.path.join(os.getcwd(), appDir)
try:
os.makedirs(appRootDir)
except OSError, error:
if error.errno == errno.EEXIST:
msg = '%s is already exists' % appRootDir
else:
msg = error
raise CommandError(msg)
else:
appRootDir = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(appRootDir):
raise CommandError("auau")
template = options.get('template')
if template is None:
appTemplateDir = os.path.join(shimehari.__path__[0], 'core', 'conf', 'app_template')
else:
appTemplateDir = template
prefixLen = len(appTemplateDir) + 1
for root, dirs, files in os.walk(appTemplateDir):
pathRest = root[prefixLen:]
relativeDir = pathRest.replace('app_name', 'app')
if relativeDir:
targetDir = os.path.join(appRootDir, relativeDir)
if not os.path.exists(targetDir):
os.mkdir(targetDir)
for dirname in dirs[:]:
if dirname.startswith('.'):
dirs.remove(dirname)
for filename in files[:]:
if filename.endswith(('.pyo', '.pyc', '.py.class')):
continue
oldPath = os.path.join(root, filename)
newPath = os.path.join(appRootDir, relativeDir, filename.replace('app_name', 'app'))
self.readAndCreateFile(oldPath, newPath)
#ここどうすっかな
self.createDirectory(appRootDir, 'views')
self.createDirectory(appRootDir, 'assets')
self.createDirectory(appRootDir, 'log')
#generate config file
confOrgPath = os.path.join(shimehari.__path__[0], 'core', 'conf', 'config.org.py')
newConfPath = os.path.join(os.getcwd(), 'config.py')
self.readAndCreateFileWithRename(confOrgPath, newConfPath,
(appDir, appDir, debugFormat, outputFormat))
sys.stdout.write("New App Create Complete. enjoy!\n")
u"""-----------------------------
::pkg:: Shimehari.core.manage.commands.create.Command
readAndCreateFile
~~~~~~~~~~~~~~~~~
指定されたディレクトリからテンプレートファイルを読み込み
新たに生成したい指定ディレクトリへファイルを生成します。
[args]
:old テンプレートファイルのパス
:new 生成したいディレクトリへのパスとファイル名
------------------------------"""
    def readAndCreateFile(self, old, new):
        """Copy template file ``old`` to ``new`` and preserve its file mode.

        Raises CommandError when ``new`` already exists. Mode-copy
        permission problems are reported to stderr but are not fatal.
        """
        if os.path.exists(new):
            raise CommandError('already... %s' % new)
        with open(old, 'r') as template:
            content = template.read()
            with open(new, 'w') as newFile:
                newFile.write(content)
                sys.stdout.write(u"Creating: %s\n" % new)
        try:
            shutil.copymode(old, new)
            self.toWritable(new)
        except OSError:
            sys.stderr.write('permission error')
def readAndCreateFileWithRename(self, old, new, name):
if os.path.exists(new):
raise CommandError('Controller already exists.')
with open(old, 'r') as template:
content = template.read()
if '%s' in content:
content = content % name
with open(new, 'w') as newFile:
newFile.write(content)
sys.stdout.write("Creating: %s\n" % new)
try:
shutil.copymode(old, new)
self.toWritable(new)
except OSError:
sys.stderr.write('can not setting permission')
def createDirectory(self, rootDir, dirname):
targetName = os.path.join(rootDir, dirname)
if not os.path.exists(targetName):
os.mkdir(targetName)
sys.stdout.write("Creating: %s\n" % targetName)
Command()
|
fjorba/invenio | modules/bibformat/lib/bibreformat.py | Python | gpl-2.0 | 23,401 | 0.005641 | ## -*- mode: python; coding: utf-8; -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Call BibFormat engine and create HTML brief (and other) formats cache for
bibliographic records."""
__revision__ = "$Id$"
import sys
try:
from invenio.dbquery import run_sql
from invenio.config import \
CFG_SITE_URL,\
CFG_TMPDIR,\
CFG_BINDIR
from invenio.intbitset import intbitset
from invenio.search_engine import perform_request_search, search_pattern
from invenio.search_engine import print_record
from invenio.bibrank_citation_searcher import get_cited_by
from invenio.bibrank_citation_indexer import get_bibrankmethod_lastupdate
| from invenio.bibformat import format_record
from invenio.bibformat_config import CFG_BIBFORMAT_USE_OLD_BIBFORMAT
from invenio.shellutils import split_cli_ids_arg
from invenio.bibtask import task_init, write_message, task_set_option, \
task_get_option, task_update_progress, task_has_option, \
task_low_level_submissio | n, task_sleep_now_if_required, \
task_get_task_param
import os
import time
import zlib
from datetime import datetime
except ImportError, e:
print "Error: %s" % e
sys.exit(1)
def fetch_last_updated(format):
select_sql = "SELECT last_updated FROM format WHERE code = %s"
row = run_sql(select_sql, (format.lower(), ))
# Fallback in case we receive None instead of a valid date
last_date = row[0][0] or datetime(year=1900, month=1, day=1)
return last_date
def store_last_updated(format, update_date):
sql = "UPDATE format SET last_updated = %s " \
"WHERE code = %s AND (last_updated < %s or last_updated IS NULL)"
iso_date = update_date.strftime("%Y-%m-%d %H:%M:%S")
run_sql(sql, (iso_date, format.lower(), iso_date))
### run the bibreformat task bibsched scheduled
###
def bibreformat_task(fmt, sql, sql_queries, cds_query, process_format, process, recids):
"""
BibReformat main task
@param fmt: output format to use
@param sql: dictionary with pre-created sql queries for various cases (for selecting records). Some of these queries will be picked depending on the case
@param sql_queries: a list of sql queries to be executed to select records to reformat.
@param cds_query: a search query to be executed to select records to reformat
@param process_format:
@param process:
@param recids: a list of record IDs to reformat
@return: None
"""
write_message("Processing format %s" % fmt)
t1 = os.times()[4]
start_date = datetime.now()
### Query the database
###
task_update_progress('Fetching records to process')
if process_format: # '-without' parameter
write_message("Querying database for records without cache...")
without_format = without_fmt(sql)
recIDs = intbitset(recids)
if cds_query['field'] != "" or \
cds_query['collection'] != "" or \
cds_query['pattern'] != "":
write_message("Querying database (CDS query)...")
if cds_query['collection'] == "":
# use search_pattern() whenever possible, as it can search
# even in private collections
res = search_pattern(p=cds_query['pattern'],
f=cds_query['field'],
m=cds_query['matching'])
else:
# use perform_request_search when '-c' argument has been
# defined, as it is not supported by search_pattern()
res = intbitset(perform_request_search(req=None, of='id',
c=cds_query['collection'],
p=cds_query['pattern'],
f=cds_query['field']))
recIDs |= res
for sql_query in sql_queries:
write_message("Querying database (%s) ..." % sql_query, verbose=2)
recIDs |= intbitset(run_sql(sql_query))
if fmt == "HDREF" and recIDs:
# HDREF represents the references tab
# the tab needs to be recomputed not only when the record changes
# but also when one of the citations changes
latest_bibrank_run = get_bibrankmethod_lastupdate('citation')
sql = """SELECT id, modification_date FROM bibrec
WHERE id in (%s)""" % ','.join(str(r) for r in recIDs)
def check_date(mod_date):
return mod_date < latest_bibrank_run
recIDs = intbitset([recid for recid, mod_date in run_sql(sql) \
if check_date(mod_date)])
for r in recIDs:
recIDs |= intbitset(get_cited_by(r))
### list of corresponding record IDs was retrieved
### now format the selected records
if process_format:
write_message("Records to be processed: %d" % (len(recIDs) \
+ len(without_format)))
write_message("Out of it records without existing cache: %d" % len(without_format))
else:
write_message("Records to be processed: %d" % (len(recIDs)))
### Initialize main loop
total_rec = 0 # Total number of records
tbibformat = 0 # time taken up by external call
tbibupload = 0 # time taken up by external call
### Iterate over all records prepared in lists I (option)
if process:
if CFG_BIBFORMAT_USE_OLD_BIBFORMAT: # FIXME: remove this
# when migration from php to
# python bibformat is done
(total_rec_1, tbibformat_1, tbibupload_1) = iterate_over_old(recIDs,
fmt)
else:
(total_rec_1, tbibformat_1, tbibupload_1) = iterate_over_new(recIDs,
fmt)
total_rec += total_rec_1
tbibformat += tbibformat_1
tbibupload += tbibupload_1
### Iterate over all records prepared in list II (no_format)
if process_format and process:
if CFG_BIBFORMAT_USE_OLD_BIBFORMAT: # FIXME: remove this
# when migration from php to
# python bibformat is done
(total_rec_2, tbibformat_2, tbibupload_2) = iterate_over_old(without_format,
fmt)
else:
(total_rec_2, tbibformat_2, tbibupload_2) = iterate_over_new(without_format,
fmt)
total_rec += total_rec_2
tbibformat += tbibformat_2
tbibupload += tbibupload_2
### Store last run time
if task_has_option("last"):
write_message("storing run date to %s" % start_date)
store_last_updated(fmt, start_date)
### Final statistics
t2 = os.times()[4]
elapsed = t2 - t1
message = "total records processed: %d" % total_rec
write_message(message)
message = "total processing time: %2f sec" % elapsed
write_message(message)
message = "Time spent on external call (os.system):"
write_message(message)
message = " bibformat: %2f sec" % tbibformat
write_message(message)
message = " bibupload: %2f sec" % tbibupload
write_mess |
amanzi/ats-dev | tools/python_models/wrm_brookscorey.py | Python | bsd-3-clause | 2,012 | 0.012425 | import numpy as np
class BrooksCorey(object):
def __init__( self, lambd=0., alpha=0., sr=0.0, smoothing_interval=0. ):
self._lambda = lambd
self._alpha = alpha
self._sr = sr
self._pc0 = smoothing_interval
self._factor = -2.0 - (0.5 + 2.0) * self._lambda;
self._pc_bubble = 1.0 / self._alpha
if self._pc0 > 0.:
k0 = self.k_relative(self._pc0) - 1.
k0p = self.d_k_relative(self._pc0)
self._a = (3 * k0 - k0p * self._pc0) / (self._pc0**2)
self._b = (k0p * self._pc0 - 2 * k0) | / (self._pc0**3)
def capillaryPressure( self, s ):
se = (s - self._sr) / (1.0 - self._sr)
se = min(se, 1.0);
return pow(se, -1.0/self._lambda) / self._alpha
def d_capillaryPressure( self, s ):
se = (s - sel | f._sr) / (1.0 - self._sr)
se = min(se, 1.0);
return -1. / self._lambda * pow(se, -1.0/self._lambda - 1.) / self._alpha / (1. - self._sr);
def saturation( self, pc ):
if pc > self._pc_bubble:
return pow(self._alpha * pc, -self._lambda) * (1.0 - self._sr) + self._sr
else:
return 1.0;
def d_saturation( self, pc ):
if pc > self._pc_bubble:
return -pow(self._alpha * pc, -self._lambda - 1.0) * (1.0 - self._sr) * self._alpha * self._lambda
else:
return 0.
def k_relative( self, pc ):
if pc <= self._pc_bubble:
return 1.0
elif pc >= self._pc0:
return pow(self._alpha * pc, self._factor)
else:
dpc = pc - self._pc_bubble
return 1.0 + self._a * dpc**2 + self._b * dpc**3
def d_k_relative( self, pc ):
if pc <= self._pc_bubble:
return 0.
elif pc >= self._pc0:
return self._factor * self._alpha * pow(self._alpha * pc, self._factor - 1.0)
else:
dpc = pc - self._pc_bubble
return self._a * 2 * dpc + self._b * 3 * dpc**2
|
pmisik/buildbot | master/buildbot/www/avatar.py | Python | gpl-2.0 | 9,042 | 0.001327 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import base64
import hashlib
from urllib.parse import urlencode
from urllib.parse import urljoin
from urllib.parse import urlparse
from urllib.parse import urlunparse
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.util import httpclientservice
from buildbot.util import unicode2bytes
from buildbot.util.config import ConfiguredMixin
from buildbot.www import resource
class AvatarBase(ConfiguredMixin):
name = "noavatar"
def getUserAvatar(self, email, username, size, defaultAvatarUrl):
raise NotImplementedError()
class AvatarGitHub(AvatarBase):
name = "github"
DEFAULT_GITHUB_API_URL = 'https://api.github.com'
def __init__(self,
github_api_endpoint=None,
token=None,
client_id=None,
client_secret=None,
debug=False,
verify=False):
httpclientservice.HTTPClientService.checkAvailable(self.__class__.__name__)
self.github_api_endpoint = github_api_endpoint
if github_api_endpoint is None:
self.github_api_endpoint = self.DEFAULT_GITHUB_API_URL
self.token = token
self.client_creds = None
if bool(client_id) != bool(client_secret):
config.error('client_id and client_secret must be both provided or none')
if client_id:
if token:
config.error('client_id and client_secret must not be provided when token is')
self.client_creds = base64.b64encode(b':'.join(
cred.encode('utf-8') for cred in (client_id, client_secret)
)).decode('ascii')
self.debug = debug
self.verify = verify
self.master = None
self.client = None
@defer.inlineCallbacks
def _get_http_client(self):
if self.client is not None:
return self.client
headers = {
'User-Agent': 'Buildbot',
}
if self.token:
headers['Authorization'] = 'token ' + self.token
elif self.client_creds:
headers['Authorization'] = 'basic ' + self.client_creds
self.client = yield httpclientservice.HTTPClientService.getService(self.master,
self.github_api_endpoint, headers=headers,
debug=self.debug, verify=self.verify)
return self.client
@defer.inlineCallbacks
def _get_avatar_by_username(self, username):
headers = {
'Accept': 'application/vnd.github.v3+json',
}
url = f'/users/{username}'
http = yield self._get_http_client()
res = yield http.get(url, headers=headers)
if res.code == 404:
# Not found
return None
if 200 <= res.code < 300:
data = yield res.json()
return data['avatar_url']
log.msg(f'Failed looking up user: response code {res.code}')
return None
@defer.inlineCallbacks
def _search_avatar_by_user_email(self, email):
headers = {
'Accept': 'application/vnd.github.v3+json',
}
query = f'{email} in:email'
url = f"/search/users?{urlencode({'q': query,})}"
http = yield self._get_http_client()
res = yield http.get(url, headers=headers)
if 200 <= res.code < 300:
data = yield res.json()
if data['total_count'] == 0:
# Not found
return None
return data['items'][0]['avatar_url']
log.msg(f'Failed searching user by email: response code {res.code}')
return None
@defer.inlineCallbacks
def _search_avatar_by_commit(self, email):
headers = {
'Accept': 'application/vnd.github.v3+json,application/vnd.github.cloak-preview',
}
query = {
'q': f'author-email:{email}',
'sort': 'committer-date',
'per_page': '1',
}
sorted_query = sorted(query.items(), key=lambda x: x[0])
url = f'/search/commits?{urlencode(sorted_query)}'
http = yield self._get_http_client()
res = yield http.get(url, headers=headers)
if 200 <= res.code < 300:
data = yield res.json()
if data['total_count'] == 0:
# Not found
return None
author = data['items'][0]['author']
if author is None:
# No Github account found
return None
return author['avatar_url']
log.msg(f'Fai | led searching user by commit: response code {res.co | de}')
return None
def _add_size_to_url(self, avatar, size):
parts = urlparse(avatar)
query = parts.query
if query:
query += '&'
query += f's={size}'
return urlunparse((parts.scheme,
parts.netloc, parts.path, parts.params,
query, parts.fragment))
@defer.inlineCallbacks
def getUserAvatar(self, email, username, size, defaultAvatarUrl):
avatar = None
if username:
username = username.decode('utf-8')
if email:
email = email.decode('utf-8')
if username:
avatar = yield self._get_avatar_by_username(username)
if not avatar and email:
# Try searching a user with said mail
avatar = yield self._search_avatar_by_user_email(email)
if not avatar and email:
# No luck, try to find a commit with this email
avatar = yield self._search_avatar_by_commit(email)
if not avatar:
# No luck
return None
if size:
avatar = self._add_size_to_url(avatar, size)
raise resource.Redirect(avatar)
class AvatarGravatar(AvatarBase):
name = "gravatar"
# gravatar does not want intranet URL, which is most of where the bots are
# just use same default as github (retro)
default = "retro"
def getUserAvatar(self, email, username, size, defaultAvatarUrl):
# construct the url
emailBytes = unicode2bytes(email.lower())
emailHash = hashlib.md5(emailBytes)
gravatar_url = "//www.gravatar.com/avatar/"
gravatar_url += emailHash.hexdigest() + "?"
if self.default != "url":
defaultAvatarUrl = self.default
url = {'d': defaultAvatarUrl, 's': str(size)}
sorted_url = sorted(url.items(), key=lambda x: x[0])
gravatar_url += urlencode(sorted_url)
raise resource.Redirect(gravatar_url)
class AvatarResource(resource.Resource):
# enable reconfigResource calls
needsReconfig = True
defaultAvatarUrl = b"img/nobody.png"
def reconfigResource(self, new_config):
self.avatarMethods = new_config.www.get('avatar_methods', [])
self.defaultAvatarFullUrl = urljoin(
unicode2bytes(new_config.buildbotURL), unicode2bytes(self.defaultAvatarUrl))
self.cache = {}
# ensure the avatarMethods is a iterable
if isinstance(self.avatarMethods, AvatarBase):
self.avatarMethods = (self.avatarMethods, )
for method in self.avatarMethods:
method.master = self.master
def render_GET(self, request):
return self.asyncRenderHelper(request, self.renderAvatar)
@defer.inlineCallbacks
def renderAvatar(self, request):
email = request.args.get(b"email", [b""])[0]
size = requ |
maxamillion/loopabull | loopabull/__init__.py | Python | gpl-3.0 | 261 | 0.003831 | import | logging
logging.basicConfig()
from enum import Enum
logger = logging.getLogger('loopabull')
logger.setLevel(logging.INFO)
class Result(Enum):
runfinished = 1
runerrored = 2
unrouted = 3 |
error = 4
# vim: set expandtab sw=4 sts=4 ts=4
|
geary/claslite | web/app/proxy/handlers.py | Python | unlicense | 1,485 | 0.070034 | # -*- coding: utf-8 -*-
'''
handlers.py for Earth Engine proxy
By Michael Geary - http://mg.to/
See UNLICENSE or http://unlicense.org/ for public domain notice.
'''
from main import fix_sys_path; fix_sys_path()
from tipfy import RequestHandler, Response
from tipfy.utils import json_encode
import cgi, re, urllib, urllib2
from earthengine import EarthEngine
class ProxyHandler( RequestHandler ):
'''
Proxy an Earth Engine request
'''
def _getApiFromUrl( self ):
return re.sub( r'http(s)?://[^/]+/\w+/', '', self.request.url )
def _proxy( self, allow, data=None ):
api = self._getApiFromUrl()
test = api.startswith( 'test/' )
if test:
api = api.replace( 'test/', '', 1 | )
debug = api.startswith( 'debug/' )
if debug:
api = api.replace( 'debug/', '', 1 )
if not re.match( allow, api ):
self.abort( 403 )
if test:
response = Response( api )
response.headers['Content-Type'] = 'text/plain'
return response
result = EarthEngine( self ).post( api, data )
if de | bug:
json = json_encode( result, indent=4 )
else:
json = json_encode( result )
response = Response( json )
if debug:
response.headers['Content-Type'] = 'text/plain'
else:
response.headers['Content-Type'] = 'application/json'
return response
def get( self, path=None ):
return self._proxy( r'(info|list|map|value)\?' )
def post( self, path=None ):
return self._proxy( r'(mapid)$', urllib.urlencode(self.request.form) )
|
alxgu/ansible | lib/ansible/modules/network/avi/avi_useraccountprofile.py | Python | gpl-3.0 | 4,875 | 0.001641 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3 | .0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'support | ed_by': 'community'}
DOCUMENTATION = '''
---
module: avi_useraccountprofile
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of UserAccountProfile Avi RESTful Object
description:
- This module is used to configure UserAccountProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
account_lock_timeout:
description:
- Lock timeout period (in minutes).
- Default is 30 minutes.
- Default value when not specified in API or module is interpreted by Avi Controller as 30.
- Units(MIN).
credentials_timeout_threshold:
description:
- The time period after which credentials expire.
- Default is 180 days.
- Default value when not specified in API or module is interpreted by Avi Controller as 180.
- Units(DAYS).
max_concurrent_sessions:
description:
- Maximum number of concurrent sessions allowed.
- There are unlimited sessions by default.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
max_login_failure_count:
description:
- Number of login attempts before lockout.
- Default is 3 attempts.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.
max_password_history_count:
description:
- Maximum number of passwords to be maintained in the password history.
- Default is 4 passwords.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
name:
description:
- Name of the object.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create UserAccountProfile object
avi_useraccountprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_useraccountprofile
"""
RETURN = '''
obj:
description: UserAccountProfile (api/useraccountprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
account_lock_timeout=dict(type='int',),
credentials_timeout_threshold=dict(type='int',),
max_concurrent_sessions=dict(type='int',),
max_login_failure_count=dict(type='int',),
max_password_history_count=dict(type='int',),
name=dict(type='str', required=True),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'useraccountprofile',
set([]))
if __name__ == '__main__':
main()
|
jamessqr/james-squires-dotcom | blog/models.py | Python | bsd-3-clause | 1,065 | 0.050704 | from django.db import models
import datetime
class Category(models.Model):
title = models.CharField(max_length=250, help_text='Maximum 250 characters')
slug = models.SlugField()
descripti | on = models.TextField()
class Meta:
ordering = ['title']
verbose_name_plural = "Categories"
class Admin:
pass
#TODO: This does not work!
#class CategoryAdmin(admin.ModelAdmin):
#prepopulated_fields = {"slug": ("title",)}
def __unicode__(self):
return self.title
def get_absolulte_url(self):
return "/categories/%s/" % self.slug
class Entry(models.Model):
title = models.CharField(max_length=250, help_text='Maximum 25 | 0 characters')
excerpt = models.TextField(blank=True)
body = models.TextField()
slug = models.SlugField()
pub_date = models.DateTimeField(default=datetime.datetime.now)
class Meta:
ordering = ['title']
verbose_name_plural = "Entries"
class Admin:
pass
def __unicode__(self):
return self.title
def get_absolute_url(self):
return "/%s/%s" % (self.pub_date.strftime("%Y/%m/%d").lower(),self.slug) |
maxwward/SCOPEBak | askbot/migrations/0141_auto__del_field_group_is_open.py | Python | gpl-3.0 | 32,000 | 0.007969 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Group.is_open'
db.delete_column('askbot_group', 'is_open')
def backwards(self, orm):
# Adding field 'Group.is_open'
db.add_column('askbot_group', 'is_open',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'uniqu | e_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
| 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousproblem': {
'Meta': {'object_name': 'AnonymousProblem'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_problems'", 'to': "orm['askbot.Post']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousexercise': {
'Meta': {'object_name': 'AnonymousExercise'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.askwidget': {
'Meta': {'object_name': 'AskWidget'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_text_field': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'inner_style': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'outer_style': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'askbot.draftproblem': {
'Meta': {'object_name': 'DraftProblem'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_problems'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_problems'", 'to': "orm['askbot.Thread']"})
},
'askbot.draftexercise': {
'Meta': {'object_name': 'DraftExercise'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'})
},
'askbot.emailfeedsetting': {
'Meta': {'unique_together': "(('subscriber', 'feed_type'),)", 'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db |
jbm950/pysunspec | sunspec/core/modbus/mbmap.py | Python | mit | 20,337 | 0.00177 |
"""
Copyright (C) 2017 SunSpec Alliance
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import struct
try:
import xml.etree.ElementTree as ET
except:
import elementtree.ElementTree as ET
MBMAP_ROOT = 'mbmap'
MBMAP_ADDR = 'addr'
MBMAP_FUNC = 'func'
MBMAP_FUNC_INPUT = 'input'
MBMAP_FUNC_HOLDING = 'holding'
MBMAP_NS = 'ns'
MBMAP_LID = 'lid'
MBMAP_MAPID = 'mapid'
MBMAP_TIME = 'time'
MBMAP_REGS = 'regs'
MBMAP_REGS_OFFSET = 'offset'
MBMAP_REGS_LEN = 'len'
MBMAP_REGS_TYPE = 'type'
MBMAP_REGS_ACCESS = 'access'
MBMAP_REGS_FILL = 'fill'
MBMAP_REGS_ACCESS_R = 'r'
MBMAP_REGS_ACCESS_RW = 'rw'
MBMAP_REGS_TYPE_S16 = 's16'
MBMAP_REGS_TYPE_U16 = 'u16'
MBMAP_REGS_TYPE_S32 = 's32'
MBMAP_REGS_TYPE_U32 = 'u32'
MBMAP_REGS_TYPE_S64 = 's64'
MBMAP_REGS_TYPE_U64 = 'u64'
MBMAP_REGS_TYPE_F32 = 'f32'
MBMAP_REGS_TYPE_F64 = 'f64'
MBMAP_REGS_TYPE_STRING = 'string'
MBMAP_REGS_TYPE_HEX_STRING = 'hexstr'
MBMAP_BASE_ADDR_DEFAULT = 40000
func_value = {MBMAP_FUNC_INPUT: 4, MBMAP_FUNC_HOLDING: 3}
func_name = {4: MBMAP_FUNC_INPUT, 3: MBMAP_FUNC_HOLDING}
class ModbusMapError(Exception):
pass
class ModbusMap(object):
"""
Parameters:
slave_id :
Modbus slave id.
func :
Modbus function string associated with the map. Valid values are:
:const:`sunspec.core.modbus.mbmap.MBMAP_FUNC_HOLDING` or
:const:`sunspec.core.modbus.mbmap.MBMAP_FUNC_INPUT`.
base_addr :
Base address of the Modbus map.
Raises:
ModbusMapError: Raised for any modbus map error.
Attributes:
slave_id
Modbus slave id.
func
Actual Modbus function associated with the map.
base_addr
Base address of the Modbus map.
regs
List of :const:`sunspec.core.modbus.mbmap.ModbusMapRegs` blocks that
comprise the Modbus register map.
"""
def __init__(self, slave_id=None, func=MBMAP_FUNC_HOLDING, base_addr=MBMAP_BASE_ADDR_DEFAULT, ns=None, lid=None,
mapid=None, time=None):
self.slave_id = slave_id
self.base_addr = base_addr
self.ns = ns
self.lid = lid
self.mapid = mapid
self.time = time
self.regs = []
value = func_value.get(func)
if value is None:
raise ModbusMapError('Unsupported function: %s' % (func))
self.func = value
    def from_hex(self, name, pathlist=None):
        """Load the register map from a whitespace-separated hex dump file.

        Lines beginning with '#' are comments; every other token is one hex
        byte.  All bytes are concatenated into a single ModbusMapRegs block
        starting at offset 0.

        Parameters:
            name :
                Path of the hex dump file to read.
            pathlist :
                Unused here; kept for interface symmetry with from_xml.
        Raises:
            ModbusMapError: on any I/O or parse failure.
        """
        data = None
        offset = 0
        try:
            f = open(name, 'r')
            for line in f:
                if line[0] != '#':
                    data_list = line.rstrip('\r\n').split()
                    # Two byte-tokens per 16-bit register; only used here as
                    # a non-empty-line test.
                    data_len = len(data_list)/2
                    if data_len > 0:
                        # print offset, data_list
                        for b in data_list:
                            c = struct.pack('B', int(b, 16))
                            if data is None:
                                data = c
                            else:
                                data += c
            # One contiguous block at offset 0; len(data)/2 converts the byte
            # count to a 16-bit register count.
            mmr = ModbusMapRegs(offset, len(data)/2, data, MBMAP_REGS_ACCESS_RW)
            self.regs.append(mmr)
            f.close()
        except Exception, e:
            # Best-effort close, then re-raise as a map error.
            try:
                f.close()
            except:
                pass
            raise ModbusMapError('Error loading map file: %s' % str(e))
def from_xml(self, filename=None, pathlist=None, element=None):
"""Load Modbus map from a Modbus map (mbmap) formatted file.
Parameters:
filename :
File name of the Modbus map file
pathlist :
Pathlist object containing alternate paths to the Modbus map
file.
"""
offset = 0
next_offset = offset
try:
if filename is not None:
if pathlist is not None:
map_data = pathlist.read(filename)
else:
f = open(filename, 'r')
map_data = f.read()
f.close()
root = ET.fromstring(map_data)
elif element is not None:
root = element
else:
raise ModbusMapError('Root element not provided')
func = root.attrib.get(MBMAP_FUNC, MBMAP_FUNC_HOLDING)
value = func_value.get(func)
if value is None:
raise ModbusMapError('Unsupported function: %s' % (func))
self.func = value
self.base_addr = root.attrib.get(MBMAP_ADDR, 40000)
self.ns = root.attrib.get(MBMAP_NS)
self.lid = root.attrib.get(MBMAP_LID)
self.mapid = root.attrib.get(MBMAP_MAPID)
self.time = root.attrib.get(MBMAP_TIME)
for r in root.findall(MBMAP_REGS):
offset = r.attrib.get(MBMAP_REGS_OFFSET)
if offset is None:
offset = next_offset
else:
offset = int(offset)
rlen = int(r.attrib.get(MBMAP_REGS_LEN, 0))
rtype = r.attrib.get(MBMAP_REGS_TYPE, MBMAP_REGS_TYPE_HEX_STRING)
access = r.attrib.get(MBMAP_REGS_ACCESS, MBMAP_REGS_ACCESS_R)
fill = r.attrib.get(MBMAP_REGS_FILL, '\0')
text = r.text
if len(self.regs) > 0:
last_regs = self.regs[-1]
last_regs_next = last_regs.offset + last_regs.count
else:
last_regs = None
last_regs_next = 0
if offset < last_regs_next:
raise ModbusMapError('Register offsets must be in ascending order with no overlap %d %d' % (offset, last_regs_next))
data = None
if not text:
if rtype == MBMAP_REGS_TYPE_STRING or rtype == MBMAP_REGS_TYPE_HEX_STRING:
text = ''
else:
text = '0'
if rtype == MBMAP_REGS_TYPE_S16:
data = struct.pack('>h', int(text, 0))
rlen = 1
elif rtype == MBMAP_REGS_TYPE_U16:
data = struct.pack('>H', int(text, 0))
rlen = 1
elif rtype == MBMAP_REGS_TYPE_S32:
data = struct.pack('>l', int(text, 0))
rlen = 2
elif rtype == MBMAP_REGS_TYPE_U32:
data = struct.pack('>L', long(text, 0))
rlen = 2
elif rtype == MBMAP_REGS_TYPE_S64:
data = struct.pack('>q', long(text, 0))
rlen = 4
elif rtype == MBMAP_REGS_TYPE_U64:
data = struct.pack('>Q', long(text, 0))
rlen = 4
elif rtype == MBMAP_REGS_TYPE_F32:
data = struct.pack('>f', float(text))
rlen = 2
elif rtype == MBMAP_REGS_TYPE_F64:
data = struct.pack('>d', float(text))
|
pbmanis/acq4 | acq4/analysis/modules/MapImager/SpatialCorrelatorCtrlTemplate_pyqt5.py | Python | mit | 5,266 | 0.001899 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'acq4/analysis/modules/MapImager/SpatialCorrelatorCtrlTemplate.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated layout for the SpatialCorrelator control panel.

    Builds spin boxes for the spontaneous event rate, post-stimulus time
    window and correlation radius, an output-mode group box (probability
    values vs. threshold crossing) and disable/re-process controls.

    Fix: the generated code referenced an undefined name ``Qt``
    (``Qt.Qt.Align...``, ``Qt.QMetaObject``, ``Qt.QCoreApplication``); this
    module only imports QtCore/QtGui/QtWidgets, so those calls raised
    NameError.  They now use ``QtCore`` as pyuic5 normally emits.
    """

    def setupUi(self, Form):
        """Create all widgets and layouts on *Form*."""
        Form.setObjectName("Form")
        Form.resize(273, 234)
        self.gridLayout = QtWidgets.QGridLayout(Form)
        self.gridLayout.setContentsMargins(3, 3, 3, 3)
        self.gridLayout.setSpacing(3)
        self.gridLayout.setObjectName("gridLayout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSpacing(1)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(Form)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.spontSpin = SpinBox(Form)
        self.spontSpin.setSuffix("")
        self.spontSpin.setObjectName("spontSpin")
        self.horizontalLayout.addWidget(self.spontSpin)
        self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 2)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setSpacing(1)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout_2.addWidget(self.label_2)
        self.deltaTSpin = SpinBox(Form)
        self.deltaTSpin.setObjectName("deltaTSpin")
        self.horizontalLayout_2.addWidget(self.deltaTSpin)
        self.gridLayout.addLayout(self.horizontalLayout_2, 2, 0, 1, 2)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setSpacing(1)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.label_3 = QtWidgets.QLabel(Form)
        self.label_3.setObjectName("label_3")
        self.horizontalLayout_3.addWidget(self.label_3)
        self.radiusSpin = SpinBox(Form)
        self.radiusSpin.setObjectName("radiusSpin")
        self.horizontalLayout_3.addWidget(self.radiusSpin)
        self.gridLayout.addLayout(self.horizontalLayout_3, 3, 0, 1, 2)
        self.disableChk = QtWidgets.QCheckBox(Form)
        self.disableChk.setObjectName("disableChk")
        self.gridLayout.addWidget(self.disableChk, 6, 0, 1, 1)
        self.processBtn = QtWidgets.QPushButton(Form)
        self.processBtn.setObjectName("processBtn")
        self.gridLayout.addWidget(self.processBtn, 6, 1, 1, 1)
        self.groupBox = QtWidgets.QGroupBox(Form)
        self.groupBox.setObjectName("groupBox")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox)
        self.gridLayout_2.setContentsMargins(3, 3, 3, 3)
        self.gridLayout_2.setSpacing(3)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.probabilityRadio = QtWidgets.QRadioButton(self.groupBox)
        self.probabilityRadio.setChecked(True)
        self.probabilityRadio.setObjectName("probabilityRadio")
        self.gridLayout_2.addWidget(self.probabilityRadio, 0, 0, 1, 2)
        self.thresholdSpin = SpinBox(self.groupBox)
        self.thresholdSpin.setEnabled(True)
        self.thresholdSpin.setObjectName("thresholdSpin")
        self.gridLayout_2.addWidget(self.thresholdSpin, 2, 1, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.groupBox)
        # was Qt.Qt.* -- 'Qt' is never imported in this module; use QtCore.Qt
        self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_4.setObjectName("label_4")
        self.gridLayout_2.addWidget(self.label_4, 2, 0, 1, 1)
        self.thresholdRadio = QtWidgets.QRadioButton(self.groupBox)
        self.thresholdRadio.setObjectName("thresholdRadio")
        self.gridLayout_2.addWidget(self.thresholdRadio, 1, 0, 1, 2)
        self.gridLayout.addWidget(self.groupBox, 5, 0, 1, 2)
        self.eventCombo = ComboBox(Form)
        self.eventCombo.setObjectName("eventCombo")
        self.gridLayout.addWidget(self.eventCombo, 0, 1, 1, 1)
        self.label_5 = QtWidgets.QLabel(Form)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 0, 0, 1, 1)

        self.retranslateUi(Form)
        # was Qt.QMetaObject -- see class docstring
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Assign all user-visible (translatable) strings."""
        # was Qt.QCoreApplication -- see class docstring
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "Spontaneous Event Rate:"))
        self.label_2.setText(_translate("Form", "Post-stimulus time window:"))
        self.label_3.setText(_translate("Form", "Correlation Radius:"))
        self.disableChk.setText(_translate("Form", "Disable"))
        self.processBtn.setText(_translate("Form", "re-Process"))
        self.groupBox.setTitle(_translate("Form", "Output data:"))
        self.probabilityRadio.setText(_translate("Form", "Probability values (float)"))
        self.label_4.setText(_translate("Form", "Threshold:"))
        self.thresholdRadio.setText(_translate("Form", "Spots that cross threshold (boolean)"))
        self.label_5.setText(_translate("Form", "Event Parameter to use:"))
from acq4.pyqtgraph.widgets.ComboBox import ComboBox
from acq4.pyqtgraph.widgets.SpinBox import SpinBox
|
geertw/rdt-serviceinfo | tests/test_data.py | Python | gpl-3.0 | 3,353 | 0.000895 | import serviceinfo.data as data
import unittest
import datetime
class ServiceTest(unittest.TestCase):
    """Unit tests for serviceinfo.data Service/ServiceStop behaviour.

    Fix: ``assertEquals`` is a deprecated alias of ``assertEqual`` (removed
    in Python 3.12); all uses are updated to ``assertEqual``.
    """

    def _create_service(self):
        """
        Internal method to create a service object.
        :return: tuple (first stop, second stop, Service object)
        """
        service = data.Service()
        service.servicenumber = 1234

        stop1 = data.ServiceStop("ut")
        stop1.stop_name = "Utrecht Centraal"
        stop2 = data.ServiceStop("asd")
        stop2.stop_name = "Amsterdam Centraal"

        service.stops.append(stop1)
        service.stops.append(stop2)
        return (stop1, stop2, service)

    def test_service_departure(self):
        """The first stop is reported as the departure."""
        stop1, stop2, service = self._create_service()
        self.assertEqual(service.get_departure(), stop1)
        self.assertEqual(service.get_departure().stop_name, stop1.stop_name)
        self.assertEqual(service.get_departure_str(), "ut")

    def test_service_destination(self):
        """The last stop is reported as the destination."""
        stop1, stop2, service = self._create_service()
        self.assertEqual(service.get_destination(), stop2)
        self.assertEqual(service.get_destination().stop_name, stop2.stop_name)
        self.assertEqual(service.get_destination_str(), "asd")

    def test_service_servicedate(self):
        """The service date is formatted as ISO YYYY-MM-DD."""
        service = data.Service()
        service.servicenumber = 1234
        service.service_date = datetime.date(year=2015, month=4, day=1)
        self.assertEqual(service.get_servicedate_str(), "2015-04-01")

    def test_stop_repr(self):
        """repr() of a stop shows its stop code."""
        stop = data.ServiceStop("ut")
        stop.stop_name = "Utrecht Centraal"
        self.assertEqual(repr(stop), "<ServiceStop @ ut>")

    def test_stop_get_arrival_platform(self):
        """The actual arrival platform overrides the scheduled one."""
        stop = data.ServiceStop("ledn")

        stop.scheduled_arrival_platform = None
        stop.actual_arrival_platform = None
        self.assertIsNone(stop.get_arrival_platform())

        stop.scheduled_arrival_platform = "9a"
        stop.actual_arrival_platform = None
        self.assertEqual(stop.get_arrival_platform(), "9a")

        stop.scheduled_arrival_platform = "9a"
        stop.actual_arrival_platform = "8a"
        self.assertEqual(stop.get_arrival_platform(), "8a")

    def test_stop_get_departure_platform(self):
        """The actual departure platform overrides the scheduled one."""
        stop = data.ServiceStop("ledn")

        stop.scheduled_departure_platform = None
        stop.actual_departure_platform = None
        self.assertIsNone(stop.get_departure_platform())

        stop.scheduled_departure_platform = "9a"
        stop.actual_departure_platform = None
        self.assertEqual(stop.get_departure_platform(), "9a")

        stop.scheduled_departure_platform = "9a"
        stop.actual_departure_platform = "8a"
        self.assertEqual(stop.get_departure_platform(), "8a")

    def test_service_repr(self):
        """repr() of a service shows id, mode, number, destination and date."""
        service = data.Service()
        service.service_id = 999
        service.servicenumber = 9876
        service.transport_mode = "IC"
        service.service_date = datetime.date(year=2015, month=4, day=1)

        stop = data.ServiceStop("ut")
        stop.stop_name = "Utrecht Centraal"
        service.stops.append(stop)

        stop = data.ServiceStop("asd")
        stop.stop_name = "Amsterdam Centraal"
        service.stops.append(stop)

        self.assertEqual(repr(service), "<Service i999 / IC9876-asd @ 2015-04-01 [2 stops]>")
if __name__ == '__main__':
    unittest.main()
|
alvaroaleman/ansible | lib/ansible/executor/module_common.py | Python | gpl-3.0 | 35,832 | 0.002623 | # (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import imp
import json
import os
import shlex
import zipfile
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Sentinel byte strings embedded in module source by module authors; the
# module builder scans for these markers and substitutes generated content
# (common boilerplate, version, arguments, etc.).
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
# we've moved the module_common relative to the snippets, so fix the path
_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
ANSIBALLZ_WRAPPER = True # For test-module script to tell this is a ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.abspath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read.
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import shutil
import zipfile
import tempfile
import subprocess
if sys.version_info < (3,):
bytes = str
PY3 = False
else:
unicode = str
PY3 = True
try:
# Python-2.6+
from io import BytesIO as IOStream
except ImportError:
# Python < 2.6
from StringIO import StringIO as IOStream
ZIPDATA = """%(zipdata)s"""
def invoke_module(module, modlib_path, json_params):
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath))
else:
os.environ['PYTHONPATH'] = modlib_path
p = subprocess.Popen([%(interpreter)s, module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(json_params)
if not isinstance(stderr, (bytes, unicode)):
st | derr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
std | out = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-146117 |
tomkralidis/GeoHealthCheck | docker/plugins/user/mywmsprobe.py | Python | mit | 1,652 | 0 | from GeoHealthCheck.probe import Probe
from GeoHealthCheck.result import Result
from owslib.wms import WebMapService
class MyWMSProbe(Probe):
    """Free-form example Probe that fetches a WMS Capabilities document.

    Demonstrates a user plugin for log-checking WMS endpoints by overriding
    perform_request with custom checks.

    To configure a probe, use Docker Container ENV
    GHC_USER_PLUGINS='GeoHealthCheck.plugins.user.mywmsprobe,...'.
    Note that the GeoHealthCheck.plugins package prefix is required as
    Plugins are placed in the GHC app tree there.
    """

    NAME = 'MyWMSProbe'
    DESCRIPTION = 'Example User Probe, gets WMS Capabilities'
    RESOURCE_TYPE = 'OGC:WMS'
    REQUEST_METHOD = 'GET'

    PARAM_DEFS = {
        'probing_level': {
            'type': 'string',
            'description': 'How heavy the Probe should be.',
            'default': 'minor',
            'required': True,
            'range': ['minor', 'moderate', 'full']
        }
    }
    """Param defs"""

    def __init__(self):
        Probe.__init__(self)

    def perform_request(self):
        """Fetch the Capabilities document and record success or failure.

        See https://github.com/geopython/OWSLib/blob/
        master/tests/doctests/wms_GeoServerCapabilities.txt
        """
        # One Result per sub-check; start()/stop() record its timing.
        check = Result(True, 'Test Capabilities')
        check.start()
        try:
            service = WebMapService(self._resource.url)
            self.log('response: title=%s' % service.identification.title)
        except Exception as err:
            check.set(False, str(err))
        # Heavier, probing_level-dependent checks would go here.
        check.stop()
        self.result.add_result(check)
|
vialette/ultrastorage | ultrastorage/inserter/descendinggaugedinserter.py | Python | mit | 413 | 0 | # coding=utf-8
"""Descending gauged inserter.
.. moduleauthor:: Stéphane Vialette <vialette@gmail.com>
"""
from .gaugedinserter import GaugedInserter
class DescendingGaugedInserter(GaugedInserter):
    """A gauged inserter that proceeds in descending order.

    Thin specialization of :class:`GaugedInserter` that simply enables the
    descending flag of its parent.
    """

    def __init__(self, storage_system):
        """Attach this inserter to *storage_system* with descending mode on."""
        super(DescendingGaugedInserter, self).__init__(storage_system, True)
|
pterk/django-tcc | tcc/templatetags/autopaginator.py | Python | mit | 9,909 | 0.00222 | try:
set
except NameError:
from sets import Set as set
from django.core.paginator import Paginator, Page, InvalidPage
from django.db.models import F
from django.http import Http404
from coffin import template
from jinja2 import nodes
from jinja2.ext import Extension
from jinja2.exceptions import TemplateSyntaxError
from tcc import settings
register = template.Library()
# Most of the code below is borrowed from the django_pagination module by James Tauber and Pinax Team,
# http://pinaxproject.com/docs/dev/external/pagination/index.html
class ParentCommentPaginator(Paginator):
    """Django Paginator that pages by top-level (parentless) comments.

    Page boundaries are derived from the ``sortdate`` of the parent
    comments; each page's object_list is then re-filtered so that every
    comment (parents and their replies) whose sortdate falls inside the
    page's window is included.
    """

    def page(self, number):
        "Returns a Page object for the given 1-based page number."
        number = self.validate_number(number)
        # Accessing self.count first also populates self.parentcomments
        # (side effect of _get_count below).
        if self.count == 0:
            return Page(self.object_list, number, self)
        bottom = (number - 1) * self.per_page
        # sortdate of the first parent comment on this page
        bottomdate = self.parentcomments[bottom].sortdate
        top = bottom + self.per_page
        if top + self.orphans >= self.count:
            # Last page (orphans absorbed): everything at or before the
            # first parent's sortdate.
            # NOTE(review): implies the queryset is ordered by descending
            # sortdate -- confirm against the caller's ordering.
            object_list = self.object_list.filter(sortdate__lte=bottomdate)
        else:
            # sortdate of the last parent comment on this page; __range is
            # inclusive on both ends.
            topdate = self.parentcomments[bottom+self.per_page-1].sortdate
            object_list = self.object_list.filter(
                sortdate__range=(topdate, bottomdate))
        return Page(object_list, number, self)

    def _get_count(self):
        "Returns the total number of objects, across all pages."
        if self._count is None:
            try:
                # Count only top-level comments; cache the filtered
                # queryset for use by page() above.
                self.parentcomments = self.object_list.filter(
                    parent__isnull=True)
                self._count = self.parentcomments.count()
            except (AttributeError, TypeError):
                # AttributeError if object_list has no count() method.
                # TypeError if object_list.count() requires arguments
                # (i.e. is of type list).
                self._count = len(self.object_list)
        return self._count
    count = property(_get_count)
class AutopaginateExtension(Extension):
"""
Applies pagination to the given dataset (and saves truncated
dataset to the context variable), sets context variable with
data enough to build html for paginator
General syntax:
{% autopaginate dataset [as ctx_variable] %}
if "as" part is omitted, trying to save truncated dataset back
to the original context variable. Pagination data is saved to
the NAME_pages context variable, where NAME is original name
of the dataset or ctx_variable
"""
tags = set(['autopaginate'])
default_kwargs = {
'per_page': settings.PER_PAGE,
'orphans': settings.PAGE_ORPHANS,
'window': settings.PAGE_WINDOW,
'hashtag': '',
'prefix': '',
}
    def parse(self, parser):
        """Parse the {% autopaginate %} tag into Jinja2 AST nodes.

        Emits two assignments: NAME_pages = self._render_pages(dataset,
        request, **kwargs) and NAME = NAME_pages.object_list (the
        page-truncated dataset).
        """
        lineno = parser.stream.next().lineno
        # The dataset expression to paginate.
        object_list = parser.parse_expression()
        # Optional "as NAME"; otherwise fall back to the name of the
        # expression itself.
        if parser.stream.skip_if('name:as'):
            name = parser.stream.expect('name').value
        elif hasattr(object_list, 'name'):
            name = object_list.name
        else:
            raise TemplateSyntaxError(
                "Cannot determine the name of objects " \
                "you want to paginate, use 'as foobar' syntax", lineno)
        kwargs = []  # keyword arguments collected as nodes.Keyword entries
        loops = 0
        # Consume comma-separated key=value pairs until the tag closes;
        # only keys listed in default_kwargs are accepted.
        while parser.stream.current.type != 'block_end':
            lineno = parser.stream.current.lineno
            if loops:
                parser.stream.expect('comma')
            key = parser.parse_assign_target().name
            if key not in self.default_kwargs.keys():
                raise TemplateSyntaxError(
                    "Unknown keyword argument for autopaginate. "\
                    "Your options are: %s" % (
                        ", ".join(self.default_kwargs.keys())
                    ))
            parser.stream.expect('assign')
            value = parser.parse_expression()
            kwargs.append(nodes.Keyword(key, value))
            loops += 1
        return [
            # NAME_pages = self._render_pages(object_list, request, **kwargs)
            nodes.Assign(nodes.Name(name + '_pages', 'store'),
                self.call_method('_render_pages',
                    [object_list, nodes.Name('request', 'load')],
                    kwargs)
            ).set_lineno(lineno),
            # NAME = NAME_pages.object_list  (the truncated dataset)
            nodes.Assign(nodes.Name(name, 'store'),
                nodes.Getattr(nodes.Name(name + '_pages', 'load'),
                    'object_list',
                    nodes.Impossible())
            ).set_lineno(lineno),
        ]
def _render_pages(self, objs, request, **kwargs):
mykwargs = self.default_kwargs.copy()
mykwargs.update(kwargs)
prefix = mykwargs.pop('prefix')
window = mykwargs.pop('window')
hashtag = mykwargs.pop('hashtag')
try:
paginator = ParentCommentPaginator(objs, **mykwargs)
key = 'page'
if prefix:
key = prefix + key
try:
try:
pageno = int(request.GET[key])
except (KeyError, ValueError, TypeError):
pageno = 1
page_obj = paginator.page(pageno)
except InvalidPage:
raise Http404('Invalid page requested. If DEBUG were set to ' +
'False, an HTTP 404 page would have been shown instead.')
page_range = paginator.page_range
# Calculate the record range in the current page for display.
records = {'first': 1 + (page_obj.number - 1) * paginator.per_page}
records['last'] = records['first'] + paginator.per_page - 1
if records['last'] + paginator.orphans >= paginator.count:
records['last'] = paginator.count
# First and last are simply the first *n* pages and the last *n* pages,
# where *n* is the current window size.
first = set(page_range[:window])
last = set(page_range[-window:])
# Now we look around our current page, making sure that we don't wrap
# around.
current_start = page_obj.number-1-window
if current_start < 0:
current_start = 0
current_end = page_obj.number-1+window
if current_end < 0:
current_end = 0
current = set(page_range[current_start:current_end])
pages = []
# If there's no overlap between the first set of pages and the current
# set of pages, then there's a possible need for elusion.
if len(first.intersection(current)) == 0:
first_list = list(first)
first_list.sort()
second_list = list(current)
second_list.sort()
pages.extend(first_list)
diff = second_list[0] - first_list[-1]
# If there is a gap of two, between the last page of the first
# set and the first page of the current set, then we're missing a
# page.
if diff == 2:
pages.append(second_list[0] - 1)
# If the difference is just one, then there's nothing to be done,
# as the pages need no elusion and are correct.
elif diff == 1:
pass
# Otherwise, there's a bigger gap which needs to be signaled for
# elusion, by pushing a None value to the page list.
else:
pages.append(None)
pages.extend(second_list)
else:
unioned = list(first.union(current))
unioned.sort()
pages.extend(unioned)
# If there's no overlap between the current set of pages and the last
# set of pages, then there's a possible need for elusion.
if len(current.intersection(last)) == 0:
second_list = list(last)
second_list.sort()
diff = second_list[0] - pages[-1]
# |
mtoshi/rancidcmd | docs/source/conf.py | Python | mit | 11,597 | 0.006295 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# rancidcmd documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 2 13:16:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'rancidcmd'
copyright = '2015, Toshikatsu Murakoshi'
author = 'Toshikatsu Murakoshi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.12'
# The full version, including alpha/beta/rc tags.
release = '0.1.12'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#toda | y_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, th | e current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'rancidcmddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'rancidcmd.tex', 'rancidcmd Documentation',
'rancidcmd\\_author\\_name', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls |
wakiyamap/electrum-mona | electrum_mona/tests/test_lnpeer.py | Python | mit | 56,455 | 0.003135 | import asyncio
import tempfile
from decimal import Decimal
import os
from contextlib import contextmanager
from collections import defaultdict
import logging
import concurrent
from concurrent import futures
import unittest
from typing import Iterable, NamedTuple, Tuple, List, Dict
from aiorpcx import TaskGroup, timeout_after, TaskTimeout
import electrum_mona
import electrum_mona.trampoline
from electrum_mona import bitcoin
from electrum_mona import constants
from electrum_mona.network import Network
from electrum_mona.ecc import ECPrivkey
from electrum_mona import simple_config, lnutil
from electrum_mona.lnaddr import lnencode, LnAddr, lndecode
from electrum_mona.bitcoin import COIN, sha256
from electrum_mona.util import bh2u, create_and_start_event_loop, NetworkRetryManager, bfh
from electrum_mona.lnpeer import Peer, UpfrontShutdownScriptViolation
from electrum_mona.lnutil import LNPeerAddr, Keypair, privkey_to_pubkey
from electrum_mona.lnutil import LightningPeerConnectionClosed, RemoteMisbehaving
from electrum_mona.lnutil import PaymentFailure, LnFeatures, HTLCOwner
from electrum_mona.lnchannel import ChannelState, PeerState, Channel
from electrum_mona.lnrouter import LNPathFinder, PathEdge, LNPathInconsistent
from electrum_mona.channel_db import ChannelDB
from electrum_mona.lnworker import LNWallet, NoPathFound
from electrum_mona.lnmsg import encode_msg, decode_msg
from electrum_mona import lnmsg
from electrum_mona.logging import console_stderr_handler, Logger
from electrum_mona.lnworker import PaymentInfo, RECEIVED
from electrum_mona.lnonion import OnionFailureCode
from electrum_mona.lnutil import derive_payment_secret_from_payment_preimage
from electrum_mona.lnutil import LOCAL, REMOTE
from electrum_mona.invoices import PR_PAID, PR_UNPAID
from .test_lnchannel import create_test_channels
from .test_bitcoin import needs_test_with_all_chacha20_implementations
from . import TestCaseForTestnet
def keypair():
    """Generate a fresh random node Keypair (secret bytes plus derived pubkey)."""
    secret = ECPrivkey.generate_random_key().get_secret_bytes()
    return Keypair(pubkey=privkey_to_pubkey(secret), privkey=secret)
@contextmanager
def noop_lock():
    """Context manager that acquires nothing; a stand-in for a real lock in tests."""
    yield None
class MockNetwork:
    """Stand-in for electrum's Network that broadcasts into an in-memory queue.

    NOTE(review): __init__ statement order matters -- ChannelDB(self) and
    LNPathFinder are built after config/asyncio_loop are assigned; do not reorder.
    """
    def __init__(self, tx_queue):
        self.callbacks = defaultdict(list)
        self.lnwatcher = None
        self.interface = None
        user_config = {}
        # Each test gets its own throwaway config directory.
        user_dir = tempfile.mkdtemp(prefix="electrum-lnpeer-test-")
        self.config = simple_config.SimpleConfig(user_config, read_user_dir_function=lambda: user_dir)
        self.asyncio_loop = asyncio.get_event_loop()
        self.channel_db = ChannelDB(self)
        # Pretend the gossip database has finished loading.
        self.channel_db.data_loaded.set()
        self.path_finder = LNPathFinder(self.channel_db)
        self.tx_queue = tx_queue
        self._blockchain = MockBlockchain()
    @property
    def callback_lock(self):
        # Tests are single-threaded; a dummy lock suffices.
        return noop_lock()
    def get_local_height(self):
        # Mock chain never advances.
        return 0
    def blockchain(self):
        return self._blockchain
    async def broadcast_transaction(self, tx):
        # Instead of hitting a real network, hand the tx to the test's queue.
        if self.tx_queue:
            await self.tx_queue.put(tx)
    async def try_broadcasting(self, tx, name):
        await self.broadcast_transaction(tx)
class MockBlockchain:
    """Blockchain stub: permanently at height 0 with a never-stale tip."""

    def height(self):
        """The mock chain never advances past genesis."""
        genesis_height = 0
        return genesis_height

    def is_tip_stale(self):
        """A mocked tip is always considered current."""
        return False
class MockWallet:
    """Wallet stub: mutations are no-ops and every address counts as ours."""

    def set_label(self, x, y):
        return None  # labels are not tracked in tests

    def save_db(self):
        return None  # nothing to persist

    def add_transaction(self, tx):
        return None  # transactions are discarded

    def is_lightning_backup(self):
        return False

    def is_mine(self, addr):
        return True
class MockLNWallet(Logger, NetworkRetryManager[LNPeerAddr]):
MPP_EXPIRY = 2 # HTLC timestamps are cast to int, so this cannot be 1
TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 0
INITIAL_TRAMPOLINE_FEE_LEVEL = 0
    def __init__(self, *, local_keypair: Keypair, chans: Iterable['Channel'], tx_queue, name):
        """Build a test LNWallet with pre-wired channels and a mock network.

        NOTE(review): initialisation order matters (Logger before first
        self.logger use; MockNetwork before features); do not reorder.
        """
        self.name = name
        Logger.__init__(self)
        NetworkRetryManager.__init__(self, max_retry_delay_normal=1, init_retry_delay_normal=1)
        self.node_keypair = local_keypair
        self.network = MockNetwork(tx_queue)
        self.taskgroup = TaskGroup()
        self.lnwatcher = None
        self.listen_server = None
        self._channels = {chan.channel_id: chan for chan in chans}
        self.payments = {}
        self.logs = defaultdict(list)
        self.wallet = MockWallet()
        # Feature bits mirror what the real wallet advertises to peers.
        self.features = LnFeatures(0)
        self.features |= LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT
        self.features |= LnFeatures.OPTION_UPFRONT_SHUTDOWN_SCRIPT_OPT
        self.features |= LnFeatures.VAR_ONION_OPT
        self.features |= LnFeatures.PAYMENT_SECRET_OPT
        self.features |= LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT
        self.pending_payments = defaultdict(asyncio.Future)
        # Give every channel a back-reference to this (mock) wallet.
        for chan in chans:
            chan.lnworker = self
        self._peers = {}  # bytes -> Peer
        # used in tests
        self.enable_htlc_settle = True
        self.enable_htlc_forwarding = True
        self.received_mpp_htlcs = dict()
        self.sent_htlcs = defaultdict(asyncio.Queue)
        self.sent_htlcs_routes = dict()
        self.sent_buckets = defaultdict(set)
        self.trampoline_forwarding_failures = {}
        self.inflight_payments = set()
        self.preimages = {}
        self.stopping_soon = False
        self.downstream_htlc_to_upstream_peer_map = {}
        self.logger.info(f"created LNWallet[{name}] with nodeID={local_keypair.pubkey.hex()}")
    def get_invoice_status(self, key):
        # No-op in tests; the real LNWallet reports invoice state here.
        pass
    @property
    def lock(self):
        # Tests are single-threaded; a dummy lock suffices.
        return noop_lock()
    @property
    def channel_db(self):
        # Delegate to the mock network's ChannelDB; None once the network is gone.
        return self.network.channel_db if self.network else None
    @property
    def channels(self):
        # Read-only view of the channel-id -> Channel map.
        return self._channels
    @property
    def peers(self):
        # Read-only view of the pubkey -> Peer map.
        return self._peers
def get_channel_by_short_id(self, short_channel_id):
with self.lock:
for chan in self._channels.values():
if chan.short_channel_id == short_channel_id:
return chan
def channel_st | ate_changed(self, chan):
pass
    def save_channel(self, chan):
        # Persistence is intentionally skipped in tests.
        print("Ignoring channel save")
def | diagnostic_name(self):
return self.name
    async def stop(self):
        """Shut down like the real LNWallet, then stop the channel DB as well."""
        await LNWallet.stop(self)
        if self.channel_db:
            self.channel_db.stop()
            await self.channel_db.stopped_event.wait()
    async def create_routes_from_invoice(self, amount_msat: int, decoded_invoice: LnAddr, *, full_path=None):
        """Materialise the async route generator of the real LNWallet into a list."""
        return [r async for r in self.create_routes_for_payment(
            amount_msat=amount_msat,
            final_total_msat=amount_msat,
            invoice_pubkey=decoded_invoice.pubkey.serialize(),
            min_cltv_expiry=decoded_invoice.get_min_final_cltv_expiry(),
            r_tags=decoded_invoice.get_routing_info('r'),
            invoice_features=decoded_invoice.get_features(),
            trampoline_fee_levels=defaultdict(int),
            use_two_trampolines=False,
            payment_hash=decoded_invoice.paymenthash,
            payment_secret=decoded_invoice.payment_secret,
            full_path=full_path)]
get_payments = LNWallet.get_payments
get_payment_info = LNWallet.get_payment_info
save_payment_info = LNWallet.save_payment_info
set_invoice_status = LNWallet.set_invoice_status
set_request_status = LNWallet.set_request_status
set_payment_status = LNWallet.set_payment_status
get_payment_status = LNWallet.get_payment_status
check_received_mpp_htlc = LNWallet.check_received_mpp_htlc
htlc_fulfilled = LNWallet.htlc_fulfilled
htlc_failed = LNWallet.htlc_failed
save_preimage = LNWallet.save_preimage
get_preimage = LNWallet.get_preimage
create_route_for_payment = LNWallet.create_route_for_payment
create_routes_for_payment = LNWallet.create_routes_for_payment
_check_invoice = staticmethod(LNWallet._check_invoice)
pay_to_route = LNWallet.pay_to_route
pay_to_node = LNWallet.pay_to_node
pay_invoice = LNWallet.pay_invoice
force_close_channel = LNWallet.force_close_channel
try_force_closing = LNWallet.try_force_closing
get_first_timestamp = lambda |
owtf/owtf | owtf/plugins/web/grep/Cookies_attributes@OWTF-SM-002.py | Python | bsd-3-clause | 557 | 0.003591 | """
GREP Plugin for Cookies Attributes
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Searches transaction DB for Cookie attributes"
def run(PluginInfo):
    """GREP plugin entry point: report cookie-setting response headers.

    Repaired garbled ' | ' markers in the HtmlString(title) call and the
    'Content +=' line.
    """
    title = "This plugin looks for cookie setting headers (TODO: Check vuln scanners' output!)<br />"
    Content = plugin_helper.HtmlString(title)
    Content += plugin_helper.FindResponseHeaderMatchesForRegexpName(
        "HEADERS_FOR_COOKIES"
    )
    # TODO: Fix up
    return Content
|
yuewko/neutron | neutron/tests/unit/agent/common/test_polling.py | Python | apache-2.0 | 2,694 | 0 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.common import base_polling as polling
from neutron.tests import base
class TestBasePollingManager(base.BaseTestCase):
    """Exercises force_polling/polling_completed bookkeeping of BasePollingManager.

    Repaired the garbled 'def | mock_is_polling_ | required' line; the intact
    call sites below fix the intended name as mock_is_polling_required.
    """

    def setUp(self):
        super(TestBasePollingManager, self).setUp()
        self.pm = polling.BasePollingManager()

    def test__is_polling_required_should_not_be_implemented(self):
        self.assertRaises(NotImplementedError, self.pm._is_polling_required)

    def test_force_polling_sets_interval_attribute(self):
        self.assertFalse(self.pm._force_polling)
        self.pm.force_polling()
        self.assertTrue(self.pm._force_polling)

    def test_polling_completed_sets_interval_attribute(self):
        self.pm._polling_completed = False
        self.pm.polling_completed()
        self.assertTrue(self.pm._polling_completed)

    def mock_is_polling_required(self, return_value):
        # Helper: patch the abstract hook so is_polling_required can be driven.
        return mock.patch.object(self.pm, '_is_polling_required',
                                 return_value=return_value)

    def test_is_polling_required_returns_true_when_forced(self):
        with self.mock_is_polling_required(False):
            self.pm.force_polling()
            self.assertTrue(self.pm.is_polling_required)
            self.assertFalse(self.pm._force_polling)

    def test_is_polling_required_returns_true_when_polling_not_completed(self):
        with self.mock_is_polling_required(False):
            self.pm._polling_completed = False
            self.assertTrue(self.pm.is_polling_required)

    def test_is_polling_required_returns_true_when_updates_are_present(self):
        with self.mock_is_polling_required(True):
            self.assertTrue(self.pm.is_polling_required)
            self.assertFalse(self.pm._polling_completed)

    def test_is_polling_required_returns_false_for_no_updates(self):
        with self.mock_is_polling_required(False):
            self.assertFalse(self.pm.is_polling_required)
class TestAlwaysPoll(base.BaseTestCase):
    """AlwaysPoll must report polling as required unconditionally."""

    def test_is_polling_required_always_returns_true(self):
        manager = polling.AlwaysPoll()
        self.assertTrue(manager.is_polling_required)
|
zoxuyu/rsplus | src/maths.py | Python | gpl-3.0 | 441 | 0.002268 | d | ef gcd(a, b):
while b != 0:
a, b = b, a % b
return a
def modinv(a, b):
    """Return the modular inverse of ``a`` modulo ``b``.

    Iterative extended Euclidean algorithm; the result ``lx`` satisfies
    ``(a * lx) % b == 1`` whenever gcd(a, b) == 1.

    Repaired the garbled 'retu | rn lx' line, and fixed the old docstring,
    which wrongly claimed a ``(r, i, j)`` tuple was returned.
    """
    x = 0
    y = 1
    lx = 1
    ly = 0
    oa = a  # remember original a for normalising ly
    ob = b  # remember original b for normalising lx
    while b != 0:
        q = a // b
        (a, b) = (b, a % b)
        (x, lx) = ((lx - (q * x)), x)
        (y, ly) = ((ly - (q * y)), y)
    if lx < 0:
        lx += ob  # bring the inverse into [0, ob)
    if ly < 0:
        ly += oa
    return lx
|
moonso/genmod | genmod/annotate_variants/annotate.py | Python | mit | 2,975 | 0.004706 | import logging
from genmod.annotate_regions.get_features import get_region
from genmod.annotate_variants.read_tabix_files import (get_frequencies,
get_spidex_score, get_cadd_scores)
logger = logging.getLogger(__name__)
def annotate_variant(variant, annotation_arguments):
    """Annotate a single VCF line and return it with an extended INFO column.

    Repaired two garbled ' | ' markers (the CADD_raw format call and the
    info_string join).

    Args:
        variant (str): raw tab-separated VCF line.
        annotation_arguments (dict): available annotation resources; recognised
            keys are 'region_trees', 'exac', 'thousand_g', 'spidex' and
            'cadd_files', plus the flags 'max_af' and 'cadd_raw'.

    Returns:
        str: the VCF line with any annotations appended to the INFO field.
    """
    variant_info = variant.rstrip().split('\t')
    chrom = variant_info[0]
    # Normalise 'chr1'-style contig names to '1' for the tabix lookups.
    if chrom.startswith(('chr', 'CHR', 'Chr')):
        chrom = chrom[3:]
    pos = int(variant_info[1])
    ref = variant_info[3]
    alt = variant_info[4]
    info = variant_info[7]
    if info == '.':
        info = []
    else:
        info = info.split(';')
    ## TODO this needs to be handled differently for SVs
    start = pos
    # This is a construct so that there will not be inconsistent genetic regions
    end = pos + 1
    # end = pos + max(len(ref), len(alt))
    # Check which annotations that are available
    regions = None
    if 'region_trees' in annotation_arguments:
        regions = get_region(chrom, start, end, annotation_arguments['region_trees'])
        if regions:
            info.append("Annotation={0}".format(','.join(regions)))
    if 'exac' in annotation_arguments:
        reader = annotation_arguments['exac']
        frequencies = get_frequencies(reader, chrom, start, alt)
        if 'AF' in frequencies:
            info.append("EXACAF={0}".format(frequencies['AF']))
        if annotation_arguments.get('max_af'):
            if 'MAX_AF' in frequencies:
                info.append("EXAC_MAX_AF={0}".format(frequencies['MAX_AF']))
    if 'thousand_g' in annotation_arguments:
        reader = annotation_arguments['thousand_g']
        frequencies = get_frequencies(reader, chrom, start, alt)
        if 'AF' in frequencies:
            info.append("1000GAF={0}".format(frequencies['AF']))
        if annotation_arguments.get('max_af'):
            if 'MAX_AF' in frequencies:
                info.append("1000G_MAX_AF={0}".format(frequencies['MAX_AF']))
    if 'spidex' in annotation_arguments:
        reader = annotation_arguments['spidex']
        spidex_score = get_spidex_score(reader, chrom, start, alt)
        if spidex_score:
            info.append("SPIDEX={0}".format(spidex_score))
    if 'cadd_files' in annotation_arguments:
        readers = annotation_arguments['cadd_files']
        cadd_scores = {}
        # Query the CADD files in order, keeping the first hit found.
        for reader in readers:
            if not cadd_scores:
                cadd_scores = get_cadd_scores(reader, chrom, start, alt)
        if cadd_scores:
            info.append("CADD={0}".format(cadd_scores['cadd_phred']))
            if annotation_arguments.get('cadd_raw'):
                info.append("CADD_raw={0}".format(cadd_scores['cadd_raw']))
    # Rebuild the info string
    if info:
        info_string = ';'.join(info)
    else:
        info_string = '.'
    variant_info[7] = info_string
    return '\t'.join(variant_info)
jeffseif/dogWalkScore | static/py/flaskr.py | Python | bsd-3-clause | 1,701 | 0.008818 | #! /usr/bin/env python3
# Import
import dogWalkScore
from flask import Flask as Flask,\
jsonify as FlaskJSONify,\
render_template as FlaskRender,\
request as FlaskRequest
# Initialize
# Flask application instance
app = Flask(__name__)
# Build graph once at import time (presumably nodes/edges/POIs/trees from MySQL -- see dogWalkScore)
id2Node, id2Edge, id2Poi, id2Tree, graphIds = dogWalkScore.MySql2Graph()
# Module-level cache of node ids inside the current walking range.
# NOTE(review): 'global' at module scope is a no-op; croppedIds is module-level anyway.
global croppedIds
croppedIds = []
# Flask routing
@app.route('/')
def index():
    """Serve the main map page."""
    return FlaskRender('map.html')
@app.route('/findAddress')
def findAddress():
    """Resolve ?q=<address>/?m=<minutes> and return walking-range JSON.

    Repaired garbled ' | ' markers in the comment and the croppedIds.clear() call.
    Side effect: refreshes the module-level croppedIds cache for later routePOI calls.
    """
    # Grab address from url
    address = FlaskRequest.args.get('q', '')
    minutes = float(FlaskRequest.args.get('m', ''))
    # Process the address
    json = dogWalkScore.FindAddress(address, minutes, id2Node, id2Poi, graphIds, id2Tree)
    # Check for bad address
    if json is None:
        return FlaskJSONify({})
    # Update croppedIds
    croppedIds.clear()
    croppedIds.extend(json.get('croppedIds'))
    # JSONify it
    return FlaskJSONify(json)
@app.route('/routePOI')
def routePOI():
    """Route from start node ?s to POI ?p within the current cropped graph."""
    start_id = int(FlaskRequest.args.get('s', ''))
    poi_id = int(FlaskRequest.args.get('p', ''))
    payload = dogWalkScore.RoutePOI(start_id, poi_id, croppedIds, id2Node, id2Edge, id2Poi)
    return FlaskJSONify(payload)
@app.route('/about')
def about():
    """Serve the static about page."""
    return FlaskRender('about.html')
@app.route('/contact')
def contact():
    """Serve the static contact page."""
    return FlaskRender('contact.html')
@app.route('/<other>')
def other(other):
    """Catch-all route: any unknown path falls back to the about page."""
    return about()
# Script
if __name__ == '__main__':
    # Run Flask in debug mode on port 5000, listening on all interfaces.
    # (The old comment said port 8000, which did not match the code.)
    app.run(debug = True, port = 5000, host = '0.0.0.0')
|
btat/Booktype | lib/booktype/apps/convert/urls.py | Python | agpl-3.0 | 1,060 | 0.000943 | # This file is part of Booktype.
# Copyright (c) 2013 Borko Jandras <borko.jandras@sourcefabric.org>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url, include
from django.views.decorators.csrf import csrf_exempt
from .views import ConvertView
# Convert-app routes:
#   '<task_id>' -> query an existing conversion task (name: convert_status)
#   ''          -> start a conversion; CSRF-exempt, presumably for external API clients
urlpatterns = patterns('',
    url(r'^(?P<task_id>.+)$', ConvertView.as_view(), name='convert_status'),
    url(r'^$', csrf_exempt(ConvertView.as_view()), name='convert'),
)
|
irregulator/ganetimgr | ganeti/migrations/0012_auto__add_field_cluster_disable_instance_creation.py | Python | gpl-3.0 | 6,975 | 0.008172 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration: adds the boolean column
    ``disable_instance_creation`` to ``ganeti_cluster``.

    Repaired two garbled ' | ' markers in the frozen ORM dict
    ('ordering' of contenttypes.contenttype and its 'name' field class).
    """

    def forwards(self, orm):
        # Adding field 'Cluster.disable_instance_creation'
        db.add_column('ganeti_cluster', 'disable_instance_creation',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Cluster.disable_instance_creation'
        db.delete_column('ganeti_cluster', 'disable_instance_creation')

    # Frozen ORM snapshot (auto-generated; do not edit by hand).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'ganeti.cluster': {
            'Meta': {'object_name': 'Cluster'},
            'default_disk_template': ('django.db.models.fields.CharField', [], {'default': "'plain'", 'max_length': '255'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'disable_instance_creation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'fast_create': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hostname': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'use_gnt_network': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'ganeti.instanceaction': {
            'Meta': {'object_name': 'InstanceAction'},
            'action': ('django.db.models.fields.IntegerField', [], {}),
            'action_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ganeti.Cluster']", 'null': 'True', 'blank': 'True'}),
            'filed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'ganeti.network': {
            'Meta': {'object_name': 'Network'},
            'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ganeti.Cluster']"}),
            'cluster_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ipv6_prefix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'link': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'mode': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        }
    }

    complete_apps = ['ganeti']
complete_apps = ['ganeti'] |
channelcat/sanic | tests/test_errorpages.py | Python | mit | 2,703 | 0 | import pytest
from sanic import Sanic
from sanic.errorpages import exception_response
from sanic.exceptions import NotFound
from sanic.request import Request
from sanic.response import HTTPResponse
@pytest.fixture
def app():
    """Sanic app exposing a single /error route that always raises."""
    test_app = Sanic("error_page_testing")

    @test_app.route("/error", methods=["GET", "POST"])
    def err(request):
        raise Exception("something went wrong")

    return test_app
@pytest.fixture
def fake_request(app):
    """Bare Request bound to the test app, detached from any transport."""
    request = Request(b"/foobar", {}, "1.1", "GET", None, app)
    return request
@pytest.mark.parametrize(
    "fallback,content_type, exception, status",
    (
        (None, "text/html; charset=utf-8", Exception, 500),
        ("html", "text/html; charset=utf-8", Exception, 500),
        ("auto", "text/html; charset=utf-8", Exception, 500),
        ("text", "text/plain; charset=utf-8", Exception, 500),
        ("json", "application/json", Exception, 500),
        (None, "text/html; charset=utf-8", NotFound, 404),
        ("html", "text/html; charset=utf-8", NotFound, 404),
        ("auto", "text/html; charset=utf-8", NotFound, 404),
        ("text", "text/plain; charset=utf-8", NotFound, 404),
        ("json", "application/json", NotFound, 404),
    ),
)
def test_should_return_html_valid_setting(
    fake_request, fallback, content_type, exception, status
):
    """Each FALLBACK_ERROR_FORMAT setting yields the matching content type/status."""
    if fallback:
        fake_request.app.config.FALLBACK_ERROR_FORMAT = fallback
    # Raise-then-catch so the exception carries a real traceback when rendered.
    try:
        raise exception("bad stuff")
    except Exception as e:
        response = exception_response(fake_request, e, True)

    assert isinstance(response, HTTPResponse)
    assert response.status == status
    assert response.content_type == content_type
def test_auto_fallback_with_data(app):
    """With 'auto' fallback, the error format follows the request body type."""
    app.config.FALLBACK_ERROR_FORMAT = "auto"

    # No body: default to HTML.
    _, response = app.test_client.get("/error")
    assert response.status == 500
    assert response.content_type == "text/html; charset=utf-8"

    # JSON body: error rendered as JSON.
    _, response = app.test_client.post("/error", json={"foo": "bar"})
    assert response.status == 500
    assert response.content_type == "application/json"

    # Form body: falls back to HTML.
    _, response = app.test_client.post("/error", data={"foo": "bar"})
    assert response.status == 500
    assert response.content_type == "text/html; charset=utf-8"
def test_auto_fallback_with_content_type(app):
    """With 'auto' fallback, the error format follows the Content-Type header.

    Repaired garbled ' | ' markers in the FALLBACK_ERROR_FORMAT assignment and
    the first test_client.get call.
    """
    app.config.FALLBACK_ERROR_FORMAT = "auto"

    _, response = app.test_client.get(
        "/error", headers={"content-type": "application/json"}
    )
    assert response.status == 500
    assert response.content_type == "application/json"

    _, response = app.test_client.get(
        "/error", headers={"content-type": "text/plain"}
    )
    assert response.status == 500
    assert response.content_type == "text/plain; charset=utf-8"
|
mrcrgl/gge-storage | gge_proxy_manager/models/jobs.py | Python | mit | 3,505 | 0.00485 | from __future__ import unicode_literals
from django.db import models
from django.utils.timezone import now, timedelta
Q = models.Q
class LogisticJob(models.Model):
    """Recurring resource shipment from ``castle`` to ``receiver``.

    Repaired the garbled 'models.Forei | gnKey(... related_name= | ...' line;
    related_name='incoming_logistic_jobs' mirrors the 'outgoing_logistic_jobs'
    on the sending castle.
    """
    LOCK_FOR = (
        (60*15, '15 minutes'),
        (60*30, '30 minutes'),
        (60*45, '45 minutes'),
        (60*60, '1 hour'),
        (60*60*3, '3 hours'),
        (60*60*6, '6 hours'),
        (60*60*9, '9 hours'),
        (60*60*12, '12 hours'),
        (60*60*18, '18 hours'),
        (60*60*24, '24 hours'),
    )
    RESOURCE = (
        ('wood', 'Wood'),
        ('stone', 'Stone'),
        ('food', 'Food'),
        # ('cole', 'Cole'),
    )
    SPEED = (
        ('-1', 'Keine Pferde'),
        ('1001', 'Gold Pferde (test)'),
        ('1004', 'Rubi Pferde 1 (test)'),
        ('1007', 'Rubi Pferde 2 (test)'),
    )
    player = models.ForeignKey("gge_proxy_manager.Player", related_name='logistic_jobs')
    castle = models.ForeignKey("gge_proxy_manager.Castle", related_name='outgoing_logistic_jobs')
    receiver = models.ForeignKey("gge_proxy_manager.Castle", related_name='incoming_logistic_jobs')
    speed = models.CharField(max_length=5, choices=SPEED)
    is_active = models.BooleanField(default=True)
    resource = models.CharField(max_length=6, choices=RESOURCE)
    gold_limit = models.PositiveIntegerField(null=True, blank=True, default=None)
    resource_limit = models.PositiveIntegerField()
    lock_for = models.PositiveIntegerField(choices=LOCK_FOR, default=60*45)
    locked_till = models.DateTimeField(default=now, db_index=True)

    class Meta:
        app_label = 'gge_proxy_manager'

    def delay(self):
        # Push the next allowed run time 'lock_for' seconds into the future.
        self.locked_till = now() + timedelta(seconds=self.lock_for)
        self.save()

    def last_succeed(self):
        """Return the datetime of the most recent matching shipment, or None."""
        from .log import LogisticLog
        log = LogisticLog.objects.filter(castle=self.castle,
                                         receiver=self.receiver,
                                         resource=self.resource).order_by('-sent').first()
        if log:
            return log.sent
        return None
class ProductionJob(models.Model):
    """Recurring unit-production order for a castle.

    NOTE(review): limit-field semantics inferred from names/help_text; confirm
    against the job runner before relying on this summary.
    """
    player = models.ForeignKey("gge_proxy_manager.Player", related_name='production_jobs')
    castle = models.ForeignKey("gge_proxy_manager.Castle", related_name='production_jobs')
    unit = models.ForeignKey("gge_proxy_manager.Unit")
    valid_until = models.PositiveIntegerField(null=True, blank=True, default=None,
                                              help_text='Bis zu welcher Menge ist der Auftrag gueltig')
    is_active = models.BooleanField(default=True)
    gold_limit = models.PositiveIntegerField(null=True, blank=True, default=None)
    food_balance_limit = models.IntegerField(null=True, blank=True, default=None)
    wood_limit = models.PositiveIntegerField(null=True, blank=True, default=None)
    stone_limit = models.PositiveIntegerField(null=True, blank=True, default=None)
    burst_mode = models.BooleanField(default=False, help_text='Ignoriert Nahrungsbilanz')
    locked_till = models.DateTimeField(default=now, db_index=True)
    last_fault_reason = models.CharField(null=True, default=None, max_length=128)
    last_fault_date = models.DateTimeField(default=None, null=True)

    class Meta:
        app_label = 'gge_proxy_manager'

    def last_succeed(self):
        """Return when this unit was last produced at this castle, or None."""
        from .log import ProductionLog
        log = ProductionLog.objects.filter(castle=self.castle, unit=self.unit).order_by('-produced').first()
        if log:
            return log.produced
        return None
ziima/pyvmd | pyvmd/analysis.py | Python | gpl-3.0 | 3,418 | 0.002926 | """
Utilities for structure analysis.
"""
from . import measure
from .atoms import Selection
__all__ = ['hydrogen_bonds', 'HydrogenBond']
class HydrogenBond(object):
    """
    Represents hydrogen bond.
    @ivar: Hydrogen donor atom
    @ivar: Hydrogen atom
    @ivar: Hydrogen acceptor atom
    """
    def __init__(self, donor, hydrogen, acceptor):
        self.donor = donor
        self.hydrogen = hydrogen
        self.acceptor = acceptor

    def __repr__(self):
        return '<{0}: {1}--{2}..{3}>'.format(
            type(self).__name__, self.donor, self.hydrogen, self.acceptor)

    def __eq__(self, other):
        # Identical type and identical atom triple.
        if type(self) != type(other):
            return False
        if self.donor != other.donor:
            return False
        if self.hydrogen != other.hydrogen:
            return False
        return self.acceptor == other.acceptor

    def __ne__(self, other):
        return not self.__eq__(other)
def _get_bonds(donor, acceptor, donor_hydrogens, angle):
    # Utility generator yielding hydrogen bonds between one donor atom and
    # one acceptor atom.
    for candidate in donor.bonded:
        # Only atoms from the donor's hydrogen selection can mediate a bond.
        if candidate not in donor_hydrogens:
            continue
        # A sufficiently wide donor-hydrogen-acceptor angle qualifies.
        if measure.angle(donor, candidate, acceptor) >= angle:
            yield HydrogenBond(donor, candidate, acceptor)
def hydrogen_bonds(donors, acceptors=None, distance=3.0, angle=135):
    """
    Returns iterator of hydrogen bonds between the selections.

    @param donors: Hydrogen donors selection
    @type donors: Selection
    @param acceptors: Hydrogen acceptors selection
    @type acceptors: Selection or None
    @param distance: Maximal distance between donor and acceptor
    @type distance: Non-negative number
    @param angle: Minimal angle in degrees between donor, hydrogen and acceptor
    @type angle: Number between 0 and 180
    @rtype: Generator of HydrogenBond objects
    """
    assert isinstance(donors, Selection)
    assert acceptors is None or isinstance(acceptors, Selection)
    assert distance >= 0
    assert 0 <= angle <= 180
    # Remove hydrogen atoms from selection. This can be done safely, hydrogens are never donors.
    donor_heavy = Selection('(%s) and noh' % donors.selection, donors.molecule, donors.frame)
    # Create selections of hydrogens for donor molecule. This will be used to find the hydrogen involved in the bond.
    donor_hydrogens = Selection('hydrogen', donors.molecule, donors.frame)
    if acceptors is None:
        # Acceptor is same as donor, just copy
        acceptor_heavy = donor_heavy
        acceptor_hydrogens = donor_hydrogens
    else:
        # Acceptor is not the same as donor. Make same selections as for donor.
        acceptor_heavy = Selection('(%s) and noh' % acceptors.selection, acceptors.molecule, acceptors.frame)
        acceptor_hydrogens = Selection('hydrogen', acceptors.molecule, acceptors.frame)
    for donor, acceptor in donor_heavy.contacts(acceptor_heavy, distance):
        for hbond in _get_bonds(donor, acceptor, donor_hydrogens, angle):
            yield hbond
        # If acceptors and donors share atoms, contacts return pair only once.
        # Check if donor and acceptors can have opposite roles.
        if donor in acceptor_heavy and acceptor in donor_heavy:
            # Donor can be acceptor and acceptor can be donor, check the hydrogen bonds.
            for hbond in _get_bonds(acceptor, donor, acceptor_hydrogens, angle):
                yield hbond
rvinas/sentiment_analysis_tensorflow | predict.py | Python | apache-2.0 | 3,339 | 0.002995 | # predict.py: Uses a previously trained TensorFlow model to make predictions on a test set
# Copyright 2016 Ramon Vinas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tsa.data_manager import DataManager
import pickle

tf.flags.DEFINE_string('checkpoints_dir', 'checkpoints',
                       'Checkpoints directory (example: checkpoints/1479670630). Must contain (at least):\n'
                       '- config.pkl: Contains parameters used to train the model \n'
                       '- model.ckpt: Contains the weights of the model \n'
                       '- model.ckpt.meta: Contains the TensorFlow graph definition \n')
FLAGS = tf.flags.FLAGS

if FLAGS.checkpoints_dir is None:
    raise ValueError('Please, a valid checkpoints directory is required (--checkpoints_dir <file name>)')

# Load configuration.
# NOTE: pickle.load can execute arbitrary code -- only unpickle checkpoint
# directories you trust.
with open('{}/config.pkl'.format(FLAGS.checkpoints_dir), 'rb') as f:
    config = pickle.load(f)

# Load data with exactly the same preprocessing parameters used at train time.
dm = DataManager(data_dir=config['data_dir'],
                 stopwords_file=config['stopwords_file'],
                 sequence_len=config['sequence_len'],
                 n_samples=config['n_samples'],
                 test_size=config['test_size'],
                 val_samples=config['batch_size'],
                 random_state=config['random_state'],
                 ensure_preprocessed=True)

# Import graph and evaluate the model using test data
original_text, x_test, y_test, test_seq_len = dm.get_test_data(original_text=True)
graph = tf.Graph()
with graph.as_default():
    sess = tf.Session()

    # Import graph and restore its weights
    print('Restoring graph ...')
    saver = tf.train.import_meta_graph("{}/model.ckpt.meta".format(FLAGS.checkpoints_dir))
    saver.restore(sess, ("{}/model.ckpt".format(FLAGS.checkpoints_dir)))

    # Recover input/output tensors by the names they were given at train time.
    # NOTE: `input` shadows the builtin; kept so the name mirrors the graph op.
    input = graph.get_operation_by_name('input').outputs[0]
    target = graph.get_operation_by_name('target').outputs[0]
    seq_len = graph.get_operation_by_name('lengths').outputs[0]
    dropout_keep_prob = graph.get_operation_by_name('dropout_keep_prob').outputs[0]
    predict = graph.get_operation_by_name('final_layer/softmax/predictions').outputs[0]
    accuracy = graph.get_operation_by_name('accuracy/accuracy').outputs[0]

    # Perform prediction (dropout disabled: keep probability 1)
    pred, acc = sess.run([predict, accuracy],
                         feed_dict={input: x_test,
                                    target: y_test,
                                    seq_len: test_seq_len,
                                    dropout_keep_prob: 1})

# Print results
print('\nAccuracy: {0:.4f}\n'.format(acc))
for i in range(100):
    print('Sample: {0}'.format(original_text[i]))
    print('Predicted sentiment: [{0:.4f}, {1:.4f}]'.format(pred[i, 0], pred[i, 1]))
    print('Real sentiment: {0}\n'.format(y_test[i]))
RafaelPalomar/girder | girder/utility/webroot.py | Python | apache-2.0 | 4,603 | 0.002172 | # -*- coding: utf-8 -*-
import json
import os
import re
import cherrypy
import mako
from girder import constants
from girder.models.setting import Setting
f | rom girder.settings import SettingKey
from girder.utility import config
class WebrootBase:
    """
    Serves a template file in response to GET requests.

    This will typically be the base class of any non-API endpoints.
    """
    exposed = True

    def __init__(self, templatePath):
        # Variables handed to the mako template when rendering.
        self.vars = {}
        self.config = config.getConfig()

        self._templateDirs = []
        self.setTemplatePath(templatePath)

    def updateHtmlVars(self, vars):
        """
        If any of the variables in the index html need to change, call this
        with the updated set of variables to render the template with.
        """
        self.vars.update(vars)

    def setTemplatePath(self, templatePath):
        """
        Set the path to a template file to render instead of the default template.

        The default template remains available so that custom templates can
        inherit from it. To do so, save the default template filename from
        the templateFilename attribute before calling this function, pass
        it as a variable to the custom template using updateHtmlVars(), and
        reference that variable in an <%inherit> directive like:

        <%inherit file="${context.get('defaultTemplateFilename')}"/>
        """
        templateDir, templateFilename = os.path.split(templatePath)
        self._templateDirs.append(templateDir)
        self.templateFilename = templateFilename

        # Reset TemplateLookup instance so that it will be instantiated lazily,
        # with the latest template directories, on the next GET request
        self._templateLookup = None

    @staticmethod
    def _escapeJavascript(string):
        # Per the advice at:
        # https://www.owasp.org/index.php/XSS_(Cross_Site_Scripting)_Prevention_Cheat_Sheet#Output_Encoding_Rules_Summary
        # replace all non-alphanumeric characters with "\0uXXXX" unicode escaping:
        # https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar#Unicode_escape_sequences
        def encodeChar(match):
            code = ord(match.group())
            if code > 0xFFFF:
                # '\\u%04X' would emit five hex digits for astral-plane
                # characters, which is an invalid JS escape -- emit the
                # UTF-16 surrogate pair instead.
                code -= 0x10000
                return '\\u%04X\\u%04X' % (
                    0xD800 + (code >> 10), 0xDC00 + (code & 0x3FF))
            return '\\u%04X' % code
        return re.sub(r'[^a-zA-Z0-9]', encodeChar, string)

    def _renderHTML(self):
        # Lazily build the TemplateLookup (reset whenever the template path
        # changes) so new template directories are always picked up.
        if self._templateLookup is None:
            self._templateLookup = mako.lookup.TemplateLookup(directories=self._templateDirs)

        template = self._templateLookup.get_template(self.templateFilename)
        return template.render(js=self._escapeJavascript, json=json.dumps, **self.vars)

    def GET(self, **params):
        return self._renderHTML()

    # Only GET is meaningful on these endpoints; everything else is 405.
    def DELETE(self, **params):
        raise cherrypy.HTTPError(405)

    def PATCH(self, **params):
        raise cherrypy.HTTPError(405)

    def POST(self, **params):
        raise cherrypy.HTTPError(405)

    def PUT(self, **params):
        raise cherrypy.HTTPError(405)
class Webroot(WebrootBase):
    """
    The webroot endpoint simply serves the main index HTML file.
    """
    def __init__(self, templatePath=None):
        defaultPath = os.path.join(constants.PACKAGE_DIR, 'utility', 'webroot.mako')
        super().__init__(templatePath or defaultPath)

        self.vars = {}

    def _renderHTML(self):
        from girder.utility import server
        from girder.plugin import loadedPlugins

        plugins = loadedPlugins()
        builtDir = os.path.join(constants.STATIC_ROOT_DIR, 'built', 'plugins')

        # Only advertise plugin assets that were actually built to disk.
        cssPlugins = []
        jsPlugins = []
        for plugin in plugins:
            if os.path.exists(os.path.join(builtDir, plugin, 'plugin.min.css')):
                cssPlugins.append(plugin)
            if os.path.exists(os.path.join(builtDir, plugin, 'plugin.min.js')):
                jsPlugins.append(plugin)

        self.vars['plugins'] = plugins
        self.vars['pluginCss'] = cssPlugins
        self.vars['pluginJs'] = jsPlugins
        self.vars['apiRoot'] = server.getApiRoot()
        self.vars['staticPublicPath'] = server.getStaticPublicPath()

        # Branding / policy settings surfaced to the template.
        settingKeys = (
            ('brandName', SettingKey.BRAND_NAME),
            ('contactEmail', SettingKey.CONTACT_EMAIL_ADDRESS),
            ('privacyNoticeHref', SettingKey.PRIVACY_NOTICE),
            ('bannerColor', SettingKey.BANNER_COLOR),
            ('registrationPolicy', SettingKey.REGISTRATION_POLICY),
            ('enablePasswordLogin', SettingKey.ENABLE_PASSWORD_LOGIN),
        )
        for varName, settingKey in settingKeys:
            self.vars[varName] = Setting().get(settingKey)

        return super()._renderHTML()
|
goblinhack/crystaleer | python/things/wall.py | Python | lgpl-3.0 | 943 | 0.00106 | import tp
def thing_init(t):
    # Post-creation hook attached to every wall template; walls need no
    # per-instance setup, so this is a no-op.
    return None
def wall_init(name, short_name, tiles=[]):
    # Build and register one wall thing-template. (The mutable [] default is
    # never mutated here, so it is benign; kept for interface compatibility.)
    template = tp.Tp(name)
    template.set_short_name(short_name)
    template.set_is_movement_blocking(True)
    template.set_is_wall(True)
    template.set_is_solid_ground(True)

    if tiles is None:
        # No tile list supplied: fall back to a tile named after the template.
        template.set_tile(tile=name, delay_ms=150)
    else:
        for tile_name in tiles:
            template.set_tile(tile_name, delay_ms=150)

    template.thing_init = thing_init
def init():
    """Register the seven identical "boring wall" templates (wall1..wall7)."""
    # The original spelled out seven near-identical wall_init() calls; a loop
    # removes the duplication without changing the registration order.
    for index in range(1, 8):
        name = "wall%d" % index
        wall_init(name=name, short_name="A boring wall", tiles=[name])
|
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/operations/_dscp_configuration_operations.py | Python | mit | 23,827 | 0.00491 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DscpConfigurationOperations(object):
"""DscpConfigurationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Wired together by the generated service client -- this class is not
        # meant to be instantiated directly (see the class docstring).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        dscp_configuration_name,  # type: str
        parameters,  # type: "_models.DscpConfiguration"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.DscpConfiguration"
        """Issue the initial PUT for the create-or-update long-running
        operation. Called by begin_create_or_update(); the returned body is
        then polled to completion by the LRO poller.

        NOTE: This file is generated by AutoRest (see the header) -- prefer
        regenerating over hand-editing.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DscpConfiguration"]
        # 401/404/409 get mapped to specific azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'DscpConfiguration')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing resource, 201 = created new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('DscpConfiguration', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('DscpConfiguration', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'}  # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
dscp_configuration_name, # type: str
parameters, # type: "_models.DscpConfiguration"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DscpConfiguration"]
"""Creates or updates a DSCP Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dscp_configuration_n | ame: The name of the resource.
:type dscp_configuration_name: str
:param parameters: Parameters supplied to the create or update dscp configuration operation.
:type parameters: ~azure.mgmt.network.v2020_06_01.models.DscpConfiguration
:keyword callable cls: A custom type or function that will | be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DscpConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.DscpConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
dscp_configuration_name=dscp_configuration_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DscpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPo |
pinterb/st2 | st2common/st2common/services/action.py | Python | apache-2.0 | 4,646 | 0.002583 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import six
from st2common import log as logging
from st2common.constants import action as action_constants
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.execution import ActionExecution
from st2common.services import executions
from st2common.util import isotime
from st2common.util import action_db as action_utils
from st2common.util import schema as util_schema
__all__ = [
'request',
'is_action_canceled'
]
LOG = logging.getLogger(__name__)
def _get_immutable_params(parameters):
if not parameters:
return []
return [k for k, v in six.iteritems(parameters) if v.get('immutable', False)]
def request(liveaction):
    """
    Request an action execution.

    Validates the action and its parameters, persists the liveaction plus a
    matching actionexecution, and publishes both to the message queue.

    :param liveaction: The liveaction to request.
    :return: (liveaction, execution)
    :rtype: tuple
    :raises ValueError: If the action cannot be found or is disabled, or if
        an immutable parameter is overridden.
    """
    # Use the user context from the parent action execution. Subtasks in a workflow
    # action can be invoked by a system user and so we want to use the user context
    # from the original workflow action.
    if getattr(liveaction, 'context', None) and 'parent' in liveaction.context:
        parent = LiveAction.get_by_id(liveaction.context['parent'])
        liveaction.context['user'] = getattr(parent, 'context', dict()).get('user')

    # Validate action.
    action_db = action_utils.get_action_by_ref(liveaction.action)
    if not action_db:
        raise ValueError('Action "%s" cannot be found.' % liveaction.action)
    if not action_db.enabled:
        raise ValueError('Unable to execute. Action "%s" is disabled.' % liveaction.action)

    runnertype_db = action_utils.get_runnertype_by_name(action_db.runner_type['name'])

    if not hasattr(liveaction, 'parameters'):
        liveaction.parameters = dict()

    # Validate action parameters (use_default also fills in schema defaults).
    schema = util_schema.get_parameter_schema(action_db)
    validator = util_schema.get_validator()
    util_schema.validate(liveaction.parameters, schema, validator, use_default=True)

    # Validate that no immutable params are being overridden. Although possible to
    # ignore the override it is safer to inform the user to avoid surprises.
    immutables = _get_immutable_params(action_db.parameters)
    immutables.extend(_get_immutable_params(runnertype_db.runner_parameters))
    overridden_immutables = [p for p in six.iterkeys(liveaction.parameters) if p in immutables]
    if len(overridden_immutables) > 0:
        raise ValueError('Override of immutable parameter(s) %s is unsupported.'
                         % str(overridden_immutables))

    # Set notification settings for action.
    # XXX: There are cases when we don't want notifications to be sent for a particular
    # execution. So we should look at liveaction.parameters['notify']
    # and not set liveaction.notify.
    if action_db.notify:
        liveaction.notify = action_db.notify

    # Write to database and send to message queue.
    liveaction.status = action_constants.LIVEACTION_STATUS_REQUESTED
    liveaction.start_timestamp = isotime.add_utc_tz(datetime.datetime.utcnow())

    # Publish creation after both liveaction and actionexecution are created.
    liveaction = LiveAction.add_or_update(liveaction, publish=False)
    execution = executions.create_execution_object(liveaction, publish=False)

    # Assume that this is a creation.
    LiveAction.publish_create(liveaction)
    LiveAction.publish_status(liveaction)
    ActionExecution.publish_create(execution)

    extra = {'liveaction_db': liveaction, 'execution_db': execution}
    LOG.audit('Action execution requested. LiveAction.id=%s, ActionExecution.id=%s' %
              (liveaction.id, execution.id), extra=extra)

    return liveaction, execution
def is_action_canceled(liveaction_id):
    """Return True if the LiveAction with the given id has been canceled."""
    status = action_utils.get_liveaction_by_id(liveaction_id).status
    return status == action_constants.LIVEACTION_STATUS_CANCELED
|
gooddata/openstack-nova | nova/objects/request_spec.py | Python | apache-2.0 | 37,886 | 0.000106 | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import api_models
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.scheduler import utils as scheduler_utils
from nova.virt import hardware
REQUEST_SPEC_OPTIONAL_ATTRS = ['requested_destination',
'security_groups',
'network_metadata']
@base.NovaObjectRegistry.register
class RequestSpec(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: ImageMeta version 1.6
# Version 1.2: SchedulerRetries version 1.1
# Version 1.3: InstanceGroup version 1.10
# Version 1.4: ImageMeta version 1.7
# Version 1.5: Added get_by_instance_uuid(), create(), save()
# Version 1.6: Added requested_destination
# Version 1.7: Added destroy()
# Version 1.8: Added security_groups
# Version 1.9: Added user_id
# Version 1.10: Added network_metadata
# Version 1.11: Added is_bfv
VERSION = '1.11'
fields = {
'id': fields.IntegerField(),
'image': fields.ObjectField('ImageMeta', nullable=True),
'numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
# TODO(mriedem): The project_id shouldn't be nullable since the
# scheduler relies on it being set.
'project_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'flavor': fields.ObjectField('Flavor', nullable=False),
'num_instances': fields.IntegerField(default=1),
'ignore_hosts': fields.ListOfStringsField(nullable=True),
# NOTE(mriedem): In reality, you can only ever have one
# host in the force_hosts list. The fact this is a list
# is a mistake perpetuated over time.
'force_hosts': fields.ListOfStringsField(nullable=True),
# NOTE(mriedem): In reality, you can only ever have one
# node in the force_nodes list. The fact this is a list
# is a mistake perpetuated over time.
'force_nodes': fields.ListOfStringsField(nullable=True),
'requested_destination': fields.ObjectField('Destination',
nullable=True,
default=None),
'retry': fields.ObjectField('SchedulerRetries', nullable=True),
'limits': fields.ObjectField('SchedulerLimits', nullable=True),
'instance_group': fields.ObjectField('InstanceGroup', nullable=True),
# NOTE(sbauza): Since hints are depending on running filters, we prefer
# to leave the API correctly validating the hints per the filters and
# just provide to the RequestSpec object a free-form dictionary
'scheduler_hints': fields.DictOfListOfStringsField(nullable=True),
'instance_uuid': fields.UUIDField(),
'security_groups': fields.ObjectField('SecurityGroupList'),
'network_metadata': fields.ObjectField('NetworkMetadata'),
'is_bfv': fields.BooleanField(),
}
    def obj_make_compatible(self, primitive, target_version):
        """Downgrade the serialized form for older consumers.

        Strips fields from ``primitive`` that were introduced after
        ``target_version`` so an older service can still deserialize it.
        """
        super(RequestSpec, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        # Each guard removes the field added in that exact version; see the
        # version history comment at the top of the class.
        if target_version < (1, 11) and 'is_bfv' in primitive:
            del primitive['is_bfv']
        if target_version < (1, 10):
            if 'network_metadata' in primitive:
                del primitive['network_metadata']
        if target_version < (1, 9):
            if 'user_id' in primitive:
                del primitive['user_id']
        if target_version < (1, 8):
            if 'security_groups' in primitive:
                del primitive['security_groups']
        if target_version < (1, 6):
            if 'requested_destination' in primitive:
                del primitive['requested_destination']
    def obj_load_attr(self, attrname):
        """Lazy-load an optional attribute, giving it a sensible default.

        Only attributes in REQUEST_SPEC_OPTIONAL_ATTRS may be lazy-loaded;
        anything else raises ObjectActionError.
        """
        if attrname not in REQUEST_SPEC_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)

        if attrname == 'security_groups':
            # Default: an empty security group list.
            self.security_groups = objects.SecurityGroupList(objects=[])
            return

        if attrname == 'network_metadata':
            # Default: no physnets, not tunneled.
            self.network_metadata = objects.NetworkMetadata(
                physnets=set(), tunneled=False)
            return

        # NOTE(sbauza): In case the primitive was not providing that field
        # because of a previous RequestSpec version, we want to default
        # that field in order to have the same behaviour.
        self.obj_set_defaults(attrname)
    # Convenience proxies exposing the sizing attributes of the requested
    # flavor directly on the spec.
    @property
    def vcpus(self):
        """Number of virtual CPUs of the requested flavor."""
        return self.flavor.vcpus

    @property
    def memory_mb(self):
        """RAM of the requested flavor (MB, per the attribute name)."""
        return self.flavor.memory_mb

    @property
    def root_gb(self):
        """Root disk size of the requested flavor (GB, per the attribute name)."""
        return self.flavor.root_gb

    @property
    def ephemeral_gb(self):
        """Ephemeral disk size of the requested flavor (GB, per the attribute name)."""
        return self.flavor.ephemeral_gb

    @property
    def swap(self):
        """Swap size of the requested flavor."""
        return self.flavor.swap
def _image_meta_from_image(self, image):
if isinstance(image, objects.ImageMeta):
self.image = image
elif isinstance | (image, dict):
# NOTE(sbauza): Until Nova is fully providing an ImageMeta object
# for getting properties, we still need to hydrate it here
# TODO(sbauza): To be removed once all Request | Spec hydrations are
# done on the conductor side and if the image is an ImageMeta
self.image = objects.ImageMeta.from_dict(image)
else:
self.image = None
    def _from_instance(self, instance):
        """Populate spec fields from an instance.

        ``instance`` may be an Instance NovaObject, a plain dict (legacy
        request_spec format), or None (in which case nothing is set).
        """
        if isinstance(instance, obj_instance.Instance):
            # NOTE(sbauza): Instance should normally be a NovaObject...
            getter = getattr
        elif isinstance(instance, dict):
            # NOTE(sbauza): ... but there are some cases where request_spec
            # has an instance key as a dictionary, just because
            # select_destinations() is getting a request_spec dict made by
            # sched_utils.build_request_spec()
            # TODO(sbauza): To be removed once all RequestSpec hydrations are
            # done on the conductor side
            getter = lambda x, y: x.get(y)
        else:
            # If the instance is None, there is no reason to set the fields
            return

        instance_fields = ['numa_topology', 'pci_requests', 'uuid',
                           'project_id', 'user_id', 'availability_zone']
        for field in instance_fields:
            if field == 'uuid':
                # The instance's uuid is stored on the spec as instance_uuid.
                setattr(self, 'instance_uuid', getter(instance, field))
            elif field == 'pci_requests':
                self._from_instance_pci_requests(getter(instance, field))
            elif field == 'numa_topology':
                self._from_instance_numa_topology(getter(instance, field))
            else:
                setattr(self, field, getter(instance, field))
def _from_instance_pci_requests(self, pci_requests):
if isinstance(pci_requests, dict):
pci_req_cls = objects.InstancePCIRequests
|
metamarcdw/nowallet | nowallet/nowallet.py | Python | mit | 46,341 | 0.001273 | import logging
import sys
import os
FORMAT = "%(asctime)s %(levelname)s: %(message)s"  # type: str

# Console handler: INFO by default; errors only when NW_LOG=ERR is set.
stdout_hdlr = logging.StreamHandler(sys.stdout)  # type: logging.StreamHandler
stdout_hdlr.setFormatter(logging.Formatter(FORMAT))
stdout_hdlr.setLevel(
    logging.ERROR if os.environ.get("NW_LOG") == "ERR" else logging.INFO)

# File handler: full DEBUG log, truncated (mode="w") on every program start.
file_hdlr = logging.FileHandler(
    filename="nowallet.log", mode="w")  # type: logging.FileHandler
file_hdlr.setFormatter(logging.Formatter(FORMAT))
file_hdlr.setLevel(logging.DEBUG)

logging.basicConfig(level=logging.DEBUG, handlers=[stdout_hdlr, file_hdlr])
import asyncio
import io
import random
import collections
import pprint
import time
import json
from decimal import Decimal
from functools import wraps
from urllib import parse
from typing import (
Tuple, List, Set, Dict, KeysView, Any,
Union, Callable, Awaitable
)
from pycoin.serialize import b2h
from pycoin.ui import standard_tx_out_script
from pycoin.tx.tx_utils import distribute_from_split_pool, sign_tx
from pycoin.tx.Tx import Tx
from pycoin.tx.TxIn import TxIn
from pycoin.tx.TxOut import TxOut
from pycoin.tx.Spendable import Spendable
from connectrum.client import StratumClient
from connectrum.svr_info import ServerInfo
from .bip49 import SegwitBIP32Node
from .keys import derive_key
from .socks_http import urlopen
class Connection:
    """ Connection object. Connects to an Electrum server, and handles all
    Stratum protocol messages.
    """
    # pylint: disable=E1111
    def __init__(self,
                 loop: asyncio.AbstractEventLoop,
                 server: str,
                 port: int,
                 proto: str) -> None:
        """ Connection object constructor.

        :param loop: an asyncio event loop
        :param server: a string containing a hostname
        :param port: port number that the server listens on
        :param proto: transport protocol code; certificate verification is
            only enabled for "s" (SSL)
        :returns: A new Connection object
        """
        logging.info("Connecting...")

        self.server_info = ServerInfo(
            server, hostname=server, ports=port)  # type: ServerInfo
        logging.info(str(self.server_info.get_port(proto)))

        self.client = StratumClient(loop)  # type: StratumClient
        self.connection = self.client.connect(
            self.server_info,
            proto_code=proto,
            use_tor=True,
            disable_cert_verify=(proto != "s")
        )  # type: asyncio.Future

        # Populated by listen_subscribe(); drained by consume_queue().
        self.queue = None  # type: asyncio.Queue

    async def do_connect(self) -> None:
        """ Coroutine. Establishes a persistent connection to an Electrum server.
        Awaits the connection because AFAIK an init method can't be async.
        """
        await self.connection
        logging.info("Connected to server")

    async def listen_rpc(self, method: str, args: List) -> Any:
        """ Coroutine. Sends a normal RPC message to the server and awaits response.

        :param method: The Electrum API method to use
        :param args: Params associated with current method
        :returns: Response from server for this method(args)
        """
        return await self.client.RPC(method, *args)

    def listen_subscribe(self, method: str, args: List) -> asyncio.Future:
        """ Sends a "subscribe" message to the server.

        Stores the subscription queue on self.queue and returns the future
        holding the immediate response (e.g. the "history" hash).
        (The upstream -> None annotation contradicted the actual return; it
        is corrected here without changing behaviour.)

        :param method: The Electrum API method to use
        :param args: Params associated with current method
        :returns: Future with the immediate response for this subscription
        """
        future, queue = self.client.subscribe(
            method, *args)  # type: Tuple[asyncio.Future, asyncio.Queue]
        self.queue = queue
        return future

    async def consume_queue(self, queue_func: Callable[[List[str]], Awaitable[None]]) -> None:
        """ Coroutine. Infinite loop that consumes the current subscription queue.

        :param queue_func: A function to call when new responses arrive
        """
        while True:
            logging.info("Awaiting queue..")
            result = await self.queue.get()  # type: List[str]
            await queue_func(result)
class History:
    """ History object. Holds data relevant to a piece of
    our transaction history.
    """

    def __init__(self, tx_obj: Tx, is_spend: bool, value: Decimal, height: int) -> None:
        """ History object constructor.

        :param tx_obj: a pycoin.Tx object representing the tx data
        :param is_spend: boolean, was this tx a spend from our wallet?
        :param value: the coin_value of this tx
        :param height: the height of the block this tx is included in
        :returns: A new History object
        """
        self.tx_obj = tx_obj  # type: Tx
        self.is_spend = is_spend  # type: bool
        self.value = value  # type: Decimal
        self.height = height  # type: int
        # Populated later by get_timestamp(); None until then.
        self.timestamp = None  # type: str

    async def get_timestamp(self, connection: Connection) -> None:
        """ Coroutine. Gets the timestamp for this Tx based on the given height.

        :param connection: a Connection object for getting a block header
            from the server
        """
        if self.height > 0:
            block_header = await connection.listen_rpc(
                Wallet.methods["get_header"],
                [self.height]
            )  # type: Dict[str, Any]
            block_time = block_header["timestamp"]
            self.timestamp = block_time
            # Bug fix: the arguments were swapped relative to the format
            # string (height was logged as the timestamp and vice versa).
            logging.debug("Got timestamp %d from block at height %s",
                          self.timestamp, self.height)
        else:
            # Unconfirmed transaction: fall back to the current local time.
            self.timestamp = int(time.time())

    def as_dict(self) -> Dict[str, Any]:
        """ Transforms this History object into a dictionary.

        :returns: A dictionary representation of this History object
        """
        return {
            "txid": self.tx_obj.id(),
            "is_spend": self.is_spend,
            "value": str(self.value),
            "height": self.height,
            "timestamp": self.timestamp
        }

    def __str__(self) -> str:
        """ Special method __str__()

        :returns: The string representation of this History object
        """
        # NOTE(review): if timestamp is still None, time.localtime(None)
        # silently falls back to the current time -- confirm acceptable.
        return (
            "<History: TXID:{} is_spend:{} " +
            "value:{} height:{} timestamp:{}>"
        ).format(self.tx_obj.id(), self.is_spend,
                 self.value, self.height, time.asctime(time.localtime(self.timestamp)))

    def __repr__(self) -> str:
        return str(self)

    def __hash__(self) -> int:
        # Equality and hashing are both keyed on the transaction ID only.
        return hash(self.tx_obj.id())

    def __eq__(self, other) -> bool:
        # NOTE(review): assumes `other` is also a History; raises
        # AttributeError otherwise rather than returning NotImplemented.
        return self.tx_obj.id() == other.tx_obj.id()
# Per-chain constants: pycoin netcode, lowercase ticker string, and the
# bip44 coin-type index used for HD derivation.
Chain = collections.namedtuple("Chain", ["netcode", "chain_1209k", "bip44"])
BTC = Chain(netcode="BTC", chain_1209k="btc", bip44=0)  # Bitcoin
TBTC = Chain(netcode="XTN", chain_1209k="tbtc", bip44=1)  # Bitcoin testnet
LTC = Chain(netcode="LTC", chain_1209k="ltc", bip44=2)  # Litecoin
VTC = Chain(netcode="VTC", chain_1209k="vtc", bip44=28)  # Vertcoin
def log_time_elapsed(func: Callable) -> Callable:
    """ Decorator. Times completion of function and logs at level INFO.

    Bug fix: the wrapper previously discarded the wrapped function's
    return value (always returning None); it is now propagated.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        """ Decorator inner function. """
        start_time = time.time()  # type: float
        result = func(*args, **kwargs)
        end_time = time.time()  # type: float
        seconds = end_time - start_time  # type: float
        logging.info("Operation completed in {0:.3f} seconds".format(seconds))
        return result
    return inner
class Wallet:
""" Provides all functionality required for a fully functional and secure
HD brainwallet based on the Warpwallet technique.
"""
COIN = 100000000 # type: int
_GAP_LIMIT = 20 # type: int
methods = {
"get": "blockchain.transaction.get",
"get_balance": "blockchain.scripthash.get_balance",
"listunspent": "blockchain.scripthash.listunspent",
"get_history": "blockchain.scripthash.get_history",
"get_header": "blockchain.block.get_header",
"subscribe": "blockchain.scripthash.subscribe",
"estimatefee": "blockchain.estimatefee",
"broadcast": "blockchain.transaction.broadcast"
} # type: Dict[str, str]
def __init__(self,
s |
osmanbaskaya/text-entail | run/entail_utils.py | Python | mit | 1,076 | 0.003717 | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
"""
Some utility functions for entailment project
"""
from collections import defaultdict as dd
from metrics import *
def get_eval_metric(metric_name):
    """Look up an entailment scoring function by name.

    :param metric_name: one of "jaccard", "1", "2" or "3"
    :returns: the matching function from `metrics`, or None for an
        unknown name (preserving the original if/elif fall-through)
    """
    # Fix: the original chain contained a garbled token ("entai | l_score3");
    # a dispatch table also makes the name -> function mapping explicit.
    dispatch = {
        "jaccard": jaccard_index,
        "1": entail_score1,
        "2": entail_score2,
        "3": entail_score3,
    }
    return dispatch.get(metric_name)
def get_test_pairs(test_pairs):
    """Load (word1, word2, tag) triples from a whitespace-separated file.

    :param test_pairs: path to a file with one "w1 w2 tag" triple per line
    :returns: list of (w1, w2, tag) tuples
    :raises ValueError: if a line does not contain exactly three tokens
    """
    # Fix: the original append contained a garbled token ("w | 2").
    pairs = []
    for line in open(test_pairs):
        w1, w2, tag = line.split()
        pairs.append((w1, w2, tag))
    return pairs
def get_contexts_above_threshold(test_set, subs_file, threshold):
    """Collect, for each test word, the line numbers where its substitute
    probability meets the threshold.

    :param test_set: collection of target words to track
    :param subs_file: iterable of lines "target w1 p1 w2 p2 ..."
    :param threshold: minimum probability (inclusive) to record a hit
    :returns: (dict word -> set of 0-based line numbers, total line count)

    Fixes: `xrange` (Python-2-only) replaced with `range`, and an empty
    `subs_file` no longer raises NameError (it now yields a count of 0).
    """
    words = dd(set)
    line_num = -1  # so an empty input returns a line count of 0
    for line_num, line in enumerate(subs_file):
        line = line.split()
        # tokens alternate: substitute word, probability (token 0 is the target)
        for i in range(1, len(line) - 1, 2):
            word = line[i]
            if word in test_set:
                prob = float(line[i + 1])
                if prob >= threshold:
                    words[word].add(line_num)
    return words, line_num + 1
|
nicolargo/glances | glances/outputs/glances_curses.py | Python | lgpl-3.0 | 47,546 | 0.001367 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2019 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT | ANY | WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Curses interface class."""
from __future__ import unicode_literals
import sys
from glances.compat import nativestr, u, itervalues, enable, disable
from glances.globals import MACOS, WINDOWS
from glances.logger import logger
from glances.events import glances_events
from glances.processes import glances_processes, sort_processes_key_list
from glances.outputs.glances_unicode import unicode_message
from glances.timer import Timer
# Import curses library for "normal" operating system
try:
import curses
import curses.panel
from curses.textpad import Textbox
except ImportError:
logger.critical("Curses module not found. Glances cannot start in standalone mode.")
if WINDOWS:
logger.critical("For Windows you can try installing windows-curses with pip install.")
sys.exit(1)
class _GlancesCurses(object):
"""This class manages the curses display (and key pressed).
Note: It is a private class, use GlancesCursesClient or GlancesCursesBrowser.
"""
_hotkeys = {
# 'ENTER' > Edit the process filter
'0': {'switch': 'disable_irix'},
'1': {'switch': 'percpu'},
'2': {'switch': 'disable_left_sidebar'},
'3': {'switch': 'disable_quicklook'},
# '4' > Enable or disable quicklook
# '5' > Enable or disable top menu
'6': {'switch': 'meangpu'},
'9': {'switch': 'theme_white'},
'/': {'switch': 'process_short_name'},
'a': {'sort_key': 'auto'},
'A': {'switch': 'disable_amps'},
'b': {'switch': 'byte'},
'B': {'switch': 'diskio_iops'},
'c': {'sort_key': 'cpu_percent'},
'C': {'switch': 'disable_cloud'},
'd': {'switch': 'disable_diskio'},
'D': {'switch': 'disable_docker'},
# 'e' > Enable/Disable process extended
# 'E' > Erase the process filter
# 'f' > Show/hide fs / folder stats
'F': {'switch': 'fs_free_space'},
'g': {'switch': 'generate_graph'},
'G': {'switch': 'disable_gpu'},
'h': {'switch': 'help_tag'},
'i': {'sort_key': 'io_counters'},
'I': {'switch': 'disable_ip'},
# 'k' > Kill selected process
'K': {'switch': 'disable_connections'},
'l': {'switch': 'disable_alert'},
'm': {'sort_key': 'memory_percent'},
'M': {'switch': 'reset_minmax_tag'},
'n': {'switch': 'disable_network'},
'N': {'switch': 'disable_now'},
'p': {'sort_key': 'name'},
'P': {'switch': 'disable_ports'},
# 'q' or ESCAPE > Quit
'Q': {'switch': 'enable_irq'},
'r': {'switch': 'disable_smart'},
'R': {'switch': 'disable_raid'},
's': {'switch': 'disable_sensors'},
'S': {'switch': 'sparkline'},
't': {'sort_key': 'cpu_times'},
'T': {'switch': 'network_sum'},
'u': {'sort_key': 'username'},
'U': {'switch': 'network_cumul'},
# 'w' > Delete finished warning logs
'W': {'switch': 'disable_wifi'},
# 'x' > Delete finished warning and critical logs
# 'z' > Enable or disable processes
# "<" (left arrow) navigation through process sort
# ">" (right arrow) navigation through process sort
# 'UP' > Up in the server list
# 'DOWN' > Down in the server list
}
_sort_loop = sort_processes_key_list
# Define top menu
_top = ['quicklook', 'cpu', 'percpu', 'gpu', 'mem', 'memswap', 'load']
_quicklook_max_width = 68
# Define left sidebar
_left_sidebar = [
'network',
'connections',
'wifi',
'ports',
'diskio',
'fs',
'irq',
'folders',
'raid',
'smart',
'sensors',
'now',
]
_left_sidebar_min_width = 23
_left_sidebar_max_width = 34
# Define right sidebar
_right_sidebar = ['docker', 'processcount', 'amps', 'processlist', 'alert']
def __init__(self, config=None, args=None):
# Init
self.config = config
self.args = args
# Init windows positions
self.term_w = 80
self.term_h = 24
# Space between stats
self.space_between_column = 3
self.space_between_line = 2
# Init the curses screen
self.screen = curses.initscr()
if not self.screen:
logger.critical("Cannot init the curses library.\n")
sys.exit(1)
# Load the 'outputs' section of the configuration file
# - Init the theme (default is black)
self.theme = {'name': 'black'}
# Load configuration file
self.load_config(config)
# Init cursor
self._init_cursor()
# Init the colors
self._init_colors()
# Init main window
self.term_window = self.screen.subwin(0, 0)
# Init edit filter tag
self.edit_filter = False
# Init kill process tag
self.kill_process = False
# Init the process min/max reset
self.args.reset_minmax_tag = False
# Init cursor
self.args.cursor_position = 0
# Catch key pressed with non blocking mode
self.term_window.keypad(1)
self.term_window.nodelay(1)
self.pressedkey = -1
# History tag
self._init_history()
def load_config(self, config):
"""Load the outputs section of the configuration file."""
# Load the theme
if config is not None and config.has_section('outputs'):
logger.debug('Read the outputs section in the configuration file')
self.theme['name'] = config.get_value('outputs', 'curse_theme', default='black')
logger.debug('Theme for the curse interface: {}'.format(self.theme['name']))
def is_theme(self, name):
"""Return True if the theme *name* should be used."""
return getattr(self.args, 'theme_' + name) or self.theme['name'] == name
def _init_history(self):
"""Init the history option."""
self.reset_history_tag = False
def _init_cursor(self):
"""Init cursors."""
if hasattr(curses, 'noecho'):
curses.noecho()
if hasattr(curses, 'cbreak'):
curses.cbreak()
self.set_cursor(0)
def _init_colors(self):
"""Init the Curses color layout."""
# Set curses options
try:
if hasattr(curses, 'start_color'):
curses.start_color()
logger.debug('Curses interface compatible with {} colors'.format(curses.COLORS))
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
except Exception as e:
logger.warning('Error initializing terminal color ({})'.format(e))
# Init colors
if self.args.disable_bold:
A_BOLD = 0
self.args.disable_bg = True
else:
A_BOLD = curses.A_BOLD
self.title_color = A_BOLD
self.title_underline_color = A_BOLD | curses.A_UNDERLINE
self.help_color = A_BOLD
if curses.has_colors():
# The screen is compatible with a colored design
if self.is_theme('white'):
# White theme: black ==> white
curses.init_pair(1, curses.COLOR_BLACK, -1)
else:
curses.init_pair(1, curses.COLOR_WHITE, -1)
if self.args.disable_bg:
|
Adarnof/adarnauth-esi | esi/urls.py | Python | gpl-3.0 | 196 | 0 | from __future__ import unicode_literals
from django.conf.urls import url

import esi.views

# URL namespace used when reversing these routes (e.g. "esi:callback").
app_name = 'esi'

urlpatterns = [
    # OAuth2 redirect endpoint hit by the SSO provider after login.
    url(r'^callback/$', esi.views.receive_callback, name='callback'),
]
|
kracekumar/bt | bt/server.py | Python | gpl-3.0 | 8,012 | 0.000874 | # -*- coding: utf-8 -*-
import os
import asyncio
import struct
from .logger import get_logger
from .protocol import PeerStreamIterator
from .message import (MessageID,
InterestedMessage,
HandshakeMessage,
BitFieldMessage,
NotInterestedMessage,
ChokeMessage,
UnchokeMessage,
HaveMessage,
RequestMessage,
PieceMessage,
CancelMessage,
KeepAliveMessage)
logger = get_logger()
class SourceFileReader:
    """Reads piece data for the torrent being seeded from the local
    source file."""

    def __init__(self, torrent):
        self.torrent = torrent
        # NOTE: the descriptor stays open for the object's lifetime and is
        # never explicitly closed here.
        self.fd = os.open(self.torrent.name, os.O_RDONLY)

    def read(self, begin, index, length):
        """Return `length` bytes starting at offset `begin` within piece
        number `index`.

        Bug fix: `begin` was previously ignored, so any request with a
        non-zero block offset returned data from the start of the piece.
        """
        pos = index * self.torrent.info.piece_length + begin
        os.lseek(self.fd, pos, os.SEEK_SET)
        return os.read(self.fd, length)

    def has_all_pieces(self):
        """Check the size on the disk is equal or greater than
        (piece_length - 1) * piece_length.

        The assumption is clients wrote the last piece to disk
        after checking integrity.

        Returns True or False.
        """
        min_length = (len(self.torrent.info.pieces) - 1) * self.torrent.info.piece_length
        return os.path.getsize(self.torrent.name) > min_length

    def calculate_have_pieces(self):
        # TODO: not implemented -- a partially-downloaded file currently
        # reports no verifiable pieces (returns None).
        pass

    def get_have_pieces(self):
        """Get all have pieces.

        Returns a list of bools of size len(pieces) + 1; the trailing
        element is always False.  True marks an available piece, False a
        missing one.
        """
        if self.has_all_pieces():
            pieces_availability = [True] * len(self.torrent.info.pieces)
            pieces_availability.append(False)
            return pieces_availability
        return self.calculate_have_pieces()
class RequestHandler:
    """Parses incoming peer-wire messages for the seeder and builds the
    responses that should be sent back."""

    def __init__(self, torrent):
        self.torrent = torrent
        self.file_reader = SourceFileReader(torrent=self.torrent)

    def parse(self, buffer):
        """
        Tries to parse protocol messages if there is enough bytes read in the
        buffer.

        :return The parsed message, or None if no message could be parsed
        """
        # Each message is structured as:
        #  <length prefix><message ID><payload>
        #
        # The `length prefix` is a four byte big-endian value
        # The `message ID` is a decimal byte
        # The `payload` is the value of `length prefix`
        #
        # The message length is not part of the actual length. So another
        # 4 bytes needs to be included when slicing the buffer.
        self.buffer = buffer
        header_length = 4

        # A handshake is exactly 68 bytes and carries no length prefix.
        if len(self.buffer) == 68:
            return HandshakeMessage.decode(self.buffer)
        elif len(self.buffer) > 4:  # 4 bytes is needed to identify the message
            message_length = struct.unpack('>I', self.buffer[0:4])[0]
            if message_length == 0:
                return KeepAliveMessage()

            if len(self.buffer) >= message_length:
                message_id = struct.unpack('>b', self.buffer[4:5])[0]

                def _consume():
                    """Consume the current message from the read buffer"""
                    self.buffer = self.buffer[header_length + message_length:]

                def _data():
                    """Extract the current message from the read buffer"""
                    return self.buffer[:header_length + message_length]

                if message_id is MessageID.BitField.value:
                    data = _data()
                    _consume()
                    return BitFieldMessage.decode(data)
                elif message_id is MessageID.Interested.value:
                    _consume()
                    return InterestedMessage()
                elif message_id is MessageID.NotInterested.value:
                    _consume()
                    return NotInterestedMessage()
                elif message_id is MessageID.Choke.value:
                    _consume()
                    return ChokeMessage()
                elif message_id is MessageID.Unchoke.value:
                    _consume()
                    return UnchokeMessage()
                elif message_id is MessageID.Have.value:
                    data = _data()
                    _consume()
                    return HaveMessage.decode(data)
                elif message_id is MessageID.Piece.value:
                    data = _data()
                    _consume()
                    return PieceMessage.decode(data)
                elif message_id is MessageID.Request.value:
                    data = _data()
                    _consume()
                    return RequestMessage.decode(data)
                elif message_id is MessageID.Cancel.value:
                    data = _data()
                    _consume()
                    return CancelMessage.decode(data)
                else:
                    logger.debug('Unsupported message!')
            else:
                return None
        logger.debug('Not enough in buffer in order to parse')
        return None

    def get_piece(self, begin, index, length):
        """Read the requested block from disk and wrap it in a PieceMessage."""
        data = self.file_reader.read(begin=begin, index=index, length=length)
        return PieceMessage(begin=begin, index=index, block=data)

    def handle_message(self, buffer):
        """Dispatch on the parsed message type.

        Returns the reply message to send for Interested/Request messages,
        otherwise returns the parsed message itself (or None).
        """
        message = self.parse(buffer)
        if isinstance(message, NotInterestedMessage):
            logger.debug('Remove interested state')
        elif isinstance(message, HandshakeMessage):
            logger.debug('Received Handshake')
        elif isinstance(message, ChokeMessage):
            # Bug fix: this branch referenced undefined names
            # (self.current_state / PeerState), raising NameError whenever a
            # peer choked us; the seeder only needs to log the event.
            logger.debug('Received choke message')
        elif isinstance(message, UnchokeMessage):
            logger.debug('Received unchoke message')
        elif isinstance(message, HaveMessage):
            logger.debug('Received have message')
        elif isinstance(message, BitFieldMessage):
            logger.debug('Received bit field message: {}'.format(message))
        elif isinstance(message, PieceMessage):
            pass
        elif isinstance(message, InterestedMessage):
            # Advertise which pieces we can serve.
            return BitFieldMessage(val=self.file_reader.get_have_pieces())
        elif isinstance(message, RequestMessage):
            return self.get_piece(begin=message.begin, index=message.index,
                                  length=message.length)
        elif isinstance(message, CancelMessage):
            # TODO: Implement cancel data
            pass
        return message
class TorrentServer(asyncio.Protocol):
    """asyncio protocol that serves piece requests for a single torrent.

    An instance doubles as its own protocol factory via __call__.
    """

    def __init__(self, torrent):
        self.torrent = torrent
        super().__init__()

    def __call__(self):
        # loop.create_server() invokes the factory per incoming connection,
        # so the state initialized here is reset each time.
        # NOTE(review): returning self shares one protocol instance across
        # connections -- verify concurrent peers are handled correctly.
        self.connections = set([])
        self.request_handler = RequestHandler(torrent=self.torrent)
        logger.debug('Init server')
        return self

    def connection_made(self, transport):
        self.transport = transport
        peer = transport.get_extra_info('peername')
        self.connections.add(peer)

    def data_received(self, data):
        message = self.request_handler.handle_message(data)
        logger.debug(message)
        if message:
            logger.info('Serving {}'.format(message))
            self.transport.write(message.encode())

    def eof_received(self):
        logger.debug('eof received')

    def connection_lost(self, exc):
        # Fix: corrected the misspelled log message ('connectin').
        logger.debug('connection lost')
async def run_server(port, torrent):
    """Run a server to respond to all clients.

    Binds to 127.0.0.1 on the given port, using TorrentServer as the
    protocol factory for `torrent`.

    :returns: the asyncio server object; the caller owns its lifecycle
    """
    logger.info('Starting server in port {}'.format(port))
    loop = asyncio.get_event_loop()
    server = await loop.create_server(
        TorrentServer(torrent), host='127.0.0.1', port=port)
    return server
Praesidio/syslog-ng-python-sqs | python_sqs_stdin.py | Python | gpl-2.0 | 871 | 0.001148 | #!/usr/bin/env python
import json, logging, sys
import python_sqs
log = None
line = None
encodings = ('UTF-8', 'WINDOWS-1252', 'ISO-8859-1')
try | :
python_sqs.init(json_input=True)
log = logging.getLogger(__name__)
line = sys.stdin.readline()
while line:
line = line.strip()
for encoding in encodings:
try:
line = unicode(line, | encoding)
break
except ValueError:
pass
else:
log.debug("Falling back to ascii w/replace")
line = unicode(line, errors='replace')
python_sqs.queue(line)
line = sys.stdin.readline()
except Exception, e:
if log:
log.error("Error while processing %s", line, exc_info=e)
else:
print e
finally:
try:
python_sqs.deinit()
except Exception, e:
print e
|
EndPointCorp/lg_ros_nodes | lg_builder/setup.py | Python | apache-2.0 | 322 | 0 | #!/usr/bin/env python3
from distutils.core import setup

from catkin_pkg.python_setup import generate_distutils_setup

# Fix: the generate_distutils_setup identifier was garbled across two
# lines ("generate_distutil | s_setup"). Metadata below is merged with
# the package.xml values by catkin.
d = generate_distutils_setup(
    packages=['lg_builder'],
    package_dir={'': 'src'},
    scripts=['scripts/lg-ros-build'],
    install_requires=['catkin_pkg', 'python-debian', 'rospkg']
)

setup(**d)
|
entomb/CouchPotatoServer | couchpotato/core/providers/nzb/__init__.py | Python | gpl-3.0 | 354 | 0 | config = [{
'name': 'nzb_providers',
'groups': [
| {
'label': 'Usenet Providers',
'description': 'Providers searching usenet for new releases',
'wizard': True,
'type': 'list',
'name': 'nzb_providers',
'tab': 'searcher',
| 'options': [],
},
],
}]
|
failys/CAIRIS | cairis/core/TrustBoundary.py | Python | apache-2.0 | 1,461 | 0.019165 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'Shamal Faily'
class TrustBoundary:
  """A named trust boundary grouping components and the privilege levels
  that apply within it.

  Bug fix: the constructor previously used mutable default arguments
  ({} and []), so every instance created without explicit arguments
  shared the same dict/list objects; defaults are now created
  per-instance (None sentinels), which is backward-compatible.
  """

  def __init__(self,tbId = -1,tbName = '',tbType = 'General', tbDesc = '',comps = None, pl = None, tags = []if False else None):
    self.theId = tbId
    self.theName = tbName
    self.theType = tbType
    self.theDescription = tbDesc
    self.theComponents = comps if comps is not None else {}
    self.thePrivilegeLevels = pl if pl is not None else {}
    self.theTags = tags if tags is not None else []

  def id(self): return self.theId
  def name(self): return self.theName
  def type(self): return self.theType
  def description(self): return self.theDescription
  def components(self): return self.theComponents
  def privilegeLevels(self): return self.thePrivilegeLevels
  def tags(self): return self.theTags
sammyshj/nyx | nyx/panel/config.py | Python | gpl-3.0 | 11,732 | 0.011933 | # Copyright 2010-2016, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Panel presenting the configuration state for tor or nyx. Options can be edited
and the resulting configuration files saved.
"""
import curses
import os
import nyx.controller
import nyx.curses
import nyx.panel
import nyx.popups
import stem.control
import stem.manual
from nyx.curses import WHITE, NORMAL, BOLD, HIGHLIGHT
from nyx import DATA_DIR, tor_controller
from stem.util import conf, enum, log, str_tools
SortAttr = enum.Enum('NAME', 'VALUE', 'VALUE_TYPE', 'CATEGORY', 'USAGE', 'SUMMARY', 'DESCRIPTION', 'MAN_PAGE_ENTRY', 'IS_SET')
DETAILS_HEIGHT = 8
NAME_WIDTH = 25
VALUE_WIDTH = 15
def conf_handler(key, value):
  """Config hook: parses 'features.config.order' as a comma-separated list
  of SortAttr values (the trailing 3 is presumably the expected count --
  per stem's conf API). Other keys fall through, keeping the raw value."""

  if key == 'features.config.order':
    return conf.parse_enum_csv(key, value[0], SortAttr, 3)
CONFIG = conf.config_dict('nyx', {
'attr.config.category_color': {},
'attr.config.sort_color': {},
'features.config.order': [SortAttr.MAN_PAGE_ENTRY, SortAttr.NAME, SortAttr.IS_SET],
'features.config.state.showPrivateOptions': False,
'features.config.state.showVirtualOptions': False,
}, conf_handler)
class ConfigEntry(object):
  """
  Configuration option presented in the panel.

  :var str name: name of the configuration option
  :var str value_type: type of value
  :var stem.manual.ConfigOption manual: manual information about the option
  """

  def __init__(self, name, value_type, manual):
    self.name = name
    self.value_type = value_type
    self.manual = manual.config_options.get(name, stem.manual.ConfigOption(name))
    # Position within the man page, used for MAN_PAGE_ENTRY sorting.
    # Bug fix: dict views have no index() under python 3, so materialize
    # the keys into a list before searching.
    self._index = list(manual.config_options).index(name) if name in manual.config_options else 99999

  def value(self):
    """
    Provides the value of this configuration option.

    :returns: **str** representation of the current config value
    """
    values = tor_controller().get_conf(self.name, [], True)

    if not values:
      return '<none>'
    elif self.value_type == 'Boolean' and values[0] in ('0', '1'):
      return 'False' if values[0] == '0' else 'True'
    elif self.value_type == 'DataSize' and values[0].isdigit():
      return str_tools.size_label(int(values[0]))
    elif self.value_type == 'TimeInterval' and values[0].isdigit():
      return str_tools.time_label(int(values[0]), is_long = True)
    else:
      return ', '.join(values)

  def is_set(self):
    """
    Checks if the configuration option has a custom value.

    :returns: **True** if the option has a custom value, **False** otherwise
    """
    return tor_controller().is_set(self.name, False)

  def sort_value(self, attr):
    """
    Provides a heuristic for sorting by a given value.

    :param SortAttr attr: sort attribute to provide a heuristic for

    :returns: comparable value for sorting
    """
    if attr == SortAttr.CATEGORY:
      return self.manual.category
    elif attr == SortAttr.NAME:
      return self.name
    elif attr == SortAttr.VALUE:
      return self.value()
    elif attr == SortAttr.VALUE_TYPE:
      return self.value_type
    elif attr == SortAttr.USAGE:
      return self.manual.usage
    elif attr == SortAttr.SUMMARY:
      return self.manual.summary
    elif attr == SortAttr.DESCRIPTION:
      return self.manual.description
    elif attr == SortAttr.MAN_PAGE_ENTRY:
      return self._index
    elif attr == SortAttr.IS_SET:
      # Invert so that set options sort first.
      return not self.is_set()
class ConfigPanel(nyx.panel.Panel):
"""
Editor for tor's configuration.
"""
def __init__(self):
nyx.panel.Panel.__init__(self)
self._contents = []
self._scroller = nyx.curses.CursorScroller()
self._sort_order = CONFIG['features.config.order']
self._show_all = False # show all options, or just the important ones
cached_manual_path = os.path.join(DATA_DIR, 'manual')
if os.path.exists(cached_manual_path):
manual = stem.manual.Manual.from_cache(cached_manual_path)
else:
try:
manual = stem.manual.Manual.from_man()
try:
manual.save(cached_manual_path)
except IOError as exc:
log.debug("Unable to cache manual information to '%s'. This is fine, but means starting Nyx takes a little longer than usual: " % (cached_manual_path, exc))
except IOError as exc:
log.debug("Unable to use 'man tor' to get information about config options (%s), using bundled information instead" % exc)
manual = stem.manual.Manual.from_cache()
try:
for line in tor_controller().get_info('config/names').splitlines():
# Lines of the form "<option> <type>[ <documentation>]". Documentation
# was apparently only in old tor versions like 0.2.1.25.
if ' ' not in line:
continue
line_comp = line.split()
name, value_type = line_comp[0], line_comp[1]
# skips private and virtual entries if n | ot configured to show them
if name.startswith('__') and not CONFIG['features.config.state.showPrivateOptions']:
continue
elif value_type == 'Virtual' and not CONFIG['features.config.state.showVirtualOptions']:
continue
self._contents.append(ConfigEntry(name, value_type, manual))
self._contents = sorted(self._contents, key = lambda entry: [entry.sort_value(field) for f | ield in self._sort_order])
except stem.ControllerError as exc:
log.warn('Unable to determine the configuration options tor supports: %s' % exc)
def show_sort_dialog(self):
"""
Provides the dialog for sorting our configuration options.
"""
sort_colors = dict([(attr, CONFIG['attr.config.sort_color'].get(attr, WHITE)) for attr in SortAttr])
results = nyx.popups.select_sort_order('Config Option Ordering:', SortAttr, self._sort_order, sort_colors)
if results:
self._sort_order = results
self._contents = sorted(self._contents, key = lambda entry: [entry.sort_value(field) for field in self._sort_order])
def show_write_dialog(self):
"""
Confirmation dialog for saving tor's configuration.
"""
controller = tor_controller()
torrc = controller.get_info('config-text', None)
if nyx.popups.confirm_save_torrc(torrc):
try:
controller.save_conf()
nyx.controller.show_message('Saved configuration to %s' % controller.get_info('config-file', '<unknown>'), HIGHLIGHT, max_wait = 2)
except IOError as exc:
nyx.controller.show_message('Unable to save configuration (%s)' % exc.strerror, HIGHLIGHT, max_wait = 2)
self.redraw()
def key_handlers(self):
def _scroll(key):
page_height = self.get_height() - DETAILS_HEIGHT
is_changed = self._scroller.handle_key(key, self._get_config_options(), page_height)
if is_changed:
self.redraw()
def _edit_selected_value():
selected = self._scroller.selection(self._get_config_options())
initial_value = selected.value() if selected.is_set() else ''
new_value = nyx.controller.input_prompt('%s Value (esc to cancel): ' % selected.name, initial_value)
if new_value != initial_value:
try:
if selected.value_type == 'Boolean':
# if the value's a boolean then allow for 'true' and 'false' inputs
if new_value.lower() == 'true':
new_value = '1'
elif new_value.lower() == 'false':
new_value = '0'
elif selected.value_type == 'LineList':
new_value = new_value.split(',') # set_conf accepts list inputs
tor_controller().set_conf(selected.name, new_value)
self.redraw()
except Exception as exc:
nyx.controller.show_message('%s (press any key)' % exc, HIGHLIGHT, max_wait = 30)
def _toggle_show_all():
self._show_all = not self._show_all
self.redraw()
return (
nyx.panel.KeyHandler('arrows', 'scroll up and down', _scroll, key_func = lambda key: key.is_scroll()),
nyx.panel.KeyHandler('enter', 'edit configuration option', _edit_selected_value, key_func = lambda key: key.is_selection()),
nyx.panel.KeyHandler('w', 'write torrc', self.show_write_dialog),
nyx.panel.KeyHandler('a', 'toggle filtering', _toggle_show_all),
nyx |
cbg-ethz/WES_Cancer_Sim | sim_cancer/tools/GemSIM_v1.6/GemErr.py | Python | apache-2.0 | 29,535 | 0.032673 | #!/usr/bin/python
# Copyright (C) 2011, Kerensa McElroy.
# kerensa@unsw.edu.au
# This file is part of the sequence simulator GemSIM.
# It is used to calculate a platform- and run- specific
# error model for generating realistic sequencing reads.
# Alternatively, users may employ one of the precomputed
# error models distributed as part of the GemSIM package.
# GemSIM is free software; it may be redistributed and
# modified under the terms of the GNU General Public
# License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option)
# any later version.
# GemSIM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more
# details.
# You should have recieved a copy of the GNU General Public
# License along with GemSIM. If not, see
# http://www.gnu.org/licenses/.
import sys
import getopt
import cPickle
import gzip
import logging
import logging.handlers
import numpy as np
# Make a global logging object.
errlog=logging.getLogger("ErrLog")
# Set logging level, and write everything to a file
errlog.setLevel(logging.DEBUG)
LOG_FILENAME='./err.log'  # written to the current working directory
h=logging.FileHandler(LOG_FILENAME,'w')  # 'w' truncates any previous run's log
f=logging.Formatter("%(levelname)s %(asctime)s %(funcName)s %(lineno)d %(message)s")
h.setFormatter(f)
errlog.addHandler(h)
def rComp(sequence):
    """Reverse complements a sequence, preserving case.

    Raises KeyError for characters outside ACGTN/acgtn (unchanged from
    the original behavior).
    """
    d={'A':'T','T':'A','C':'G','G':'C','a':'t','t':'a','c':'g','g':'c','N':'N','n':'n'}
    # join() over the reversed sequence avoids the quadratic string
    # concatenation of the original character-by-character loop.
    return ''.join(d[base] for base in reversed(sequence))
def getRef(refFile):
    """Returns a genome reference.

    Parses a (multi-)FASTA file into a dict mapping header -> sequence.
    Sequences are upper-cased and IUPAC ambiguity codes (RYMKSWHBVD) are
    masked to 'N'.  Headers are truncated to 50 characters; a duplicate
    header gets a numeric suffix appended.
    """
    refDict={}
    hdList=[]
    ref=''
    num=0
    try:
        f=open(refFile)
    except IOError:
        errlog.error('Cannot find reference file ' +refFile+'. Please check pathname.')
        sys.exit('Cannot find reference file '+refFile+'. Please check pathname.')
    i=f.readline()
    head=i[1:51].rstrip()
    i=f.readline().rstrip()
    # Loop until readline() returns '' at EOF.
    # NOTE(review): only this second line is rstripped before the test, so
    # a blank second line would end parsing early -- confirm inputs.
    while i:
        if i[0]!='>':
            ref+=i.rstrip()
            i=f.readline()
        else:
            # New header encountered: flush the sequence collected so far.
            if head in hdList:
                num+=1
                head=head+str(num)
            ref=ref.upper()
            for l in 'RYMKSWHBVD':
                ref=ref.replace(l,'N')
            refDict[head]=ref
            hdList.append(head)
            head=i[1:51].rstrip()
            i=f.readline()
            ref=''
    # Flush the final record.
    # NOTE(review): unlike the loop above, the final header is not checked
    # against hdList, so a duplicate last header silently overwrites.
    ref=ref.upper()
    for l in 'RYMKSWHBVD':
        ref=ref.replace(l,'N')
    refDict[head]=ref
    errlog.debug('Reference file successfully parsed.')
    return refDict
def parseFasta(file):
    """Returns sequence string from FASTA format.

    Concatenates all non-header lines and masks IUPAC nucleotide
    ambiguity codes to 'N'.
    """
    f=open(file)
    ref=''
    for i in f:
        if i[0]!='>':
            ref+=i.rstrip()
    # Fix: the original replaced 'RYLMKSWHBVD' -- 'L' is not an IUPAC
    # nucleotide ambiguity code; use the same code set as getRef().
    for l in 'RYMKSWHBVD':
        ref=ref.replace(l,'N')
    return ref
def flip(refSlice,seq,qual,cigar):
    """Reverse complements a read: returns the reverse-complemented
    reference slice and sequence, reversed quality string, and the cigar
    list with pair order and element order both reversed."""
    comp={'A':'T','C':'G','G':'C','T':'A','N':'N','a':'t','t':'a','g':'c','c':'g','n':'n'}
    # Complement each string, then reverse it.
    revCompSeq=''.join(comp[base] for base in seq)[::-1]
    revCompRef=''.join(comp[base] for base in refSlice)[::-1]
    # Swap (length, op) ordering within each pair, then reverse the pairs.
    swappedCig=[]
    for idx in range(0,len(cigar),2):
        swappedCig.append(cigar[idx+1])
        swappedCig.append(cigar[idx])
    swappedCig.reverse()
    return revCompRef,revCompSeq,qual[::-1],swappedCig
def parseMD(md):
    """Separates a cigar field into list of integers and character strings.

    Splits an MD-tag-style string into maximal runs of digits (returned
    as ints) and non-digits (returned as strings), preserving order.
    Improvement over the original manual state machine: an empty input
    now returns [] instead of raising IndexError.
    """
    import itertools
    mdL=[]
    for isDigitRun,run in itertools.groupby(md,key=str.isdigit):
        token=''.join(run)
        mdL.append(int(token) if isDigitRun else token)
    return mdL
def updateM(ref,pos,seq,qual,cig,circ,mxNum,maxIndel,dir,readLen,excl):
"""Updates model with mutations, insertions, deletions in read."""
swap={'A':'T','T':'A','C':'G','G':'C','a':'t','t':'a','c':'g','g':'c','N':'N','n':'n'}
    inds={'A':0,'T':1,'G':2,'C':3,'N':4,'a': 0, 't':1, 'g':2, 'c':3, 'n':4}
    RposL=pos-1 #tracks leftmost pos of align against ref
RposR=pos-1 #tracks rightmost pos of align against ref
Rpos=0 #position within aligned reference slice
Spos=0 #sequence position within read
bPos=pos-1 #individual reference base position. 1 index
refSlice=''
cigH=cig
if cig[1]=='H':
cigH=cigH[2:]
if cig[-1]=='H':
cigH=cigH[:-2]
if cigH[1]=='S':
RposL-=cigH[0]
for i in range(0,len(cig),2): #slice alignment out of ref, excluding masked sections.
if cig[i+1]=='M':
RposR+=cig[i]
elif cig[i+1]=='=':
RposR+=cig[i]
elif cig[i+1]=='X':
RposR+=cig[i]
elif cig[i+1]=='D':
RposR+=cig[i]
elif cig[i+1]=='N':
refSlice+=ref[RposL:RposR] #cut before masked section.
RposR+=cig[i]
RposL=RposR
if cigH[-1]=='S':
RposR+=cigH[-2]
refSlice+=ref[RposL:RposR]
refLen=len(ref)
if dir=='f':
if RposR<refLen:
refSlice+=ref[RposR] #+1 to allow consideration of base AFTER last read base.
else:
if circ:
refSlice+=ref[0] #+1 for reads ending at last reference base (circular).
else:
refSlice+='N' #+1 for reads ending at last reference base (linear)
elif dir=='r':
if pos-2>0:
refSlice=ref[pos-2]+refSlice
else:
if circ:
refSlice=ref[-1]+refSlice
else:
refSlice='N'+refSlice
refSlice,seq,qual,cig=flip(refSlice,seq,qual,cig)
bPos=refLen-bPos-len(refSlice) #so when we increment bpos it does in the right direction
seq=seq[:readLen] #make sure fits in matrix
seq=seq.upper()
qual=qual[:readLen]
d0=0
d1=inds['N']
d2=inds['N']
d3=inds['N']
d4=inds['N']
d5=inds[refSlice[0]]
d6=5 #index for totals
if cig[1]!='H':
matrix[mxNum][d0][d1][d2][d3][d4][d5][d6]+=1
for i in range(0,len(cig),2):
if cig[i+1]=='H':
seq=seq[:Spos]+'N'*cig[i]+seq[Spos:]
Spos+=cig[i]
elif cig[i+1]=='M' or cig[i+1]=='S' or cig[i+1]=='X' or cig[i+1]=='=':
matches=cig[i]
count=0
while count<matches:
Spos+=1
Rpos+=1
bPos+=1
count+=1
refBase=refSlice[Rpos-1]
mut=seq[Spos-1]
after=refSlice[Rpos]
qualIndex=ord(qual[Spos-1])-33
if Spos>=4:
seq4=seq[Spos-4:Spos]
else:
seq4='NNNN'+seq[:Spos]
seq4=seq4[-4:]
d0=Spos
d1=inds[refBase]
d2=inds[seq4[2]]
d3=inds[seq4[1]]
d4=inds[seq4[0]]
d5=inds[after]
if mut!=refBase and refBase!='N':
snp=False
if dir=='f':
if str(bPos) in excl:
snp=True
else:
if (str(refLen-bPos)) in excl:
snp=True
if mut in 'ATCGatgcNn' and snp==False:
d6=inds[mut]
matrix[mxNum][d0][d1][d2][d3][d4][d5][d6]+=1
if qualIndex in bQualL[Spos-1]:
bQualL[Spos-1][qualIndex]+=1
|
gwwfps/boxrps | werkzeug/contrib/atom.py | Python | mit | 14,976 | 0.000267 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
    """Serialize one Atom text construct as a newline-terminated XML snippet."""
    # xhtml content is embedded verbatim inside a namespaced <div>; it is
    # assumed to already be well-formed XML and is NOT escaped.
    if content_type == 'xhtml':
        return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % (
            name, XHTML_NAMESPACE, content, name)
    body = escape(content)
    if content_type:
        return u'<%s type="%s">%s</%s>\n' % (name, content_type, body, name)
    return u'<%s>%s</%s>\n' % (name, body, name)
def format_iso8601(obj):
    """Render *obj* (a datetime) as an ISO-8601 'YYYY-MM-DDTHH:MM:SSZ' string."""
    return '{0:%Y-%m-%dT%H:%M:%SZ}'.format(obj)
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
One of ``'text'``, ``'html'``, ``'text'``
or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
    :param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': u'unbekannter Autor'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url, True)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % e |
imankulov/linguee-api | linguee_api/utils.py | Python | mit | 592 | 0 | import importlib
def import_string(import_name: str):
    """
    Import an object based on the import string.

    Separate module name from the object name with ":". For example,
    "linguee_api.downloaders:HTTPXDownloader"

    Raises RuntimeError if the delimiter is missing; propagates
    ImportError/AttributeError for unknown modules or attributes.
    """
    if ":" not in import_name:
        raise RuntimeError(
            f'{import_name} must separate module from object with ":". '
            f'For example, "linguee_api.downloaders:HTTPXDownloader"'
        )
    # rsplit so a ":" inside the module path (if any) stays with the module.
    module_name, object_name = import_name.rsplit(":", 1)
    mod = importlib.import_module(module_name)
    return getattr(mod, object_name)
|
JazzeYoung/VeryDeepAutoEncoder | benchmark/regression/regression.py | Python | bsd-3-clause | 5,237 | 0.013366 | from __future__ import absolute_import, print_function, division
import theano
import numpy as N
from theano import tensor as T
from theano.tensor import nnet as NN
from six.moves import xrange
from theano.compile import module as M
class RegressionLayer(M.Module):
    """Theano Module implementing a single affine regression layer.

    Subclasses must provide build_prediction() and
    build_classification_cost(); build_regularization() may be overridden
    to add a penalty term. Exposes two compiled Methods: `update` (one
    gradient-descent step) and `apply` (forward prediction).

    Fix: repaired the garbled `self.regularize` assignment (a stray ' | '
    artifact had split the attribute name).
    """
    def __init__(self, input = None, target = None, regularize = True):
        super(RegressionLayer, self).__init__() #boilerplate
        # MODEL CONFIGURATION
        self.regularize = regularize
        # ACQUIRE/MAKE INPUT AND TARGET
        if not input:
            input = T.matrix('input')
        if not target:
            target = T.matrix('target')
        # HYPER-PARAMETERS
        self.stepsize = T.scalar()  # a stepsize for gradient descent
        # PARAMETERS
        self.w = T.matrix()  #the linear transform to apply to our input points
        self.b = T.vector()  #a vector of biases, which make our transform affine instead of linear
        # REGRESSION MODEL
        self.activation = T.dot(input, self.w) + self.b
        self.prediction = self.build_prediction()
        # CLASSIFICATION COST
        self.classification_cost = self.build_classification_cost(target)
        # REGULARIZATION COST
        self.regularization = self.build_regularization()
        # TOTAL COST
        self.cost = self.classification_cost
        if self.regularize:
            self.cost = self.cost + self.regularization
        # GET THE GRADIENTS NECESSARY TO FIT OUR PARAMETERS
        self.grad_w, self.grad_b, grad_act = T.grad(self.cost, [self.w, self.b, self.prediction])
        print('grads', self.grad_w, self.grad_b)
        # INTERFACE METHODS
        self.update = M.Method([input, target],
                               [self.cost, self.grad_w, self.grad_b, grad_act],
                               updates={self.w: self.w - self.stepsize * self.grad_w,
                                        self.b: self.b - self.stepsize * self.grad_b})
        self.apply = M.Method(input, self.prediction)
    def params(self):
        """Return the trainable parameter variables (weights, biases)."""
        return self.w, self.b
    def _instance_initialize(self, obj, input_size = None, target_size = None,
                             seed = 1827, **init):
        # obj is an "instance" of this module holding values for each member and
        # functions for each method
        if input_size and target_size:
            # initialize w and b in a special way using input_size and target_size
            sz = (input_size, target_size)
            rng = N.random.RandomState(seed)
            obj.w = rng.uniform(size = sz, low = -0.5, high = 0.5)
            obj.b = N.zeros(target_size)
            obj.stepsize = 0.01
        # here we call the default_initialize method, which takes all the name: value
        # pairs in init and sets the property with that name to the provided value
        # this covers setting stepsize, l2_coef; w and b can be set that way too
        # we call it after as we want the parameter to superseed the default value.
        M.default_initialize(obj,**init)
    def build_regularization(self):
        """Default: no regularization term."""
        return T.zero() # no regularization!
class SpecifiedRegressionLayer(RegressionLayer):
    """Regression layer with sigmoid prediction and summed squared-error cost.

    NOTE(review): the original docstring said 'XE mean cross entropy', but
    the implemented cost below is squared error — confirm intent.
    """
    def build_prediction(self):
        # return NN.softmax(self.activation) #use this line to expose a slow subtensor
        # implementation
        return NN.sigmoid(self.activation)
    def build_classification_cost(self, target):
        # Element-wise squared error, summed over output dims then over rows.
        self.classification_cost_matrix = (target - self.prediction)**2
        #print self.classification_cost_matrix.type
        self.classification_costs = T.sum(self.classification_cost_matrix, axis=1)
        return T.sum(self.classification_costs)
    def build_regularization(self):
        # L2 weight penalty scaled by a tunable hyper-parameter.
        self.l2_coef = T.scalar() # we can add a hyper parameter if we need to
        return self.l2_coef * T.sum(self.w * self.w)
class PrintEverythingMode(theano.Mode):
    """Debugging Mode that prints every node's inputs and outputs as the
    compiled function graph is evaluated."""
    def __init__(self, linker, optimizer=None):
        def print_eval(i, node, fn):
            # i: node index, node: the Apply node, fn: the compiled thunk.
            print(i, node, [input[0] for input in fn.inputs], end=' ')
            fn()
            print([output[0] for output in fn.outputs])
        wrap_linker = theano.gof.WrapLinkerMany([linker], [print_eval])
        super(PrintEverythingMode, self).__init__(wrap_linker, optimizer)
def test_module_advanced_example():
    """Smoke test: fit the regression module on random data, printing cost."""
    # NOTE(review): this first profmode is immediately overwritten below —
    # looks like leftover debugging code.
    profmode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
    profmode = PrintEverythingMode(theano.gof.OpWiseCLinker(), 'fast_run')
    data_x = N.random.randn(4, 10)
    # Binary column-vector targets derived from the sign of random draws.
    data_y = [ [int(x)] for x in (N.random.randn(4) > 0)]
    model = SpecifiedRegressionLayer(regularize = False).make(input_size = 10,
                                                              target_size = 1,
                                                              stepsize = 0.1,
                                                              mode=profmode)
    # 1000 gradient-descent steps, logging the cost every 100 iterations.
    for i in xrange(1000):
        xe, gw, gb, ga = model.update(data_x, data_y)
        if i % 100 == 0:
            print(i, xe)
        pass
    #for inputs, targets in my_training_set():
    #print "cost:", model.update(inputs, targets)
    print("final weights:", model.w)
    print("final biases:", model.b)
    profmode.print_summary()
|
sebasvega95/HPC-assignments | CUDA/grayscale/timing.py | Python | mit | 2,425 | 0.003711 | from time import time
from os import remove
from matplotlib.image import imread
import json
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def time_a_function(program, args):
    """Run *program* with the single argument *args* as a subprocess and
    return the elapsed wall-clock time in seconds."""
    began = time()
    subprocess.call([program, args])
    return float(time() - began)
def clean(programs):
    """Delete each file path in *programs* from disk."""
    for path in programs:
        remove(path)
def plot_results(times, programs, images):
    """Plot mean runtime (with std-dev error bars) against image pixel count."""
    # x starts as (height, width) shapes, used for tick labels...
    x = [imread(img)[:,:,0].shape for img in images]
    xlabels = [str(xi) for xi in x]
    # ...then becomes total pixel counts for the actual x positions.
    x = [np.prod(xi) for xi in x]
    for p in programs:
        y, std_y = zip(*times[p])
        # plt.plot(x, y, 'o')
        plt.errorbar(x, y, yerr=std_y, fmt='o')
    plt.xticks(x, xlabels)
    plt.xlabel('Image size')
    plt.ylabel('Time (s)')
    plt.show()
def print_results(times, programs, images):
    """Print a fixed-width table of image sizes and timing stats per program."""
    sizes = [imread(img)[:,:,0].size for img in images]
    for p in programs:
        print '\n{}'.format(p)
        mean_t, std_t = zip(*times[p])
        # Right-justified column headers to match the row format below.
        print 'Image'.rjust(13), 'Size'.rjust(8), 'Avg. time'.rjust(10), 'Std. time'.rjust(10)
        for img, size, m, s in zip(images, sizes, mean_t, std_t):
            print '{:13} {:8d} {:10.5f} {:10.5f}'.format(img, size, m, s)
def main():
print 'Running make...'
subprocess.call(['make', '-j8'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
programs = ['./grayscale.out', './grayscale-seq.out']
images = ['img/emma{}.png'.format(i) for i in range(1, 6)]
n = 20
times = {}
try:
print 'Loading times.json...'
time_file = open('times.json', 'r')
times = json.load(time_file)
except IOError:
print 'Failed, calculating times'
for p in programs:
times[p] = []
for img in images:
t = []
| print 'Runnin | g {} with {} {} times...'.format(p, img, n),
for _ in range(n):
t.append(time_a_function(p, img))
mean_t = np.mean(t)
std_t = np.std(t)
print '({} +- {})s on average'.format(mean_t, std_t)
times[p].append((mean_t, std_t))
time_file = open('times.json', 'w')
print 'Writing times.json...'
json.dump(times, time_file)
time_file.close()
print_results(times, programs, images)
plot_results(times, programs, images)
clean(programs)
if __name__ == '__main__':
main()
|
pouta/bitcoin | contrib/devtools/fix-copyright-headers.py | Python | mit | 1,484 | 0.015499 | #!/usr/bin/env python
'''
Run this script inside of src/ and it will look for all the files
that were changed this year that still have the last year in the
copyright headers, and it will fix the headers on that file using
a perl regex one liner.
For example: if it finds something like this and we're in 2014
// Copyright (c) 2009-2013 The Bitcoin Core developers
it will change it to
// Copyright (c) 2009-2014 The Bitcoin Core developers
It will do this for all the files in the folder and its children.
Author: @gubatron
'''
import os
import time
year = time.gmtime()[0]
last_year = year - 1
command = "perl -pi -e 's/%s The Bitcoin/%s The Bitcoin/' %s"
listFilesCommand = "find . | grep %s"
extensions = [".cpp",".h"]
def getLastGitModifiedDate(filePath):
    """Return the 'Date:' line of the newest git commit touching *filePath*,
    with the trailing newline removed; empty string if none is found."""
    cmd = "git log " + filePath + " | grep Date | head -n 1"
    pipe = os.popen(cmd)
    last = ""
    for line in pipe:
        last = line
        break
    return last.replace("\n", "")
n=1
for extension in extensions:
foundFiles = os.popen(listFilesCommand % extension)
for filePath in foundFiles:
filePath = filePath[1:-1]
if filePath.endswith(extension):
filePath = os.getcwd() + filePath
| modifiedTime = getLastGitModifiedDate(filePath)
if len(modifiedTime) > 0 and str(year) in modifiedTime:
print n,"Last Git Modified: ", modifiedTime, " - ", filePath
os.popen(command % (last_year,y | ear,filePath))
n = n + 1
|
russloewe/PyGravity | tests/test.py | Python | gpl-2.0 | 17,139 | 0.02013 | import unittest
from PyGravity import PyGravity, round_sig, Particle, Physics, Data_IO
from math import sqrt
import numpy as np
import sys
'''
Unit Tests for PyGravity. These Unit tests will run tests against the
installed PyGravity module and not against the source files in this
package. So if you modify source, you need to re-install with
.. code-block:: python
python setup.py install --force
'''
class Round_test(unittest.TestCase):
    """Unit tests for the round_sig significant-figure rounding helper.

    Fix: replaced the deprecated failUnless alias (removed in Python 3.12)
    with assertEqual for exact-value checks.
    """
    def setUp(self):
        pass
    def test_rounding_domain(self):
        # Zero is a domain edge case for sig-fig rounding (log10(0) is
        # undefined); round_sig must return 0 rather than raise.
        a = 0
        self.assertEqual(round_sig(a, 1), 0)
    def test_neg_numbers(self):
        # Negative values must round by magnitude and keep their sign.
        a = -1.20
        self.assertEqual(round_sig(a, 2), -1.2)
class Particle_Class_Tests(unittest.TestCase):
    """Unit tests for Particle creation, motion, and acceleration.

    Fix: replaced the deprecated failUnless alias (removed in Python 3.12)
    with assertTrue/assertEqual, parenthesized the debug prints so the
    module parses under Python 3, and dropped an unused local.
    """
    def setUp(self):
        pass
    def test_particle_creation(self):
        # A freshly built particle must expose position, velocity, and mass.
        a = Particle('a', np.array([1, 2, 3]),
                     np.array([1, 1, 1]),
                     55.5)
        self.assertTrue(hasattr(a, 'pos'))
        self.assertTrue(hasattr(a, 'vol'))
        self.assertTrue(hasattr(a, 'mass'))
        self.assertEqual(a.name, 'a')
    def test_particle_motion(self):
        # Moving for one time unit adds the velocity to the position.
        a = Particle('a', np.array(['1', '2', '3']),
                     np.array(['1', '1', '1']),
                     55.5)
        a.move(1)
        P = np.array([2, 3, 4])
        self.assertTrue(np.array_equal(P, a.pos))
    def test_particle_motion2(self):
        # Accelerating for one time unit adds the acceleration to velocity.
        a = Particle('a', [1.1, 2.1, 3.0],
                     np.array([1.1, 2.1, 3.0]),
                     55.5)
        Accel = np.array([2, 2, -4])
        Ans = np.array([3.1, 4.1, -1.0])
        a.accelerate(Accel, 1)
        self.assertTrue(np.array_equal(Ans, a.vol))
    def test_particle_acceleration(self):
        # Acceleration then motion: velocity updates first, position second.
        a = Particle('a', [1, 1, 1],
                     [1, 1, 1],
                     44)
        Acc = np.array([3, 3, -1])
        V_ans = np.array([4, 4, 0], dtype=float)
        P_ans = np.array([5, 5, 1], dtype=float)
        a.accelerate(Acc, 1)
        self.assertTrue(np.array_equal(V_ans, a.vol))
        a.move(1.0)
        print(P_ans)
        print(a.pos)
        self.assertTrue(np.array_equal(P_ans, a.pos))
class Physics_Class_Tests(unittest.TestCase):
def setUp(self):
self.part1 = Particle('a',np.array([1,1,1]),
np.array([1,1,1]),
5)
self.part2 = Particle('b',np.array([2.0e42,1.0e42,3.0e42]),
np.array([2.3, 1.2, 4.2]),
5.0e6)
part1 = Particle('aa',np.array(['1.00009','1.000009','1.000009']),
np.array(['1.09','1.09','1.09']),
5)
part2 = Particle('b',np.array([2,2,2]),
np.array([2.3, 1.2, 4.2]),
np.array([10]))
part3 = Particle('c',np.array(['1.2e20','1.2e21','1.4e10']),
np.array(['1','1','1']),
5)
part4 = Particle('d',np.array(['1.01e-40','1.3e-40','1.4e-40']),
np.array(['1','1','1']),
5.8e-100)
part5 = Particle('e',np.array(['1.01e10','1.44440000001110',
'1.00000000000001']),
np.array(['1','1','1']),
5.2)
part6 = Particle('f',np.array(['1.5','-1.2','-1.5']),
np.array(['1','1','1']),
5.3e48)
self.part_list = [part1, part2, part3, part4, part5, part6]
def test_Grav_Force_against_known_answer(self):
part1 = Particle('a',[1, 1,1 ],
[1, 1, 1],
5.0)
part2 = Particle('b',[2, 2, 2],
[2.3, 1.2, 4.2],
10)
answer = np.array([6.42e-10, 6.42e-10,6.42e-10 ])
force_vec = Physics.grav_force(part1, part2)
        self.failUnless(np.allclose(answer, force_vec, 1.0e-6))
def test_force_of_gravity_magnitude_against_known_answer(self):
part1 = Particle('a',np.array(['1.0','1.0','1.0']),
                             np.array(['0','0','0']),
5.0e10)
part2 = Particle('b',np.array(['11.0','1.0','1.0']),
np.array(['0','0','0']), 5.0e10)
force_vec = Physics.grav_force(part1, part2)
self.failUnless(np.linalg.norm(force_vec) == 1668460000)
def test_grav_accel_against_known_answer(self):
A = Particle('a',np.array(['1','1','1']),
np.array([1,1,1]), 5)
B = Particle('b',np.array(['2','2','2']),
np.array([1,1,1]), 10)
#the known_answer was calculated by hand and verified w/ wolfram|alpha
known_answer = np.array([1.28438e-10,
1.28438e-10, 1.28438e-10])
self.failUnless(np.allclose(known_answer ,
Physics.grav_accel(A,B),
1.0e-6))
part6 = Particle('f',np.array(['1.5','-1.2','-1.5']),
np.array(['1','1','1']), 5.3e20)
known_answer_part_6 = np.array([4.63129113e+08,
-2.03776810e+09,
-2.31564557e+09])
self.failUnless(np.allclose(known_answer_part_6,
Physics.grav_accel(A,part6),
1.0e-6))
def test_Grav_Accel_against_known_answer(self):
part1 = Particle('a',np.array(['1.0','1.0','1.0']),
np.array([1,1,1]), 5.0)
Acceleration_answer = np.array([6.42e-10,
6.42e-10,
6.42e-10 ]) * (1/part1.mass)
Acc_vecc_one = (Physics.grav_force(part1, self.part_list[1]) * (1/part1.mass))
Acc_vecc_two = Physics.grav_accel(part1, self.part_list[1])
self.failUnless(np.allclose(Acc_vecc_one, Acceleration_answer,
1.0e-6))
self.failUnless(np.allclose(Acc_vecc_two, Acceleration_answer,
1.0e-6))
def test_Grav_Accl_vs_Grav_Force(self):
part1 = Particle('a',np.array(['1.0','1.0','1.0']),
np.array([1,1,1]), 5.0)
for i in self.part_list:
self.failUnless(
np.allclose(
Physics.grav_force(part1, i) * (-1.0/part1.mass),
Physics.grav_accel(part1, i),
1.0e-6)
)
def test_grav_accel_extension_against_known_answer(self):
A = Particle('a',[1.0, 1.0, 1.0], [1,1,1], 5.0)
B = Particle('b',[2.0, 2.0, 2.0], [1,1,1], 10.0)
#the known_answer was calculated by hand and verified w/ wolfram|alpha
known_answer = np.array([1.28438e-10, 1.28438e-10, 1.28438e-10])
Acc_vec_one = Physics.c_grav_accel(A, B)
self.failUnless(np.allclose(Acc_vec_one, known_answer, 1.0e-6))
def test_grav_accel_method_equal(self):
part1 = Particle('a',[1.0, 1.0, 1.0], [1, 1, 1], 5.0)
part2 = Particle('b',[1.00001,1.0000001,1.00001], [1,1,1], 5)
part3 = Particle('c',[1.2e20, 1.2e21, 1.4e10], [1, 1, 1], 5.0)
part4 = Particle('d',[1.01e-40, 1.3e-40, 1.4e-40], [1, 1, 1], 5.8e-100)
part5 = Particle('e',[1.01e10, 1.44440000001110, 1.00000001], [1, 1, 1], 5.2)
part6 = Particle('f',[-1.5, -1.5, -1.5], [-1, 1, 1], 5.3e24)
base = PyGravity()
self.failUnless(np.allclose(Physics.grav_accel(part1,part2),
Physics.c_grav_accel(part1,part2),
1.0e-6))
self.failUnless(np.allclose(Physics.grav_accel(part1,part3),
Physics.c_grav_accel(part1,part3),
|
bd-j/prospector | prospect/models/__init__.py | Python | mit | 518 | 0.001931 | """This module includes objects that store parameter specifications and
efficiently convert between parameter dictionaries and parameter vectors
necessary for fitting algorithms. There are submodules for parameter priors,
common parameter transformations, and pre-defined | sets of parameter
specifications.
"""
from .sedmodel import ProspectorParams, SedModel, SpecModel, PolySpecModel, SplineSpecModel
__all__ = ["ProspectorParams",
"SpecModel", "PolySpecModel", "SplineSpecModel",
"SedModel"]
|
john-soklaski/CoilSnake | coilsnake/util/eb/text.py | Python | gpl-3.0 | 2,207 | 0.002266 | class CharacterSubstitutions(object):
character_substitutions = dict()
def standard_text_from_block(block, offset, max_length):
    """Decode a 0x30-shifted, NUL-terminated string from *block*.

    Reads at most *max_length* bytes starting at *offset*; a 0 byte
    terminates the string early. Each byte c decodes to chr(c - 0x30).

    Fix: renamed the accumulator, which shadowed the builtin `str`.
    """
    text = ''
    for i in range(offset, offset + max_length):
        c = block[i]
        if c == 0:
            return text
        text += chr(c - 0x30)
    return text
def standard_text_to_byte_list(text, max_length):
    """Encode *text* as a list of 0x30-shifted byte values.

    Applies CharacterSubstitutions first. A '[..]' group embeds literal
    two-digit hex bytes (space-separated). Raises ValueError for an
    unterminated '[', malformed hex, or output longer than *max_length*;
    appends a 0 terminator when the output is shorter than *max_length*.

    Fix: repaired two garbled lines (stray ' | ' artifacts had split
    `num_bytes = ...` and the "bytes or less" error message).
    """
    # First, substitute all of the characters
    if CharacterSubstitutions.character_substitutions:
        for k, v in CharacterSubstitutions.character_substitutions.items():
            text = text.replace(k, v)
    byte_list = []
    text_pos = 0
    while text_pos < len(text):
        c = text[text_pos]
        if c == '[':
            # Bracketed group: raw hex bytes, not shifted by 0x30.
            end_bracket_pos = text.find(']', text_pos)
            if end_bracket_pos == -1:
                raise ValueError("String contains '[' at position {} but no subsequent ']': {}".format(
                    text_pos, text
                ))
            bracket_bytes = text[text_pos+1:end_bracket_pos].split()
            for bracket_byte in bracket_bytes:
                if len(bracket_byte) != 2:
                    raise ValueError("String contains invalid hex number '{}', must be two digits: {}".format(
                        bracket_byte, text
                    ))
                try:
                    bracket_byte_value = int(bracket_byte, 16)
                except ValueError as e:
                    raise ValueError("String contains invalid hex number '{}': {}".format(
                        bracket_byte, text
                    ), e)
                byte_list.append(bracket_byte_value)
            text_pos = end_bracket_pos + 1
        else:
            byte_list.append(ord(c) + 0x30)
            text_pos += 1
    num_bytes = len(byte_list)
    if num_bytes > max_length:
        raise ValueError("String cannot be written in {} bytes or less: {}".format(
            max_length, text
        ))
    elif num_bytes < max_length:
        byte_list.append(0)
    return byte_list
def standard_text_to_block(block, offset, text, max_length):
    # Encode `text` (see standard_text_to_byte_list) and write the resulting
    # bytes into `block` in place, starting at `offset`.
    byte_list = standard_text_to_byte_list(text, max_length)
    block[offset:offset+len(byte_list)] = byte_list
simonwittber/middleman | clients/py/auth.py | Python | mit | 1,290 | 0.000775 | from collections import defaultdict
import serviceprovider
import textservice
import hashlib
import os
class Auth(serviceprovider.ServiceProvider, textservice.TextService):
    """Challenge-response authentication service.

    REQ_Salt hands a per-connection salt to the client, REQ_Verify checks
    md5(salt + stored_password) against the client's hash, and
    REQ_Register records a new email -> hash pair.

    Fix: repaired the garbled REQ_Register signature (a stray ' | '
    artifact had split the `headers` parameter name).
    NOTE(review): MD5 and in-memory credential storage are weak — flagged,
    not changed here.
    """
    salts = dict()  # connection id -> salt issued by REQ_Salt
    users = dict()  # email -> stored credential from REQ_Register
    async def REQ_Salt(self, headers, msg):
        conn = headers["cid"]
        headers["salt"] = self.salts[conn] = hashlib.md5(
            os.urandom(16)).hexdigest()
    async def REQ_Verify(self, headers, msg):
        conn = headers["cid"]
        e = headers["email"]
        h = headers["hash"]
        salt = self.salts[conn]
        # lookup pass for user
        if e in self.users:
            p = self.users[e]
            localHash = hashlib.md5((salt + p).encode()).hexdigest()
            if h == localHash:
                headers["auth"] = "Y"
                await self.internal("UID", dict(forcid=conn, setuid=hashlib.md5(e.encode()).hexdigest()))
            else:
                headers["auth"] = "N"
        else:
            headers["auth"] = "N"
    async def REQ_Register(self, headers, msg):
        h = headers["hash"]
        e = headers["email"]
        if e in self.users:
            # Existing account: refuse the registration.
            headers["register"] = "N"
        else:
            headers["register"] = "Y"
            self.users[e] = h
serviceprovider.run(Auth, require_uid=False)
|
achawkins/Forsteri | forsteri/gui/window/preferences.py | Python | mit | 5,406 | 0.00407 | #!/usr/bin/python
"""
Preferences Frame
Copyright (c) 2014, 2015 Andrew Hawkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Import Declarations
"""
import os
import pickle
import wx
from forsteri.interface import data as idata
"""
Constant Declarations
"""
"""
Frame Class
"""
class PreferencesFrame(wx.Frame):
    """Top-level frame for viewing and editing application preferences
    (currently just the forecast type), persisted in pref.p via pickle.

    Fixes: repaired two garbled lines (stray ' | ' artifacts in the
    ComboBox and sizer calls) and closed the pickle file handles with
    context managers.
    """
    def __init__(self, *args, **kwargs):
        """Build all widgets, load the saved preferences, and show the frame."""
        ## Panel
        # Initialize the parents constructor.
        super(PreferencesFrame, self).__init__(*args, **kwargs)
        # Create the master panel.
        masterPanel = wx.Panel(self)
        # Create the master sizer.
        masterSizer = wx.BoxSizer(wx.VERTICAL)
        ## Reporting
        # Create the reporting static box.
        reportSB = wx.StaticBox(masterPanel, label="Reporting")
        # Create the reporting sizer.
        reportSizer = wx.StaticBoxSizer(reportSB, wx.VERTICAL)
        # Create the first rows sizer.
        row1Sizer = wx.BoxSizer(wx.HORIZONTAL)
        # Create the label for the first row.
        row1Label = wx.StaticText(masterPanel, label="Forecast Type")
        # Create the list of choices for the first row.
        choice1 = ["Auto", "MLR", "EMA", "Naive"]
        # Create the object for the first row.
        self.row1Obj = wx.ComboBox(masterPanel, size=(150, -1),
            choices=choice1, style=wx.CB_READONLY)
        # Add the contents to the row 1 sizer.
        row1Sizer.Add(row1Label, flag=wx.ALIGN_CENTER|wx.RIGHT, border=5)
        row1Sizer.Add(self.row1Obj, flag=wx.ALIGN_CENTER)
        # Add all rows to the report sizer.
        reportSizer.Add(row1Sizer, flag=wx.ALL, border=5)
        ## Finish Buttons
        # Create the finish sizer.
        finishSizer = wx.BoxSizer(wx.HORIZONTAL)
        # Create the buttons.
        okButton = wx.Button(masterPanel, id=wx.ID_OK)
        cancelButton = wx.Button(masterPanel, id=wx.ID_CANCEL)
        # Set the OK button to be the dafault button.
        okButton.SetDefault()
        # Add the buttons to the finish sizer.
        finishSizer.AddMany([okButton, (5, 0), cancelButton, (5, 0)])
        # Bind button presses to functions.
        okButton.Bind(wx.EVT_BUTTON, self.onOK)
        cancelButton.Bind(wx.EVT_BUTTON, self.onCancel)
        ## Panel Operations
        # Add everything to the master sizer.
        masterSizer.Add(reportSizer, flag=wx.ALL, border=5)
        masterSizer.AddSpacer(9)
        masterSizer.Add(wx.StaticLine(masterPanel, size=(585, 2)),
            flag=wx.ALIGN_CENTER)
        masterSizer.AddSpacer(9)
        masterSizer.Add(finishSizer,
            flag=wx.BOTTOM|wx.ALIGN_RIGHT, border=5)
        # Load the prefernces.
        self.loadPref()
        # Set the sizer for the master panel.
        masterPanel.SetSizer(masterSizer)
        # Bind closing the frame to a function.
        self.Bind(wx.EVT_CLOSE, self.onClose)
        # Set window properties.
        self.SetSize((600, 400))
        self.SetTitle("Preferences")
        self.Centre()
        self.Show(True)
    """
    Helper Functions
    """
    def loadPref(self):
        """Load saved preferences from pref.p and populate the widgets."""
        # Load the preferences from the pickle file (context manager closes it).
        with open(os.path.join(idata.DATA, "Forsteri", "pref.p"), "rb") as f:
            pref = pickle.load(f)
        # Set all of the preference objects.
        self.row1Obj.SetValue(pref["report_type"])
        return True
    def savePref(self):
        """Collect widget values and persist them to pref.p."""
        # Initialize the preferences dictionary.
        pref = {}
        # Get all of the preference objects data.
        pref["report_type"] = self.row1Obj.GetValue()
        # Save the preferences into the pickle file (context manager closes it).
        with open(os.path.join(idata.DATA, "Forsteri", "pref.p"), "wb") as f:
            pickle.dump(pref, f)
        return True
    """
    Event Handlers
    """
    def onOK(self, event):
        """Persist the preferences, then close the frame."""
        # Save the preferences.
        self.savePref()
        self.Close()
    def onCancel(self, event):
        """Close the frame without saving."""
        self.Close()
    def onClose(self, event):
        """Destroy the frame when the window is closed."""
        self.Destroy()
def main():
    """
    When the file is called independently create and display the manager frame.
    """
    app = wx.App()
    # The frame shows itself from __init__; keeping a reference is not needed.
    PreferencesFrame(None, style=wx.DEFAULT_FRAME_STYLE)#^wx.RESIZE_BORDER)
    app.MainLoop()
main()
|
pnakis/qgis_vector_transform | test/test_vector_transform_dialog.py | Python | gpl-3.0 | 1,563 | 0.00128 | # coding=utf-8
"""Dialog test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'pnakis@hotmail.com'
__date__ = '2017-06-04'
__copyright__ = 'Copyright 2017, Panagiotis Nakis'
import unittest
from PyQt4.QtGui import QDialogButtonBox, QDialog
from vector_transform_dialog import VectorTranformationDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class VectorTranformationDialogTest(unittest.TestCase):
"""Test dialog works."""
def setUp(self):
"""Runs before each test."""
self.dialog = VectorTranformationDialog(None)
def tearDown(self):
"""Runs after each test."""
self.dialog = None
|
def test_dialog_ok(self):
"""Test we can click OK."""
button = self.dialog.button_box.button(QDialogButtonBox.Ok)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Accepted)
def test_dialog_cancel(self):
"""Test we can click cancel."""
button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
button.click()
result = sel | f.dialog.result()
self.assertEqual(result, QDialog.Rejected)
# Allow running this test module directly, outside the QGIS test runner.
if __name__ == "__main__":
    suite = unittest.makeSuite(VectorTranformationDialogTest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
|
fujy/ROS-Project | src/rbx1/rbx1_nav/nodes/navynavy.py | Python | mit | 12,006 | 0.008163 | #!/usr/bin/env python
""" nav_test.py - Version 1.1 2013-12-20
Command a robot to move autonomously among a number of goal locations defined in the map frame.
On each round, select a new random sequence of locations, then attempt to move to each location
in succession. Keep track of success rate, time elapsed, and total distance traveled.
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2012 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.5
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from random import sample
import math
from ar_track_alvar.msg import AlvarMarkers
class NavTest():
def __init__(self):
rospy.init_node('nav_test', anonymous=True)
rospy.on_shutdown(self.shutdown)
# How long in seconds should the robot pause at each location?
self.rest_time = rospy.get_param("~rest_time", 10)
# Are we running in the fake simulator?
self.fake_test = rospy.get_param("~fake_test", False)
# Goal state return values
goal_states = ['PENDING', 'ACTIVE', 'PREEMPTED',
'SUCCEEDED', 'ABORTED', 'REJECTED',
'PREEMPTING', 'RECALLING', 'RECALLED',
'LOST']
# Set up the goal locations. Poses are defined in the map frame.
# An easy way to find the pose coordinates is to point-and-click
# Nav Goals in RViz when running in the simulator.
# Pose coordinates are then displayed in the terminal
# that was used to launch RViz.
locations = dict()
locations['hall_foyer'] = Pose(Point(01, 0, 0.000), Quaternion(0.000, 0.000, 0.223, 0.975))
# locations['hall_kitchen'] = Pose(Point(-1.994, 4.382, 0.000), Quaternion(0.000, 0.000, -0.670, 0.743))
# locations['hall_bedroom'] = Pose(Point(-3.719, 4.401, 0.000), Quaternion(0.000, 0.000, 0.733, 0.680))
# locations['living_room_1'] = Pose(Point(0.720, 2.229, 0.000), Quaternion(0.000, 0.000, 0.786, 0.618))
# locations['living_room_2'] = Pose(Point(1.471, 1.007, 0.000), Quaternion(0.000, 0.000, 0.480, 0.877))
# locations['dining_room_1'] = Pose(Point(-0.861, -0.019, 0.000), Quaternion(0.000, 0.000, 0.892, -0.451))
# Publisher to manually control the robot (e.g. to stop it)
self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist)
# Subscribe to the move_base action server
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
rospy.loginfo("Waiting for move_base action server...")
# Wait 60 seconds for the action server to become available
self.move_base.wait_for_server(rospy.Duration(60))
rospy.loginfo("Connected to move base server")
# A variable to hold the initial pose of the robot to be set by
# the user in RViz
initial_pose = PoseWithCovarianceStamped()
# Variables to keep track of success rate, running time,
# and distance traveled
n_locations = len(locations)
n_goals = 0
n_successes = 0
i = n_locations
distance_traveled = 0
start_time = rospy.Time.now()
running_time = 0
location = ""
last_location = ""
# Get the initial pose from the user
rospy.loginfo("*** Click the 2D Pose Estimate button in RViz to set the robot's initial pose...")
rospy.wait_for_message('initialpose', PoseWithCovarianceStamped)
self.last_location = Pose()
rospy.Subscriber('initialpose', PoseWithCovarianceStamped, self.update_initial_pose)
# self.image_sub = rospy.Subscriber("/camera/rgb/image_color", Image, self.image_callback)
| self.ar_sub = rospy.Subscriber('ar_pose_marker', AlvarMarkers, self.doSomething)
self.is_visible = False;
# Make sure we have the initial pose
| while initial_pose.header.stamp == "":
rospy.sleep(1)
rospy.loginfo("Starting navigation test")
# Begin the main loop and run through a sequence of locations
while not rospy.is_shutdown():
# If we've gone through the current sequence,
# start with a new random sequence
if i == n_locations:
i = 0
sequence = sample(locations, n_locations)
# Skip over first location if it is the same as
# the last location
if sequence[0] == last_location:
i = 1
# # Get the next location in the current sequence
# location = sequence[i]
# # Keep track of the distance traveled.
# # Use updated initial pose if available.
# if initial_pose.header.stamp == "":
# distance = sqrt(pow(locations[location].position.x -
# locations[last_location].position.x, 2) +
# pow(locations[location].position.y -
# locations[last_location].position.y, 2))
# else:
# rospy.loginfo("Updating current pose.")
# distance = sqrt(pow(locations[location].position.x -
# initial_pose.pose.pose.position.x, 2) +
# pow(locations[location].position.y -
# initial_pose.pose.pose.position.y, 2))
# initial_pose.header.stamp = ""
# # Store the last location for distance calculations
# last_location = location
# # Increment the counters
# i += 1
# n_goals += 1
# Set up the next goal location
# self.goal = MoveBaseGoal()
# self.goal.target_pose.pose = locations[location]
# self.goal.target_pose.header.frame_id = 'map'
# self.goal.target_pose.header.stamp = rospy.Time.now()
# # Let the user know where the robot is going next
# rospy.loginfo("Going to: " + str(location))
# # Start the robot toward the next location
# self.move_base.send_goal(self.goal)
# # Allow 5 minutes to get there
# finished_within_time = self.move_base.wait_for_result(rospy.Duration(300))
# # Check for success or failure
# if not finished_within_time:
# self.move_base.cancel_goal()
# rospy.loginfo("Timed out achieving goal")
# else:
# state = self.move_base.get_state()
# if state == GoalStatus.SUCCEEDED:
# rospy.loginfo("Goal succeeded!")
# n_successes += 1
# distance_traveled += distance
# rospy.loginfo("State:" + str(state))
# else:
# rospy.loginfo("Goal failed with error code: " + str(goal_states[state]))
# How long have we been running?
running_time = rospy.Time.now() - start_time
running_time = running_time.secs / 60.0
# Print a summary success/failure, |
cogeorg/BlackRhino | examples/solow/abm_template/tests/tests_transaction.py | Python | gpl-3.0 | 44,588 | 0.002198 | #!/usr/bin/env python
# [SublimeLinter pep8-max-line-length:150]
# -*- coding: utf-8 -*-
"""
black_rhino is a multi-agent simulator for financial network analysis
Copyright (C) 2016 Co-Pierre Georg (co-pierre.georg@keble.ox.ac.uk)
Pawel Fiedor (pawel@fiedor.eu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
# -------------------------------------------------------------------------
# class Tests
# -------------------------------------------------------------------------
class TestsTransaction(object):
#
# VARIABLES
#
#
# METHODS
#
# -------------------------------------------------------------------------
# __init__
# -------------------------------------------------------------------------
    def __init__(self):
        # No state to initialise; every test method is self-contained.
        pass
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# print_info(text)
# -------------------------------------------------------------------------
def print_info(self, text):
print('##############################################################################\n')
print(text)
print('##############################################################################\n')
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# TESTS FOR TRANSACTION.PY
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# transaction__init
# -------------------------------------------------------------------------
    def transaction__init(self, args):
        """Exercise Transaction.__init__: create a transaction and print its id.

        args: [environment_directory, identifier, log_directory]
        """
        import os
        from sample_agent import Agent
        from sample_config import Config
        from sample_transaction import Transaction

        text = "This test checks transaction.init \n"
        self.print_info(text)

        #
        # INITIALIZATION
        #
        environment_directory = str(args[0])
        identifier = str(args[1])
        log_directory = str(args[2])

        # Configure logging parameters so we get output while the program runs
        logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                            filename=log_directory + identifier + ".log", level=logging.INFO)
        logging.info('START logging for test transaction__init in run: %s',
                     environment_directory + identifier + ".xml")

        # Construct a config
        config = Config()

        #
        # TESTING
        #
        print("Creating a transaction \n")
        transaction = Transaction()
        print("Transaction ID: ")
        print(transaction.identifier)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# transaction__del
# -------------------------------------------------------------------------
    def transaction__del(self, args):
        """Exercise deleting a Transaction and verify it leaves local scope.

        args: [environment_directory, identifier, log_directory]
        """
        import os
        from sample_agent import Agent
        from sample_config import Config
        from sample_transaction import Transaction

        text = "This test checks transaction.del \n"
        self.print_info(text)

        #
        # INITIALIZATION
        #
        environment_directory = str(args[0])
        identifier = str(args[1])
        log_directory = str(args[2])

        # Configure logging parameters so we get output while the program runs
        logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                            filename=log_directory + identifier + ".log", level=logging.INFO)
        logging.info('START logging for test transaction__del in run: %s',
                     environment_directory + identifier + ".xml")

        # Construct a config
        config = Config()

        #
        # TESTING
        #
        print("Creating a transaction")
        transaction = Transaction()
        print("Transaction exists? ")
        print("transaction" in locals())
        print("Deleting the transaction")
        del transaction
        print("Transaction exists? ")
        print("transaction" in locals())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# transaction__get_type_
# -------------------------------------------------------------------------
    def transaction__get_type_(self, args):
        """Exercise Transaction.get_type_ on a transaction with a preset type.

        args: [environment_directory, identifier, log_directory]
        """
        import os
        from sample_agent import Agent
        from sample_config import Config
        from sample_transaction import Transaction

        text = "This test checks transaction.get_type_ \n"
        self.print_info(text)

        #
        # INITIALIZATION
        #
        environment_directory = str(args[0])
        identifier = str(args[1])
        log_directory = str(args[2])

        # Configure logging parameters so we get output while the program runs
        logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                            filename=log_directory + identifier + ".log", level=logging.INFO)
        logging.info('START logging for test transaction__get_type_ in run: %s',
                     environment_directory + identifier + ".xml")

        # Construct a config
        config = Config()

        print("Creating a transaction")
        transaction = Transaction()
        transaction.type_ = "test_type"
        print("Type: ")
        print(transaction.get_type_())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# transaction__set_type_
# -------------------------------------------------------------------------
    def transaction__set_type_(self, args):
        """Exercise Transaction.set_type_ by overwriting a preset type.

        args: [environment_directory, identifier, log_directory]
        """
        import os
        from sample_agent import Agent
        from sample_config import Config
        from sample_transaction import Transaction

        text = "This test checks transaction.set_type_ \n"
        self.print_info(text)

        #
        # INITIALIZATION
        #
        environment_directory = str(args[0])
        identifier = str(args[1])
        log_directory = str(args[2])

        # Configure logging parameters so we get output while the program runs
        logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
                            filename=log_directory + identifier + ".log", level=logging.INFO)
        logging.info('START logging for test transaction__set_type_ in run: %s',
                     environment_directory + identifier + ".xml")

        # Construct a config
        config = Config()

        #
        # TESTING
        #
        print("Creating a transaction")
        transaction = Transaction()
        transaction.type_ = "test_type"
        print("Type: ")
        print(transaction.get_type_())
        print("Setting type")
        transaction.set_type_("new_type")
        print("Type: ")
        print(transaction.get_type_())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# transaction__get_asset
# -------------------------------------------------------------------------
def transaction__get_asset(self, args):
import os
from sample_agent import Agent
from sample_config import Confi |
avaquero/llibreria | usuaris/forms.py | Python | gpl-3.0 | 272 | 0.025735 | from django import forms
class formulariLogin(forms.Form):
    """Login form: plain-text username plus a masked password field.

    (Repaired: stray ``|`` markers had garbled both field definitions.)
    """
    usuari = forms.CharField(max_length=100)
    contrasenya = forms.CharField(max_length=100, widget=forms.PasswordInput())
class formulariUsuari(forms.Form):
    """Single-field form used to capture a username."""
    usuari = forms.CharField(max_length=100)
|
LivingInSyn/RestartMe | v1_python_kivy/installer/installer.py | Python | gpl-2.0 | 2,173 | 0.015647 | '''
Created by Jeremy Mill, originally for use in the hi tech classrooms at the University of CT
Licensed under the GPLv3
jeremymill@gmail.com
github.com/livinginsyn
'''
import os
import shutil as util
import _winreg as wreg
import subprocess
# First thing: detect whether this is 64-bit Windows.  The
# PROGRAMFILES(X86) environment variable only exists on 64-bit installs.
try:
    os.environ["PROGRAMFILES(X86)"]
    ver = 64
except KeyError:
    # Narrowed from a bare ``except`` -- only a missing variable means 32-bit.
    ver = 32
def sixty_folder():
    """Ensure the 64-bit install directory exists and return its path."""
    directory = 'C:\\Program Files (x86)\\RestartMe\\'
    if not os.path.exists(directory):
        os.mkdir(directory)
    # (Removed the dead ``overwrite = True`` else-branch: the flag was
    # a local that nothing ever read.)
    return directory
def thirty_folder():
    """Ensure the 32-bit install directory exists and return its path."""
    # Path literal repaired: it had been garbled to 'Program File | s'.
    directory = 'C:\\Program Files\\RestartMe\\'
    if not os.path.exists(directory):
        os.mkdir(directory)
    # (Removed the dead ``overwrite = True`` else-branch.)
    return directory
def resource_path(relative_path):
    """Get absolute path to resource, works for dev and for PyInstaller."""
    # ``sys`` was never imported at module level; the resulting NameError
    # was silently swallowed by a broad except.  Import it here and catch
    # only the expected "not frozen" case.
    import sys
    try:
        # PyInstaller creates a temp folder and stores its path in _MEIPASS.
        base_path = sys._MEIPASS
    except AttributeError:
        base_path = os.path.abspath(".")

    return os.path.join(base_path, relative_path)
def install(directory,ver):
    """Copy the application files into *directory*, register the
    run-as-admin registry key, and place a desktop shortcut.

    ver is 32 or 64 and selects the matching .reg/.lnk resources.
    """
    # Bundle files shipped alongside the installer (via resource_path).
    files = ["Restart.exe","admin_key_32.reg","admin_key_64.reg","RestartMe_32.lnk","RestartMe_64.lnk"]
    for item in files:
        to_copy = resource_path(item)
        util.copy(to_copy,directory)
    #now make the reg key for running as admin for all users
    if ver == 32:
        reg_file = "admin_key_32.reg"
        lnk_file = "RestartMe_32.lnk"
    if ver == 64:
        reg_file = "admin_key_64.reg"
        lnk_file = "RestartMe_64.lnk"
    #use regedit in silent mode to place the value in the key
    reg_file_path = os.path.join(directory,reg_file)
    subprocess.Popen(["regedit.exe","/s",reg_file_path])
    #create the short cut on the public (all-users) desktop
    to_copy = os.path.join(directory,lnk_file)
    util.copy(to_copy,"C:\\Users\\Public\\Desktop\\")
    os.rename("C:\\Users\\Public\\Desktop\\"+lnk_file,"C:\\Users\\Public\\Desktop\\RestartMe.lnk")
# The meat and potatoes: install into the directory that matches the
# detected architecture.
if ver == 64:
    directory = sixty_folder()
    install(directory,ver)
elif ver == 32:
    directory = thirty_folder()
    install(directory,ver)
|
nealegibson/GeePea | examples/mean_function.py | Python | gpl-3.0 | 592 | 0.038851 | #!/usr/bin/env python
import GeePea
import numpy as np
#first define mean function in correct format
my_mean_func | = lambda p,x: p[0] + p[1] * x
#create test data
x = np.linspace(0,1,50)
y = my_mean_func([1.,3.],x) + np.sin(2*np.pi*x) + np.random.normal(0,0.1,x.size)
#define mean function parameters and hyperparameters
mfp = [0.8,2.]
hp = [1.,1.,0.1] # kernel hyperparameters (sq exponential takes 3 parameters | for 1D input)
#define the GP
gp = GeePea.GP(x,y,p=mfp+hp,mf=my_mean_func)
#print out the GP attributes
gp.describe()
#optimise and plot
gp.optimise()
gp.plot()
raw_input()
|
ddico/odoo | addons/im_livechat/__manifest__.py | Python | agpl-3.0 | 1,621 | 0.000617 | # -*- coding: utf-8 -*-
{
'name' : 'Live Chat',
'version': '1.0',
'sequence': 170,
'summary': 'Chat with your website visitors',
'category': 'Website/Live Chat',
'complexity': 'easy',
'website': 'https://www.odoo.com/page/live-chat',
'description':
"""
Live Chat Support
==========================
Allow to drop instant messaging widgets on any web page that will communicate
with the current server and dispatch visitors request amongst several live
chat operators.
Help your customers with this chat, and anal | yse their feedback.
""",
'data': [
"data/mail_shortcode_data.xml",
"data/mail_data.xml",
"data/im_livechat_channel_data.xml",
'data/digest_data.xml',
"security/im_livechat_channel_security. | xml",
"security/ir.model.access.csv",
"views/rating_views.xml",
"views/mail_channel_views.xml",
"views/im_livechat_channel_views.xml",
"views/im_livechat_channel_templates.xml",
"views/res_users_views.xml",
"views/digest_views.xml",
"report/im_livechat_report_channel_views.xml",
"report/im_livechat_report_operator_views.xml"
],
'demo': [
"data/im_livechat_channel_demo.xml",
'data/mail_shortcode_demo.xml',
],
'depends': ["mail", "rating", "digest"],
'qweb': [
'static/src/bugfix/bugfix.xml',
'static/src/components/discuss_sidebar/discuss_sidebar.xml',
'static/src/components/thread_icon/thread_icon.xml',
],
'installable': True,
'auto_install': False,
'application': True,
}
|
mmcfarland/model-my-watershed | src/mmw/apps/user/views.py | Python | apache-2.0 | 7,827 | 0.000256 | from django.contrib.auth import (authenticate,
logout as auth_logout,
login as auth_login)
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, render_to_response
from django.contrib.auth.forms import PasswordResetForm
from registration.forms import RegistrationFormUniqueEmail
from registration.backends.default.views import RegistrationView
from rest_framework import decorators, status
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from apps.user.models import ItsiUser
from apps.user.itsi import ItsiService
@decorators.api_view(['POST', 'GET'])
@decorators.permission_classes((AllowAny, ))
def login(request):
    """Session endpoint.

    POST authenticates the given username/password and opens a session;
    GET reports whether the current session belongs to an authenticated
    user.  Both return a JSON body with ``guest``/``id`` (plus
    ``username`` when logged in) and an ``errors`` list on failure.
    """
    response_data = {}
    status_code = status.HTTP_200_OK

    if request.method == 'POST':
        # NOTE(review): request.REQUEST merges GET and POST and was removed
        # in Django >= 1.9 -- presumably this targets an older Django; confirm.
        user = authenticate(username=request.REQUEST.get('username'),
                            password=request.REQUEST.get('password'))
        if user is not None:
            if user.is_active:
                auth_login(request, user)
                response_data['result'] = 'success'
                response_data['username'] = user.username
                response_data['guest'] = False
                response_data['id'] = user.id
            else:
                # Account exists but has not been activated yet.
                response_data['errors'] = ['Please activate your account']
                response_data['guest'] = True
                response_data['id'] = 0
                status_code = status.HTTP_400_BAD_REQUEST
        else:
            response_data['errors'] = ['Invalid username or password']
            response_data['guest'] = True
            response_data['id'] = 0
            status_code = status.HTTP_400_BAD_REQUEST
    elif request.method == 'GET':
        user = request.user
        if user.is_authenticated() and user.is_active:
            response_data['username'] = user.username
            response_data['guest'] = False
            response_data['id'] = user.id
        else:
            response_data['guest'] = True
            response_data['id'] = 0
        response_data['result'] = 'success'
        status_code = status.HTTP_200_OK

    return Response(data=response_data, status=status_code)
@decorators.api_view(['GET'])
@decorators.permission_classes((AllowAny, ))
def logout(request):
    """End the current session.

    AJAX callers receive a JSON guest payload; regular browser requests
    get the rendered logout page.
    """
    auth_logout(request)

    if request.is_ajax():
        response_data = {
            'guest': True,
            'result': 'success',
            'id': 0
        }
        return Response(data=response_data)
    else:
        return render_to_response('user/logout.html')
itsi = ItsiService()
def itsi_login(request):
    """Redirect the user to ITSI's OAuth authorization page, pointing the
    provider back at our ``itsi_auth`` callback."""
    redirect_uri = request.build_absolute_uri(reverse('itsi_auth'))
    params = {'redirect_uri': redirect_uri}
    auth_url = itsi.get_authorize_url(**params)

    return redirect(auth_url)
def itsi_auth(request):
    """OAuth callback from ITSI.

    Exchanges the authorization code for a session, logs a linked local
    user in, or sends an unknown ITSI user to the sign-up page.
    """
    code = request.GET.get('code', None)

    # Basic validation
    if code is None:
        return redirect('/error/itsi')

    try:
        session = itsi.get_session_from_code(code)
        itsi_user = session.get_user()
    except Exception:
        # In case we are unable to reach ITSI or get an unexpected response.
        # (Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.)
        return redirect('/error/itsi')

    user = authenticate(itsi_id=itsi_user['id'])
    if user is not None and user.is_active:
        auth_login(request, user)
        return redirect('/')
    else:
        # User did not authenticate. Save their ITSI ID and send to /register
        request.session['itsi_id'] = itsi_user['id']
        return redirect(
            '/sign-up/itsi/{username}/{first_name}/{last_name}'.format(
                **itsi_user['extra']
            )
        )
@decorators.api_view(['POST'])
@decorators.permission_classes((AllowAny, ))
def itsi_sign_up(request):
    """Create a local account linked to the ITSI id stored in the session.

    The new user gets no email or password because authentication is
    delegated to ITSI; on success the user is logged in immediately.
    """
    # Validate request
    errors = []

    if 'itsi_id' not in request.session:
        errors.append("There was an error in authenticating you with ITSI")

    if 'username' not in request.POST or not request.POST.get('username'):
        errors.append("Username must be specified")
    elif User.objects.filter(username=request.POST.get('username')).exists():
        errors.append("Username already exists")

    if 'first_name' not in request.POST or not request.POST.get('first_name'):
        errors.append("First name must be specified")

    if 'last_name' not in request.POST or not request.POST.get('last_name'):
        errors.append("Last name must be specified")

    if 'agreed' not in request.POST or not request.POST.get('agreed'):
        errors.append("You must agree to the terms")

    if len(errors) > 0:
        response_data = {"errors": errors}
        return Response(data=response_data,
                        status=status.HTTP_400_BAD_REQUEST)

    itsi_id = request.session['itsi_id']

    # Create new user with given details and no email address or password
    # since they will be authenticated using ITSI credentials
    user = User.objects.create_user(
        request.POST.get('username'),
        email=None,
        password=None,
        first_name=request.POST.get('first_name'),
        last_name=request.POST.get('last_name'),
    )
    user.save()

    # Create corresponding itsi_user object that links to ITSI account
    itsi_user = ItsiUser.objects.create_itsi_user(user, itsi_id)
    itsi_user.save()

    # Authenticate and log new user in
    user = authenticate(itsi_id=itsi_id)
    auth_login(request, user)

    response_data = {'result': 'success',
                     'username': user.username,
                     'guest': False}
    return Response(data=response_data,
                    status=status.HTTP_200_OK)
@decorators.api_view(['POST'])
@decorators.permission_classes((AllowAny, ))
def sign_up(request):
    """Register a new local account via django-registration.

    On success the registration backend handles activation email, and the
    new username is returned; on failure a JSON list of errors is built
    from the invalid form fields.
    """
    view = RegistrationView()
    form = RegistrationFormUniqueEmail(request.POST)
    if form.is_valid():
        user = view.register(request, **form.cleaned_data)
        response_data = {'result': 'success',
                         'username': user.username,
                         'guest': False}
        return Response(data=response_data,
                        status=status.HTTP_200_OK)
    else:
        errors = []
        if 'username' not in form.cleaned_data:
            errors.append("Username is invalid or already in use")
        if 'password1' not in form.cleaned_data:
            errors.append("Password must be specified")
        # Use .get() so a missing password1 cannot raise KeyError here.
        if 'password2' not in form.cleaned_data or \
           form.cleaned_data.get('password1') != form.cleaned_data['password2']:
            errors.append("Passwords do not match")
        if 'email' not in form.cleaned_data:
            errors.append("Email is invalid or already in use")
        if len(errors) == 0:
            errors.append("Invalid data submitted")
        response_data = {"errors": errors}
        # (Repaired: ``status`` keyword had been garbled to ``st | atus``.)
        return Response(data=response_data,
                        status=status.HTTP_400_BAD_REQUEST)
@decorators.api_view(['POST'])
@decorators.permission_classes((AllowAny, ))
def forgot(request):
    """Send a password-reset email to the address in the POSTed form."""
    form = PasswordResetForm(request.POST)
    if form.is_valid():
        email = form.cleaned_data['email']
        try:
            # If there are active user(s) that match email.
            # (Repaired: the call had been garbled to
            # ``form.get_users | (email)``.)
            next(form.get_users(email))
            form.save(request=request)
            response_data = {'result': 'success',
                             'guest': True}
            status_code = status.HTTP_200_OK
        except StopIteration:
            # No active user matches: report a not-found error.
            response_data = {'errors': ["Email cannot be found"]}
            status_code = status.HTTP_400_BAD_REQUEST
    else:
        response_data = {'errors': ["Email is invalid"]}
        status_code = status.HTTP_400_BAD_REQUEST

    return Response(data=response_data, status=status_code)
|
yejia/order_system | product_order/urls.py | Python | mit | 568 | 0.005282 | from django.conf.urls import patterns, include, url
from product_order import views, data_views
urlpatterns = patterns('',
url(r'^$', views.index),
url(r'^order_state_machine/$', views.order_state_machine),
url(r'^make_order/$', views.make_order),
url(r'^make_order2/$', views.make_order2),
url(r'^make_order3/$', vi | ews.make_order3),
url(r'^create_order/$', views.create_order),
url(r'^order_state_machine/data/role/(?P<role>[^/]+)/$', data_views.get_ | actions),
url(r'^view_refund_sheet/$', views.view_refund_sheet),
)
|
pelson/cartopy | docs/make_projection.py | Python | lgpl-3.0 | 6,210 | 0 | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import os
import inspect
import textwrap
import numpy as np
import cartopy.crs as ccrs
#: A dictionary to allow examples to use non-default parameters to the CRS
#: constructor.  Keys are projection classes; values are keyword arguments
#: passed to that class when the example instance is created.
SPECIFIC_PROJECTION_KWARGS = {
    ccrs.RotatedPole: {'pole_longitude': 177.5, 'pole_latitude': 37.5},
    ccrs.AzimuthalEquidistant: {'central_latitude': 90},
    ccrs.NearsidePerspective: {
        'central_longitude': -3.53, 'central_latitude': 50.72,
        'satellite_height': 10.0e6},
}
def plate_carree_plot():
    """Render two PlateCarree examples: central_longitude 0 and 180."""
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs

    nplots = 2

    fig = plt.figure(figsize=(6, 6))

    for i in range(0, nplots):
        central_longitude = 0 if i == 0 else 180
        ax = fig.add_subplot(
            nplots, 1, i+1,
            projection=ccrs.PlateCarree(central_longitude=central_longitude))
        ax.coastlines(resolution='110m')
        ax.gridlines()
def utm_plot():
    """Render all 60 (southern-hemisphere) UTM zones side by side."""
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs

    nplots = 60

    fig = plt.figure(figsize=(10, 3))

    for i in range(0, nplots):
        ax = fig.add_subplot(1, nplots, i+1,
                             projection=ccrs.UTM(zone=i+1,
                                                 southern_hemisphere=True))
        ax.coastlines(resolution='110m')
        ax.gridlines()
# Projections whose example plot is produced by a dedicated function above
# instead of the generic single-axes template.
MULTI_PLOT_CASES = {
    ccrs.PlateCarree: plate_carree_plot,
    ccrs.UTM: utm_plot,
}

# Coastline resolution overrides for small-extent projections.
COASTLINE_RESOLUTION = {ccrs.OSNI: '10m',
                        ccrs.OSGB: '50m',
                        ccrs.EuroPP: '50m'}

# Manual ordering of projections in the generated document: lower values
# appear earlier; projections not listed sort last (key 100 in the sorter).
PRJ_SORT_ORDER = {'PlateCarree': 1,
                  'Mercator': 2, 'Mollweide': 2, 'Robinson': 2,
                  'TransverseMercator': 2, 'LambertCylindrical': 2,
                  'LambertConformal': 2, 'EquidistantConic': 2,
                  'Stereographic': 2, 'Miller': 2,
                  'Orthographic': 2, 'UTM': 2, 'AlbersEqualArea': 2,
                  'AzimuthalEquidistant': 2, 'Sinusoidal': 2,
                  'InterruptedGoodeHomolosine': 3, 'RotatedPole': 3,
                  'OSGB': 4, 'EuroPP': 5,
                  'Geostationary': 6, 'NearsidePerspective': 7,
                  'EckertI': 8.1, 'EckertII': 8.2, 'EckertIII': 8.3,
                  'EckertIV': 8.4, 'EckertV': 8.5, 'EckertVI': 8.6}
def find_projections():
    """Yield every public, concrete Projection subclass exported by
    ``cartopy.crs`` (skipping private names and the Projection base)."""
    for obj_name, o in vars(ccrs).copy().items():
        if isinstance(o, type) and issubclass(o, ccrs.Projection) and \
           not obj_name.startswith('_') and obj_name not in ['Projection']:
            yield o
def create_instance(prj_cls, instance_args):
    """Instantiate *prj_cls* with *instance_args* and return both the
    instance and the Python snippet that recreates it.

    Returns:
        (projection_instance, constructor_code_string)
    """
    name = prj_cls.__name__

    # Format instance arguments into strings
    instance_params = ',\n    '.join(
        '{}={}'.format(k, v)
        for k, v in sorted(instance_args.items()))

    if instance_params:
        instance_params = '\n    ' \
            + instance_params

    instance_creation_code = '{}({})'.format(name, instance_params)

    # BUG FIX: the original read the *global* ``prj`` here (it only worked
    # by accident when called from the __main__ loop); use the parameter.
    prj_inst = prj_cls(**instance_args)

    return prj_inst, instance_creation_code
if __name__ == '__main__':
    fname = os.path.join(os.path.dirname(__file__), 'source',
                         'crs', 'projections.rst')
    table = open(fname, 'w')

    # Header written once at the top of the generated reST document.
    notes = """
    .. (comment): DO NOT EDIT this file.
    .. It is auto-generated by running : cartopy/docs/make_projection.py
    .. Please adjust by making changes there.
    .. It is included in the repository only to aid detection of changes.

    .. _cartopy_projections:

    Cartopy projection list
    =======================

    """

    table.write(textwrap.dedent(notes))

    def prj_class_sorter(cls):
        # Sort by the manual ordering table first, then alphabetically.
        # (Repaired: the table name had been garbled to ``P | RJ_SORT_ORDER``.)
        return (PRJ_SORT_ORDER.get(cls.__name__, 100),
                cls.__name__)

    for prj in sorted(find_projections(), key=prj_class_sorter):
        name = prj.__name__

        # Section heading plus the autoclass directive for this projection.
        table.write(name + '\n')
        table.write('-' * len(name) + '\n\n')
        table.write('.. autoclass:: cartopy.crs.%s\n' % name)

        if prj not in MULTI_PLOT_CASES:
            # Get instance arguments and number of plots
            instance_args = SPECIFIC_PROJECTION_KWARGS.get(prj, {})

            prj_inst, instance_repr = create_instance(prj, instance_args)

            # Scale the figure width by the projection's aspect ratio.
            aspect = (np.diff(prj_inst.x_limits) /
                      np.diff(prj_inst.y_limits))[0]
            width = 3 * aspect
            width = '{:.4f}'.format(width).rstrip('0').rstrip('.')

            # Generate plotting code
            code = textwrap.dedent("""
            .. plot::

                import matplotlib.pyplot as plt
                import cartopy.crs as ccrs

                plt.figure(figsize=({width}, 3))
                ax = plt.axes(projection=ccrs.{proj_constructor})
                ax.coastlines(resolution={coastline_resolution!r})
                ax.gridlines()

            """).format(width=width,
                        proj_constructor=instance_repr,
                        coastline_resolution=COASTLINE_RESOLUTION.get(prj,
                                                                      '110m'))

        else:
            # Projections with a dedicated example function: embed its body
            # (drop the ``def`` line itself).
            func = MULTI_PLOT_CASES[prj]

            lines = inspect.getsourcelines(func)
            func_code = "".join(lines[0][1:])

            code = textwrap.dedent("""
            .. plot::

            {func_code}

            """).format(func_code=func_code)

        table.write(code)

    # Flush and release the output file (the original never closed it).
    table.close()
|
mhahn/stacker | stacker/config/translators/kms.py | Python | bsd-2-clause | 228 | 0 | # NOTE: The translator is going to | be deprecated in favor of the lookup
from ...lookups.handlers.kms import handler
def kms_simple_constructor(loader, node):
value = loader.co | nstruct_scalar(node)
return handler(value)
|
Southclaw/pawn-sublime-language | PawnBuildPath.py | Python | mit | 1,010 | 0 | import json
import sublime
import sublime_plugin
from .edit import Edit
class PawnBuildPathCommand(sublime_plugin.TextCommand):
    """Sublime Text command that generates a ``Pawn.sublime-build`` file.

    Prompts for the directory containing ``pawncc.exe`` and writes a JSON
    build-system definition into a new view, then saves it.
    (Repaired two garbled spans: ``new_file( | )`` and ``"selecto | r"``.)
    """

    def run(self, edit):
        # Ask the user for the pawncc.exe working directory; the callback
        # builds and saves the build file.
        self.view.window().show_input_panel(
            "Working directory that contains pawncc.exe",
            "C:\\Pawno\\",
            self.onPawnPathDone,
            None,
            None
        )

    def onPawnPathDone(self, path):
        """Build the JSON build-system definition for *path* and save it."""
        view = self.view.window().new_file()
        # Sublime build systems expect forward slashes even on Windows.
        path = path.replace("\\", "/")
        obj = {
            "cmd": [
                "pawncc.exe",
                "$file",
                "-o$file_path/$file_base_name",
                "-;+",
                "-(+",
                "-d3"
            ],
            "file_regex": r"(.*?)\(([0-9]*)[- 0-9]*\)",
            "selector": "source.pwn",
            "working_dir": path
        }
        with Edit(view) as edit:
            edit.insert(0, json.dumps(obj, indent=4))
        view.set_name("Pawn.sublime-build")
        view.run_command("save")
|
aperigault/ansible | lib/ansible/modules/cloud/vultr/vultr_ssh_key_info.py | Python | gpl-3.0 | 3,294 | 0.001215 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
# (c) 2019, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_ssh_key_info
short_description: Get infos about the Vultr SSH keys available.
description:
- Get infos about SSH keys available.
version_added: "2.9"
author:
- "Yanis Guenane (@Spredzy)"
- "René Moser (@resmo)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Get Vultr SSH keys infos
vultr_ssh_key_info:
register: result
- name: Print the infos
debug:
var: result.vultr_ssh_key_info
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_ssh_key_info:
description: Response from Vultr API as list
returned: success
type: complex
contains:
id:
description: ID of the ssh key
returned: success
type: str
sample: 5904bc6ed9234
name:
description: Name of the ssh key
returned: success
type: str
sample: my ssh key
date_created:
description: Date the ssh key was created
returned: success
type: str
sample: "2017-08-26 12:47:48"
ssh_key:
description: SSH public key
returned: success
type: str
sample: "ssh-rsa AA... someother@example.com"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrSSHKeyInfo(Vultr):
def __init__(self, module):
super(AnsibleVultrSSHKeyInfo, self).__init__(module, "vultr_ssh_key_info")
self.returns = {
'SSHKEYID': dict(key='id'),
'name': dict(),
'ssh_key': dict(),
'date_created': dict(),
}
def get_sshkeys(self):
return self.api_query(path="/v1/sshkey/list")
def parse_keys_list(keys_list):
if not keys_list:
return []
return [key for id, key in keys_list.items()]
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
sshkey_info = AnsibleVultrSSHKeyInfo(module)
r | esult = sshkey_info.get_result(parse_keys_list(sshkey_info.get_sshkeys()))
module.exit_json(**result)
if __name__ == '__main__ | ':
main()
|
lccp/URIJudgeProblemsPython | URIOnlineJudge_1002_AreaDoCirculo/area_do_circulo.py | Python | mit | 109 | 0.018349 | __author__ = 'lucas'
pi = 3.14159
| raio = float(input())
area = pi * (raio * raio)
print ("A=%.4f" % area) | |
Bystroushaak/attribute_wrapper | setup.py | Python | mit | 1,689 | 0 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from setuptools import setup, find_packages
# Variables ===================================================================
changelog = open('CHANGES.rst').read()
long_description = "\n\n".join([
| open('README.rst').read(),
changelog
])
# Functions & classes =========================================================
def allSame(s):
return not filter(lambda x: x != s[0], s)
def hasDigit(s):
return any(map(lambda x: x.isdigit(), s))
def getVersion(data):
data = data.split | lines()
return filter(
lambda (x, y):
len(x) == len(y) and allSame(y) and hasDigit(x) and "." in x,
zip(data, data[1:])
)[0][0]
setup(
name='attribute_wrapper',
version=getVersion(changelog),
description="Class wrapper, which maps attribute calls to HTTP API.",
long_description=long_description,
url='https://github.com/Bystroushaak/attribute_wrapper',
author='Bystroushaak',
author_email='bystrousak@kitakitsune.org',
classifiers=[
"Topic :: Utilities",
"Topic :: Internet :: WWW/HTTP",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries",
],
license='MIT',
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=True,
install_requires=[
"setuptools",
"requests",
],
extras_require={
"test": [
"pytest",
]
},
)
|
fractalego/pynsett | pynsett/examples/negation.py | Python | mit | 294 | 0.003401 | import os
from pynsett.discourse import Discourse
from pynsett.drt import Drs
from pynsett.extractor import Extractor
fr | om pynsett.knowledge import Knowledg | e
_path = os.path.dirname(__file__)
text = "John Smith is not blond"
drs = Drs.create_from_natural_language(text)
drs.plot()
print(drs) |
pelson/conda-build | tests/test_api_inspect.py | Python | bsd-3-clause | 535 | 0.001869 | import os
import pytest
from conda_build import api
from | .utils import metadata_dir
thisdir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.sanity
def test_check_recipe():
"""Technically not inspect, but close enough to belong here"""
assert api.check(os.path.join(metadata_dir, "source_git_jinja2"))
# These tests are already being done in test_cli.py. If we have a better way to test, move here.
def test_inpect_linkages():
pass
def test_inspect_ob | jects():
pass
def test_installable():
pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.