| repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, lengths 0-8.16k) | middle (string, lengths 3-512) | suffix (string, lengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
qiita-spots/qiita_client | qiita_client/util.py | Python | bsd-3-clause | 2,813 | 0 |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import pandas as pd
from subprocess import Popen, PIPE
import logging
logger = logging.getLogger(__name__)
def system_call(cmd):
"""Call command and return (stdout, stderr, return_value)
Parameters
----------
cmd : str or iterator of str
The string containing the command to be run, or a sequence of strings
that are the tokens of the command.
Returns
-------
str, str, int
- The standard output of the command
- The standard error of the command
- The exit status of the command
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger.debug('Entered system_call()')
# TODO: This may need to be reviewed against the Qiita version of this
# method.
proc = Popen(cmd, universal_newlines=True, shell=True, stdout=PIPE,
stderr=PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
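# Illustrative usage sketch (assumed; not from the original util.py): the
# docstring above promises a (stdout, stderr, return_value) triple, so a
# caller would typically check the exit status before trusting stdout, e.g.:
#
#     out, err, rc = system_call('echo hello')
#     if rc != 0:
#         raise RuntimeError('command failed (%d): %s' % (rc, err))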
def get_sample_names_by_run_prefix(mapping_file):
"""Generates a dictionary of run_prefix and sample names
|
Parameters
----------
mapping_file : str
The mapping file
Returns
-------
dict
Dict mapping run_prefix to sample id
Raises
------
ValueError
|
If there is more than 1 sample per run_prefix
"""
logger.debug('Entered get_sample_names_by_run_prefix()')
qiime_map = pd.read_csv(mapping_file, delimiter='\t', dtype=str,
encoding='utf-8', keep_default_na=False,
na_values=[])
qiime_map.set_index('#SampleID', inplace=True)
samples = {}
errors = []
for prefix, df in qiime_map.groupby('run_prefix'):
len_df = len(df)
if len_df != 1:
errors.append('%s has %d samples (%s)' % (prefix, len_df,
', '.join(df.index)))
else:
samples[prefix] = df.index.values[0]
if errors:
raise ValueError("You have run_prefix values with multiple "
"samples: %s" % ' -- '.join(errors))
return samples
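# Illustrative sketch (assumed; not from the original util.py). For a
# QIIME-style mapping file whose tab-separated rows look like
#     #SampleID    run_prefix   ...
#     sample.1     run1_        ...
#     sample.2     run2_        ...
# the helper above returns {'run1_': 'sample.1', 'run2_': 'sample.2'} and
# raises ValueError if any run_prefix is shared by more than one sample.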
|
ducksboard/libsaas | libsaas/services/pipedrive/deals.py | Python | mit | 5,710 | 0 |
from libsaas import http, parsers
from libsaas.services import base
class Products(base.RESTResource):
path = 'products'
@base.apimethod
def get(self, start=None, limit=None):
"""
Lists products attached to a deal.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
@base.apimethod
def delete(self, product_attachment_id):
"""
Deletes a product attachment from a deal, using the
product_attachment_id.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
request = http.Request('DELETE', self.get_url(), params)
return request, parsers.parse_json
class DealsResource(base.RESTResource):
path = 'deals'
class Deals(DealsResource):
@base.apimethod
def get(self, filter_id=None, start=None, limit=None, sort_by=None,
sort_mode=None, owned_by_you=None):
"""
Returns all deals
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
return http.Request('GET', self.get_url(), params), parsers.parse_json
@base.apimethod
def delete(self, ids):
"""
Marks multiple deals as deleted.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
request = http.Request('DELETE', self.get_url(), params)
return request, parsers.parse_json
@base.apimethod
def find(self, term):
"""
Searches all deals by their title.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
url = '{0}/find'.format(self.get_url())
return http.Request('GET', url, params), parsers.parse_json
@base.apimethod
def timeline(self, start_date, interval, amount, field_key, user_id=None,
pipeline_id=None, filter_id=None):
"""
Returns open and won deals, grouped by defined interval of time set
in a date-type dealField (field_key) - e.g. when month is the chosen
interval, and 3 months are asked starting from January 1st, 2012,
deals are returned grouped into 3 groups - January, February and
March - based on the value of the given field_key.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
url = '{0}/timeline'.format(self.get_url())
return http.Request('GET', url, params), parsers.parse_json
class Deal(DealsResource):
@base.apimethod
def activities(self, start=None, limit=None, done=None, exclude=None):
"""
Lists activities associated with a deal.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
url = '{0}/activities'.format(self.get_url())
return http.Request('GET', url, params), parsers.parse_json
@base.apimethod
def followers(self):
"""
Lists the followers of a deal.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
url = '{0}/followers'.format(self.get_url())
return http.Request('GET', url), parsers.parse_json
@base.apimethod
def updates(self, start=None, limit=None):
"""
Lists updates about a deal.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
url = '{0}/updates'.format(self.get_url())
return http.Request('GET', url, params), parsers.parse_json
@base.apimethod
def participants(self, start=None, limit=None):
"""
Lists participants associated with a deal.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
url = '{0}/participants'.format(self.get_url())
|
return http.Request('GET', url, params), parsers.parse_json
@base.apimethod
def files(self, start=None, limit=None):
"""
Lists files associated with a deal.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Deals
"""
params = base.get_params(None, locals())
url = '{0}/files'.format(self.get_url())
return http.Request('GET', url, params), parsers.parse_json
@base.resource(Products)
def products(self):
"""
Returns the resource corresponding to the deal products
"""
return Products(self)
class DealFieldsResource(base.RESTResource):
path = 'dealFields'
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
class DealFields(DealFieldsResource):
@base.apimethod
def delete(self, ids):
"""
Marks multiple activities as deleted.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-DealFields
"""
params = base.get_params(None, locals())
request = http.Request('DELETE', self.get_url(), params)
return request, parsers.parse_json
class DealField(DealFieldsResource):
pass
|
VirrageS/io-kawiarnie | caffe/employees/views.py | Python | mit | 2,869 | 0 |
"""Module with views for the employee feature."""
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required, permission_required
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from .forms import EmployeeForm
from .models import Employee
@login_required
def employees_logout_employee(request):
"""Logout a user, default behaviour."""
logout(request)
return render(request, 'employees/logout.html')
@permission_required('employees.add_employee')
def employees_new_employee(request):
"""Create a new employee."""
form = EmployeeForm(request.POST or None, caffe=request.user.caffe)
if form.is_valid():
form.save()
messages.success(request, 'Pracownik został poprawnie stworzony.')
return redirect(reverse('employees:navigate'))
elif request.POST:
messages.error(request, u'Formularz został niepoprawnie wypełniony.')
return render(request, 'employees/new.html', {
'form': form
})
@permission_required('employees.change_employee')
def employees_edit_employee(request, employee_id):
"""Edit an employee."""
employee = get_object_or_404(
Employee,
id=employee_id,
caffe=request.user.caffe
)
form = EmployeeForm(
request.POST or None,
instance=employee,
caffe=request.user.caffe
)
if form.is_valid():
form.save()
messages.success(request, 'Pracownik został poprawnie zmieniony.')
return redirect(reverse('employees:navigate'))
elif request.POST:
messages.error(request, u'Formularz został niepoprawnie wypełniony.')
return render(request, 'employees/edit.html', {
|
'form': form,
'employee': employee
})
@permission_required('employees.delete_employee')
def employees_delete_employee(request, employee_id):
"""Delete an employee."""
employee = get_object_or_404(
Employee,
id=employee_id,
caffe=request.user.caffe
)
if employee == request.user:
messages.error(request, u'Nie możesz usunąć siebie.')
return redirect(reverse('employees:navigate'))
employee.delete()
messages.success(request, u'Pracownik został poprawnie usunięty.')
return redirect(reverse('employees:navigate'))
@permission_required('employees.view_employee')
def employees_show_all_employees(request):
"""Show all employees."""
employees = Employee.objects.filter(caffe=request.user.caffe).all()
return render(request, 'employees/all.html', {
'employees': employees
})
@permission_required('employees.view_employee')
def employees_navigate(request):
"""Show main employee page."""
return render(request, 'home/employees.html')
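# Illustrative sketch (assumed; not from the original views.py). The views
# above are resolved via reverse('employees:...'), so the app's urls.py
# presumably wires them up roughly like this (patterns and names guessed):
#
#     from django.conf.urls import url
#     from . import views
#
#     urlpatterns = [
#         url(r'^$', views.employees_navigate, name='navigate'),
#         url(r'^new/$', views.employees_new_employee, name='new'),
#         url(r'^edit/(?P<employee_id>\d+)/$',
#             views.employees_edit_employee, name='edit'),
#     ]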
|
CollabQ/CollabQ | vendor/django/conf/global_settings.py | Python | apache-2.0 | 14,562 | 0.00206 |
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@domain.com'), ('Full Name', 'anotheremail@domain.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('bn', gettext_noop('Bengali')),
('bg', gettext_noop('Bulgarian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('es', gettext_noop('Spanish')),
('et', gettext_noop('Estonian')),
('es-ar', gettext_noop('Argentinean Spanish')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('hu', gettext_noop('Hungarian')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('ko', gettext_noop('Korean')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('lv', gettext_noop('Latvian')),
('lt', gettext_noop('Lithuanian')),
('mk', gettext_noop('Macedonian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sr', gettext_noop('Serbian')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
|
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
# 'django.core.context_processors.request',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_ST
|
rubik/radon | radon/visitors.py | Python | mit | 14,879 | 0 |
'''This module contains the ComplexityVisitor class which is where all the
analysis concerning Cyclomatic Complexity is done. There is also the class
HalsteadVisitor, that counts Halstead metrics.'''
import ast
import collections
import operator
# Helper functions to use in combination with map()
GET_COMPLEXITY = operator.attrgetter('complexity')
GET_REAL_COMPLEXITY = operator.attrgetter('real_complexity')
NAMES_GETTER = operator.attrgetter('name', 'asname')
GET_ENDLINE = operator.attrgetter('endline')
BaseFunc = collections.namedtuple(
'Function',
[
'name',
'lineno',
'col_offset',
'endline',
'is_method',
'classname',
'closures',
'complexity',
],
)
BaseClass = collections.namedtuple(
'Class',
[
'name',
'lineno',
'col_offset',
'endline',
'methods',
'inner_classes',
'real_complexity',
],
)
def code2ast(source):
'''Convert a string object into an AST object.
This function is retained for backwards compatibility, but it no longer
attempts any conversions. It's equivalent to a call to ``ast.parse``.
'''
return ast.parse(source)
class Function(BaseFunc):
'''Object representing a function block.'''
@property
def letter(self):
'''The letter representing the function. It is `M` if the function is
actually a method, `F` otherwise.
'''
return 'M' if self.is_method else 'F'
@property
def fullname(self):
'''The full name of the function. If it is a method, then the full name
is:
{class name}.{method name}
Otherwise it is just the function name.
'''
if self.classname is None:
return self.name
return '{0}.{1}'.format(self.classname, self.name)
def __str__(self):
'''String representation of a function block.'''
return '{0} {1}:{2}->{3} {4} - {5}'.format(
self.letter,
self.lineno,
self.col_offset,
self.endline,
self.fullname,
self.complexity,
)
class Class(BaseClass):
'''Object representing a class block.'''
letter = 'C'
@property
def fullname(self):
'''The full name of the class. It is just its name. This attribute
exists for consistency (see :data:`Function.fullname`).
'''
return self.name
@property
def complexity(self):
'''The average complexity of the class. It corresponds to the average
complexity of its methods plus one.
'''
if not self.methods:
return self.real_complexity
methods = len(self.methods)
return int(self.real_complexity / float(methods)) + (methods > 1)
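# Worked example (assumed values, added for clarity): with real_complexity
# equal to 8 spread over 4 methods, the property above yields
# int(8 / 4.0) + (4 > 1) == 2 + 1 == 3, i.e. the average method complexity
# plus one extra point whenever the class has more than one method.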
def __str__(self):
'''String representation of a class block.'''
return '{0} {1}:{2}->{3} {4} - {5}'.format(
self.letter,
self.lineno,
self.col_offset,
self.endline,
self.name,
self.complexity,
)
class CodeVisitor(ast.NodeVisitor):
'''Base class for every NodeVisitors in `radon.visitors`. It implements a
couple utility class methods and a static method.
'''
@staticmethod
def get_name(obj):
'''Shorthand for ``obj.__class__.__name__``.'''
return obj.__class__.__name__
@classmethod
def from_code(cls, code, **kwargs):
'''Instantiate the class from source code (string object). The
`**kwargs` are directly passed to the `ast.NodeVisitor` constructor.
'''
return cls.from_ast(code2ast(code), **kwargs)
@classmethod
def from_ast(cls, ast_node, **kwargs):
'''Instantiate the class from an AST node. The `**kwargs` are
directly passed to the `ast.NodeVisitor` constructor.
'''
visitor = cls(**kwargs)
visitor.visit(ast_node)
return visitor
class ComplexityVisitor(CodeVisitor):
'''A visitor that keeps track of the cyclomatic complexity of
the elements.
:param to_method: If True, every function is treated as a method. In this
case the *classname* parameter is used as class name.
:param classname: Name of parent class.
:param off: If True, the starting value for the complexity is set to 1,
otherwise to 0.
'''
def __init__(
self, to_method=False, classname=None, off=True, no_assert=False
):
self.off = off
self.complexity = 1 if off else 0
self.functions = []
self.classes = []
self.to_method = to_method
self.classname = classname
self.no_assert = no_assert
self._max_line = float('-inf')
@property
def functions_complexity(self):
'''The total complexity from all functions (i.e. the total number of
decision points + 1).
This is *not* the sum of all the complexity from the functions. Rather,
it's the complexity of the code *inside* all the functions.
'''
return sum(map(GET_COMPLEXITY, self.functions)) - len(self.functions)
@property
def classes_complexity(self):
'''The total complexity from all classes (i.e. the total number of
decision points + 1).
'''
return sum(map(GET_REAL_COMPLEXITY, self.classes)) - len(self.classes)
@property
def total_complexity(self):
'''The total complexity. Computed adding up the visitor complexity, the
functions complexity, and the classes complexity.
'''
return (
self.complexity
+ self.functions_complexity
+ self.classes_complexity
+ (not self.off)
)
@property
def blocks(self):
'''All the blocks visited. These include: all the functions, the
classes and their methods. The returned list is not sorted.
'''
blocks = []
blocks.extend(self.functions)
for cls in self.classes:
blocks.append(cls)
blocks.extend(cls.methods)
return blocks
@property
def max_line(self):
'''The maximum line number among the analyzed lines.'''
return self._max_line
@max_line.setter
def max_line(self, value):
'''The maximum line number among the analyzed lines.'''
if value > self._max_line:
self._max_line = value
def generic_visit(self, node):
'''Main entry point for the visitor.'''
# Get the name of the class
name = self.get_name(node)
# Check for a lineno attribute
if hasattr(node, 'lineno'):
self.max_line = node.lineno
# The Try/Except block is counted as the number of handlers
# plus the `else` block.
# In Python 3.3 the TryExcept and TryFinally nodes have been merged
# into a single node: Try
if name in ('Try', 'TryExcept'):
self.complexity += len(node.handlers) + bool(node.orelse)
elif name == 'BoolOp':
self.complexity += len(node.values) - 1
# Ifs, with and assert statements count all as 1.
# Note: Lambda functions are not counted anymore, see #68
elif name in ('If', 'IfExp'):
self.complexity += 1
# The For and While blocks count as 1 plus the `else` block.
elif name in ('For', 'While', 'AsyncFor'):
self.complexity += bool(node.orelse) + 1
# List, set, dict comprehensions and generator exps count as 1 plus
# the `if` statement.
elif name == 'comprehension':
self.complexity += len(node.ifs) + 1
super(ComplexityVisitor, self).generic_visit(node)
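# Worked example (added for clarity; not part of the original file): for
# `if a and b:` the visitor sees one If node (+1) and one BoolOp with two
# values (+ len(values) - 1 == +1), so the test contributes 2 to the running
# complexity, matching the counting rules implemented above.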
def visit_Assert(self, node):
'''When visiting `assert` statements, the complexity is increased only
if the `no_assert` attribute is `False`.
'''
self.complexity += not self.no_assert
def visit_AsyncFunctionDef(self, node):
'''Async function definition is the same thing as the synchronous
one.
'''
self.visit_FunctionDef(node)
def visit_Fu
|
yangming85/lettuce | tests/functional/language_specific_features/test_ja.py | Python | gpl-3.0 | 6,962 | 0.005402 |
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os.path import dirname, abspath, join
from nose.tools import with_setup
from tests.asserts import prepare_stdout
from tests.asserts import assert_stdout_lines
from lettuce import Runner
current_dir = abspath(dirname(__file__))
join_path = lambda *x: join(current_dir, *x)
@with_setup(prepare_stdout)
def test_output_with_success_colorless():
"Language: ja -> sucess colorless"
runner = Runner(join_path('ja', 'success', 'dumb.feature'), verbosity=3, no_color=True)
runner.run()
assert_stdout_lines(
u"\n"
u"フィーチャ: ダムフィーチャ # tests/functional/language_specific_features/ja/success/dumb.feature:3\n"
u" テストをグリーンになればテスト成功 # tests/functional/language_specific_features/ja/success/dumb.feature:4\n"
u"\n"
u" シナリオ: 何もしない # tests/functional/language_specific_features/ja/success/dumb.feature:6\n"
u" 前提 何もしない # tests/functional/language_specific_features/ja/success/dumb_steps.py:6\n"
u"\n"
u"1 feature (1 passed)\n"
u"1 scenario (1 passed)\n"
u"1 step (1 passed)\n"
)
@with_setup(prepare_stdout)
def test_output_of_table_with_success_colorless():
"Language: ja -> sucess table colorless"
runner = Runner(join_path('ja', 'success', 'table.feature'), verbosity=3, no_color=True)
runner.run()
assert_stdout_lines(
u"\n"
u"フィーチャ: テーブル記法 # tests/functional/language_specific_features/ja/success/table.feature:3\n"
u" 日本語でのテーブル記法がパスするかのテスト # tests/functional/language_specific_features/ja/success/table.feature:4\n"
u"\n"
u" シナリオ: 何もしないテーブル # tests/functional/language_specific_features/ja/success/table.feature:6\n"
u" 前提 データは以下: # tests/functional/language_specific_features/ja/success/table_steps.py:6\n"
u" | id | 定義 |\n"
u" | 12 | 何かの定義 |\n"
u" | 64 | 別の定義 |\n"
u"\n"
u"1 feature (1 passed)\n"
u"1 scenario (1 passed)\n"
u"1 step (1 passed)\n"
)
@with_setup(prepare_stdout)
def test_output_outlines_success_colorless():
"Language: ja -> sucess outlines colorless"
runner = Runner(join_path('ja', 'success', 'outlines.feature'), verbosity=3, no_color=True)
runner.run()
assert_stdout_lines(
u"\n"
u"フィーチャ: アウトラインを日本語で書く # tests/functional/language_specific_features/ja/success/outlines.feature:3\n"
u" 図表のテストをパスすること # tests/functional/language_specific_features/ja/success/outlines.feature:4\n"
u"\n"
u" シナリオアウトライン: 全てのテストで何もしない # tests/functional/language_specific_features/ja/success/outlines.feature:6\n"
u" 前提 入力値を <データ1> とし # tests/functional/language_specific_features/ja/success/outlines_steps.py:13\n"
u" もし 処理 <方法> を使って # tests/functional/language_specific_features/ja/success/outlines_steps.py:22\n"
u" ならば 表示は <結果> である # tests/functional/language_specific_features/ja/success/outlines_steps.py:31\n"
u"\n"
u" 例:\n"
u" | データ1 | 方法 | 結果 |\n"
u" | 何か | これ | 機能 |\n"
u" | その他 | ここ | 同じ |\n"
u" | データ | 動く | unicodeで! |\n"
u"\n"
u"1 feature (1 passed)\n"
u"3 scenarios (3 passed)\n"
u"9 steps (9 passed)\n"
)
@with_setup(prepare_stdout)
def test_output_outlines_success_colorful():
"Language: ja -> sucess outlines colorful"
runner = Runner(join_path('ja', 'success', 'outlines.feature'), verbosity=3, no_color=False)
runner.run()
assert_stdout_lines(
u'\n'
u"\033[1;37mフィーチャ: アウトラインを日本語で書く \033[1;30m# tests/functional/language_specific_features/ja/success/outlines.feature:3\033[0m\n"
u"\033[1;37m 図表のテストをパスすること \033[1;30m# tests/functional/language_specific_features/ja/success/outlines.feature:4\033[0m\n"
u'\n'
u"\033[1;37m シナリオアウトライン: 全てのテストで何もしない \033[1;30m# tests/functional/language_specific_features/ja/success/outlines.feature:6\033[0m\n"
u"\033[0;36m 前提 入力値を <データ1> とし \033[1;30m# tests/functional/language_specific_features/ja/success/outlines_steps.py:13\033[0m\n"
u"\033[0;36m もし 処理 <方法> を使って \033[1;30m# tests/functional/language_specific_features/ja/success/outlines_steps.py:22\033[0m\n"
u"\033[0;36m ならば 表示は <結果> である \033[1;30m# tests/functional/language_specific_features/ja/success/outlines_steps.py:31\033[0m\n"
u'\n'
u"\033[1;37m 例:\033[0m\n"
u"\033[0;36m \033[1;37m |\033[0;36m データ1\033[1;37m |\033[0;36m 方法\033[1;37m |\033[0;36m 結果 \033[1;37m |\033[0;36m\033[0m\n"
u"\033[1;32m \033[1;37m |\033[1;32m 何か \033[1;37m |\033[1;32m これ\033[1;37m |\033[1;32m 機能 \033[1;37m |\033[1;32m\033[0m\n"
u"\033[1;32m \033[1;37m |\033[1;32m その他 \033[1;37m |\033[1;32m ここ\033[1;37m |\033[1;32m 同じ \033[1;37m |\033[1;32m\033[0m\n"
u"\033[1;32m \033[1;37m |\033[1;32m データ \033[1;37m |\033[1;32m 動く\033[1;37m |\033[1;32m unicodeで!\033[1;37m |\033[1;32m\033[0m\n"
u'\n'
u"\033[1;37m1 feature (\033[1;32m1 passed\033[1;37m)\033[0m\n"
u"\033[1;37m3 scenarios (\033[1;32m3 passed\033[1;37m)\033[0m\n"
u"\033[1;37m9 steps (\033[1;32m9 passed\033[1;37m)\033[0m\n"
)
|
dunkhong/grr | grr/server/grr_response_server/output_plugins/__init__.py | Python | apache-2.0 | 668 | 0.002994 |
#!/usr/bin/env python
"""Output plugins implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_server import output_plugin
# pylint: disable=unused-import,g-import-not-at-top
try:
from grr_response_server.output_plugins import bigquery_plugin
except ImportError:
pass
from grr_response_server.output_plugins import csv_plugin
from grr_response_server.output_plugins import email_plugin
from grr_response_server.output_plugins import splunk_plugin
from grr_response_server.output_plugins import sqlite_plugin
from grr_response_server.output_plugins import yaml_plugin
|
aronsky/home-assistant | homeassistant/helpers/storage.py | Python | apache-2.0 | 8,433 | 0.00083 |
"""Helper to help store data."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
from json import JSONEncoder
import logging
import os
from typing import Any
from homeassistant.const import EVENT_HOMEASSISTANT_FINAL_WRITE
from homeassistant.core import CALLBACK_TYPE, CoreState, Event, HomeAssistant, callback
from homeassistant.helpers.event import async_call_later
from homeassistant.loader import MAX_LOAD_CONCURRENTLY, bind_hass
from homeassistant.util import json as json_util
# mypy: allow-untyped-calls, allow-untyped-defs, no-warn-return-any
# mypy: no-check-untyped-defs
STORAGE_DIR = ".storage"
_LOGGER = logging.getLogger(__name__)
STORAGE_SEMAPHORE = "storage_semaphore"
@bind_hass
async def async_migrator(
hass,
old_path,
store,
*,
old_conf_load_func=None,
old_conf_migrate_func=None,
):
"""Migrate old data to a store and then load data.
async def old_conf_migrate_func(old_data)
"""
store_data = await store.async_load()
# If we already have store data we have already migrated in the past.
if store_data is not None:
return store_data
def load_old_config():
"""Load old config."""
if not os.path.isfile(old_path):
return None
if old_conf_load_func is not None:
return old_conf_load_func(old_path)
return json_util.load_json(old_path)
config = await hass.async_add_executor_job(load_old_config)
if config is None:
return None
if old_conf_migrate_func is not None:
config = await old_conf_migrate_func(config)
await store.async_save(config)
await hass.async_add_executor_job(os.remove, old_path)
return config
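# Illustrative usage sketch (assumed; not from the original storage.py).
# A component migrating a legacy JSON file into a Store might call, with
# "my_component" and the old path being made-up names:
#
#     store = Store(hass, 1, "my_component")
#     data = await async_migrator(
#         hass, hass.config.path("my_component.conf"), store)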
@bind_hass
class Store:
"""Class to help storing data."""
def __init__(
self,
hass: HomeAssistant,
version: int,
key: str,
private: bool = False,
*,
encoder: type[JSONEncoder] | None = None,
) -> None:
"""Initialize storage class."""
self.version = version
self.key = key
self.hass = hass
self._private = private
self._data: dict[str, Any] | None = None
self._unsub_delay_listener: CALLBACK_TYPE | None = None
self._unsub_final_write_listener: CALLBACK_TYPE | None = None
self._write_lock = asyncio.Lock()
self._load_task: asyncio.Future | None = None
self._encoder = encoder
@property
def path(self):
"""Return the config path."""
return self.hass.config.path(STORAGE_DIR, self.key)
async def async_load(self) -> dict | list | None:
"""Load data.
If the expected version does not match the given version, the migrate
function will be invoked with await migrate_func(version, config).
Will ensure that when a call comes in while another one is in progress,
the second call will wait and return the result of the first call.
"""
if self._load_task is None:
self._load_task = self.hass.async_create_task(self._async_load())
return await self._load_task
async def _async_load(self):
"""Load the data and ensure the task is removed."""
if STORAGE_SEMAPHORE not in self.hass.data:
self.hass.data[STORAGE_SEMAPHORE] = asyncio.Semaphore(MAX_LOAD_CONCURRENTLY)
try:
async with self.hass.data[STORAGE_SEMAPHORE]:
return await self._async_load_data()
finally:
self._load_task = None
async def _async_load_data(self):
"""Load the data."""
# Check if we have a pending write
if self._data is not None:
data = self._data
# If we didn't generate data yet, do it now.
if "data_func" in data:
data["data"] = data.pop("data_func")()
else:
data = await self.hass.async_add_executor_job(
json_util.load_json, self.path
)
if data == {}:
return None
if data["version"] == self.version:
stored = data["data"]
else:
_LOGGER.info(
"Migrating %s storage from %s to %s",
self.key,
data["version"],
self.version,
)
stored = await self._async_migrate_func(data["version"], data["data"])
return stored
async def async_save(self, data: dict | list) -> None:
"""Save data."""
self._data = {"version": self.version, "key": self.key, "data": data}
if self.hass.state == CoreState.stopping:
self._async_ensure_final_write_listener()
return
await self._async_handle_write_data()
@callback
def async_delay_save(self, data_func: Callable[[], dict], delay: float = 0) -> None:
"""Save data with an optional delay."""
self._data = {"version": self.version, "key": self.key, "data_func": data_func}
self._async_cleanup_delay_listener()
self._async_ensure_final_write_listener()
if self.hass.state == CoreState.stopping:
return
self._unsub_delay_listener = async_call_later(
self.hass, delay, self._async_callback_delayed_write
)
@callback
def _async_ensure_final_write_listener(self) -> None:
"""Ensure that we write if we quit before delay has passed."""
if self._unsub_final_write_listener is None:
self._unsub_final_write_listener = self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_FINAL_WRITE, self._async_callback_final_write
)
@callback
def _async_cleanup_final_write_listener(self) -> None:
"""Clean up a stop listener."""
if self._unsub_final_write_listener is not None:
self._unsub_final_write_listener()
self._unsub_final_write_listener = None
@callback
def _async_cleanup_delay_listener(self) -> None:
"""Clean up a delay listener."""
if self._unsub_delay_listener is not None:
self._unsub_delay_listener()
self._unsub_delay_listener = None
async def _async_callback_delayed_write(self, _now):
"""Handle a delayed write callback."""
# catch the case where a call is scheduled and then we stop Home Assistant
if self.hass.state == CoreState.stopping:
self._async_ensure_final_write_listener()
return
await self._async_handle_write_data()
async def _async_callback_final_write(self, _event: Event) -> None:
"""Handle a write because Home Assistant is in final write state."""
self._unsub_final_write_listener = None
await self._async_handle_write_data()
async def _async_handle_write_data(self, *_args):
"""Handle writing the config."""
async with self._write_lock:
self._async_cleanup_delay_listener()
self._async_cleanup_final_write_listener()
if self._data is None:
# Another write already consumed the data
return
data = self._data
if "data_func" in data:
data["data"] = data.pop("data_func")()
self._data = None
try:
await self.hass.async_add_executor_job(
self._write_data, self.path, data
)
except (json_util.SerializationError, json_util.WriteError) as err:
_LOGGER.error("Error writing config for %s: %s", self.key, err)
def _write_data(self, path: str, data: dict) -> None:
"""Write the data."""
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
_LOGGER.debug("Writing data for %s to %s", self.key, path)
json_util.save_json(path, data, self._private, encoder=self._encoder)
async def _async_migrate_func(self, old_version, old_data):
"""Migrate to the new version."""
raise NotImplementedError
async def async_remove(self)
|
superstack/nova | nova/scheduler/api.py | Python | apache-2.0 | 9,884 | 0.001012 |
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to schedulers.
"""
import novaclient
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from eventlet import greenpool
FLAGS = flags.FLAGS
flags.DEFINE_bool('enable_zone_routing',
False,
'When True, routing to child zones will occur.')
LOG = logging.getLogger('nova.scheduler.api')
def _call_scheduler(method, context, params=None):
"""Generic handler for RPC calls to the scheduler.
:param params: Optional dictionary of arguments to be passed to the
scheduler worker
:retval: Result returned by scheduler worker
"""
if not params:
params = {}
queue = FLAGS.scheduler_topic
kwargs = {'method': method, 'args': params}
return rpc.call(context, queue, kwargs)
def get_zone_list(context):
"""Return a list of zones assoicated with this zone."""
items = _call_scheduler('get_zone_list', context)
for item in items:
item['api_url'] = item['api_url'].replace('\\/', '/')
if not items:
items = db.zone_get_all(context)
return items
def zone_get(context, zone_id):
return db.zone_get(context, zone_id)
def zone_delete(context, zone_id):
return db.zone_delete(context, zone_id)
def zone_create(context, data):
return db.zone_create(context, data)
def zone_update(context, zone_id, data):
return db.zone_update(context, zone_id, data)
def get_zone_capabilities(context):
"""Returns a dict of key, value capabilities for this zone."""
return _call_scheduler('get_zone_capabilities', context=context)
def select(context, specs=None):
"""Returns a list of hosts."""
return _call_scheduler('select', context=context,
params={"specs": specs})
def update_service_capabilities(context, service_name, host, capabilities):
"""Send an update to all the scheduler services informing them
of the capabilities of this service."""
kwargs = dict(method='update_service_capabilities',
args=dict(service_name=service_name, host=host,
capabilities=capabilities))
return rpc.fanout_cast(context, 'scheduler', kwargs)
def _wrap_method(function, self):
"""Wrap method to supply self."""
def _wrap(*args, **kwargs):
return function(self, *args, **kwargs)
return _wrap
def _process(func, zone):
"""Worker stub for green thread pool. Give the worker
an authenticated nova client and zone info."""
nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
nova.authenticate()
return func(nova, zone)
def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
"""Returns a list of (zone, call_result) objects."""
if not isinstance(errors_to_ignore, (list, tuple)):
# This will also handle the default None
errors_to_ignore = [errors_to_ignore]
pool = greenpool.GreenPool()
results = []
for zone in db.zone_get_all(context):
try:
nova = novaclient.OpenStack(zone.username, zone.password,
zone.api_url)
nova.authenticate()
except novaclient.exceptions.BadRequest, e:
url = zone.api_url
LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s")
% locals())
#TODO (dabo) - add logic for failure counts per zone,
# with escalation after a given number of failures.
continue
zone_method = getattr(nova.zones, method)
def _error_trap(*args, **kwargs):
try:
return zone_method(*args, **kwargs)
except Exception as e:
if type(e) in errors_to_ignore:
return None
# TODO (dabo) - want to be able to re-raise here.
# Returning a string now; raising was causing issues.
# raise e
return "ERROR", "%s" % e
res = pool.spawn(_error_trap, *args, **kwargs)
results.append((zone, res))
pool.waitall()
return [(zone.id, res.wait()) for zone, res in results]
def child_zone_helper(zone_list, func):
"""Fire off a command to each zone in the list.
The return is [novaclient return objects] from each child zone.
For example, if you are calling server.pause(), the list will
be whatever the response from server.pause() is. One entry
per child zone called."""
green_pool = greenpool.GreenPool()
return [result for result in green_pool.imap(
_wrap_method(_process, func), zone_list)]
def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
"""Use novaclient to issue command to a single child zone.
One of these will be run in parallel for each child zone."""
manager = getattr(nova, collection)
result = None
try:
try:
result = manager.get(int(item_id))
except ValueError, e:
result = manager.find(name=item_id)
except novaclient.NotFound:
url = zone.api_url
LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
locals()))
return None
if method_name.lower() not in ['get', 'find']:
result = getattr(result, method_name)()
return result
def wrap_novaclient_function(f, collection, method_name, item_id):
"""Appends collection, method_name and item_id to the incoming
(nova, zone) call from child_zone_helper."""
def inner(nova, zone):
return f(nova, zone, collection, method_name, item_id)
return inner
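# Illustrative sketch (assumed; not from the original api.py). The two
# helpers above combine the same way reroute_compute does further down,
# e.g. asking every child zone to pause server 42 ('servers', 'pause' and
# 42 are made-up arguments):
#
#     results = child_zone_helper(
#         db.zone_get_all(context),
#         wrap_novaclient_function(_issue_novaclient_command,
#                                  'servers', 'pause', 42))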
class RedirectResult(exception.Error):
"""Used to the HTTP API know that these results are pre-cooked
and they can be returned to the caller directly."""
def __init__(self, results):
self.results = results
super(RedirectResult, self).__init__(
message=_("Uncaught Zone redirection exception"))
class reroute_compute(object):
"""Decorator used to indicate that the method should
delegate the call to the child zones if the db query
can't find anything."""
def __init__(self, method_name):
self.method_name = method_name
def __call__(self, f):
def wrapped_f(*args, **kwargs):
collection, context, item_id = \
self.get_collection_context_and_id(args, kwargs)
try:
# Call the original function ...
return f(*args, **kwargs)
except exception.InstanceNotFound, e:
LOG.debug(_("Instance %(item_id)s not found "
"locally: '%(e)s'" % locals()))
if not FLAGS.enable_zone_routing:
raise
zones = db.zone_get_all(context)
if not zones:
raise
# Ask the children to provide an answer ...
LOG.debug(_("Asking child zones ..."))
result = self._call_child_zones(zones,
wrap_novaclient_function(_issue_novaclient_command,
collection, self.method_name, item_id))
# Scrub the results and raise another exception
# so the API layers can bail out gracefully ...
raise RedirectResult(self.unmarshall_result(result))
return wrapped_f
def _call_child_zones(self, zones, functio
|
qutip/qutip | qutip/tests/test_mkl.py | Python | bsd-3-clause | 3,447 | 0 |
import pytest
import numpy as np
import scipy.linalg
import scipy.sparse
import qutip
if qutip.settings.has_mkl:
from qutip._mkl.spsolve import mkl_splu, mkl_spsolve
pytestmark = [
pytest.mark.skipif(not qutip.settings.has_mkl,
reason='MKL extensions not found.'),
]
class Test_spsolve:
def test_single_rhs_vector_real(self):
Adense = np.array([[0, 1, 1],
[1, 0, 1],
[0, 0, 1]])
As = scipy.sparse.csr_matrix(Adense)
np.random.seed(1234)
x = np.random.randn(3)
b = As * x
x2 = mkl_spsolve(As, b, verbose=True)
np.testing.assert_allclose(x, x2)
def test_single_rhs_vector_complex(self):
A = qutip.rand_herm(10)
x = qutip.rand_ket(10).full()
b = A.full() @ x
y = mkl_spsolve(A.data, b, verbose=True)
np.testing.assert_allclose(x, y)
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
def test_multi_rhs_vector(self, dtype):
M = np.array([
[1, 0, 2],
[0, 0, 3],
[-4, 5, 6],
], dtype=dtype)
sM = scipy.sparse.csr_matrix(M)
N = np.array([
[3, 0, 1],
[0, 2, 0],
[0, 0, 0],
|
], dtype=dtype)
sX = mkl_spsolve(sM, N, verbose=True)
X = scipy.linalg.solve(M, N)
np.testing.assert_allclose(X, sX)
def test_rhs_shape_is_maintained(self):
A = scipy.sparse.csr_matrix(np.array([
[1, 0, 2],
[0, 0, 3],
[-4, 5, 6],
], dtype=np.complex128))
b = np.array([0, 2, 0], dtype=np.complex128)
out = mkl_spsolve(A, b, verbose=True)
assert b.shape == out.shape
b = np.array([0, 2, 0], dtype=np.complex128).reshape((3, 1))
out = mkl_spsolve(A, b, verbose=True)
assert b.shape == out.shape
def test_sparse_rhs(self):
A = scipy.sparse.csr_matrix([
[1, 2, 0],
[0, 3, 0],
[0, 0, 5],
])
b = scipy.sparse.csr_matrix([
[0, 1],
[1, 0],
[0, 0],
])
x = mkl_spsolve(A, b, verbose=True)
ans = np.array([[-0.66666667, 1],
[0.33333333, 0],
[0, 0]])
np.testing.assert_allclose(x.toarray(), ans)
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
def test_symmetric_solver(self, dtype):
A = qutip.rand_herm(np.arange(1, 11)).data
if dtype == np.float64:
A = A.real
x = np.ones(10, dtype=dtype)
b = A.dot(x)
y = mkl_spsolve(A, b, hermitian=1, verbose=True)
np.testing.assert_allclose(x, y)
class Test_splu:
@pytest.mark.parametrize('dtype', [np.float64, np.complex128])
def test_repeated_rhs_solve(self, dtype):
M = np.array([
[1, 0, 2],
[0, 0, 3],
[-4, 5, 6],
], dtype=dtype)
sM = scipy.sparse.csr_matrix(M)
N = np.array([
[3, 0, 1],
[0, 2, 0],
[0, 0, 0],
], dtype=dtype)
test_X = np.zeros((3, 3), dtype=dtype)
lu = mkl_splu(sM, verbose=True)
for k in range(3):
test_X[:, k] = lu.solve(N[:, k])
lu.delete()
expected_X = scipy.linalg.solve(M, N)
np.testing.assert_allclose(test_X, expected_X)
|
mitchcapper/mythbox | resources/lib/mysql-connector-python/python3/examples/engines.py | Python | gpl-2.0 | 1,836 | 0.002179 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
|
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, os
import mysql.connector
"""
Example using MySQL Connector/Python showing:
* that show engines works..
"""
def main(config):
output = []
db = mysql.connector.Connect(**config)
cursor = db.cursor()
# Select it again and show it
stmt_select = "SHOW ENGINES"
cursor.execute(stmt_select)
rows = cursor.fetchall()
for row in rows:
output.append(repr(row))
db.close()
return output
if __name__ == '__main__':
#
# Configure MySQL login and database to use in config.py
#
import config
config = config.Config.dbinfo().copy()
out = main(config)
print('\n'.join(out))
|
rjw57/edpcmentoring | docs/conf.py | Python | mit | 10,393 | 0.005966 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# EDPC Mentoring Database documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 28 23:28:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import django
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Add the project top-level directory to the import path so that we may find
# Django applications.
sys.path.insert(0, os.path.abspath(os.path.join('..', 'edpcmentoring')))
# Since we document some classes which make use of Django constructs, we need to
# make sure Django is configured.
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"edpcmentoring.settings_development")
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'EDPC Mentoring Database'
copyright = '2016, EDPC'
author = 'EDPC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'EDPC Mentoring Database v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'EDPCMentoringDatabasedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'let
|
superisaac/django-mljson-serializer
|
django_mljson/serializer.py
|
Python
|
mit
| 2,206
| 0.001813
|
"""
Serialize data to/from JSON
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import, unicode_literals
import datetime
import decimal
import json
import sys
import uuid
from io import BytesIO
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import six
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def _init_options(self):
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
se
|
lf.options.update({'use_decimal': False})
self._current = None
self.json_kwarg
|
s = self.options.copy()
self.json_kwargs.pop('stream', None)
self.json_kwargs.pop('fields', None)
def start_serialization(self):
self._init_options()
def end_serialization(self):
'''
Do nothing
'''
def end_object(self, obj):
# self._current has the field data
json.dump(self.get_dump_object(obj), self.stream,
cls=DjangoJSONEncoder, **self.json_kwargs)
self.stream.write('\n')
self._current = None
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if isinstance(stream_or_string, (bytes, six.string_types)):
stream_or_string = BytesIO(stream_or_string)
try:
def line_generator():
for line in stream_or_string:
yield json.loads(line.strip())
for obj in PythonDeserializer(line_generator(), **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
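# Illustrative usage sketch (the format name 'mljson' and the model used below
# are assumptions, not part of this module):
#
#   # settings.py
#   SERIALIZATION_MODULES = {'mljson': 'django_mljson.serializer'}
#
#   # each object is written as one JSON document per line:
#   from django.core import serializers
#   data = serializers.serialize('mljson', MyModel.objects.all())
#   objects = list(serializers.deserialize('mljson', data))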
|
RevansChen/online-judge
|
Codewars/7kyu/build-a-square/Python/test.py
|
Python
|
mit
| 191
| 0.005236
|
# Python - 3.6.0
test.assert_equals(generateShape(3), '+++\n+++\n+++')
test.assert_equals(genera
|
teShape(8), '++++++++\n++++++++\n++++++++\n++++++++\n++++++++\n++++++++\n++++++++\n++
|
++++++')
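# A minimal solution sketch consistent with the expected outputs above; on
# Codewars the function itself would normally live in the solution file, not in test.py.
def generateShape(n):
    # n rows of n '+' characters, joined by newlines
    return '\n'.join('+' * n for _ in range(n))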
|
jek/flatland
|
flatland/schema/base.py
|
Python
|
mit
| 29,692
| 0.000404
|
# -*- coding: utf-8; fill-column: 78 -*-
import collections
import itertools
import operator
from flatland.schema.paths import pathexpr
from flatland.signals import validator_validated
from flatland.util import (
Unspecified,
assignable_class_property,
class_cloner,
named_int_factory,
symbol,
)
__all__ = 'Element'
NoneType = type(None)
Root = symbol('Root')
NotEmpty = symbol('NotEmpty')
Skip = named_int_factory('Skip', True, doc="""\
Abort validation of the element & mark as valid.
""")
SkipAll = named_int_factory('SkipAll', True, doc="""\
Abort validation of the element and its children & mark as valid.
The :attr:`~Element.valid` of child elements will not be changed by skipping.
Unless otherwise set, the child elements will retain the default value
(:obj:`Unevaluated`). Only meaningful during a descent validation. Functions
as :obj:`Skip` on upward validation.
""")
SkipAllFalse = named_int_factory('SkipAllFalse', False, doc="""\
Aborts validation of the element and its children & mark as invalid.
The :attr:`~Element.valid` of child elements will n
|
ot be changed by skipping.
Unless otherwise set, the child elements will retain the default value
(:obj:`Unevaluated`). Only meaningful during a descent validation. Functions
as ``False`` on upward validation.
""")
Unevaluated = named_int_factory('Unevaluated', True, doc="""\
A pseudo-boolean representing a presumptively valid state.
Assigned to
|
newly created elements that have never been evaluated by
:meth:`Element.validate`. Evaluates to true.
""")
# TODO: implement a lighter version of the xml quoters
xml = None
class _BaseElement(object):
# Required by the genshi support's __bases__ manipulation, unfortunately.
pass
class Element(_BaseElement):
"""Base class for form fields.
A data node that stores a Python and a text value plus added state.
"""
name = None
"""The Unicode name of the element."""
optional = False
"""If True, :meth:`validate` with return True if no value has been set.
:attr:`validators` are not called for optional, empty elements.
"""
validators = ()
"""A sequence of validators, invoked by :meth:`validate`.
See `Validation`_
"""
default = None
"""The default value of this element."""
default_factory = None
"""A callable to generate default element values. Passed an element.
*default_factory* will be used preferentially over :attr:`default`.
"""
ugettext = None
"""If set, provides translation support to validation messages.
See `Message Internationalization`_.
"""
ungettext = None
"""If set, provides translation support to validation messages.
See `Message Internationalization`_.
"""
value = None
"""The element's native Python value.
Only validation routines should write this attribute directly: use
:meth:`set` to update the element's value.
"""
u = u''
"""A Unicode representation of the element's value.
As in :attr:`value`, writing directly to this attribute should be
restricted to validation routines.
"""
flattenable = False
children_flattenable = True
validates_down = None
validates_up = None
def __init__(self, value=Unspecified, **kw):
self.parent = kw.pop('parent', None)
self.valid = Unevaluated
self.errors = []
self.warnings = []
# FIXME This (and 'using') should also do descent_validators
# via lookup - or don't copy at all
if 'validators' in kw:
kw['validators'] = list(kw['validators'])
for attribute, override in kw.items():
if hasattr(self, attribute):
setattr(self, attribute, override)
else:
raise TypeError(
"%r is an invalid keyword argument: not a known "
"argument or an overridable class property of %s" % (
attribute, type(self).__name__))
if value is not Unspecified:
self.set(value)
@class_cloner
def named(cls, name):
"""Return a class with ``name`` = *name*
:param name: a string or None. ``str`` will be converted to
``unicode``.
:returns: a new class
"""
if not isinstance(name, (unicode, NoneType)):
name = unicode(name)
cls.name = name
return cls
@class_cloner
def using(cls, **overrides):
"""Return a class with attributes set from *\*\*overrides*.
:param \*\*overrides: new values for any attributes already present on
the class. A ``TypeError`` is raised for unknown attributes.
:returns: a new class
"""
# TODO: See TODO in __init__
if 'validators' in overrides:
overrides['validators'] = list(overrides['validators'])
for attribute, value in overrides.iteritems():
# TODO: must make better
if callable(value):
value = staticmethod(value)
if hasattr(cls, attribute):
setattr(cls, attribute, value)
continue
raise TypeError(
"%r is an invalid keyword argument: not a known "
"argument or an overridable class property of %s" % (
attribute, cls.__name__))
return cls
@class_cloner
def validated_by(cls, *validators):
"""Return a class with validators set to *\*validators*.
:param \*validators: one or more validator functions, replacing any
validators present on the class.
:returns: a new class
"""
# TODO: See TODO in __init__
for validator in validators:
# metaclass gymnastics can fool this assertion. don't do that.
if isinstance(validator, type):
raise TypeError(
"Validator %r is a type, not a callable or instance of a"
"validator class. Did you mean %r()?" % (
validator, validator))
cls.validators = list(validators)
return cls
@class_cloner
def including_validators(cls, *validators, **kw):
"""Return a class with additional *\*validators*.
:param \*validators: one or more validator functions
:param position: defaults to -1. By default, additional validators
are placed after existing validators. Use 0 for before, or any
other list index to splice in *validators* at that point.
:returns: a new class
"""
position = kw.pop('position', -1)
if kw:
raise TypeError('including_validators() got an '
'unexpected keyword argument %r' % (
kw.popitem()[0]))
mutable = list(cls.validators)
if position < 0:
position = len(mutable) + 1 + position
mutable[position:position] = list(validators)
cls.validators = mutable
return cls
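    # Illustrative usage sketch (not part of flatland itself): the class
    # cloners above are normally chained when declaring a field, e.g.
    #
    #   from flatland import String              # assumed concrete Element subclass
    #   Name = String.named('name').using(optional=True)
    #   Name = Name.including_validators(my_validator, position=0)
    #   element = Name('Biff')
    #   element.validate()
    #
    # Each call returns a new subclass; the original class is left untouched.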
def validate_element(self, element, state, descending):
"""Assess the validity of an element.
TODO: this method is dead. Evaluate docstring for good bits that
should be elsewhere.
:param element: an :class:`Element`
:param state: may be None, an optional value of supplied to
``element.validate``
:param descending: a boolean, True the first time the element
has been seen in this run, False the next
:returns: boolean; a truth value or None
The :meth:`Element.validate` process visits each element in
the tree twice: once heading down the tree, breadth-first, and
again heading back up in the reverse direction. Scalar fields
will typically validate on the first pass, and containers on
the second.
Return no value or None to ``pass``, accepting the element as
presumptively valid.
Exceptions raised by :meth:`validate_element` will not be
caught by :meth:`Element.validate`.
Directly modifying and normalizing :attr:`Eleme
|
TribeMedia/synapse
|
tests/util/test_lrucache.py
|
Python
|
apache-2.0
| 7,584
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the Li
|
cense.
from .. import unittest
from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.treecache import TreeCache
from mock import Mock
class LruCacheTestCase(unittest.TestCase):
def test_get_set(self):
cache = LruCache(1)
cache["key"] = "value"
self.assertEquals(cache.get("key"), "value")
self.assertEquals(cache["key"], "value")
def test_eviction(self):
cache = LruCache(2)
cache[1] = 1
cache[2] = 2
|
self.assertEquals(cache.get(1), 1)
self.assertEquals(cache.get(2), 2)
cache[3] = 3
self.assertEquals(cache.get(1), None)
self.assertEquals(cache.get(2), 2)
self.assertEquals(cache.get(3), 3)
def test_setdefault(self):
cache = LruCache(1)
self.assertEquals(cache.setdefault("key", 1), 1)
self.assertEquals(cache.get("key"), 1)
self.assertEquals(cache.setdefault("key", 2), 1)
self.assertEquals(cache.get("key"), 1)
cache["key"] = 2 # Make sure overriding works.
self.assertEquals(cache.get("key"), 2)
def test_pop(self):
cache = LruCache(1)
cache["key"] = 1
self.assertEquals(cache.pop("key"), 1)
self.assertEquals(cache.pop("key"), None)
def test_del_multi(self):
cache = LruCache(4, 2, cache_type=TreeCache)
cache[("animal", "cat")] = "mew"
cache[("animal", "dog")] = "woof"
cache[("vehicles", "car")] = "vroom"
cache[("vehicles", "train")] = "chuff"
self.assertEquals(len(cache), 4)
self.assertEquals(cache.get(("animal", "cat")), "mew")
self.assertEquals(cache.get(("vehicles", "car")), "vroom")
cache.del_multi(("animal",))
self.assertEquals(len(cache), 2)
self.assertEquals(cache.get(("animal", "cat")), None)
self.assertEquals(cache.get(("animal", "dog")), None)
self.assertEquals(cache.get(("vehicles", "car")), "vroom")
self.assertEquals(cache.get(("vehicles", "train")), "chuff")
# Man from del_multi say "Yes".
def test_clear(self):
cache = LruCache(1)
cache["key"] = 1
cache.clear()
self.assertEquals(len(cache), 0)
class LruCacheCallbacksTestCase(unittest.TestCase):
def test_get(self):
m = Mock()
cache = LruCache(1)
cache.set("key", "value")
self.assertFalse(m.called)
cache.get("key", callbacks=[m])
self.assertFalse(m.called)
cache.get("key", "value")
self.assertFalse(m.called)
cache.set("key", "value2")
self.assertEquals(m.call_count, 1)
cache.set("key", "value")
self.assertEquals(m.call_count, 1)
def test_multi_get(self):
m = Mock()
cache = LruCache(1)
cache.set("key", "value")
self.assertFalse(m.called)
cache.get("key", callbacks=[m])
self.assertFalse(m.called)
cache.get("key", callbacks=[m])
self.assertFalse(m.called)
cache.set("key", "value2")
self.assertEquals(m.call_count, 1)
cache.set("key", "value")
self.assertEquals(m.call_count, 1)
def test_set(self):
m = Mock()
cache = LruCache(1)
cache.set("key", "value", callbacks=[m])
self.assertFalse(m.called)
cache.set("key", "value")
self.assertFalse(m.called)
cache.set("key", "value2")
self.assertEquals(m.call_count, 1)
cache.set("key", "value")
self.assertEquals(m.call_count, 1)
def test_pop(self):
m = Mock()
cache = LruCache(1)
cache.set("key", "value", callbacks=[m])
self.assertFalse(m.called)
cache.pop("key")
self.assertEquals(m.call_count, 1)
cache.set("key", "value")
self.assertEquals(m.call_count, 1)
cache.pop("key")
self.assertEquals(m.call_count, 1)
def test_del_multi(self):
m1 = Mock()
m2 = Mock()
m3 = Mock()
m4 = Mock()
cache = LruCache(4, 2, cache_type=TreeCache)
cache.set(("a", "1"), "value", callbacks=[m1])
cache.set(("a", "2"), "value", callbacks=[m2])
cache.set(("b", "1"), "value", callbacks=[m3])
cache.set(("b", "2"), "value", callbacks=[m4])
self.assertEquals(m1.call_count, 0)
self.assertEquals(m2.call_count, 0)
self.assertEquals(m3.call_count, 0)
self.assertEquals(m4.call_count, 0)
cache.del_multi(("a",))
self.assertEquals(m1.call_count, 1)
self.assertEquals(m2.call_count, 1)
self.assertEquals(m3.call_count, 0)
self.assertEquals(m4.call_count, 0)
def test_clear(self):
m1 = Mock()
m2 = Mock()
cache = LruCache(5)
cache.set("key1", "value", callbacks=[m1])
cache.set("key2", "value", callbacks=[m2])
self.assertEquals(m1.call_count, 0)
self.assertEquals(m2.call_count, 0)
cache.clear()
self.assertEquals(m1.call_count, 1)
self.assertEquals(m2.call_count, 1)
def test_eviction(self):
m1 = Mock(name="m1")
m2 = Mock(name="m2")
m3 = Mock(name="m3")
cache = LruCache(2)
cache.set("key1", "value", callbacks=[m1])
cache.set("key2", "value", callbacks=[m2])
self.assertEquals(m1.call_count, 0)
self.assertEquals(m2.call_count, 0)
self.assertEquals(m3.call_count, 0)
cache.set("key3", "value", callbacks=[m3])
self.assertEquals(m1.call_count, 1)
self.assertEquals(m2.call_count, 0)
self.assertEquals(m3.call_count, 0)
cache.set("key3", "value")
self.assertEquals(m1.call_count, 1)
self.assertEquals(m2.call_count, 0)
self.assertEquals(m3.call_count, 0)
cache.get("key2")
self.assertEquals(m1.call_count, 1)
self.assertEquals(m2.call_count, 0)
self.assertEquals(m3.call_count, 0)
cache.set("key1", "value", callbacks=[m1])
self.assertEquals(m1.call_count, 1)
self.assertEquals(m2.call_count, 0)
self.assertEquals(m3.call_count, 1)
class LruCacheSizedTestCase(unittest.TestCase):
def test_evict(self):
cache = LruCache(5, size_callback=len)
cache["key1"] = [0]
cache["key2"] = [1, 2]
cache["key3"] = [3]
cache["key4"] = [4]
self.assertEquals(cache["key1"], [0])
self.assertEquals(cache["key2"], [1, 2])
self.assertEquals(cache["key3"], [3])
self.assertEquals(cache["key4"], [4])
self.assertEquals(len(cache), 5)
cache["key5"] = [5, 6]
self.assertEquals(len(cache), 4)
self.assertEquals(cache.get("key1"), None)
self.assertEquals(cache.get("key2"), None)
self.assertEquals(cache["key3"], [3])
self.assertEquals(cache["key4"], [4])
self.assertEquals(cache["key5"], [5, 6])
|
inkfountain/learn-py-a-little
|
lesson_file/lesson.py
|
Python
|
gpl-2.0
| 1,662
| 0.011044
|
# 1. `if __name__ == "__main__":`
'''
__name__ tells you how the current .py file is being invoked.
If it equals "__main__", the file is being executed directly; otherwise the
file is being imported and used by another file.
The check is usually written at the end of the file.
See the layout of format.py and wordsCount.py.
'''
# 2. Functions
'''
See the formatLines function in format.py
def xxx():
    # function body
'''
# 3. if statements
'''
if xx:
    # xxx
elif xxx:
    # xxx
elif xxx:
    # xxx
else:
    # xxx
`else` covers every remaining case; that branch must be placed
|
last, and it may be omitted.
`elif` may appear several times, once, or not at all.
'''
# 4. Lists
'''
A list is a comma-separated collection of values wrapped in square brackets, for example:
'''
numList = [3, 4, 5, 6, 7]
strList = ['he ', 'is ', 'a ', 'dog']
dList = [[3,4], [5,6]] # a list whose elements are lists
# 5. for loops
'''
Iterate over the list numList and print every value.
'''
for i in numList:
    print i
# 6. Reading and writing files
infile = open(r'd:\\a.txt', 'r') # 'r' means read; infile is the opened file object
lines = infile.readlines() # read all lines of the file and store them, one line per element, in the list lines
infile.close() # close the file
outfile = open(r'd:\\
|
outfile.txt', 'w') # 'w' means write; outfile is the file that content will be written to
outfile.write('hello world' + '\n') # write content to the file
outfile.close() # close the file
# 7. Exercise
'''
For the document words.txt, count how many words start with the lowercase letters a, b and c respectively; build on top of wordsCount.py.
Note: this involves reading a file; what you read back is a list of strings, and the counting is done by iterating over that list.
'''
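# A possible solution sketch for the exercise above (assumptions: words.txt
# sits in the same directory and holds one word per line; adjust the path and
# parsing to match wordsCount.py).
import os
if os.path.exists('words.txt'):
    wordFile = open('words.txt', 'r')
    words = wordFile.readlines()
    wordFile.close()
    counts = {'a': 0, 'b': 0, 'c': 0}
    for word in words:
        word = word.strip()
        if word and word[0] in counts:
            counts[word[0]] = counts[word[0]] + 1
    print counts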
|
stuckj/dupeguru
|
core/tests/ignore_test.py
|
Python
|
gpl-3.0
| 4,306
| 0.019508
|
# Created By: Virgil Dupras
# Created On: 2006/05/02
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import io
from xml.etree import ElementTree as ET
from pytest import raises
from hscommon.testutil import eq_
from ..ignore import *
def test_empty():
il = IgnoreList()
eq_(0,len(il))
assert not il.AreIgnored('foo','bar')
def test_simple():
il = IgnoreList()
il.Ignore('foo','bar')
assert il.AreIgnored('foo','bar')
assert il.AreIgnored('bar','foo')
assert not il.AreIgnored('foo','bleh')
assert not il.AreIgnored('bleh','bar')
eq_(1,len(il))
def test_multiple():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('foo','bleh')
il.Ignore('bleh','bar')
il.Ignore('aybabtu','bleh')
assert il.AreIgnored('foo','bar')
assert il.AreIgnored('bar','foo')
assert il.AreIgnored('foo','bleh')
assert il.AreIgnored('bleh','bar')
assert not il.AreIgnored('aybabtu','bar')
eq_(4,len(il))
def test_clear():
il = IgnoreList()
il.Ignore('foo','bar')
il.Clear()
assert not il.AreIgno
|
red('foo','ba
|
r')
assert not il.AreIgnored('bar','foo')
eq_(0,len(il))
def test_add_same_twice():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('bar','foo')
eq_(1,len(il))
def test_save_to_xml():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('foo','bleh')
il.Ignore('bleh','bar')
f = io.BytesIO()
il.save_to_xml(f)
f.seek(0)
doc = ET.parse(f)
root = doc.getroot()
eq_(root.tag, 'ignore_list')
eq_(len(root), 2)
eq_(len([c for c in root if c.tag == 'file']), 2)
f1, f2 = root[:]
subchildren = [c for c in f1 if c.tag == 'file'] + [c for c in f2 if c.tag == 'file']
eq_(len(subchildren), 3)
def test_SaveThenLoad():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('foo', 'bleh')
il.Ignore('bleh', 'bar')
il.Ignore('\u00e9', 'bar')
f = io.BytesIO()
il.save_to_xml(f)
f.seek(0)
il = IgnoreList()
il.load_from_xml(f)
eq_(4,len(il))
assert il.AreIgnored('\u00e9','bar')
def test_LoadXML_with_empty_file_tags():
f = io.BytesIO()
f.write(b'<?xml version="1.0" encoding="utf-8"?><ignore_list><file><file/></file></ignore_list>')
f.seek(0)
il = IgnoreList()
il.load_from_xml(f)
eq_(0,len(il))
def test_AreIgnore_works_when_a_child_is_a_key_somewhere_else():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('bar','baz')
assert il.AreIgnored('bar','foo')
def test_no_dupes_when_a_child_is_a_key_somewhere_else():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('bar','baz')
il.Ignore('bar','foo')
eq_(2,len(il))
def test_iterate():
#It must be possible to iterate through ignore list
il = IgnoreList()
expected = [('foo','bar'),('bar','baz'),('foo','baz')]
for i in expected:
il.Ignore(i[0],i[1])
for i in il:
expected.remove(i) #No exception should be raised
assert not expected #expected should be empty
def test_filter():
il = IgnoreList()
il.Ignore('foo','bar')
il.Ignore('bar','baz')
il.Ignore('foo','baz')
il.Filter(lambda f,s: f == 'bar')
eq_(1,len(il))
assert not il.AreIgnored('foo','bar')
assert il.AreIgnored('bar','baz')
def test_save_with_non_ascii_items():
il = IgnoreList()
il.Ignore('\xac', '\xbf')
f = io.BytesIO()
try:
il.save_to_xml(f)
except Exception as e:
raise AssertionError(str(e))
def test_len():
il = IgnoreList()
eq_(0,len(il))
il.Ignore('foo','bar')
eq_(1,len(il))
def test_nonzero():
il = IgnoreList()
assert not il
il.Ignore('foo','bar')
assert il
def test_remove():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('foo', 'baz')
il.remove('bar', 'foo')
eq_(len(il), 1)
assert not il.AreIgnored('foo', 'bar')
def test_remove_non_existant():
il = IgnoreList()
il.Ignore('foo', 'bar')
il.Ignore('foo', 'baz')
with raises(ValueError):
il.remove('foo', 'bleh')
|
tvictor20/tvictor-advprog
|
aquaponics/app.py
|
Python
|
gpl-3.0
| 1,404
| 0.003561
|
import time
import RPi.GPIO as GPIO
from flask import Flask, render_template
# GPIO and Sensors =====
|
=======================================================
# Objects to represent sensors used to get water level
class WaterLevelSensor:
# how high the sensor is above the top of the fish tank
|
offset = 0
def __init__(self, echo, trig):
self.echo_pin = echo
self.trig_pin = trig
GPIO.setup(self.trig_pin, GPIO.OUT, initial=0)
GPIO.setup(self.echo_pin, GPIO.IN)
# gets the time it took for the sound to return, in microseconds
def pulse_in(self):
GPIO.output(self.trig_pin, 1)
time.sleep(0.05)
GPIO.output(self.trig_pin, 0)
start = time.clock()
GPIO.wait_for_edge(self.echo_pin, GPIO.RISING)
return time.clock() - start
# returns how far away the water is from the top of the tank in centimeters
def read_water_level(self):
# the speed of sound is ~343 m/s
val = self.pulse_in() * 0.000001715 # ((1 / 1,000,000) * 343) / 2,000
        return val - self.offset
# Webpage =====================================================================
app = Flask(__name__)
# Posts new readings to the webpage
@app.route('/')
def display_info():
    reading = 0
    return render_template('info.html', height=reading)
if __name__ == '__main__':
GPIO.setmode(GPIO.BCM)
app.run('127.0.0.1', 8000)
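# Illustrative usage sketch (not part of the original app): reading the sensor
# once with hypothetical BCM pin numbers; the actual wiring may differ.
#
#   GPIO.setmode(GPIO.BCM)
#   sensor = WaterLevelSensor(echo=24, trig=23)
#   print(sensor.read_water_level())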
|
LeBarbouze/tunacell
|
tunacell/base/cell.py
|
Python
|
mit
| 15,188
| 0.00079
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module defines how cells are stored as tunacell's objects
"""
from __future__ import print_function
import numpy as np
import warnings
import treelib as tlib
from tunacell.base.observable import Observable, FunctionalObservable
from tunacell.base.datatools import (Coordinates, compute_rates,
extrapolate_endpoints,
derivative, logderivative, ExtrapolationError)
class CellError(Exception):
pass
class CellChildsError(CellError):
pass
class CellParentError(CellError):
pass
class CellDivisionError(CellError):
pass
class Cell(tlib.Node):
"""General class to handle cell data structure.
Inherits from treelib.Node class to facilitate tree building.
Parameters
----------
identifier : str
cell identifier
container : :class:`Container` instance
container to which cell belongs
Attributes
----------
container : :class:`Container` instance
        container to which cell belongs
childs : list of :class:`Cell` instances
daughter cells of current cell
parent : :class:`Cell` instance
mother cell of current cell
birth_time : float (default None)
time of cell birth (needs to be computed)
division_time : float (default None)
time of cell division (needs to be computed)
Methods
-------
set_division_events()
computes birth/division times when possible
build(obs)
builds timeseries, uses one of the following methods depending on obs
build_timelapse(obs)
builds and stores timeseries associated to obs, in 'dynamics' mode
build_cyclized(obs)
builds and stores cell-cycle value associated to obs, not in 'dynamics'
mode
"""
def __init__(self, identifier=None, container=None):
tlib.Node.__init__(self, identifier=identifier)
self._childs = []
self._parent = None
self._birth_time = None
self._division_time = None
self._sdata = {} # dictionary to contain computed data
self._protected_against_build = set() # set of obs not to re-build
self.container = container # point to Container instance
# cells are built from a specific container instance
# container can be a given field of view, a channel, a microcolony, ...
return
# We add few definitions to be able to chain between Cell instances
@property
def childs(self):
"Get list of child instances."
return self._childs
@childs.setter
def childs(self, value):
if value is None:
self._childs = []
elif isinstance(value, list):
for item in value:
self.childs = item
elif isinstance(value, Cell):
self._childs.append(value)
else:
raise CellChildsError
@property
def parent(self):
"Get parent instance."
return self._parent
@parent.setter
def parent(self, pcell):
if pcell is None:
self._parent = None
elif isinstance(pcell, Cell):
self._parent = pcell
else:
raise CellParentError
@property
def birth_time(self):
"Get cell cycle start time. See below for Setter."
return self._birth_time
@birth_time.setter
def birth_time(self, value):
"Set cell cycle start time. See above for Getter."
self._birth_time = value
@property
def division_time(self):
"Get cell cycle end time. See below for Setter."
return self._division_time
@division_time.setter
def division_time(self, value):
"Set cell cycle end time. See above for Getter."
if self.birth_time is not None:
if value < self.birth_time:
raise CellDivisionError
self._division_time = value
def set_division_event(self):
"method to call when parent is identified"
previous_frame = None
if (self.parent is not None) and (self.parent.data is not None):
previous_frame = self.parent.data['time'][-1]
first_frame = None
if self.data is not None:
first_frame = self.data['time'][0]
if previous_frame is not None and first_frame is not None:
div_time = (previous_frame + first_frame)/2. # halfway
self.birth_time = div_time
self.parent.division_time = div_time
return
def __repr__(self):
cid = str(self.identifier)
if self.parent:
pid = str(self.parent.identifier)
else:
pid = '-'
if self.childs:
ch = ','.join(['{}'.format(c.identifier) for c in self.childs])
else:
ch = '-'
return cid+';p:'+pid+';ch:'+ch
def info(self):
dic = {}
dic['a. Identifier'] = '{}'.format(self.identifier)
pid = 'None'
if self.parent:
pid = '{}'.format(self.parent.identifier)
dic['b. Parent id'] = pid
chids = 'None'
if self.childs:
chids = ', '.join(['{}'.format(ch.identifier)
for ch in self.childs])
dic['c. Childs'] = chids
dic['d. Birth time'] = '{}'.format(self.birth_time)
dic['e. Division time'] = '{}'.format(self.division_time)
if self.data is not None:
dic['f. N_frames'] = '{}'.format(len(self.data))
return dic
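    # Illustrative sketch (not part of tunacell): linking two cells by hand.
    # The identifiers are hypothetical, and each cell's .data must expose a
    # 'time' array before set_division_event() can compute birth/division times.
    #
    #   mother = Cell(identifier='1')
    #   daughter = Cell(identifier='1.1')
    #   daughter.parent = mother
    #   mother.childs = daughter
    #   daughter.set_division_event()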
def protect_again
|
st_build(self, obs):
"""Protect current cell against building obs array/value"""
self._protected_against_build.add(obs)
return
def build(self, obs):
"""Builds timeseries"""
if obs in self._protected_against_build:
return
if isinstance(obs, FunctionalObservable):
# first buil
|
d every single Observable
for item in obs.observables:
self.build(item)
arrays = [self._sdata[item.label] for item in obs.observables]
self._sdata[obs.label] = obs.f(*arrays)
elif isinstance(obs, Observable):
if obs.mode == 'dynamics':
self.build_timelapse(obs)
else:
self.compute_cyclized(obs)
else:
raise TypeError('obs must be of type Observable or FunctionalObservable')
def build_timelapse(self, obs):
"""Builds timeseries corresponding to observable of mode 'dynamics'.
Result is an array of same length as time array, stored in a dictionary
_sdata, which keys are obs.label. When using sliding windows,
estimate in a given cell actualize data in its parent cell, if and only
if it has not been actualized before (check disjoint time intervals).
Parameters
----------
obs : Observable instance
mode must be 'dynamics'
Note
-----
Some observables carry the 'local_fit' option True. In this case,
local fits over shifting time-windows are performed. If one would keep
only a given cell's data, then the constraints on shifting time-window
        would leave some 'empty' times, at which no evaluation can be performed.
        This is solved by getting data from the cell's parent cell's data. This
        operation computes time-window fitted data in the cell's parent cycle.
Two precautions must then be taken:
1. a given cell's data must be used only once for evaluating parent
cell's data,
2. when data has been used from one daughter cell, concatenate
the current cell's evaluated data to it.
.. warning::
For some computations, the time interval between consecutive
acquisitions is needed. If it's defined in the container or the
experiment metadata, this parameter will be imported; otherwise if
there are at least 2 consecutive values, it will be inferred from
data (at the risk of making mistakes if there are too many missing
val
|
sirk390/coinpy
|
coinpy-lib/src/coinpy/lib/vm/opcode_impl/disabled.py
|
Python
|
lgpl-3.0
| 341
| 0.008798
|
from co
|
inpy.model.scripts.opcodes import OP_2DIV, OP_2MUL, OP_AND, OP_CAT,\
OP_DIV, OP_INVERT, OP_LSHIFT, OP_LEFT, OP_MOD, OP_OR, OP_RIGHT, OP_RSHIFT,\
OP_SUBSTR, OP_XOR, OP_MUL
DISABLED_OPCODES=[OP_CAT, OP_SUBSTR, OP_LEFT, OP_RIGHT, OP_INVERT, OP_AND, O
|
P_OR, OP_XOR, OP_2MUL, OP_2DIV, OP_MUL, OP_DIV, OP_MOD, OP_LSHIFT, OP_RSHIFT]
|
zenoss/ZenPacks.zenoss.Puppet
|
ZenPacks/zenoss/Puppet/BatchDeviceLoader.py
|
Python
|
gpl-2.0
| 26,792
| 0.003844
|
##############################################################################
#
# Copyright (C) Zenoss, Inc. 2009, 2011, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
__doc__ = """zenbatchload
zenbatchload loads a list of devices read from a file.
"""
import sys
import re
from traceback import format_exc
import socket
import Globals
from ZODB.POSException import ConflictError
from ZODB.transact import transact
from zope.component import getUtility
from zope.event import notify
from zExceptions import BadRequest
from ZPublisher.Converters import type_converters
from Products.ZenModel.interfaces import IDeviceLoader
from Products.ZenUtils.ZCmdBase import ZCmdBase
from Products.ZenModel.Device import Device
from Products.ZenRelations.ZenPropertyManager import iszprop
from Products.ZenModel.ZenModelBase import iscustprop
from Products.ZenEvents.ZenEventClasses import Change_Add
from Products.Zuul.catalog.events import IndexingEvent
from Products.ZenUtils.Utils import unused
# We import DateTime so that we can set properties of type DateTime in the batchload
from DateTime import DateTime
unused(DateTime)
from zenoss.protocols.protobufs.zep_pb2 import SEVERITY_INFO, SEVERITY_ERROR
METHODS_TO_SETTINGS = {
'setManageIp': 'manageIp',
'setPerformanceMonitor': 'performanceMonitor',
'setTitle': 'title',
'setHWTag': 'tag',
'setHWSerialNumber': 'serialNumber',
'setProdState': 'productionState',
'setPriority': 'priority',
'setGroups': 'groupPaths',
'setSystems': 'systemPaths',
# these don't have methods but were added for completeness
'setRackSlot': 'rackSlot',
'setComments': 'comments',
# TODO: setHWProduct and setOSProduct (they take multiple parameters)
}
class BatchDeviceLoader(ZCmdBase):
"""
Base class wrapping around dmd.DeviceLoader
"""
sample_configs = """#
# Example zenbatchloader device file
#
# This file is formatted with one entry per line, like this:
#
# /Devices/device_class_name Python-expression
# hostname Python-expression
#
# For organizers (ie the /Devices path), the Python-expression
# is used to define defaults to be used for devices listed
# after the organizer. The defaults that can be specified are:
#
# * loader arguments (use the --show_options flag to show these)
#
# * zProperties (from a device, use the 'Configuration Properties'
# menu item to see the available ones.)
#
# NOTE: new zProperties *cannot* be created through this file
#
# * cProperties (from a device, use the 'Custom Properties'
# menu item to see the available ones.)
#
# NOTE: new cProperties *cannot* be created through this file
#
# The Python-expression is used to create a dictionary of settings.
# device_settings = eval( 'dict(' + python-expression + ')' )
#
# Setting locations
/Locations/Canada address="Canada"
/Locations/Canada/Alberta address="Alberta, Canada"
/Locations/Canada/Alberta/Calgary address="Calgary, Alberta, Canada"
# If no organizer is specified at the beginning of the file,
# defaults to the /Devices/Discovered device class.
device0 comments="A simple device"
# All settings must be separated by a comma.
device1 comments="A simple device", zSnmpCommunity='blue', zSnmpVer='v1'
# Notes for this file:
# * Organizer names *must* start with '/'
#
/Devices/Server/Linux zSnmpPort=1543
# Python strings can use either ' or " -- there's no difference.
# As a special case, it is also possible to specify the IP address
linux_device1 setManageIp='10.10.10.77', zSnmpCommunity='blue', zSnmpVer="v2c"
# A '\' at the end of the line allows you to place more
# expressions on a new line. Don't forget the comma...
linux_device2 zLinks="<a href='http://example.org'>Support site</a>", \
zTelnetEnable=True, \
zTelnetPromptTimeout=15.3
# A new organizer drops all previous settings, and allows
# for new ones to be used. Settings do not span files.
/Devices/Server/Windows zWinUser="administrator", zWinPassword='fred'
# Bind templates
windows_device1 zDeviceTemplates=[ 'Device', 'myTemplate' ]
# Override the default from the organizer setting.
windows_device2 zWinUser="administrator", zWinPassword='thomas', setProdState=500
# Apply other settings to the device
settingsDevice setManageIp='10.10.10.77', setLocation="123 Elm Street", \
setSystems=['/mySystems'], setPerformanceMonitor='remoteCollector1', \
setHWSerialNumber="abc123456789", setGroups=['/myGroup'], \
setHWProduct=('myproductName','manufacturer'), setOSProduct=('OS Name','manufacturer')
# If the device or device class contains a space, then it must be quoted (either ' or ")
"/Server/Windows/WMI/Active Directory/2008"
# Now, what if we have a device that isn't really a device, and requires
# a special loader?
# The 'loader' setting requires a registered utility, and 'loader_arg_keys' is
# a list from which any other settings will be passed into the loader callable.
#
# Here is a commented-out example of how a VMware endpoint might be added:
#
#/Devices/VMware loader='vmware', loader_arg_keys=['host', 'username', 'password', 'useSsl', 'id']
#esxwin2 id='esxwin2', host='esxwin2.zenoss.loc', username='testuser', password='password', useSsl=True
# Apply custom schema properties (c-properties) to a device
windows_device7 cDateTest='2010/02/28'
#
# The following are wrapper methods that specifically set values on a device:
#
# setManageIp
# setPerformanceMonitor
# setTitle
# setHWTag
# setHWSerialNumber
# setProdState
# setPriority
# setGroups
# setSystems
# setRackSlot
# setComments
#
"""
def __init__(self, *args, **kwargs):
ZCmdBase.__init__(self, *args, **kwargs)
self.defaults = {}
self.loader = self.dmd.DeviceLoader.loadDevice
self.fqdn = socket.getfqdn()
self.baseEvent = dict(
device=self.fqdn,
component='',
agent='zenbatchload',
monitor='localhost',
manager=self.fqdn,
severity=SEVERITY_ERROR,
# Note: Change_Add events get sent to history by the event class' Zen property
eventClass=Change_Add,
)
# Create the list of options we want people to know about
self.loader_args = dict.fromkeys( self.loader.func_code.co_varnames )
unsupportable_args = [
'REQUEST', 'device', 'self', 'xmlrpc', 'e', 'handler',
]
for opt in unsupportable_args:
if opt in self.loader_args:
del self.loader_args[opt]
def loadDeviceList(self, args=None):
"""
Read through all of the files listed as arguments and
return a list of device entries.
        @parameter args: list of filenames (uses self.args if this is None)
@type args: list of strings
@return: list of device specifications
@rtype: list of dictionaries
"""
if args is None:
args = self.args
device_list = []
for filename in args:
if filename.strip() != '':
try:
data = open(filename,'r').readlines()
except IOError:
msg = "Unable to open the file '%s'" % filename
self.reportException(msg)
continue
temp_dev_list = self.parseDevices(data)
if temp_dev_list:
device_list += temp_dev_list
return device_list
def applyZProps(self, device, device_specs):
"""
Apply zProperty settings (if any) to the device.
@parameter device: device to modify
@type d
|
evice: DMD device object
@parameter device_specs: device creation dictionary
@type device_specs: dictionary
"""
self.log.debug( "Applying zProperties..." )
# Returns a list of (key
|
, value) pairs.
# Convert it to a dictionary.
dev_zprops = dict( device.zenPropertyItems() )
for zprop, val
|
ch710798472/GithubRecommended
|
RecGithub/views.py
|
Python
|
mit
| 3,774
| 0.014168
|
#coding:utf-8
from django.shortcuts import render
# Create your views here.
from django.http import HttpRespons
|
e
# import the form classes we created
from models import SearchForm,SearchRepoForm,ConnectForm
import requests
import json
from chgithub import GetSearchInfo,SearchRepo,SocialConnect,SearchConnect,nonSocialConnect
def index(request):
return render(request, 'index.html')
def add(request, a, b):
c = int(a) + int(b)
return HttpResponse(str(c))
def home(request):
return render(request, 'index.html')
def form(request):
if request.meth
|
od == 'POST': # when the form is submitted
        form = SearchForm(request.POST) # form holds the submitted data
        if form.is_valid(): # if the submitted data is valid
location = form.cleaned_data['location']
language = form.cleaned_data['language']
Dict = {'filename':location+language}
if GetSearchInfo(location,language):
return render(request,'search_result.html',{'Dict':json.dumps(Dict)})
else:
return HttpResponse(str("查找结果不存在,请重新输入!"))
    else: # on a normal (GET) visit
form = SearchForm()
return render(request, 'search.html', {'form': form})
def repo(request):
    if request.method == 'POST': # when the form is submitted
        form = SearchRepoForm(request.POST) # form holds the submitted data
        if form.is_valid(): # if the submitted data is valid
stars = form.cleaned_data['stars']
language = form.cleaned_data['language']
Dict = {'filename':language+stars}
if SearchRepo(stars,language):
return render(request,'repo_result.html',{'Dict':json.dumps(Dict)})
else:
return HttpResponse(str("查找结果不存在,请重新输入!"))
    else: # on a normal (GET) visit
form = SearchRepoForm()
return render(request, 'repo.html', {'form': form})
def connect(request):
    if request.method == 'POST': # when the form is submitted
        form = ConnectForm(request.POST) # form holds the submitted data
        if form.is_valid(): # if the submitted data is valid
user = form.cleaned_data['user']
repo = form.cleaned_data['repo']
Dict = {'filename':user+repo}
if SocialConnect(user,repo):
return render(request,'connect_result.html',{'Dict':json.dumps(Dict)})
else:
return HttpResponse(str("查找结果不存在,请重新输入!"))
    else: # on a normal (GET) visit
form = ConnectForm()
return render(request, 'connect.html', {'form': form})
def search(request):
searchKey = request.GET['searchKey']
Dict = {'filename': searchKey.strip()}
if searchKey.strip()=='':
return HttpResponse(str("请输入查找关键字!"))
else:
if(SearchConnect(searchKey.strip())):
return render(request, 'search_key_result.html',{'Dict':json.dumps(Dict)})
else:
return HttpResponse(str('请重新查找!'))
def nonconnect(request):
    if request.method == 'POST': # when the form is submitted
        form = ConnectForm(request.POST) # form holds the submitted data
        if form.is_valid(): # if the submitted data is valid
user = form.cleaned_data['user']
repo = form.cleaned_data['repo']
Dict = {'filename':user+repo}
if nonSocialConnect(user,repo):
return render(request,'connect_result.html',{'Dict':json.dumps(Dict)})
else:
return HttpResponse(str("查找结果不存在,请重新输入!"))
    else: # on a normal (GET) visit
form = ConnectForm()
return render(request, 'connect.html', {'form': form})
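# Illustrative sketch (not part of this repo): a urls.py wiring up these views.
# The import path "RecGithub.views" and the URL patterns are assumptions.
#
#   from django.conf.urls import url
#   from RecGithub import views
#
#   urlpatterns = [
#       url(r'^$', views.index),
#       url(r'^form/$', views.form),
#       url(r'^repo/$', views.repo),
#       url(r'^connect/$', views.connect),
#       url(r'^search/$', views.search),
#   ]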
|
SciTools/iris
|
lib/iris/tests/unit/fileformats/abf/test_ABFField.py
|
Python
|
lgpl-3.0
| 1,558
| 0
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.fileformats.abf.ABFField` class."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
from unittest import mock
from iris.fileformats.abf import ABFField
class MethodCounter:
def __init__(self, method_name):
self.method_name = method_name
self.count = 0
def __enter__(self):
self.orig_method = getattr(ABFField, self.method_name)
def new_method(*args, **kwargs):
self.count += 1
self.orig_method(*args, **kwargs)
setattr(ABFField, self.method_name, new_method)
return self
def __exit__(self, exc_type, exc_value, traceback):
setattr(ABFField, self.method_name, self.orig_method)
|
return False
class Test_data(tests.IrisTest):
def test_single_read(self):
path = "0000000000000000ja
|
n00000"
field = ABFField(path)
with mock.patch("iris.fileformats.abf.np.fromfile") as fromfile:
with MethodCounter("__getattr__") as getattr:
with MethodCounter("_read") as read:
field.data
fromfile.assert_called_once_with(path, dtype=">u1")
self.assertEqual(getattr.count, 1)
self.assertEqual(read.count, 1)
if __name__ == "__main__":
tests.main()
|
guitarmanj/king-phisher
|
king_phisher/client/mailer.py
|
Python
|
bsd-3-clause
| 38,476
| 0.023365
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/mailer.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# disable this warning for the email.mime.* modules that have to be imported
# pylint: disable=unused-import
import codecs
import collections
import csv
import datetime
import email.encoders as encoders
import email.mime as mime
import email.mime.base
import email.mime.image
import email.mime.multipart
import email.mime.text
import email.utils
import logging
import mimetypes
import os
import smtplib
import socket
import threading
import time
import urllib.parse
from king_phisher import errors
from king_phisher import ics
from king_phisher import ipaddress
from king_phisher import its
from king_phisher import templates
from king_phisher import utilities
from king_phisher.client import gui_utilities
from king_phisher.client.dialogs import ssh_host_key
from king_phisher.constants import ConnectionErrorReason
from king_phisher.ssh_forward import SSHTCPForwarder
from gi.repository import GLib
import paramiko
import smoke_zephyr.utilities
__all__ = (
'guess_smtp_server_address',
'MailSenderThread',
'render_message_template'
)
template_environment = templates.MessageTemplateEnvironment()
MessageAttachments = collections.namedtuple('MessageAttachments', ('files', 'images'))
"""
A named tuple for holding both image and file attachments for a message.
.. py:attribute:: files
	A tuple of :py:class:`~.mime.MIMEBase` instances representing the message's
attachments.
.. py:attribute:: images
A tuple of :py:class:`~.mime.MIMEImage` instances representing the images in
the message.
"""
MIME_TEXT_PLAIN = 'This message requires an HTML aware email agent to be properly viewed.\r\n\r\n'
"""The static string to place in MIME message as a text/plain part. This is shown by email clients that do not support HTML."""
def _iterate_targets_file(target_file, config=None):
target_file_h = open(target_file, 'rU')
csv_reader = csv.DictReader(target_file_h, ('first_name', 'last_name', 'email_address', 'department'))
uid_charset = None if config is None else config['mailer.message_uid.charset']
for line_no, raw_target in enumerate(csv_reader, 1):
if None in raw_target:
# remove the additional fields
del raw_target[None]
if its.py_v2:
# this will intentionally cause a UnicodeDecodeError to be raised as is the behaviour in python 3.x
# when csv.DictReader is initialized
raw_target = dict((k, (v if v is None else v.decode('utf-8'))) for k, v in raw_target.items())
if uid_charset is not None:
raw_target['uid'
|
] = utilities.make_message_uid(
upper=uid_charset['upper'],
lower=uid_charset['lower'],
digits=uid_charset['digits']
)
target = MessageTarget(line=line_no, **raw_target)
# the caller needs to catch and process the missing fields appropriately
yield target
|
target_file_h.close()
def count_targets_file(target_file):
"""
Count the number of valid targets that the specified file contains. This
skips lines which are missing fields or where the email address is invalid.
	:param str target_file: The path to the target CSV file on disk.
:return: The number of valid targets.
:rtype: int
"""
count = 0
for target in _iterate_targets_file(target_file):
if target.missing_fields:
continue
if not utilities.is_valid_email_address(target.email_address):
continue
count += 1
return count
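# Illustrative sketch (not part of king-phisher): a minimal targets CSV that
# count_targets_file() and _iterate_targets_file() would accept, with the
# columns first_name, last_name, email_address, department:
#
#   Alice,Smith,alice.smith@example.com,Engineering
#   Bob,Jones,bob.jones@example.com,Sales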
def get_invite_start_from_config(config):
"""
Get the start time for an invite from the configuration. This takes into
account whether the invite is for all day or starts at a specific time.
:param dict config: The King Phisher client configuration.
:return: The timestamp of when the invite is to start.
:rtype: :py:class:`datetime.datetime`
"""
if config['mailer.calendar_invite_all_day']:
start_time = datetime.datetime.combine(
config['mailer.calendar_invite_date'],
datetime.time(0, 0)
)
else:
start_time = datetime.datetime.combine(
config['mailer.calendar_invite_date'],
datetime.time(
int(config['mailer.calendar_invite_start_hour']),
int(config['mailer.calendar_invite_start_minute'])
)
)
return start_time
@smoke_zephyr.utilities.Cache('3m')
def guess_smtp_server_address(host, forward_host=None):
"""
Guess the IP address of the SMTP server that will be connected to given the
SMTP host information and an optional SSH forwarding host. If a hostname is
in use it will be resolved to an IP address, either IPv4 or IPv6 and in that
order. If a hostname resolves to multiple IP addresses, None will be
	returned. This function is intended to guess the SMTP server's IP address
given the client configuration so it can be used for SPF record checks.
:param str host: The SMTP server that is being connected to.
:param str forward_host: An optional host that is being used to tunnel the connection.
:return: The IP address of the SMTP server.
:rtype: None, :py:class:`ipaddress.IPv4Address`, :py:class:`ipaddress.IPv6Address`
"""
host = host.rsplit(':', 1)[0]
if ipaddress.is_valid(host):
ip = ipaddress.ip_address(host)
if not ip.is_loopback:
return ip
else:
info = None
for family in (socket.AF_INET, socket.AF_INET6):
try:
info = socket.getaddrinfo(host, 1, family)
except socket.gaierror:
continue
info = set(list([r[4][0] for r in info]))
if len(info) != 1:
return
break
if info:
ip = ipaddress.ip_address(info.pop())
if not ip.is_loopback:
return ip
if forward_host:
return guess_smtp_server_address(forward_host)
return
def render_message_template(template, config, target=None, analyze=False):
"""
Take a message from a template and format it to be sent by replacing
variables and processing other template directives. If the *target*
parameter is not set, a placeholder will be created and the message will be
formatted to be previewed.
:param str template: The message template.
:param dict config: The King Phisher client configuration.
:param target: The messages intended target information.
:type target: :py:class:`.MessageTarget`
:param bool analyze: Set the template environment to analyze mode.
:return: The formatted message.
:rtype: str
"""
if target is None:
target = MessageTargetPlaceholder(uid=config['server_config'].get('server.secret_id'))
template_environment.set_mode(template_environment.MODE_PREVIEW)
if analyze:
template_environment.set_mode(template_environment.MODE_ANALYZE)
template = template_environment.from_string(template)
template_vars = {}
template_vars['campaign'] = dict(
id=str(config['campaign_id']),
name=config['campaign_name']
)
template_vars['client'] = dict(
first_name=target.first_name,
last_name=target
|
youfoh/webkit-efl
|
Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
|
Python
|
lgpl-2.1
| 8,511
| 0.005522
|
#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A class to start/stop the apache http server used by layout tests."""
import logging
import os
import re
import sys
from webkitpy.layout_tests.servers import http_server_base
_log = logging.getLogger(__name__)
class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
def __init__(self, port_obj, output_dir, additional_dirs=None, number_of_servers=None):
"""Args:
port_obj: handle to the platform-specific routines
output_dir: the absolute path to the layout test result directory
"""
http_server_base.HttpServerBase.__init__(self, port_obj, number_of_servers)
# We use the name "httpd" instead of "apache" to make our paths (e.g. the pid file: /tmp/WebKit/httpd.pid)
# match old-run-webkit-tests: https://bugs.webkit.org/show_bug.cgi?id=63956
self._name = 'httpd'
self._mappings = [{'port': 8000},
{'port': 8080},
{'port': 8081},
{'port': 8443, 'sslcert': True}]
self._output_dir = output_dir
self._filesystem.maybe_make_directory(output_dir)
self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
test_dir = self._port_obj.layout_tests_dir()
js_test_resources_dir = self._filesystem.join(test_dir, "fast", "js", "resources")
media_resources_dir = self._filesystem.join(test_dir, "media")
mime_types_path = self._filesystem.join(test_dir, "http", "conf", "mime.types")
cert_file = self._filesystem.join(test_dir, "http", "conf", "webkit-httpd.pem")
access_log = self._filesystem.join(output_dir, "access_log.txt")
error_log = self._filesystem.join(output_dir, "error_log.txt")
document_root = self._filesystem.join(test_dir, "http", "tests")
# FIXME: We shouldn't be calling a protected method of _port_obj!
executable = self._port_obj._path_to_apache()
start_cmd = [executable,
'-f', "\"%s\"" % self._get_apache_config_file_path(test_dir, output_dir),
'-C', "\'DocumentRoot \"%s\"\'" % document_root,
'-c', "\'Alias /js-test-resources \"%s\"'" % js_test_resources_dir,
'-c', "\'Alias /media-resources \"%s\"'" % media_resources_dir,
'-C', "\'Listen %s\'" % "127.0.0.1:8000",
'-C', "\'Listen %s\'" % "127.0.0.1:8081",
'-c', "\'TypesConfig \"%s\"\'" % mime_types_path,
'-c', "\'CustomLog \"%s\" common\'" % access_log,
'-c', "\'ErrorLog \"%s\"\'" % error_log,
'-C', "\'User \"%s\"\'" % os.environ.get("USERNAME", os.environ.get("USER", "")),
'-c', "\'PidFile %s'" % self._pid_file,
'-k', "start"]
if additional_dirs:
for alias, path in additional_dirs.iteritems():
start_cmd += ['-c', "\'Alias %s \"%s\"\'" % (alias, path),
# Disable CGI handler for additional dirs.
'-c', "\'<Location %s>\'" % alias,
'-c', "\'RemoveHandler .cgi .pl\'",
'-c', "\'</Location>\'"]
if self._number_of_servers:
start_cmd += ['-c', "\'StartServers %d\'" % self._number_of_servers,
'-c', "\'MinSpareServers %d\'" % self._number_of_servers,
'-c', "\'MaxSpareServers %d\'" % self._number_of_servers]
stop_cmd = [executable,
'-f', "\"%s\"" % self._get_apache_config_file_path(test_dir, output_dir),
'-c', "\'PidFile %s'" % self._pid_file,
'-k', "stop"]
start_cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file])
# Join the string here so that Cygwin/Windows and Mac/Linux
# can use the same code. Otherwise, we could remove the single
# quotes above and keep cmd as a se
|
quence.
# FIXME: I
|
t's unclear if this is still needed.
self._start_cmd = " ".join(start_cmd)
self._stop_cmd = " ".join(stop_cmd)
def _get_apache_config_file_path(self, test_dir, output_dir):
"""Returns the path to the apache config file to use.
Args:
test_dir: absolute path to the LayoutTests directory.
output_dir: absolute path to the layout test results directory.
"""
httpd_config = self._port_obj._path_to_apache_config_file()
httpd_config_copy = os.path.join(output_dir, "httpd.conf")
httpd_conf = self._filesystem.read_text_file(httpd_config)
# FIXME: Why do we need to copy the config file since we're not modifying it?
self._filesystem.write_text_file(httpd_config_copy, httpd_conf)
return httpd_config_copy
def _spawn_process(self):
_log.debug('Starting %s server, cmd="%s"' % (self._name, str(self._start_cmd)))
retval, err = self._run(self._start_cmd)
if retval or len(err):
raise http_server_base.ServerError('Failed to start %s: %s' % (self._name, err))
# For some reason apache isn't guaranteed to have created the pid file before
# the process exits, so we wait a little while longer.
if not self._wait_for_action(lambda: self._filesystem.exists(self._pid_file)):
raise http_server_base.ServerError('Failed to start %s: no pid file found' % self._name)
return int(self._filesystem.read_text_file(self._pid_file))
def _stop_running_server(self):
# If apache was forcefully killed, the pid file will not have been deleted, so check
# that the process specified by the pid_file no longer exists before deleting the file.
if self._pid and not self._executive.check_running_pid(self._pid):
self._filesystem.remove(self._pid_file)
return
retval, err = self._run(self._stop_cmd)
if retval or len(err):
raise http_server_base.ServerError('Failed to stop %s: %s' % (self._name, err))
# For some reason apache isn't guaranteed to have actually stopped after
# the stop command returns, so we wait a little while longer for the
# pid file to be removed.
if not self._wait_for_action(lambda: not self._filesystem.exists(self._pid_file)):
raise http_server_base.ServerError('Failed to stop %s: pid file still exists' % self._name)
def _run(self, cmd):
# Use shell=True because we join the arguments into a string for
# the sake of Window/Cygwin and it needs quoting that breaks
# shell=False.
# FIXME: We should not need to be joining shell arguments i
|
lucasplus/MABDI
|
scripts/Plot_Depth_Image_To_Z.py
|
Python
|
bsd-3-clause
| 4,777
| 0.000837
|
import vtk
import numpy as np
import matplotlib.pyplot as plt
def vtkmatrix_to_numpy(matrix):
m = np.ones((4, 4))
for i in range(4):
for j in range(4):
m[i, j] = matrix.GetElement(i, j)
return m
"""
Get transformation from viewpoint coordinates to
real-world coordinates. (tmat)
"""
# vtk rendering objects
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create cube and add it to renderer
# (not needed except to validate positioning of camera)
cube = vtk.vtkCubeSource()
cube.SetCenter(0.0, 0.0, 3.0)
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInputConnection(cube.GetOutputPort())
cubeActor = vtk.vtkActor()
cubeActor.SetMapper(cubeMapper)
ren.AddActor(cubeActor)
# set the intrinsic parameters
renWin.SetSize((640, 480))
cam = ren.GetActiveCamera()
cam.SetViewAngle(60.0)
cam.SetClippingRange(0.8, 4.0)
iren.GetInteractorStyle().SetAutoAdjustCameraClippingRange(0)
# have it positioned at the origin and looking down the z axis
cam.SetPosition(0.0, 0.0, 0.0)
cam.SetFocalPoint(0.0, 0.0, 1.0)
iren.Initialize()
iren.Render()
vtktmat = cam.GetCompositeProjectionTransformMatrix(
ren.GetTiledAspectRa
|
tio(),
0.0, 1.0)
vtktmat.Invert()
tmat = vtkmatrix_to_numpy(vtktmat)
""" Plot """
plt.figure(frameon=False, dpi=100)
nvalues = 100
noise = 0.002
# vpc - view point coordinates
# wc - world coordinates
vpc = np.zeros((4, nvalues))
vpc[2, :] = np.linspace(0, 1, nvalues)
vpc[3, :] = np.ones((1, vpc.shape[1]))
wc = np.dot(tmat, vpc)
wc = wc / wc[3]
wz = wc[2, :]
plt.plot(vpc[2, :],
wz,
'-o', color='b',
marker
|
size=2, markerfacecolor='g')
# nvpc, nwc - same as vpc, wc but with noise
nvpc = vpc.copy()
nvpc[2, :] += noise
nwc = np.dot(tmat, nvpc)
nwc = nwc / nwc[3]
nwz = nwc[2, :]
# plt.plot(vpc[2, :],
# nwz,
# color='r')
# nvpc, nwc - same as vpc, wc but with noise
nvpc = vpc.copy()
nvpc[2, :] -= noise
nwc = np.dot(tmat, nvpc)
nwc = nwc / nwc[3]
nwz = nwc[2, :]
# plt.plot(vpc[2, :],
# nwz,
# color='r')
""" Plot display properties """
plt.title('View to Sensor Coordinates Along Z Axis')
plt.xlabel('View Coordinates Z (normalized units)')
plt.ylabel('Sensor Coordinates Z (m)')
plt.grid(True)
ax = plt.gca()
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label]):
item.set_fontsize(18)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(12)
# plt.savefig('plot_depth.png')
plt.show()
""" Plot """
# wc
plt.figure(frameon=False, dpi=100)
plt.plot(wz,
(wz-nwz)*100,
'-o', color='b',
markersize=2, markerfacecolor='g',
label='MABDI')
plt.plot(wz,
(0.5*2.85e-5*pow(wz*100, 2)),
color='r',
label='Khoshelham Noise Model')
""" Plot display properties """
plt.title('Standard Deviation of Noise Along Sensor Z')
plt.xlabel('Distance to Actual Point (m)')
plt.ylabel('Standard Deviation of Error (cm)')
plt.grid(True)
ax = plt.gca()
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label]):
item.set_fontsize(18)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(12)
plt.legend(loc='upper left')
# plt.savefig('plot_depth.png')
plt.show()
"""
val1 = 0.6
val2 = 0.8
noise = 0.001
vp = np.array([(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
(val1 - noise, val1, val1 + noise, val2 - noise, val2, val2 + noise),
(1.0, 1.0, 1.0, 1.0, 1.0, 1.0)])
wp = np.dot(tmat, vp)
wp = wp / wp[3]
vpz = vp[2, :]
wpz = wp[2, :]
plt.plot(vpz,
wpz,
'o', color='b',
markersize=9, markerfacecolor='r')
string = 'with noise = {:.3f}\n' \
' x y \n' \
'({:.4f}, {:.4f})\n' \
'({:.4f}, {:.4f})\n' \
'({:.4f}, {:.4f})\n' \
'diff = {:.2f} (cm)'.format(noise,
vpz[0], wpz[0],
vpz[1], wpz[1],
vpz[2], wpz[2],
abs(wpz[2] - wpz[0]) * 100)
bbox = {'edgecolor': 'black', 'facecolor': 'white', 'pad': 10}
plt.text(0.305, 1.72, string, bbox=bbox)
string = 'with noise = {:.3f}\n' \
' x y \n' \
'({:.4f}, {:.4f})\n' \
'({:.4f}, {:.4f})\n' \
'({:.4f}, {:.4f})\n' \
'diff = {:.2f} (cm)'.format(noise,
vpz[3], wpz[3],
vpz[4], wpz[4],
vpz[5], wpz[5],
abs(wpz[5] - wpz[3]) * 100)
plt.text(0.835, 1.20, string, bbox=bbox)
"""
|
nypdmax/NUMA
|
tools/qemu-xen/tests/qemu-iotests/qcow2.py
|
Python
|
gpl-2.0
| 7,287
| 0.009332
|
#!/usr/bin/env python
import sys
import struct
import string
class QcowHeaderExtension:
def __init__(self, magic, length, data):
self.magic = magic
self.length = length
self.data = data
@classmethod
def create(cls, magic, data):
return QcowHeaderExtension(magic, len(data), data)
class QcowHeader:
uint32_t = 'I'
uint64_t = 'Q'
fields = [
# Version 2 header fields
[ uint32_t, '%#x', 'magic' ],
[ uint32_t, '%d', 'version' ],
[ uint64_t, '%#x', 'backing_file_offset' ],
[ uint32_t, '%#x', 'backing_file_size' ],
[ uint32_t, '%d', 'cluster_bits' ],
[ uint64_t, '%d', 'size' ],
[ uint32_t, '%d', 'crypt_method' ],
[ uint32_t, '%d', 'l1_size' ],
[ uint64_t, '%#x', 'l1_table_offset' ],
[ uint64_t, '%#x', 'refcount_table_offset' ],
[ uint32_t, '%d', 'refcount_table_clusters' ],
[ uint32_t, '%d', 'nb_snapshots' ],
[ uint64_t, '%#x', 'snapshot_offset' ],
# Version 3 header fields
[ uint64_t, '%#x', 'incompatible_features' ],
[ uint64_t, '%#x', 'compatible_features' ],
[ uint64_t, '%#x', 'autoclear_features' ],
[ uint32_t, '%d', 'refcount_order' ],
[ uint32_t
|
, '%d', 'header_length' ],
];
fmt = '>' + ''.join(field[0] for field in fields)
def __init__(self, fd):
buf_size = struct.calcsize(QcowHeader.fmt)
fd.seek(0)
buf = fd.read(buf_size)
header = struct.unpack(QcowHeader.fmt, buf)
self.__dict__ = dict((field[2], header[i])
for i, field in enumerate(QcowHeader.fields))
self.set_defaults()
self.cluster_size = 1 << self.clu
|
ster_bits
fd.seek(self.header_length)
self.load_extensions(fd)
if self.backing_file_offset:
fd.seek(self.backing_file_offset)
self.backing_file = fd.read(self.backing_file_size)
else:
self.backing_file = None
def set_defaults(self):
if self.version == 2:
self.incompatible_features = 0
self.compatible_features = 0
self.autoclear_features = 0
self.refcount_order = 4
self.header_length = 72
def load_extensions(self, fd):
self.extensions = []
if self.backing_file_offset != 0:
end = min(self.cluster_size, self.backing_file_offset)
else:
end = self.cluster_size
while fd.tell() < end:
(magic, length) = struct.unpack('>II', fd.read(8))
if magic == 0:
break
else:
padded = (length + 7) & ~7
data = fd.read(padded)
self.extensions.append(QcowHeaderExtension(magic, length, data))
def update_extensions(self, fd):
fd.seek(self.header_length)
extensions = self.extensions
extensions.append(QcowHeaderExtension(0, 0, ""))
for ex in extensions:
buf = struct.pack('>II', ex.magic, ex.length)
fd.write(buf)
fd.write(ex.data)
if self.backing_file != None:
self.backing_file_offset = fd.tell()
fd.write(self.backing_file)
if fd.tell() > self.cluster_size:
raise Exception("I think I just broke the image...")
def update(self, fd):
header_bytes = self.header_length
self.update_extensions(fd)
fd.seek(0)
header = tuple(self.__dict__[f] for t, p, f in QcowHeader.fields)
buf = struct.pack(QcowHeader.fmt, *header)
buf = buf[0:header_bytes-1]
fd.write(buf)
def dump(self):
for f in QcowHeader.fields:
print "%-25s" % f[2], f[1] % self.__dict__[f[2]]
print ""
def dump_extensions(self):
for ex in self.extensions:
data = ex.data[:ex.length]
if all(c in string.printable for c in data):
data = "'%s'" % data
else:
data = "<binary>"
print "Header extension:"
print "%-25s %#x" % ("magic", ex.magic)
print "%-25s %d" % ("length", ex.length)
print "%-25s %s" % ("data", data)
print ""
def cmd_dump_header(fd):
h = QcowHeader(fd)
h.dump()
h.dump_extensions()
def cmd_set_header(fd, name, value):
try:
value = int(value, 0)
except:
print "'%s' is not a valid number" % value
sys.exit(1)
fields = (field[2] for field in QcowHeader.fields)
if not name in fields:
print "'%s' is not a known header field" % name
sys.exit(1)
h = QcowHeader(fd)
h.__dict__[name] = value
h.update(fd)
def cmd_add_header_ext(fd, magic, data):
try:
magic = int(magic, 0)
except:
print "'%s' is not a valid magic number" % magic
sys.exit(1)
h = QcowHeader(fd)
h.extensions.append(QcowHeaderExtension.create(magic, data))
h.update(fd)
def cmd_del_header_ext(fd, magic):
try:
magic = int(magic, 0)
except:
print "'%s' is not a valid magic number" % magic
sys.exit(1)
h = QcowHeader(fd)
found = False
for ex in h.extensions:
if ex.magic == magic:
found = True
h.extensions.remove(ex)
if not found:
print "No such header extension"
return
h.update(fd)
def cmd_set_feature_bit(fd, group, bit):
try:
bit = int(bit, 0)
if bit < 0 or bit >= 64:
raise ValueError
except:
print "'%s' is not a valid bit number in range [0, 64)" % bit
sys.exit(1)
h = QcowHeader(fd)
if group == 'incompatible':
h.incompatible_features |= 1 << bit
elif group == 'compatible':
h.compatible_features |= 1 << bit
elif group == 'autoclear':
h.autoclear_features |= 1 << bit
else:
print "'%s' is not a valid group, try 'incompatible', 'compatible', or 'autoclear'" % group
sys.exit(1)
h.update(fd)
cmds = [
[ 'dump-header', cmd_dump_header, 0, 'Dump image header and header extensions' ],
[ 'set-header', cmd_set_header, 2, 'Set a field in the header'],
[ 'add-header-ext', cmd_add_header_ext, 2, 'Add a header extension' ],
[ 'del-header-ext', cmd_del_header_ext, 1, 'Delete a header extension' ],
[ 'set-feature-bit', cmd_set_feature_bit, 2, 'Set a feature bit'],
]
def main(filename, cmd, args):
fd = open(filename, "r+b")
try:
for name, handler, num_args, desc in cmds:
if name != cmd:
continue
elif len(args) != num_args:
usage()
return
else:
handler(fd, *args)
return
print "Unknown command '%s'" % cmd
finally:
fd.close()
def usage():
print "Usage: %s <file> <cmd> [<arg>, ...]" % sys.argv[0]
print ""
print "Supported commands:"
for name, handler, num_args, desc in cmds:
print " %-20s - %s" % (name, desc)
if __name__ == '__main__':
if len(sys.argv) < 3:
usage()
sys.exit(1)
main(sys.argv[1], sys.argv[2], sys.argv[3:])
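# Illustrative invocations (editorial addition) matching usage() above; the image file
# name "disk.qcow2" is hypothetical:
#   python qcow2.py disk.qcow2 dump-header
#   python qcow2.py disk.qcow2 set-header l1_size 4
#   python qcow2.py disk.qcow2 set-feature-bit compatible 0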
|
hectormartinez/rougexstem
|
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/classify/weka.py
|
Python
|
apache-2.0
| 8,796
| 0.004093
|
# Natural Language Toolkit: Interface to Weka Classifiers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: naivebayes.py 2063 2004-07-17 21:02:24Z edloper $
import time, tempfile, os, os.path, subprocess, re
from api import *
from nltk.probability import *
from nltk.internals import java, config_java
"""
Classifiers that make use of the external 'Weka' package.
"""
_weka_classpath = None
_weka_search = ['.',
'/usr/share/weka',
'/usr/local/share/weka',
'/usr/lib/weka',
'/usr/local/lib/weka',]
def config_weka(classpath=None):
global _weka_classpath
# Make sure java's configured first.
config_java()
if classpath is not None:
_weka_classpath = classpath
if _weka_classpath is None:
searchpath = _weka_search
if 'WEKAHOME' in os.environ:
searchpath.insert(0, os.environ['WEKAHOME'])
for path in searchpath:
if os.path.exists(os.path.join(path, 'weka.jar')):
_weka_classpath = os.path.join(path, 'weka.jar')
print '[Found Weka: %s]' % _weka_classpath
if _weka_classpath is None:
raise LookupError('Unable to find weka.jar! Use config_weka() '
'or set the WEKAHOME environment variable. '
'For more information about Weka, please see '
'http://www.cs.waikato.ac.nz/ml/weka/')
class WekaClassifier(ClassifierI):
def __init__(self, formatter, model_filename):
self._formatter = formatter
self._model = model_filename
def batch_prob_classify(self, featuresets):
return self._batch_classify(featuresets, ['-p', '0', '-distribution'])
def batch_classify(self, featuresets):
return self._batch_classify(featuresets, ['-p', '0'])
def _batch_classify(self, featuresets, options):
# Make sure we can find java & weka.
config_weka()
temp_dir = tempfile.mkdtemp()
try:
# Write the test data file.
test_filename = os.path.join(temp_dir, 'test.arff')
self._formatter.write(test_filename, featuresets)
# Call weka to classify the data.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-l', self._model, '-T', test_filename] + options
(stdout, stderr) = java(cmd, classpath=_weka_classpath,
stdout=subprocess.PIPE)
# Parse weka's output.
return self.parse_weka_output(stdout.split('\n'))
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
def parse_weka_distribution(self, s):
probs = [float(v) for v in re.split('[*,]+', s) if v.strip()]
probs = dict(zip(self._formatter.labels(), probs))
return DictionaryProbDist(probs)
def parse_weka_output(self, lines):
if lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'prediction']:
return [line.split()[2].split(':')[1]
for line in lines[1:] if line.strip()]
elif lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'distribution']:
return [self.parse_weka_distribution(line.split()[-1])
for line in lines[1:] if line.strip()]
else:
for line in lines[:10]: print line
raise ValueError('Unhandled output format -- your version '
'of weka may not be supported.\n'
' Header: %s' % lines[0])
@staticmethod
def train(model_filename, featuresets, quiet=True):
# Make sure we can find java & weka.
config_weka()
# Build an ARFF formatter.
formatter = ARFF_Formatter.from_train(featuresets)
temp_dir = tempfile.mkdtemp()
try:
# Write the training data file.
train_filename = os.path.join(temp_dir, 'train.arff')
formatter.write(train_filename, featuresets)
# Train the weka model.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-d', model_filename, '-t', train_filename]
if quiet: stdout = subprocess.PIPE
else: stdout = None
java(cmd, classpath=_weka_classpath, stdout=stdout)
# Return the new classifier.
return WekaClassifier(formatter, model_filename)
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
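# Illustrative sketch (editorial addition): hypothetical end-to-end use of WekaClassifier;
# the feature names, labels and model path are invented, and a working Java + Weka
# installation is assumed.
#   train_toks = [({'length': 4, 'caps': True}, 'short'),
#                 ({'length': 12, 'caps': False}, 'long')]
#   classifier = WekaClassifier.train('/tmp/nb.model', train_toks)
#   print classifier.batch_classify([{'length': 5, 'caps': True}])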
class ARFF_Formatter:
"""
Converts featuresets and labeled featuresets to ARFF-formatted
strings, appropriate for input into Weka.
"""
def __init__(se
|
lf, labels, features):
"""
@param labels: A list of all labels that can be generated.
@param features: A list of feature specifications, whe
|
re
each feature specification is a tuple (fname, ftype);
and ftype is an ARFF type string such as NUMERIC or
STRING.
"""
self._labels = labels
self._features = features
def format(self, tokens):
return self.header_section() + self.data_section(tokens)
def labels(self):
return list(self._labels)
def write(self, filename, tokens):
f = open(filename, 'w')
f.write(self.format(tokens))
f.close()
@staticmethod
def from_train(tokens):
# Find the set of all attested labels.
labels = set(label for (tok,label) in tokens)
# Determine the types of all features.
features = {}
for tok, label in tokens:
for (fname, fval) in tok.items():
if issubclass(type(fval), bool):
ftype = '{True, False}'
elif issubclass(type(fval), (int, float, long, bool)):
ftype = 'NUMERIC'
elif issubclass(type(fval), basestring):
ftype = 'STRING'
elif fval is None:
continue # can't tell the type.
else:
                    raise ValueError('Unsupported value type %r' % fval)
if features.get(fname, ftype) != ftype:
raise ValueError('Inconsistent type for %s' % fname)
features[fname] = ftype
features = sorted(features.items())
return ARFF_Formatter(labels, features)
def header_section(self):
# Header comment.
s = ('% Weka ARFF file\n' +
'% Generated automatically by NLTK\n' +
'%% %s\n\n' % time.ctime())
# Relation name
s += '@RELATION rel\n\n'
# Input attribute specifications
for fname, ftype in self._features:
s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)
# Label attribute specification
s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))
return s
def data_section(self, tokens, labeled=None):
"""
@param labeled: Indicates whether the given tokens are labeled
or not. If C{None}, then the tokens will be assumed to be
labeled if the first token's value is a tuple or list.
"""
# Check if the tokens are labeled or unlabeled. If unlabeled,
# then use 'None'
if labeled is None:
labeled = tokens and isinstance(tokens[0], (tuple, list))
if not labeled:
tokens = [(tok, None) for tok in tokens]
# Data section
s = '\n@DATA\n'
for (tok, label) in tokens:
for fname, ftype in self._features:
s += '%s,' % self._fmt_arff_val(tok.get(fname))
s +=
|
agry/NGECore2
|
scripts/mobiles/talus/sickly_decay_mite_queen.py
|
Python
|
lgpl-3.0
| 1,557
| 0.026975
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('sickly_decay_mite_queen')
mobileTemplate.setLevel(19)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Insect Meat")
mobileTemplate.setMeatAmount(15)
mobileTemplate.setSocialGroup("decay mite")
mobileTemplate.setAssistRange(0)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
|
templates = Vector()
templates.add('object/mobile/shared_bark_mite_hue.iff')
mobileTemplate.setTemplates(templates)
|
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bite_4')
attacks.add('bm_bolster_armor_4')
attacks.add('bm_enfeeble_4')
mobileTemplate.setDefaultAttack('creatureRangedAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('sickly_decay_mite_queen', mobileTemplate)
return
|
philgyford/django-ditto
|
ditto/twitter/management/commands/fetch_twitter_accounts.py
|
Python
|
mit
| 1,562
| 0
|
# coding: utf-8
from django.core.management.base import BaseCommand
from ...fetch.fetchers import VerifyFetcher
class Command(BaseCommand):
"""Updates the stored data about the Twitter user for one or all Accounts.
For one account:
    ./manage.py fetch_twitter_accounts --account=philgyford
For all accounts:
    ./manage.py fetch_twitter_accounts
"""
help = "Fetches and updates data about Accounts' Twitter Users"
def add_arguments(self, parser):
parser.add_argument(
"--account",
action="store",
default=False,
help="Only fetch for one Twitter account.",
)
def handle(self, *args, **options):
# We might be fetching for a specific account or all (None).
account = options["account"] if options["account"] else None
results = VerifyFetcher(screen_name=account).fetch()
|
# results should be a list of dicts, either:
# { 'account': 'thescreenname',
# 'success': True
# }
# or:
# { 'account': 'thescreenname',
# 'success': False,
# 'messages': ["This screen_name doesn't exist"]
# }
if options.get("verbosity", 1) > 0:
for result in results:
|
if result["success"]:
self.stdout.write("Fetched @%s" % result["account"])
else:
self.stderr.write(
"Could not fetch @%s: %s"
% (result["account"], result["messages"][0])
)
|
neilLasrado/erpnext
|
erpnext/patches/v13_0/update_advance_received_in_sales_order.py
|
Python
|
gpl-3.0
| 300
| 0.01
|
import frappe
def execute():
frappe.reload_doc("selling", "doctype", "sales_order
|
")
docs = frappe.get_all("Sales Order", {
"advance_pa
|
id": ["!=", 0]
}, "name")
for doc in docs:
frappe.db.set_value("Sales Order", doc.name, "advance_received", 1, update_modified=False)
|
Yelp/pyes
|
docs/_ext/djangodocs.py
|
Python
|
bsd-3-clause
| 3,769
| 0.011409
|
"""
Sphinx plugins for Django documentation.
"""
import docutils.nodes
import docutils.transforms
import sphinx
import sphinx.addnodes
import sphinx.directives
import sphinx.environment
import sphinx.roles
from docutils import nodes
def setup(app):
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "templatetag",
rolename = "ttag",
indextemplate = "pair: %s; template tag",
)
app.add_crossref_type(
directivename = "templatefilter",
rolename = "tfilter",
|
indextemplate = "pair: %s; template filter",
)
app.add_crossref_type(
directivename = "fieldlookup",
rolename = "lookup",
indextemplate = "pair: %s, field lookup type",
)
app.add_description_unit(
directivename = "django-admin",
rolename = "djadmin",
indextemplate = "pair: %s; django-admin command",
parse_node = parse_django_admin_node,
)
|
app.add_description_unit(
directivename = "django-admin-option",
rolename = "djadminopt",
indextemplate = "pair: %s; django-admin command-line option",
parse_node = lambda env, sig, signode: \
sphinx.directives.parse_option_desc(signode, sig),
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', parse_version_directive, 1, (1, 1, 1))
app.add_directive('versionchanged', parse_version_directive, 1, (1, 1, 1))
app.add_transform(SuppressBlockquotes)
def parse_version_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
is_nextversion = env.config.django_next_version == arguments[0]
ret = []
node = sphinx.addnodes.versionmodified()
ret.append(node)
if not is_nextversion:
if len(arguments) == 1:
linktext = 'Please, see the release notes <releases-%s>' % (
arguments[0])
xrefs = sphinx.roles.xfileref_role('ref', linktext, linktext,
lineno, state)
node.extend(xrefs[0])
node['version'] = arguments[0]
else:
node['version'] = "Development version"
node['type'] = name
if len(arguments) == 2:
inodes, messages = state.inline_text(arguments[1], lineno+1)
node.extend(inodes)
if content:
state.nested_parse(content, content_offset, node)
ret = ret + messages
env.note_versionchange(node['type'], node['version'], node, lineno)
return ret
class SuppressBlockquotes(docutils.transforms.Transform):
"""
Remove the default blockquotes that encase indented list, tables, etc.
"""
default_priority = 300
suppress_blockquote_child_nodes = (
docutils.nodes.bullet_list,
docutils.nodes.enumerated_list,
docutils.nodes.definition_list,
docutils.nodes.literal_block,
docutils.nodes.doctest_block,
docutils.nodes.line_block,
docutils.nodes.table,
)
def apply(self):
for node in self.document.traverse(docutils.nodes.block_quote):
if len(node.children) == 1 and \
isinstance(node.children[0],
self.suppress_blockquote_child_nodes):
node.replace_self(node.children[0])
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin.py %s" % sig
signode += sphinx.addnodes.desc_name(title, title)
return sig
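# Illustrative reST usage (editorial addition) of the cross-reference types registered in
# setup(); the target names (DEBUG, for, date, exact) are invented for the example:
#   .. setting:: DEBUG
#   See :setting:`DEBUG`, the :ttag:`for` tag, the :tfilter:`date` filter and the
#   :lookup:`exact` field lookup.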
|
wathen/PhD
|
MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/Hartman2D/Laplacian.py
|
Python
|
mit
| 1,981
| 0.014134
|
from dolfin import *
import numpy as np
import pandas as pd
n = 6
Dim = np.zeros((n,1))
ErrorL2 = np.zeros((n,1))
ErrorH1 = np.zeros((n,1))
OrderL2 = np.zeros((n,1))
OrderH1 = np.zeros((n,1))
# parameters['reorder_dofs_serial'] = False
for x in range(1,n+1):
parameters['form_compiler']['quadrature_degree'] = -1
mesh = UnitSquareMesh(2**x,2**x)
V = VectorFunctionSpace(mesh, "CG", 2)
class u_in(Expression):
def __init__(self):
self.p = 1
def eval_cell(self, values, x, ufc_cell):
values[0] = x[0]*x[0]*x[0]
values[1] = x[1]*x[1]*x[1]
def value_shape(self):
return (2,)
class F_in(Expression):
def __init__(self):
self.p = 1
def eval_cell(self, values, x, ufc_cell):
values[0] = -6*x[0]
values[1] = -6*x[1]
def value_shape(self):
return (2,)
u0 = u_in()
F = F_in()
u = TrialFunction(V)
v = TestFunction(V)
a = inner(grad(u), grad(v))*dx
L = inner(F, v)*dx
def boundary(x, on_boundary):
return on_boundary
bc = DirichletBC(V, u0, boundary)
u = Function(V)
|
solve(a == L, u, bcs=bc,
solver_parameters={"linear_solver": "lu"},
form_compiler_parameters={"optimize": True})
parameters['form_compiler']['quadrature_degree'] = 8
Vexact = VectorFunctionSpace(mesh, "CG", 4)
ue = interpolate(u0, Vexact)
|
e = ue - u
Dim[x-1] = V.dim()
ErrorL2[x-1] = sqrt(abs(assemble(inner(e,e)*dx)))
ErrorH1[x-1] = sqrt(abs(assemble(inner(grad(e),grad(e))*dx)))
if (x > 1):
OrderL2[x-1] = abs(np.log2(ErrorL2[x-1]/ErrorL2[x-2]))
OrderH1[x-1] = abs(np.log2(ErrorH1[x-1]/ErrorH1[x-2]))
TableTitles = ["DoF","L2-erro","L2-order","H1-error","H1-order"]
TableValues = np.concatenate((Dim,ErrorL2,OrderL2,ErrorH1,OrderH1),axis=1)
Table = pd.DataFrame(TableValues, columns = TableTitles)
pd.set_option('precision',3)
print Table
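# Editorial remark: for the manufactured solution u = (x^3, y^3) the source term is
# F = -laplacian(u) = (-6x, -6y), so with CG degree-2 elements the table above should show
# roughly 3rd-order convergence of the L2 error and 2nd-order convergence of the H1 error.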
|
zhaochl/python-utils
|
utils/thread/time_thread.py
|
Python
|
apache-2.0
| 846
| 0.01773
|
#!/usr/bin/env python
# coding=utf-8
import threading
import time
class timer(threading.Thread): #The timer class is derived
|
from the class threading.Thread
def __init__(self, num, interval):
threading.Thread.__init__(self)
self.thread_num = num
self.interval =
|
interval
self.thread_stop = False
def run(self): #Overwrite run() method, put what you want the thread do here
while not self.thread_stop:
            print 'Thread Object(%d), Time:%s\n' %(self.thread_num, time.ctime())
time.sleep(self.interval)
def stop(self):
self.thread_stop = True
def test():
thread1 = timer(1, 1)
thread2 = timer(2, 2)
thread1.start()
thread2.start()
time.sleep(10)
thread1.stop()
thread2.stop()
return
if __name__ == '__main__':
test()
|
caryben/Ubuntu-bug-fixes
|
hidden_network_workaround.py
|
Python
|
mit
| 357
| 0.005602
|
# My computer was failing to recognize wifi networks after being woken up from sleep so this uses the network manager command
# line tool to force my computer to recognize the network I type in to the
|
terminal.
import subprocess
network_name = raw_input("What is the name of your network? ")
subprocess.check_call(['nmcli',
|
'c', 'up', 'id', network_name])
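# Editorial note: the call above is equivalent to running, e.g.
#   nmcli c up id "MyHomeWifi"
# where "MyHomeWifi" stands in for whatever network name was typed at the prompt.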
|
fbradyirl/home-assistant
|
tests/components/system_log/__init__.py
|
Python
|
apache-2.0
| 42
| 0
|
"""Tests
|
for the sys
|
tem_log component."""
|
metis-ai/yowsup
|
yowsup/structs/protocoltreenode.py
|
Python
|
gpl-3.0
| 4,746
| 0.00906
|
import binascii
import sys
class ProtocolTreeNode(object):
def __init__(self, tag, attributes = None, children = None, data = None):
self.tag = tag
self.attributes = attributes or {}
self.children = children or []
self.data = data
assert type(self.children) is list, "Children must be a list, got %s" % type(self.children)
def __eq__(self, protocolTreeNode):
"""
:param protocolTreeNode: ProtocolTreeNode
:return: bool
"""
#
if protocolTreeNode.__class__ == ProtocolTreeNode\
and self.tag == protocolTreeNode.tag\
and self.data == protocolTreeNode.data\
and self.attributes == protocolTreeNode.attributes\
and len(self.getAllChildren()) == len(protocolTreeNode.getAllChildren()):
found = False
for c in self.getAllChildren():
for c2 in protocolTreeNode.getAllChildren():
if c == c2:
found = True
break
if not found:
return False
found = False
for c in protocolTreeNode.getAllChildren():
for c2 in self.getAllChildren():
if c == c2:
found = True
break
if not found:
return False
return True
return False
def __hash__(self):
return hash(self.tag) ^ hash(tuple(self.attributes.items())) ^ hash(self.data)
def toString(self):
out = "<"+self.tag
if self.attributes is not None:
for key,val in self.attributes.items():
out+= " "+key+'="'+val+'"'
out+= ">\n"
if self.data is not None:
if type(self.data) is bytearray:
try:
out += "%s" % self.data.decode()
except UnicodeDecodeError:
out += binascii.hexlify(self.data)
else:
try:
out += "%s" % self.data
except UnicodeDecodeError:
try:
out += "%s" % self.data.decode()
except UnicodeDecodeError:
out += binascii.hexlify(self.data)
if type(self.data) is str and sys.version_info >= (3,0):
out += "\nHEX3:%s\n" % binascii.hexlify(self.data.encode('latin-1'))
else:
out += "\nHEX:%s\n" % binascii.hexlify(self.data)
for c in self.children:
try:
out += c.toString()
except UnicodeDecodeError:
out += "[ENCODED DATA]\n"
out+= "</"+self.tag+">\n"
return out
def __str__(self):
return self.toString()
def getData(self):
return self.data
def setData(self, data):
self.data = data
@staticmethod
def tagEquals(node,string):
return node is not None and node.tag is not None and node.tag == string
@staticmethod
def require(node,string):
if not ProtocolTreeNode.tagEquals(node,string):
raise Exception("failed require. string: "+string);
def __getitem__(self, key):
|
return self.getAttributeValue(key)
def __setitem__(self, key, val):
self.setAttribute(key, val)
def __delitem__(self, key):
self.removeAttribute(key)
def getChild(self,identifier):
if type(identifier) == int:
if len(self.childre
|
n) > identifier:
return self.children[identifier]
else:
return None
for c in self.children:
if identifier == c.tag:
return c
return None
def hasChildren(self):
return len(self.children) > 0
def addChild(self, childNode):
self.children.append(childNode)
def addChildren(self, children):
for c in children:
self.addChild(c)
def getAttributeValue(self,string):
try:
return self.attributes[string]
except KeyError:
return None
def removeAttribute(self, key):
if key in self.attributes:
del self.attributes[key]
def setAttribute(self, key, value):
self.attributes[key] = value
def getAllChildren(self,tag = None):
ret = []
if tag is None:
return self.children
for c in self.children:
if tag == c.tag:
ret.append(c)
return ret
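# Illustrative sketch (editorial addition); tag, attribute and data values are invented:
#   node = ProtocolTreeNode("message", {"id": "1"}, [ProtocolTreeNode("body", data="hi")])
#   node["to"] = "alice"                      # __setitem__ -> setAttribute
#   print node.getChild("body").getData()     # prints: hi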
|
antoinecarme/pyaf
|
tests/periodicities/Month/Cycle_Month_200_M_7.py
|
Python
|
bsd-3-clause
| 81
| 0.049383
|
import t
|
ests.periodicities.period_test as per
per.buildModel((7 , 'M' , 200));
| |
gc3-uzh-ch/django-simple-poll
|
voting/urls.py
|
Python
|
agpl-3.0
| 833
| 0.003601
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from rest_framework import viewsets, routers
from voting_app.models import Topic
from voting_app.views import Vote
from voting_app.serializer import TopicSerializer
admin.autodiscover()
# ViewSets define the view behavior.
class TopicViewSet(vi
|
ewsets.ModelViewSet):
model = Topic
serializer_class = TopicSerializer
queryset = Topic.objects.all().filter(hide=False)
router = routers.DefaultRouter()
router.register(r'topics', TopicViewSet)
urlpatterns = patterns('',
url(r'^$', 'voting_app.views.index', name='index'),
url(r'^', include(router.urls)),
url(r'^vote/$', Vote.as_view()),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_frame
|
work')),
url(r'^admin/', include(admin.site.urls)),
)
|
2013Commons/hue
|
desktop/libs/libsaml/src/libsaml/conf.py
|
Python
|
apache-2.0
| 4,427
| 0.005195
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from django.utils.translation import ugettext_lazy as _t, ugettext as _
from desktop.lib.conf import Config, coerce_bool, coerce_csv
BASEDIR = os.path.dirname(os.path.abspath(__file__))
USERNAME_SOURCES = ('attributes', 'nameid')
def dict_list_map(value):
if isinstance(value, str):
d = {}
for k, v in json.loads(value).iteritems():
d[k] = (v,)
return d
elif isinstance(value, dict):
return value
return None
XMLSEC_BINARY = Config(
key="xmlsec_binary",
default="/usr/local/bin/xmlsec1",
type=str,
help=_t("Xmlsec1 binary path. This program should be executable by the user running Hue."))
ENTITY_ID = Config(
key="entity_id",
default="<base_url>/saml2/metadata/",
type=str,
help=_t("Entity ID for Hue acting as service provider. Can also accept a pattern where '<base_url>' will be replaced with server URL base."))
CREATE_USERS_ON_LOGIN = Config(
key="create_users_on_login",
default=True,
type=coerce_bool,
help=_t("Create users from IdP on login."))
ATTRIBUTE_MAP_DIR = Config(
key="attribute_map_dir",
default=os.path.abspath( os.path.join(BASEDIR, '..', '..', 'attribute-maps') ),
type=str,
private=True,
help=_t("Attribute map directory contains files that map SAML attributes to pysaml2 attributes."))
ALLOW_UNSOLICITED = Config(
key="allow_unsolicited",
default=True,
type=coerce_bool,
private=True,
help=_t("Allow responses that are initiated by the IdP."))
REQUIRED_ATTRIBUTES = Config(
key="required_attributes",
default=['uid'],
type=coerce_csv,
help=_t("Required attributes to ask for from IdP."))
OPTIONAL_ATTRIBUTES = Config(
key="optional_attributes",
default=[],
type=coerce_csv,
help=_t("Optional attributes to ask for from IdP."))
METADATA_FILE = Config(
key="metadata_file",
default=os.path.abspath( os.path.join(BASEDIR, '..', '..', 'examples', 'idp.xml') ),
type=str,
help=_t("IdP metadata in the form of a file. This is generally an XML file containing metadata that the Identity Provider generates."))
KEY_FILE = Config(
key="key_file",
default="",
type=str,
help=_t("key_file is the name of a PEM formatted file that contains the private key of the Hue service. This is presently used both to encr
|
ypt/sign assertions and as client key in a HTTPS session."))
CERT_FILE = Config(
key="cert_file",
default="",
type=str,
help=_t("This is the public part of the service private/public key pair. cert_file must
|
be a PEM formatted certificate chain file."))
USER_ATTRIBUTE_MAPPING = Config(
key="user_attribute_mapping",
default={'uid': ('username', )},
type=dict_list_map,
help=_t("A mapping from attributes in the response from the IdP to django user attributes."))
AUTHN_REQUESTS_SIGNED = Config(
key="authn_requests_signed",
default=False,
type=coerce_bool,
help=_t("Have Hue initiated authn requests be signed and provide a certificate."))
LOGOUT_REQUESTS_SIGNED = Config(
key="logout_requests_signed",
default=False,
type=coerce_bool,
help=_t("Have Hue initiated logout requests be signed and provide a certificate."))
USERNAME_SOURCE = Config(
key="username_source",
default="attributes",
type=str,
help=_t("Username can be sourced from 'attributes' or 'nameid'"))
LOGOUT_ENABLED = Config(
key="logout_enabled",
default=True,
type=coerce_bool,
help=_t("Performs the logout or not."))
def config_validator(user):
res = []
if USERNAME_SOURCE.get() not in USERNAME_SOURCES:
res.append(("libsaml.username_source", _("username_source not configured properly. SAML integration may not work.")))
return res
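# Illustrative behaviour of dict_list_map() above (editorial addition, values invented):
#   dict_list_map('{"uid": "username"}')   ->  {'uid': ('username',)}
#   dict_list_map({'uid': ('username',)})  ->  the dict is returned unchanged
#   dict_list_map(42)                      ->  None (neither str nor dict)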
|
baalkor/timetracking
|
opconsole/migrations/0027_device_name.py
|
Python
|
apache-2.0
| 468
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-05 20:10
from __future__ import unicode_liter
|
als
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('opconsole', '0026_auto_20170504_2048'),
]
operations = [
migrations.AddField(
model_name='device',
name='name',
field=models.CharField(
|
default=b'unnamed', max_length=255),
),
]
|
bt3gl/Plotting-in-Linux
|
grace/src/symbol_mapping.py
|
Python
|
mit
| 8,291
| 0.014353
|
"""
Module to translate various names (unicode, LaTeX & other text) for characters to encodings in the Symbol font standard encodings.
Also, provide grace markup strings for them.
It recognizes unicode names for the greek alphabet and most of the useful symbols in the Symbol font.
Marcus Mendenhall, Vanderbilt University, 2006
symbol_mapping.py,v 1.2 2009/04/03 00:32:07 mendenhall Exp
"""
# a tuple of tuples: each entry gives the position of the character in the standard Symbol encoding, followed by its aliases
_symbols=[
(0xa0, u"\u2202", "euro"),
(0xa1, u"\u03d2", "upshook"),
(0xa2, u"\u02b9", "prime"),
(0xa3, u"\u2264", "leq", "lessequal"),
(0xa4, u"\u2044", "fraction", "fractionslash"),
(0xa5, u"\u221E", "infinity", "infty"),
(0xa6, u"\u0192", "f", "function", "fhook"),
(0xa7, u"\u2663", "club"),
(0xa8, u"\u2666", "diamond"),
(0xa9, u"\u2665", "heart"),
(0xaa, u"\u2660", "spade"),
(0xab, u"\u2194", "leftrightarrow", "lrarrow"),
(0xac, u"\u2190", "leftarrow", "larrow"),
(0xad, u"\u2191", "uparrow"),
(0xae, u"\u2192", "rightarrow", "rarrow"),
(0xaf, u"\u2193", "downarrow"),
(0xb0, u"\u00b0", "degree"),
(0xb1, u"\u00b1", "plusminus"),
(0xb2, u"\u02ba", "primeprime", "doubleprime", "prime2"),
(0xb3, u"\u2265", "geq", "greaterequal"),
(0xb4, u"\u00d7", "times"),
(0xb5, u"\u221d", "proportional", "propto"),
(0xb6, u"\u2202", "partial"),
(0xb7, u"\u2022", "cdot", "bullet"),
(0xb8, u"\u00f7", "divide"),
(0xb9, u"\u2260", "notequal", "neq"),
(0xba, u"\u2261", "equiv", "equivalence" ),
(0xbb, u"\u2248", "approx", "almostequal"),
(0xbc, u"\u2026", "ellipsis", "3dots"),
(0xbd, u"\u007c", "vertical", "solidus"),
(0xbe, u"\u23af", "horizontal", "longbar"),
(0xbf, u"\u21b5", "downleftarrow"),
(0xc0, u"\u2135", "aleph", "alef"),
(0xc1, u"\u2111", "script-letter-I"),
(0xc2, u"\u211c", "script-letter-R"),
(0xc3, u"\u2118", "script-letter-P"),
(0xc4, u"\u2297", "circled-times"),
(0xc5, u"\u2295", "circled-plus"),
(0xc6, u"\u2205", "emptyset"),
(0xc7, u"\u2229", "intersection"),
(0xc8, u"\u222a", "union"),
(0xc9, u"\u2283", "superset"),
(0xca, u"\u2287", "superset-or-equal"),
(0xcb, u"\u2284", "not-subset"),
(0xcc, u"\u2282", "subset"),
(0xcd, u"\u2286", "subset-or-equal"),
(0xce, u"\u2208", "element"),
(0xcf, u"\u2209", "not-element"),
(0xd0, u"\u2220", "angle"),
(0xd1, u"\u2207", "del", "nabla", "gradient"),
(0xd2, u"\uf8e8", "registered-serif"),
(0xd3, u"\uf8e9", "copyright-serif"),
(0xd4, u"\uf8ea", "trademark-serif"),
(0xd5, u"\u220f", "product"),
(0xd6, u"\u221a", "sqrt", "radical", "root"),
(0xd7, u"\u22c5", "cdot", "center-dot", "dot-operator"),
(0xd8, u"\u00ac", "not"),
(0xd9, u"\u2227", "logical-and", "conjunction"),
(0xda, u"\u2228", "logical-or", "disjunction", "alternation"),
(0xdb, u"\u21d4", "left-right-double-arrow", "iff"),
(0xdc, u"\u21d0", "left-double-arrow"),
(0xdd, u"\u21d1", "up-double-arrow"),
(0xde, u"\u21d2", "right-double-arrow", "implies"),
(0xdf, u"\u21d3", "down-double-arrow"),
(0xe0, u"\u25ca", "lozenge"),
(0xe1, u"\u3008", "left-angle-bracket", "langle"),
(0xe2, u"\u00ae", "registered-sans"),
(0xe3, u"\u00a9", "copyright-sans"),
(0xe4, u"\u2122", "trademark-sans"),
(0xe5, u"\u2211", "sum"),
(0xf2, u"\u2228", "integral"),
]
#insert unicodes for official greek letters, so a pure unicode string with these already properly encoded will translate correctly
_greekorder='abgdezhqiklmnxoprvstufcyw'
_greekuppernames=['Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon', 'Zeta', 'Eta', 'Theta', 'Iota',
'Kappa', 'Lambda', 'Mu', 'Nu', 'Xi', 'Omicron', 'Pi', 'Rho', 'finalsigma',
'Sigma', 'Tau', 'Upsilon', 'Phi', 'Chi', 'Psi', 'Omega']
# note that the @ sign is in the place of the 'final sigma' character in the standard greek alphabet mapping for the symbol font
_ugrcaps="".join([unichr(x) for x in range(0x391, 0x391+len(_greekorder))])
_symbols += zip([ord(x) for x in _greekorder.upper()], _ugrcaps)
_symbols += zip([ord(x) for x in _greekorder.upper()], _greekuppernames)
_ugrlower="".join([unichr(x) for x in range(0x3b1, 0x3b1+len(_greekorder))])
|
_symbols += zip([ord(x) for x in _greekorder], _ugrlower)
_greeklowernames=[x.lower() for x in _greekuppernames]
_greeklowernames[_greeklowernames.index('finalsigma')]='altpi'
_symbols += zip([ord(x) for x in _greekorder], [x.lower() for x in _greeklowernames])
gracedict={}
for tt in _symbols:
if tt[0] > 0x20 and tt[0] < 0x7f:
vstr=r"\x"+chr(tt[0])+r"\f{}"
else:
vstr=r"\x\#{%02x}\f{}" % tt[0]
for tag in tt[1:]:
gracedict[tag]=vstr
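# Editorial example: after this loop, gracedict['alpha'] is r"\xa\f{}" -- switch to the
# Symbol font, emit 'a' (Symbol's alpha glyph), then switch back to the previous font.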
_normal
|
ascii="".join([chr(i) for i in range(32,127)])
_normalucode=unicode(_normalascii)
def remove_redundant_changes(gracestring):
"""collapse out consecutive font-switching commands so that \xabc\f{}\xdef\f{} becomes \xabcdef\f{}"""
while(1):
xs=gracestring.find(r"\f{}\x")
if xs<0: break
if xs >=0:
gracestring=gracestring[:xs]+gracestring[xs+6:]
return gracestring
def translate_unicode_to_grace(ucstring):
"""take a string consisting of unicode characters for a mixture of normal characters
and characters which map to glyphs in Symbol and create a Grace markup string from it"""
outstr=""
for uc in ucstring:
if uc in _normalucode:
outstr+=str(uc) #better exist in ascii
else:
outstr+=gracedict.get(uc,"?")
return remove_redundant_changes(outstr)
def format_python_to_grace(pystring):
"""take a string with %(alpha)s%(Upsilon)s type coding, and make a Grace markup string from it"""
return remove_redundant_changes(pystring % gracedict)
if __name__=="__main__":
# a ur"foo" string is raw unicode, iin which only \u is interpreted, so it is good for grace escapes
print translate_unicode_to_grace(ur"Hello\xQ\f{}\u0391\u03b1\u2227\u22c5\u03c8\u03a8\u03c9\u03a9")
import sys
import time
import os
import GracePlot
class myGrace(GracePlot.GracePlot):
def write_string(self, text="", font=0, x=0.5, y=0.5, size=1.0, just=0, color=1, coordinates="world", angle=0.0):
strg="""with string
string on
string loctype %(coordinates)s
string %(x)g, %(y)g
string color %(color)d
string rot %(angle)f
string font %(font)d
string just %(just)d
string char size %(size)f
string def "%(text)s"
""" % locals()
self.write(strg)
c=GracePlot.colors
stylecolors=[c.green, c.blue, c.red, c.orange, c.magenta, c.black]
s1, s2, s3, s4, s5, s6 =[
GracePlot.Symbol(symbol=GracePlot.symbols.circle, fillcolor=sc, size=0.3,
linestyle=GracePlot.lines.none) for sc in stylecolors
]
l1, l2, l3, l4, l5, l6=[
GracePlot.Line(type=GracePlot.lines.solid, color=sc, linewidth=2.0) for sc in stylecolors]
noline=GracePlot.Line(type=GracePlot.lines.none)
graceSession=myGrace(width=11, height=8)
g=graceSession[0]
g.xlimit(-1,16)
g.ylimit(-1,22)
for row in range(16):
for col in range(16):
row*16+col
graceSession.write_string(text=r"\x\#{%02x}"%(row*16+col), x=col, y=row, just=2, color=1, size=1.5)
alphabet="".join(map(lambda x: "%("+x+")s", _greeklowernames)) +"%(aleph)s %(trademark-serif)s %(trademark-sans)s"
print alphabet
graceSession.write_string(text=format_python_to_grace(alphabet), x=0, y=17, just=0, color=1, size=1.5)
alphabet="".join(map(lambda x: "%("+x+")s", _greekuppernames))
print alphabet
graceSession.write_string(text=format_python_to_grace(alphabet), x=0, y=18, just=0, color=1, size=1.5)
alphabet=u"\N{GREEK CAPITAL LETTER PSI} \N{NOT SIGN} Goodbye \N{CIRCLED TIMES}"
graceSession.write_string(text
|
NINAnor/QGIS
|
tests/src/python/test_qgseditwidgets.py
|
Python
|
gpl-2.0
| 2,315
| 0.000864
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for edit widgets.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthia
|
s Kuhn'
__date__ = '20/05/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
import os
from qgis.core import QgsFeature, QgsGeometry, QgsPoint, QgsVectorLayer, NULL
from qgis.gui import QgsEditorWidgetRegistry
from PyQt4 import QtCore
from qgis.testing import (start_app,
unittest
|
)
from utilities import unitTestDataPath
start_app()
class TestQgsTextEditWidget(unittest.TestCase):
@classmethod
def setUpClass(cls):
QgsEditorWidgetRegistry.initEditors()
def createLayerWithOnePoint(self):
self.layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
"addfeat", "memory")
pr = self.layer.dataProvider()
f = QgsFeature()
f.setAttributes(["test", 123])
f.setGeometry(QgsGeometry.fromPoint(QgsPoint(100, 200)))
assert pr.addFeatures([f])
assert self.layer.pendingFeatureCount() == 1
return self.layer
def doAttributeTest(self, idx, expected):
reg = QgsEditorWidgetRegistry.instance()
configWdg = reg.createConfigWidget('TextEdit', self.layer, idx, None)
config = configWdg.config()
editwidget = reg.create('TextEdit', self.layer, idx, config, None, None)
editwidget.setValue('value')
assert editwidget.value() == expected[0]
editwidget.setValue(123)
assert editwidget.value() == expected[1]
editwidget.setValue(None)
assert editwidget.value() == expected[2]
editwidget.setValue(NULL)
assert editwidget.value() == expected[3]
def test_SetValue(self):
self.createLayerWithOnePoint()
self.doAttributeTest(0, ['value', '123', NULL, NULL])
self.doAttributeTest(1, [NULL, 123, NULL, NULL])
if __name__ == '__main__':
unittest.main()
|
kozlovsky/ponymodules
|
main.py
|
Python
|
mit
| 707
| 0.004243
|
# This is the example of main program file which imports entities,
# connects to the database, drops/creates specified tables
# and populate some data to the database
from pony.orm import * # or just import db
|
_session, etc.
import all_entities # This command m
|
ake sure that all entities are imported
from base_entities import db # Will bind this database
from db_settings import current_settings # binding params
db.bind(*current_settings['args'], **current_settings['kwargs'])
from db_utils import connect
from db_loading import populate_database
if __name__ == '__main__':
sql_debug(True)
connect(db, drop_and_create='ALL') # drop_and_create=['Topic', 'Comment'])
populate_database()
|
zstang/learning-python-the-hard-way
|
ex4.py
|
Python
|
mit
| 621
| 0.00161
|
# -
|
*- coding: utf-8 -*-
#
# exercise 4: variables and names
#
cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
pri
|
nt "There are", cars, "cars avaliable."
print "There are only", drivers, "drivers avaliable."
print "There will be", cars_not_driven, "empty cars today."
print "We can transport", carpool_capacity, "prople today."
print "We have", passengers, "to carpool today"
print "We need to put about", average_passengers_per_car, "in each car."
|
mpasternak/pyglet-fix-issue-552
|
pyglet/image/codecs/pypng.py
|
Python
|
bsd-3-clause
| 41,571
| 0.000385
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# png.py - PNG encoder in pure Python
# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
# <ah> Modifications for pyglet by Alex Holkner <alex.holkner@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Contributors (alphabetical):
# Nicko van Someren <nicko@nicko.org>
#
# Changelog (recent first):
# 2006-06-17 Nicko: Reworked into a class, faster interlacing.
# 2006-06-17 Johann: Very simple prototype PNG decoder.
# 2006-06-17 Nicko: Test suite with various image generators.
# 2006-06-17 Nicko: Alpha-channel, grey-scale, 16-bit/plane support.
# 2006-06-15 Johann: Scanline iterator interface for large input files.
# 2006-06-09 Johann: Very simple prototype PNG encoder.
"""
Pure Python PNG Reader/Writer
This is an implementation of a subset of the PNG specification at
http://www.w3.org/TR/2003/REC-PNG-20031110 in pure Python. It reads
and writes PNG files with 8/16/24/32/48/64 bits per pixel (greyscale,
RGB, RGBA, with 8 or 16 bits per layer), with a number of options. For
help, type "import png; help(png)" in your python interpreter.
This file can also be used as a command-line utility to convert PNM
files to PNG. The interface is similar to that of the pnmtopng program
from the netpbm package. Type "python png.py --help" at the shell
prompt for usage and a list of options.
"""
__revision__ = '$Rev$'
__date__ = '$Date$'
__author__ = '$Author$'
import sys
import zlib
import struct
import math
from array import array
from pyglet.compat import asbytes
_adam7 = ((0, 0, 8, 8),
(4, 0, 8, 8),
(0, 4, 4, 8),
(2, 0, 4, 4),
(0, 2, 2, 4),
(1, 0, 2, 2),
(0, 1, 1, 2))
def interleave_planes(ipixels, apixels, ipsize, apsize):
"""
Interleave color planes, e.g. RGB + A = RGBA.
Return an array of pixels consisting of the ipsize bytes of data
from each pixel in ipixels followed by the apsize bytes of data
from each pixel in apixels, for an image of size width x height.
"""
itotal = len(ipixels)
atotal = len(apixels)
newtotal = itotal + atotal
newpsize = ipsize + apsize
# Set up the output buffer
out = array('B')
# It's annoying that there is no cheap way to set the array size :-(
out.extend(ipixels)
out.extend(apixels)
# Interleave in the pixel data
for i in range(ipsize):
out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
for i in range(apsize):
out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
return out
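# Illustrative example (editorial addition, pixel values invented):
#   rgb   = array('B', [10, 20, 30, 40, 50, 60])   # two RGB pixels
#   alpha = array('B', [255, 128])                  # matching alpha plane
#   interleave_planes(rgb, alpha, 3, 1)
#   # -> array('B', [10, 20, 30, 255, 40, 50, 60, 128])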
class Error(Exception):
pass
class Writer:
"""
PNG encoder in pure Python.
"""
def __init__(self, width, height,
transparent=None,
background=None,
gamma=None,
greyscale=False,
has_alpha=False,
bytes_per_sample=1,
compression=None,
interlaced=False,
chunk_limit=2**20):
"""
Create a PNG encoder object.
Arguments:
width, height - size of the image in pixels
transparent - create a tRNS chunk
background - create a bKGD chunk
gamma - create a gAMA chunk
greyscale - input data is greyscale, not RG
|
B
has_alpha - input data has alpha channel (RGBA)
bytes_per_sample - 8-bit or 16-bit input data
com
|
pression - zlib compression level (1-9)
chunk_limit - write multiple IDAT chunks to save memory
If specified, the transparent and background parameters must
be a tuple with three integer values for red, green, blue, or
a simple integer (or singleton tuple) for a greyscale image.
If specified, the gamma parameter must be a float value.
"""
if width <= 0 or height <= 0:
raise ValueError("width and height must be greater than zero")
if has_alpha and transparent is not None:
raise ValueError(
"transparent color not allowed with alpha channel")
if bytes_per_sample < 1 or bytes_per_sample > 2:
raise ValueError("bytes per sample must be 1 or 2")
if transparent is not None:
if greyscale:
if type(transparent) is not int:
raise ValueError(
"transparent color for greyscale must be integer")
else:
if not (len(transparent) == 3 and
type(transparent[0]) is int and
type(transparent[1]) is int and
type(transparent[2]) is int):
raise ValueError(
"transparent color must be a triple of integers")
if background is not None:
if greyscale:
if type(background) is not int:
raise ValueError(
"background color for greyscale must be integer")
else:
if not (len(background) == 3 and
type(background[0]) is int and
type(background[1]) is int and
type(background[2]) is int):
raise ValueError(
"background color must be a triple of integers")
self.width = width
self.height = height
self.transparent = transparent
self.background = background
self.gamm
|
BrainTech/openbci
|
obci/control/gui/obci_log_model_dummy.py
|
Python
|
gpl-3.0
| 732
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import obci_log_model
class DummyLogModel(obci_log_model.LogModel):
def __init__(self):
super(DummyLogModel, self).__init__()
self._ind = 0
self._peers_log = {'amplifier':
{'peer_id':
|
'amplifier', 'logs': []},
|
'mx':
{'peer_id': 'mx', 'logs': []}
} # 'logs keyed by peer id
def next_log(self):
time.sleep(0.05)
self._ind += 1
if self._ind % 2 == 0:
return 'amplifier', 'AMP ' + str(self._ind)
else:
return 'mx', 'MX ' + str(self._ind)
def post_run(self):
pass
|
sqlalchemy/sqlalchemy
|
lib/sqlalchemy/dialects/mysql/expression.py
|
Python
|
mit
| 4,164
| 0
|
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
import typing
from ... import exc
from ... import util
from ...sql import coercions
from ...sql import elements
from ...sql import operators
from ...sql import roles
from ...sql.base import _generative
from ...sql.base import Generative
Selfmatch = typing.TypeVar("Selfmatch", bound="match")
class match(Generative, elements.BinaryExpression):
"""Produce a ``MATCH (X, Y) AGAINST ('TEXT')`` clause.
E.g.::
from sqlalchemy import desc
from sqlalchemy.dialects.mysql import match
match_expr = match(
users_table.c.firstname,
users_table.c.lastname,
against="Firstname Lastname",
)
stmt = (
|
select(users_table)
.where(match_expr.in_boolean_mode())
.order_by(desc(match_expr))
)
Would produce SQL resembling::
SELECT id, firstname, lastname
FROM user
WHERE MATCH(firstname, lastname) AGAINST (:param_1 IN BOOL
|
EAN MODE)
ORDER BY MATCH(firstname, lastname) AGAINST (:param_2) DESC
The :func:`_mysql.match` function is a standalone version of the
:meth:`_sql.ColumnElement.match` method available on all
SQL expressions, as when :meth:`_expression.ColumnElement.match` is
    used, but allows passing multiple columns.
:param cols: column expressions to match against
:param against: expression to be compared towards
:param in_boolean_mode: boolean, set "boolean mode" to true
    :param in_natural_language_mode: boolean, set "natural language" to true
:param with_query_expansion: boolean, set "query expansion" to true
.. versionadded:: 1.4.19
.. seealso::
:meth:`_expression.ColumnElement.match`
"""
__visit_name__ = "mysql_match"
inherit_cache = True
def __init__(self, *cols, **kw):
if not cols:
raise exc.ArgumentError("columns are required")
against = kw.pop("against", None)
if against is None:
raise exc.ArgumentError("against is required")
against = coercions.expect(
roles.ExpressionElementRole,
against,
)
left = elements.BooleanClauseList._construct_raw(
operators.comma_op,
clauses=cols,
)
left.group = False
flags = util.immutabledict(
{
"mysql_boolean_mode": kw.pop("in_boolean_mode", False),
"mysql_natural_language": kw.pop(
"in_natural_language_mode", False
),
"mysql_query_expansion": kw.pop("with_query_expansion", False),
}
)
if kw:
raise exc.ArgumentError("unknown arguments: %s" % (", ".join(kw)))
super(match, self).__init__(
left, against, operators.match_op, modifiers=flags
)
@_generative
def in_boolean_mode(self: Selfmatch) -> Selfmatch:
"""Apply the "IN BOOLEAN MODE" modifier to the MATCH expression.
:return: a new :class:`_mysql.match` instance with modifications
applied.
"""
self.modifiers = self.modifiers.union({"mysql_boolean_mode": True})
return self
@_generative
def in_natural_language_mode(self: Selfmatch) -> Selfmatch:
"""Apply the "IN NATURAL LANGUAGE MODE" modifier to the MATCH
expression.
:return: a new :class:`_mysql.match` instance with modifications
applied.
"""
self.modifiers = self.modifiers.union({"mysql_natural_language": True})
return self
@_generative
def with_query_expansion(self: Selfmatch) -> Selfmatch:
"""Apply the "WITH QUERY EXPANSION" modifier to the MATCH expression.
:return: a new :class:`_mysql.match` instance with modifications
applied.
"""
self.modifiers = self.modifiers.union({"mysql_query_expansion": True})
return self
|
jurkov/Bytebot
|
plugins/parking.py
|
Python
|
mit
| 2,182
| 0
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import urllib2
import json
from plugins.plugin import Plugin
from time import time
from bytebot_config import BYTEBOT_HTTP_TIMEOUT, BYTEBOT_HTTP_MAXSIZE
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
class parking(Plugin):
def __init__(self):
pass
def registerCommand(self, irc):
irc.registerCommand('!parking', 'Parken')
def _get_parking_status(self):
url = BYTEBOT_PLUGIN_CONFIG['parking']['url']
data = urllib2.urlopen(url, timeout=BYTEBOT_HTTP_TIMEOUT).read(
BYTEBOT_HTTP_MAXSIZE)
data = unicode(data, errors='ignore')
ret = json.loads(data)
return ret
def onPrivmsg(self, irc, msg, channel, user):
if msg.find('!parking') == -1:
return
self.irc = irc
self.channel = channel
try:
last_parking = irc.last_parking
except Exception as e:
last_parking = 0
if last_parking < (time() - 60):
try:
data = self._get_parking_status()
irc.msg(channel, 'Free parking lots:')
for x in range(1, len(data)):
name = data[x][u'name'].encode('ascii', 'ignore')
occupied = int(data[x][u'belegt'].encode('ascii',
'ignore'))
spaces = int(data[x][u'maximal'].encode('ascii', 'ignore'))
                    if(occupied < 0):
occupied = 0
if(spaces <= 0):
print_str = '{:25s}: not available'.format(name)
else:
print_str = '{:25s}: '.format(name) + \
'{:3.0f} / '.format(spaces - occupied) + \
                            '{:3.0f}'.format(spaces)
irc.msg(channel, print_str)
irc.last_parking = time()
except Exception as e:
print(e)
irc.msg(channel, 'Error while fetching data.')
else:
irc.msg(channel, "Don't overdo it ;)")
|
dan-f/polypype
|
tests/test_polypipe.py
|
Python
|
mit
| 5,061
| 0
|
"""
TODO:
- Handle if file already exists
"""
import ctypes
import io
import os
import struct
from contextlib import contextmanager
import ddt
import mock
from unittest2 import TestCase
import tempfile
from polypype import _MAX_C_FLOAT, _MAX_C_UINT32, PolyPype
from polypype.exceptions import (
PolyPypeArgumentException,
PolyPypeException,
PolyPypeFileExistsException,
PolyPypeOverflowException,
PolyPypeTypeException,
)
@ddt.ddt
class PolyPypeTestCase(TestCase):
def setUp(self, *args, **kwargs):
self.test_filename = 'test_output'
self.polypype = PolyPype(self.test_filename)
self.addCleanup(
self.remove_file_if_exists,
self.polypype.output_filename
)
def remove_file_if_exists(self, filename):
if os.path.isfile(filename):
os.remove(filename)
def assert_next_ctype_equal(self, f, t, expected_value):
"""
        Given an open file, expect that the next 4 bytes encode the given c type
        with the expected value.
Arguments:
f (file): File to read from
t (str): `struct` library format string specifying a c type
expected_value (int, float): Numeric value of the expected c type
"""
self.assertEqual(
struct.unpack(t, f.read(4))[0],
ctypes.c_float(expected_value).value
)
@contextmanager
def open_output_file(self):
"""DRY helper for opening the PolyPype output file."""
with io.open(self.test_filename, 'rb') as f:
yield f
@ddt.data(
(0.1, [32, 5, 0.7]),
(0.35, [0]),
(1, [1, 2, 3]),
(1, [_MAX_C_FLOAT])
)
@ddt.unpack
def test_write_event(self, time_delta, params):
self.polypype.write_event(time_delta, params)
with self.open_output_file() as f:
self.assert_next_ctype_equal(f, '<f', time_delta)
self.assert_next_ctype_equal(f, '<I', len(params))
for param in params:
self.assert_next_ctype_equal(f, '<f', param)
@ddt.data(
(None, [None]),
('1', ['1']),
(list(), [list()]),
(dict(), [dict()])
)
@ddt.unpack
def test_bad_types(self, time_delta, params):
with self.assertRaises(PolyPypeTypeException):
self.polypype.write_event(time_delta, params)
@ddt.data(list(), dict(), set())
    def test_no_params(self, empty_container):
time_delta = 1
self.polypype.write_event(time_delta, empty_container)
with self.open_output_file() as f:
self.assert_next_ctype_equal(f, '<f', time_delta)
self.assert_next_ctype_equal(f, '<I', 0)
self.assertEqual(f.read(), '')
def test_too_many_params(self):
big_list = mock.MagicMock()
big_list.__len__ = mock.MagicMock(return_value=_MAX_C_UINT32 + 1)
with self.assertRaises(PolyPypeOverflowException):
self.polypype.write_event(1, big_list)
def test_param_too_large(self):
with self.assertRaises(PolyPypeOverflowException):
self.polypype.write_event(1, [_MAX_C_FLOAT * 2])
@mock.patch(
'polypype.struct.pack',
mock.Mock(side_effect=PolyPypeException)
)
def test_no_event_when_error(self):
"""
Verify that the event is not written to file if any error occurs.
"""
try:
self.polypype.write_event(1, [2])
except PolyPypeException:
pass
self.assertFalse(
os.path.isfile(self.test_filename),
'Expected output file not to exist.'
)
def test_file_already_exists(self):
with self.assertRaises(PolyPypeFileExistsException):
filename = tempfile.mkstemp()[1]
PolyPype(filename)
os.remove(filename)
def test_overwrite_file(self):
self.polypype.write_event(1, [2])
new_polypype = PolyPype(self.test_filename, overwrite_file=True)
new_polypype.write_event(3, [4])
with self.open_output_file() as f:
self.assert_next_ctype_equal(f, '<f', 3)
self.assert_next_ctype_equal(f, '<I', 1)
self.assert_next_ctype_equal(f, '<f', 4)
def test_append_to_file(self):
self.polypype.write_event(1, [2])
new_polypype = PolyPype(self.test_filename, append_to_file=True)
new_polypype.write_event(3, [4])
with self.open_output_file() as f:
self.assert_next_ctype_equal(f, '<f', 1)
self.assert_next_ctype_equal(f, '<I', 1)
self.assert_next_ctype_equal(f, '<f', 2)
self.assert_next_ctype_equal(f, '<f', 3)
self.assert_next_ctype_equal(f, '<I', 1)
self.assert_next_ctype_equal(f, '<f', 4)
def test_append_and_overwrite(self):
with self.assertRaises(PolyPypeArgumentException):
PolyPype(
self.test_filename,
append_to_file=True,
overwrite_file=True
)
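# Inferred from the assertions above (not part of the original test module):
# each event written by PolyPype appears to be laid out on disk as
#     <f time_delta> <I len(params)> <f param_0> ... <f param_n-1>
# i.e. little-endian 4-byte floats with a little-endian uint32 parameter count
# between the timestamp and the parameter values.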
|
Conjuro/async
|
channel.py
|
Python
|
bsd-3-clause
| 11,309
| 0.041648
|
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of async and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Contains a queue based channel implementation"""
from Queue import (
Empty,
Full
)
from util import (
AsyncQueue,
SyncQueue,
ReadOnly
)
from time import time
import threading
import sys
__all__ = ('Channel', 'SerialChannel', 'Writer', 'ChannelWriter', 'CallbackChannelWriter',
'Reader', 'ChannelReader', 'CallbackChannelReader', 'mkchannel', 'ReadOnly',
'IteratorReader', 'CallbackReaderMixin', 'CallbackWriterMixin')
#{ Classes
class Channel(object):
"""A channel is similar to a file like object. It has a write end as well as one or
more read ends. If Data is in the channel, it can be read, if not the read operation
will block until data becomes available.
If the channel is closed, any read operation will result in an exception
This base class is not instantiated directly, but instead serves as constructor
for Rwriter pairs.
Create a new channel """
__slots__ = 'queue'
# The queue to use to store the actual data
QueueCls = AsyncQueue
def __init__(self):
"""initialize this instance with a queue holding the channel contents"""
self.queue = self.QueueCls()
class SerialChannel(Channel):
"""A slightly faster version of a Channel, which sacrificed thead-safety for performance"""
QueueCls = SyncQueue
class Writer(object):
"""A writer is an object providing write access to a possibly blocking reading device"""
__slots__ = tuple()
#{ Interface
def __init__(self, device):
"""Initialize the instance with the device to write to"""
def write(self, item, block=True, timeout=None):
"""Write the given item into the device
:param block: True if the device may block until space for the item is available
:param timeout: The time in seconds to wait for the device to become ready
in blocking mode"""
raise NotImplementedError()
def size(self):
""":return: number of items already in the device, they could be read
|
with a reader"""
raise NotImplementedError()
def close(self):
"""Close the channel. Multiple close calls on a closed channel are no
an error"""
raise NotImplementedError()
def closed(self):
""":return: True if the channel was closed"""
raise NotImplementedError()
#} END interface
class ChannelWriter(Writer):
"""The write end of a channel, a file-like interface for a channel"""
__slots__ = ('channel', '_put')
def __init__(self, channel):
"""Initialize the writer to use the given channel"""
self.channel = channel
self._put = self.channel.queue.put
#{ Interface
def write(self, item, block=False, timeout=None):
return self._put(item, block, timeout)
def size(self):
return self.channel.queue.qsize()
def close(self):
"""Close the channel. Multiple close calls on a closed channel are no
an error"""
self.channel.queue.set_writable(False)
def closed(self):
""":return: True if the channel was closed"""
return not self.channel.queue.writable()
#} END interface
class CallbackWriterMixin(object):
"""The write end of a channel which allows you to setup a callback to be
called after an item was written to the channel"""
# slots don't work with mixin's :(
# __slots__ = ('_pre_cb')
def __init__(self, *args):
super(CallbackWriterMixin, self).__init__(*args)
self._pre_cb = None
def set_pre_cb(self, fun = lambda item: item):
"""
Install a callback to be called before the given item is written.
It returns a possibly altered item which will be written to the channel
instead, making it useful for pre-write item conversions.
Providing None uninstalls the current method.
:return: the previously installed function or None
:note: Must be thread-safe if the channel is used in multiple threads"""
prev = self._pre_cb
self._pre_cb = fun
return prev
def write(self, item, block=True, timeout=None):
if self._pre_cb:
item = self._pre_cb(item)
super(CallbackWriterMixin, self).write(item, block, timeout)
class CallbackChannelWriter(CallbackWriterMixin, ChannelWriter):
"""Implements a channel writer with callback functionality"""
pass
class Reader(object):
"""Allows reading from a device"""
__slots__ = tuple()
#{ Interface
def __init__(self, device):
"""Initialize the instance with the device to read from"""
#{ Iterator protocol
def __iter__(self):
return self
def next(self):
"""Implements the iterator protocol, iterating individual items"""
items = self.read(1)
if items:
return items[0]
raise StopIteration
#} END iterator protocol
#{ Interface
def read(self, count=0, block=True, timeout=None):
"""
        Read a list of items from the device. The list, as a sequence
        of items, is similar to the string of characters returned when reading from
        file-like objects.
:param count: given amount of items to read. If < 1, all items will be read
:param block: if True, the call will block until an item is available
:param timeout: if positive and block is True, it will block only for the
given amount of seconds, returning the items it received so far.
The timeout is applied to each read item, not for the whole operation.
:return: single item in a list if count is 1, or a list of count items.
If the device was empty and count was 1, an empty list will be returned.
        If count was greater than 1, a list with fewer than count items may be
        returned.
If count was < 1, a list with all items that could be read will be
returned."""
raise NotImplementedError()
#} END interface
class ChannelReader(Reader):
"""Allows reading from a channel. The reader is thread-safe if the channel is as well"""
__slots__ = 'channel'
def __init__(self, channel):
"""Initialize this instance from its parent write channel"""
self.channel = channel
#{ Interface
def read(self, count=0, block=True, timeout=None):
# if the channel is closed for writing, we never block
# NOTE: is handled by the queue
        # We don't check for a closed state here as it costs time - most of
# the time, it will not be closed, and will bail out automatically once
# it gets closed
# in non-blocking mode, its all not a problem
out = list()
queue = self.channel.queue
if not block:
# be as fast as possible in non-blocking mode, hence
# its a bit 'unrolled'
try:
if count == 1:
out.append(queue.get(False))
elif count < 1:
while True:
out.append(queue.get(False))
# END for each item
else:
for i in xrange(count):
out.append(queue.get(False))
# END for each item
# END handle count
except Empty:
pass
# END handle exceptions
else:
# to get everything into one loop, we set the count accordingly
if count == 0:
count = sys.maxint
# END handle count
i = 0
while i < count:
try:
out.append(queue.get(block, timeout))
i += 1
except Empty:
# here we are only if
# someone woke us up to inform us about the queue that changed
# its writable state
# The following branch checks for closed channels, and pulls
# as many items as we need and as possible, before
# leaving the loop.
if not queue.writable():
try:
while i < count:
out.append(queue.get(False, None))
i += 1
# END count loop
except Empty:
break # out of count loop
# END handle absolutely empty queue
# END handle closed channel
# if we are here, we woke up and the channel is not closed
# Either the queue became writable again, which currently shouldn't
# be able to happen in the channel, or someone read with a timeout
# that actually timed out.
# As it timed out, which is the only reason we are here,
# we have to abort
break
# END ignore empty
# END for each item
# END handle blocking
return out
#} END interface
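# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): wiring one
# Channel to a writer/reader pair and exercising the read() semantics
# documented above.  Exact blocking behaviour depends on util.AsyncQueue.
if __name__ == '__main__':
    _chan = Channel()
    _writer = ChannelWriter(_chan)
    _reader = ChannelReader(_chan)

    for _item in (1, 2, 3):
        _writer.write(_item)
    _writer.close()              # a closed channel never blocks its readers

    print(_reader.read(1))       # a single item, still wrapped in a list: [1]
    print(_reader.read(0))       # count < 1 drains whatever is left: [2, 3]
    print(_reader.read(1))       # empty and closed: [] without blocking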
class CallbackReaderMixin(object):
"""A channel which sends a callback before items are
|
asposecells/Aspose_Cells_Cloud
|
Examples/Python/Examples/DeleteHyperlinksFromExcelWorksheet.py
|
Python
|
mit
| 1,503
| 0.00998
|
import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
from asposecellscloud.CellsApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
apiKey = "XXXXX" #se
|
pcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Cells API SDK
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
#set input file name
filename = "Sample_Test_Book.xls"
sheetName = "Sheet2"
hyperlinkIndex = 0
#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Cells Cloud SDK API to delete a hyperlink from a worksheet
response = cellsApi.DeleteWorkSheetHyperlink(name=filename, sheetName=sheetName, hyperlinkIndex=hyperlinkIndex)
if response.Status == "OK":
#download Workbook from storage server
response = storageApi.GetDownload(Path=filename)
outfilename = "c:/temp/" + filename
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
|
Dante83/lexinomicon
|
lexinomicon/tests.py
|
Python
|
gpl-3.0
| 388
| 0
|
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_my_view(self):
from .views import my_view
request = testing.DummyRequest()
info = my_view(request)
        self.assertEqual(info['project'], 'Lexinomicon')
|
shucommon/little-routine
|
python/AI/tensorflow/dropout.py
|
Python
|
gpl-3.0
| 2,353
| 0.00742
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import sys
sys.path.append('./MNIST_data')
import os.path
from download import download
have_data = os.path.exists('MNIST_data/train-images-idx3-ubyte.gz')
if not have_data:
download('./MNIST_data')
# load data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# batch
batch_size = 64
n_batch = mnist.train.num_examples // batch_size
# in [60000, 28 * 28] out [60000, 10]
x = tf.placeholder(tf.float32, [None,784])
y = tf.placeholder(tf.float32, [None,10])
keep_prob = tf.placeholder(tf.float32)
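# keep_prob is the dropout keep probability: 0.5 while training, 1.0 when
# evaluating accuracy (see the feed_dict arguments in the session loop below).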
# Network architecture: 784-1000-500-10
w1 = tf.Variable(tf.truncated_normal([784,1000], stddev=0.1))
b1 = tf.Variable(tf.zeros([1000]) + 0.1)
l1 = tf.nn.tanh(tf.matmul(x, w1) + b1)
l1_drop = tf.nn.dropout(l1, keep_prob)
w2 = tf.Variable(tf.truncated_normal([1000, 500], stddev=0.1))
b2 = tf.Variable(tf.zeros([500]) + 0.1)
l2 = tf.nn.tanh(tf.matmul(l1_drop, w2) + b2)
l2_drop = tf.nn.dropout(l2, keep_prob)
w3 = tf.Variable(tf.truncated_normal([500, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(l2_drop, w3) + b3)
# Quadratic (mean squared error) cost - for regression problems
# loss = tf.losses.mean_squared_error(y, prediction)
# Cross-entropy - for classification problems
loss = tf.losses.softmax_cross_entropy(y, prediction)
# Gradient descent optimizer
train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# save result to a bool array
# 1000 0000 00 -> 0
# 0100 0000 00 -> 1
# ...
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# correct rate, bool -> float ->mean
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
# init variable
sess.run(tf.global_variables_initializer())
for epoch in range(10):
for batch in range(n_batch):
# get a batch data and label
batch_x, batch_y = mnist.train.next_batch(batch_size)
sess.run(train, feed_dict={x:batch_x, y:batch_y, keep_prob:0.5})
acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels, keep_prob:1.0})
train_acc = sess.run(accuracy, feed_dict={x:mnist.train.images, y:mnist.train.labels, keep_prob:1.0})
print("Iter " + str(epoch + 1) + ", Testing Accuracy " + str(acc) + ", Training Accuracy " + str(train_acc))
|
salilab/cryptosite
|
lib/cryptosite/predict.py
|
Python
|
lgpl-2.1
| 7,574
| 0
|
#!/usr/bin/env python
"""Do the final prediction of binding site given all features."""
from __future__ import print_function, absolute_import
import pickle
import os
import optparse
import cryptosite.config
def get_matrix(inputdata, model='linear'):
Res = {'CYS': (0, 0, 1, 0, 0), 'ASP': (0, 0, 0, 1, 1),
'SER': (0, 1, 1, 1, 1), 'GLN': (0, 0, 1, 0, 1),
'LYS': (0, 1, 0, 1, 1), 'ILE': (0, 1, 0, 0, 1),
'PRO': (0, 1, 1, 1, 0), 'THR': (1, 0, 0, 0, 0),
'PHE': (0, 1, 1, 0, 1), 'ALA': (0, 0, 0, 0, 0),
'GLY': (0, 0, 1, 1, 1), 'HIS': (0, 1, 0, 0, 0),
'GLU': (0, 0, 1, 1, 0), 'LEU': (0, 1, 0, 1, 0),
'ARG': (0, 0, 0, 0, 1), 'TRP': (1, 0, 0, 0, 1),
'VAL': (1, 0, 0, 1, 1), 'ASN': (0, 0, 0, 1, 0),
'TYR': (1, 0, 0, 1, 0), 'MET': (0, 1, 1, 0, 0)}
Res = Res.keys()
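    # Note: only the residue names are used from here on; each residue is
    # one-hot encoded by its position in this key list, and the 5-bit tuples
    # defined above are not referenced again in this function.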
SSE = ['B', 'E', 'G', 'H', 'I', 'S', 'T', 'U']
data = open(inputdata)
D = data.readlines()
data.close()
Header = D[0].strip().split()
if model == 'poly':
# the bottom visited for poly SVM
visited = [Header.index('CNC_mean_300'), Header.index('SQC'),
Header.index('D2S')]
visited += [Header.index('SQCn'), Header.index('PCKn'),
Header.index('Hn')]
visited += [Header.index('CN5_std_450'), Header.index('CN5_std_300'),
Header.index('CN5_std_350')]
        visited += [Header.index('CNC'), Header.index('PRT_std_450'),
                    Header.index('CN5_mean_500')]
Header.index('CN5_mean_500')]
visited += [Header.index('Bn'), Header.index('CHRn'),
Header.index('In')]
visited += [Header.index('CNC_std_300'), Header.index('CNS_300'),
Header.index('SAS14_std_400')]
visited += [Header.index('SASn')]
elif model == 'linear':
# for linear SVM
visited = [Header.index('CNC_mean_300'), Header.index('SQC'),
Header.index('CN5_std_450')]
visited += [Header.index('D2S'), Header.index('CNS_300'),
Header.index('Hn')]
visited += [Header.index('CN5_mean_450'), Header.index('CN5_std_300'),
Header.index('SQCn')]
visited += [Header.index('CNC_std_350'), Header.index('CNCn'),
Header.index('CVX_mean_450')]
visited += [Header.index('In')]
elif model == 'final':
visited = [Header.index('CNC_mean_'), Header.index('SQC'),
Header.index('PTM')]
else:
print('Wrong model: ', model)
exit()
M = []
Indeces, cnt = {}, 0
for d in D[1:]:
d = d.strip().split('\t')
LA = []
for hd in range(len(Header)):
if hd not in visited:
pass
else:
if hd == 1:
L = [0.] * len(Res)
L[Res.index(d[1])] = 1.
LA += L
elif hd in range(4, 8):
LA += [float(d[hd])]
elif hd == 8:
s = [0.] * len(SSE)
s[SSE.index(d[8])] = 1.
LA += s
else:
LA += [float(d[hd])]
LA += [float(d[-1])]
M.append([d[0]] + LA)
Indeces[cnt] = tuple(d[:3])
cnt += 1
return M, [Header[j] for j in sorted(visited)], Indeces
def predict(inputdata, model='linear'):
import numpy as np
from sklearn.metrics import confusion_matrix
print('Reading in the data ...')
M, Header, Indeces = get_matrix(inputdata, model)
print('Processing the data for model %s ...' % model.upper())
pdb = inputdata.split('.')[0]
print(pdb)
NewIndeces, newcnt = {}, 0
X_learn, Y_learn = [], []
for r, m in enumerate(M):
if len(np.argwhere(np.isnan(np.array(m[1:-1])))) > 0:
raise ValueError(r, m[0])
X_learn.append(np.array(m[1:-1]))
Y_learn.append(m[-1])
NewIndeces[newcnt] = Indeces[r]
newcnt += 1
X_learn = np.array(X_learn)
X_learn = np.vstack((X_learn[:, 0], X_learn[:, 2], X_learn[:, 1])).T
scaler_pkl = {'linear': 'LinearScaler_Final.pkl',
'poly': 'PolyScaler_Final.pkl',
'final': 'Scaler_Final_Final.pkl'}[model]
outmodel_pkl = {'linear': 'LinearSVC_FinalModel.pkl',
'poly': 'PolySVC_FinalModel.pkl',
'final': 'SVM_Final_Final.pkl'}[model]
print('Scaling ...')
with open(os.path.join(cryptosite.config.datadir, scaler_pkl)) as fh:
scaler = pickle.load(fh)
X_learn = scaler.transform(X_learn)
with open(os.path.join(cryptosite.config.datadir, outmodel_pkl)) as fh:
learner = pickle.load(fh)
print('Predicting ...')
# Set _gamma explicitly (earlier versions of cryptosite relied on a hacked
# local copy of sklearn that did this)
learner._gamma = 1.0 / X_learn.shape[1]
Y_pred = learner.predict(X_learn)
CM = confusion_matrix(Y_learn, Y_pred)
print()
print("Confusion matrix for: ", pdb)
print(CM)
print()
# output
Y_pred_prob = learner.predict_proba(X_learn)
Y_PRED_PROB_ALL = list(Y_pred_prob[:, 1])
suffix = {'linear': 'lin', 'poly': 'pol', 'final': 'pol'}[model]
outn = open(pdb + '.%s.pred' % suffix, 'w')
print('Writing output files ...')
outn.write('\t'.join(['PDBID', 'Res', 'ResID'] + Header +
['CryptositeValue']) + '\n')
for x in range(len(Y_PRED_PROB_ALL)):
outn.write('\t'.join(list(NewIndeces[x]) +
[str(i) for i in X_learn[x]] +
[str(Y_PRED_PROB_ALL[x])]) + '\n')
outn.close()
write_pdb(pdb, model)
print('Done!')
def write_pdb(pdb, model='linear'):
suffix = {'linear': 'lin', 'poly': 'pol', 'final': 'pol'}[model]
with open(pdb + '.%s.pred' % suffix) as data:
D = data.readlines()
out = open('%s.%s.pred.pdb' % (pdb, suffix), 'w')
Data = {}
for d in D:
d = d.strip().split()
Data[(d[1], d[2])] = ('0.0', d[-1])
with open('%s_mdl.pdb' % pdb.split('/')[-1]) as data:
D = data.readlines()
for d in D:
if 'ATOM' == d[:4]:
p = (d[17:20], str(int(d[22:26])))
try:
pred = Data[p]
except KeyError:
pred = ('0.0', '0.0')
v = '%.2f' % (float(pred[1]) * 100)
v = (6 - len(v)) * ' ' + v
line = d[:56] + pred[0] + '0' + v + '\n'
out.write(line)
else:
out.write(d)
out.close()
def parse_args():
usage = """%prog [opts] <model_name>
Do the final prediction of binding site given all features.
<model_name> should be the name of the model. The model's 3D structure,
<model_name>_mdl.pdb, and the features file, <model_name>.features,
are read in from the current directory.
Two files are generated on successful prediction:
<model_name>.pol.pred: a simple tab-separated file listing the value of
the CryptoSite score for each residue.
<model_name>.pol.pred.pdb: a PDB file with the CryptoSite score in the
occupancy column, for visualization.
"""
parser = optparse.OptionParser(usage)
opts, args = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
return args[0]
def main():
model_name = parse_args()
predict(model_name + '.features', model='final')
if __name__ == '__main__':
main()
|
daniel-araujo/proctal
|
src/cli/tests/write-binary.py
|
Python
|
gpl-3.0
| 1,310
| 0.00458
|
#!/usr/bin/env python3
import sys
from util import proctal_cli, sleeper
class Error(Exception):
pass
class TestSingleValue:
def __init__(self, type, value):
self.type = type
self.value = value
pass
def run(self, guinea):
address = proctal_cli.allocate(guinea.pid(), self.value.size())
try:
writer = proctal_cli.write(guinea.pid(), address, self.type, binary=True)
try:
writer.write_value(self.value)
writer.stop()
reader = proctal_cli.read(guinea.pid(), address, self.type)
try:
value = reader.next_value()
if self.value.cmp(value) != 0:
raise Error("Expected {expected} but got {found}.".format(expected=self.value, found=value))
finally:
reader.stop()
finally:
writer.stop()
finally:
            proctal_cli.deallocate(guinea.pid(), address)
int32 = proctal_cli.TypeInteger(32);
int32_test_val = proctal_cli.ValueInteger(int32)
int32_test_val.parse(0x0ACC23AA)
tests = [
TestSingleValue(int32, int32_test_val)
]
guinea = sleeper.run()
try:
for test in tests:
test.run(guinea)
finally:
guinea.stop()
|
DiamondLightSource/ispyb-api
|
src/ispyb/sp/mxacquisition.py
|
Python
|
apache-2.0
| 9,112
| 0.001097
|
# mxacquisition.py
#
# Copyright (C) 2014 Diamond Light Source, Karl Levik
#
# 2014-09-24
#
# Methods to store MX acquisition data
#
import copy
from ispyb.sp.acquisition import Acquisition
from ispyb.strictordereddict import StrictOrderedDict
class MXAcquisition(Acquisition):
"""MXAcquisition provides methods to store data in the MX acquisition tables."""
def __init__(self):
self.insert_data_collection_group = super().upsert_data_collection_group
self.insert_data_collection = super().upsert_data_collection
self.update_data_collection_group = super().upsert_data_collection_group
self.update_data_collection = super().upsert_data_collection
_image_params = StrictOrderedDict(
[
("id", None),
("parentid", None),
("img_number", None),
("filename", None),
("
|
file_location", None),
("measured_intensity", None),
("jpeg_path", None),
("jpeg_thumb_path", None),
("temperature", None),
("cumulative_intensity", None),
("synchrotron_current", None),
("comments", None),
("machine_msg", None),
]
)
_dcg_grid_params = StrictOrderedDict(
[
("id", None),
("p
|
arentid", None),
("dxInMm", None),
("dyInMm", None),
("stepsX", None),
("stepsY", None),
("meshAngle", None),
("pixelsPerMicronX", None),
("pixelsPerMicronY", None),
("snapshotOffsetXPixel", None),
("snapshotOffsetYPixel", None),
("orientation", None),
("snaked", None),
]
)
_dc_grid_params = StrictOrderedDict(
[
("id", None),
("parentid", None),
("dxInMm", None),
("dyInMm", None),
("stepsX", None),
("stepsY", None),
("meshAngle", None),
("pixelsPerMicronX", None),
("pixelsPerMicronY", None),
("snapshotOffsetXPixel", None),
("snapshotOffsetYPixel", None),
("orientation", None),
("snaked", None),
]
)
_dc_position_params = StrictOrderedDict(
[
("id", None),
("pos_x", None),
("pos_y", None),
("pos_z", None),
("scale", None),
]
)
_energy_scan_params = StrictOrderedDict(
[
("id", None),
("session_id", None),
("sample_id", None),
("sub_sample_id", None),
("start_time", None),
("end_time", None),
("start_energy", None),
("end_energy", None),
("detector", None),
("element", None),
("edge_energy", None),
("synchrotron_current", None),
("temperature", None),
("peak_energy", None),
("peak_f_prime", None),
("peak_f_double_prime", None),
("inflection_energy", None),
("inflection_f_prime", None),
("inflection_f_double_prime", None),
("chooch_file_full_path", None),
("jpeg_chooch_file_full_path", None),
("scan_file_full_path", None),
("beam_size_horizontal", None),
("beam_size_vertical", None),
("exposure_time", None),
("transmission", None),
("flux", None),
("flux_end", None),
("comments", None),
]
)
# Is xrayDose populated in EnergyScan? Is it relevant?
_fluo_spectrum_params = StrictOrderedDict(
[
("id", None),
("session_id", None),
("sample_id", None),
("sub_sample_id", None),
("start_time", None),
("end_time", None),
("energy", None),
("file_name", None),
("annotated_pymca_spectrum", None),
("fitted_data_file_full_path", None),
("jpeg_scan_file_full_path", None),
("scan_file_full_path", None),
("beam_size_horizontal", None),
("beam_size_vertical", None),
("exposure_time", None),
("transmission", None),
("flux", None),
("flux_end", None),
("comments", None),
]
)
_fluo_mapping_params = StrictOrderedDict(
[
("id", None),
("roi_id", None),
("grid_info_id", None),
("data_format", None),
("data", None),
("points", None),
("opacity", 1),
("colour_map", None),
("min", None),
("max", None),
("program_id", None),
]
)
_fluo_mapping_roi_params = StrictOrderedDict(
[
("id", None),
("start_energy", None),
("end_energy", None),
("element", None),
("edge", None),
("r", None),
("g", None),
("b", None),
("sample_id", None),
("scalar", None),
]
)
def upsert_xray_centring_result(
self,
result_id=None,
grid_info_id=None,
method=None,
status=None,
x=None,
y=None,
):
"""Insert or update the xray centring result associated with a grid info
:return: The xray centring result id.
"""
return self.get_connection().call_sp_write(
procname="upsert_xray_centring_result",
args=[result_id, grid_info_id, method, status, x, y],
)
@classmethod
def get_dc_position_params(cls):
return copy.deepcopy(cls._dc_position_params)
def update_dc_position(self, values):
"""Update the position info associated with a data collection"""
return self.get_connection().call_sp_write("update_dc_position", values)
@classmethod
def get_dcg_grid_params(cls):
return copy.deepcopy(cls._dcg_grid_params)
def upsert_dcg_grid(self, values):
"""Insert or update the grid info associated with a data collection group"""
return self.get_connection().call_sp_write("upsert_dcg_grid", values)
def retrieve_dcg_grid(self, dcgid, auth_login=None):
"""Retrieve a list of dictionaries containing the grid information for
one data collection group id. Raises ISPyBNoResultException if there
is no grid information available for the given DCGID.
Generally the list will only contain a single dictionary.
"""
return self.get_connection().call_sp_retrieve(
procname="retrieve_grid_info_for_dcg_v2", args=(dcgid, auth_login)
)
@classmethod
def get_dc_grid_params(cls):
return copy.deepcopy(cls._dc_grid_params)
def upsert_dc_grid(self, values):
"""Insert or update the grid info associated with a data collection"""
return self.get_connection().call_sp_write("upsert_dc_grid", values)
def retrieve_dc_grid(self, dcid, auth_login=None):
"""Retrieve a list of dictionaries containing the grid information for
one data collection id. Raises ISPyBNoResultException if there
is no grid information available for the given DCID.
Generally the list will only contain a single dictionary.
"""
return self.get_connection().call_sp_retrieve(
procname="retrieve_grid_info_for_dc", args=(dcid, auth_login)
)
@classmethod
def get_energy_scan_params(cls):
return copy.deepcopy(cls._energy_scan_params)
def upsert_energy_scan(self, values):
"""Insert or update energy scan a.k.a. edge scan"""
return self.get_connection().call_sp_write("upsert_energy_scan", values)
@classmethod
def get_fluo_spectrum_params(cls):
return copy.deepcopy(cls._fluo_spectrum_params)
def upsert_fluo_spectrum(self, values):
"""Insert or update XR fluorescence spectrum a.k.a. MCA spectrum"""
return self.get_connection()
|
littleDad/mesLucioles
|
logger_04.py
|
Python
|
gpl-2.0
| 2,415
| 0.003313
|
# -*- coding: utf8 -*-
import logging
from logging.handlers import RotatingFileHandler
from babel.dates import format_datetime, datetime
from time import sleep
from traceback import print_exception, format_exception
class LogFile(logging.Logger):
    '''rotatively logs everything
'''
def initself(self):
self.setLevel(logging.DEBUG)
self.handler = RotatingFileHandler(
'app.log',
            # maxBytes=2000,  # approximately 100 lines (81)
maxBytes=6000,
backupCount=3 # number of log backup files
)
self.addHandler(self.handler)
def p_log(self, msg, **kwargs):
        '''level = {info, warning, debug, error}
        you can also pass an exception=exc_info() argument to log a raised exception!
        '''
logger = self
if 'error' in kwargs:
print('error YES')
kwargs['level'] = 'error'
if 'exception' in kwargs:
print('exception YES')
kwargs['level'] = 'exception'
if 'level' in kwargs:
level = kwargs['level']
else:
level = "info"
        # warning: error must be a python error formatting!
        if level == 'error':  # or whatever you want with more details
message = ">> " + kwargs['error'][1].message # exc_info()[1].message
eval("logger." + level + "(\"" + message + "\")")
elif level == 'exception':
message = ">> UPRISING OF AN EXCEPTION!"
eval("logger." + level + "(\"" + message + "\")")
for line in format_exception(kwargs['exception'][0], kwargs['exception'][1], kwargs['exception'][2]):
logger.error(line)
else:
if 'newline' in kwargs:
for i in range(kwargs['newline']):
eval("logger." + level + "(\"" + "\")")
if 'blank' in kwargs:
if kwargs['blank']:
message = msg
else:
message = format_datetime(datetime.now(), "HH:mm:ss", locale='en')\
+ " (" + level + ") > "\
+ msg
eval("logger." + level + "(\"" + message + "\")")
if __name__ == '__main__':
logger = LogFile('app.log')
logger.initself()
for i in range(10):
sleep(.5)
logger.p_log('coucou', level="warning")
|
XBigTK13X/wiiu-memshark
|
vendor/tcpgecko/octoling.py
|
Python
|
mit
| 5,604
| 0.010171
|
# -*- coding: cp1252 -*-
#Codename Octohax
#To find Octohax offsets on newer versions, dump memory
#in that area, eg 0x10500000 to 0x10700000, open in hex
#editor, search "Tnk_Simple", there are only 2 results
#Also search for Player00
#There should be like a result or two before what you want
#Looks like this:
'''
.k......k. Riva
l00.Rival00_Hlf.
Rival_Squid.Play
er00_anim...Play
er_Squid_anim...
Player01_anim...
Player00....Play
er00_Hlf....Play
er_Squid....Play
er01....Player01
_Hlf....ToSquid.
ToHuman.Sqd_Jet.
'''
#Then dump 0x12000000 to 0x13000000, search for Tnk_Simple,
#should be first result, with three of them in a row with spacing
from tcpgecko import TCPGecko
import sys
sys.argv.append("280")
tcp = TCPGecko("192.168.1.82")
if sys.argv[1] == "100": #For 1.0.0-?
tcp.writestr(0x105068F0, b"Tnk_Rvl00")
tcp.writestr(0x1051A500, b"Tnk_Rvl00")
tcp.writestr(0x105DBFE0, b"Rival00")
tcp.writestr(0x105DBFEC, b"Rival00_Hlf")
    tcp.writestr(0x105DBFFC, b"Rival_Squid")
#tcp.pokemem(0x12CB05A0, 42069)
elif sys.argv[1] == "130": #for 1.3.0
tcp.writestr(0x105068F0, b"Tnk_Rvl00")
tcp.writestr(0x105D4000, b"Tnk_Rvl00")
tcp.writestr(0x105DC118, b"Rival00")
tcp.writestr(0x105DC124, b"Rival00_Hlf")
tcp.writestr(0x105DC134, b"Rival_Squid")
#tcp.pokemem(0x12CB07A0, 42069)
elif sys.argv[1] == "200": #For 2.0.0
tcp.writestr(0x10506AB0, b"Tnk_Rvl00")
tcp.writestr(0x105E0278, b"Tnk_Rvl00")
tcp.writestr(0x105E85B0, b"Rival00")
tcp.writestr(0x105E85BC, b"Rival00_Hlf")
tcp.writestr(0x105E85CC, b"Rival_Squid")
tcp.writestr(0x12BE2350, b"Tnk_Rvl00")
tcp.writestr(0x12BE239C, b"Tnk_Rvl00")
tcp.writestr(0x12BE23E8, b"Tnk_Rvl00")
elif sys.argv[1] == "210": #For 2.1.0
tcp.writestr(0x10506AF8, b"Tnk_Rvl00")
tcp.writestr(0x105E0350, b"Tnk_Rvl00")
tcp.writestr(0x105E8698, b"Rival00")
tcp.writestr(0x105E86A4, b"Rival00_Hlf")
tcp.writestr(0x105E86B4, b"Rival_Squid")
tcp.writestr(0x12BE2350, b"Tnk_Rvl00")
tcp.writestr(0x12BE239C, b"Tnk_Rvl00")
tcp.writestr(0x12BE23E8, b"Tnk_Rvl00")
tcp.pokemem(0x12CC7C80, 0x00000000) #Enforce Female Inkling
elif sys.argv[1] == "220": #For 2.2.0
tcp.writestr(0x10506AF8, b"Tnk_Rvl00")
tcp.writestr(0x105E0350, b"Tnk_Rvl00")
tcp.writestr(0x105EB040, b"Rival00")
tcp.writestr(0x105EB04C, b"Rival00_Hlf")
tcp.writestr(0x105EB05C, b"Rival_Squid")
tcp.writestr(0x12BE5350, b"Tnk_Rvl00")
tcp.writestr(0x12BE539C, b"Tnk_Rvl00")
tcp.writestr(0x12BE53E8, b"Tnk_Rvl00")
tcp.pokemem(0x12CCAC80, 0x00000000) #Enforce Female Inkling
elif sys.argv[1] == "230": #For 2.3.0
tcp.writestr(0x10506AF8, b"Tnk_Rvl00")
tcp.writestr(0x105E3BB8, b"Tnk_Rvl00")
tcp.writestr(0x105EBF98, b"Rival00")
tcp.writestr(0x105EBFA4, b"Rival00_Hlf")
tcp.writestr(0x105EBFB4, b"Rival_Squid")
tcp.writestr(0x12BE6350, b"Tnk_Rvl00")
tcp.writestr(0x12BE639C, b"Tnk_Rvl00")
tcp.writestr(0x12BE63E8, b"Tnk_Rvl00")
tcp.pokemem(0x12CCBB90, 0x00000000) #Enforce Female Inkling
elif sys.argv[1] == "240": #For 2.4.0
tcp.writestr(0x10506AF8, b"Tnk_Rvl00")
tcp.writestr(0x105E4EA0, b"Tnk_Rvl00")
tcp.writestr(0x105ED7B8, b"Rival00")
tcp.writestr(0x105ED7C4, b"Rival00_Hlf")
tcp.writestr(0x105ED7D4, b"Rival_Squid")
tcp.writestr(0x12BE8350, b"Tnk_Rvl00")
tcp.writestr(0x12BE839C, b"Tnk_Rvl00")
tcp.writestr(0x12BE83E8, b"Tnk_Rvl00")
tcp.pokemem(0x12CCDB90, 0x00000000) #Enforce Female Inkling
elif sys.argv[1] == "250": #For 2.5.0
tcp.writestr(0x10506AF8, b"Tnk_Rvl00")
tcp.writestr(0x105E4EB8, b"Tnk_Rvl00")
tcp.writestr(0x105ED7D0, b"Rival00")
tcp.writestr(0x105ED7DC, b"Rival00_Hlf")
#Don't really need squid, looks bad without proper bone offsets
#tcp.writestr(0x105ED7D4, b"Rival_Squid")
tcp.writestr(0x12BE8350, b"Tnk_Rvl00")
tcp.writestr(0x12BE839C, b"Tnk_Rvl00")
tcp.writestr(0x12BE83E8, b"Tnk_Rvl00")
tcp.pokemem(0x12CCDB90, 0x00000000) #Enforce Female Inkling
elif sys.argv[1] == "260": #For 2.6.0
tcp.writestr(0x10506B28, b"Tnk_Rvl00")
tcp.writestr(0x105E59B8, b"Tnk_Rvl00")
tcp.writestr(0x105EE350, b"Rival00")
tcp.writestr(0x105EE35C, b"Rival00_Hlf")
#Don't really need squid, looks bad without proper bone offsets
#tcp.writestr(0x105EE36C, b"Rival_Squid")
tcp.writestr(0x12BE9354, b"Tnk_Rvl00")
tcp.writestr(0x12BE93A0, b"Tnk_Rvl00")
tcp.writestr(0x12BE93EC, b"Tnk_Rvl00")
tcp.pokemem(0x12CCF990, 0x00000000) #Enforce Female Inkling
elif sys.argv[1] == "270": #For 2.7.0
tcp.writestr(0x10506B58, b"Tnk_Rvl00")
tcp.writestr(0x105E5F40, b"Tnk_Rvl00")
tcp.writestr(0x105EE968, b"Rival00")
tcp.writestr(0x105EE974, b"Rival00_Hlf")
#Don't really need squid, looks bad without proper bone offsets
#tcp.writestr(0x105EE984, b"Rival_Squid")
tcp.writestr(0x12BEA354, b"Tnk_Rvl00")
tcp.writestr(0x12BEA3A0, b"Tnk_Rvl00")
tcp.writestr(0x12BEA3EC, b"Tnk_Rvl00")
tcp.pokemem(0x12CD0D90, 0x00000000) #Enforce Female Inkling
elif sys.argv[1] == "280": #For 2.8.0
tcp.writestr(0x10506B58, b"Tnk_Rvl00")
tcp.writestr(0x105E6000, b"Tnk_Rvl00")
tcp.writestr(0x105EEA28, b"Rival00")
tcp.writestr(0x105EEA34, b"Rival00_Hlf")
#Don't really need squid, looks bad without proper bone offsets
#tcp.writestr(0x105EE9A44, b"Rival_Squid")
tcp.writestr(0x12C1F354, b"Tnk_Rvl00")
tcp.writestr(0x12C1F3A0, b"Tnk_Rvl00")
tcp.writestr(0x12C1F3EC, b"Tnk_Rvl00")
tcp.pokemem(0x12D05D90, 0x00000000) #Enforce Female Inkling
tcp.s.close()
print("Done.")
|
dragoon/kilogram
|
kilogram/entity_linking/mention_rw/__init__.py
|
Python
|
apache-2.0
| 7,287
| 0.002196
|
from __future__ import division
import math
import numpy as np
import networkx as nx
from sklearn.preprocessing import normalize
from kilogram import NgramService
class Signature(object):
vector = None
mapping = None
def __init__(self, vector, G, candidate_uris):
"""
:type candidate_uris: set
"""
self.vector = vector
self.mapping = []
for prob, uri in zip(vector, G.nodes()):
if uri in candidate_uris:
self.mapping.append((prob, uri))
self.mapping.sort(reverse=True)
def __repr__(self):
return str(self.mapping[:10])
def _mention_uri(uri, mention):
return mention.replace(' ', '_')+'|'+uri
def _candidate_filter(candidates):
def string_similar(candidate_, topn=10):
substring_similar = [e for e in candidate_.entities
if set(candidate_.cand_string.lower().split()).intersection(e.uri.lower().split('_'))]
if len(substring_similar) >= topn:
return substring_similar
substring_similar2 = [e for e in candidate_.entities
if candidate_.cand_string in e.uri.replace('_', ' ')]
substring_similar.extend(substring_similar2)
return substring_similar[:topn]
def top_prior(candidate_, topn=10):
return sorted(candidate_.entities, key=lambda e: e.count, reverse=True)[:topn]
for candidate in candidates:
entities = top_prior(candidate)
uris = set(e.uri for e in entities)
entities.extend([e for e in string_similar(candidate) if e.uri not in uris])
candidate.entities = entities
ALPHA = 0.15 # restart probability
class SemanticGraph:
G = None
candidates = None
matrix = None
# map candidate urls to indexes in the matrix
index_map = None
candidate_uris = None
def __init__(self, candidates):
self.G = nx.Graph()
_candidate_filter(candidates)
self.candidates = candidates
neighbors = {}
self.index_map = {}
#self.candidate_uris1 = set()
#for cand in candidates:
# self.candidate_uris1.add(cand.cand_string)
self.candidate_uris = set()
for cand in candidates:
total = sum([e.count for e in cand.entities])
for e in cand.entities:
mention_uri = _mention_uri(e.uri, cand.cand_string)
self.candidate_uris.add(mention_uri)
neighbors[mention_uri] = NgramService.get_wiki_link_mention_cooccur(mention_uri)
# delete self
try:
del neighbors[mention_uri][mention_uri]
except KeyError:
pass
for neighbor, weight in neighbors[mention_uri].iteritems():
#if neighbor.split('|')[0] not in self.candidate_uris1:
# continue
if self.G.has_edge(mention_uri, neighbor):
continue
try:
self.G.add_edge(mention_uri, neighbor, {'w': int(weight)})
# happens because of malformed links
except ValueError:
pass
# always add candidates
self.G.add_node(mention_uri, {'prior': e.count/total})
# prune 1-degree edges except original candidates
to_remove = set()
for node, degree in self.G.degree_iter():
if degree <= 1:
to_remove.add(node)
to_remove = to_remove.difference(self.candidate_uris)
self.G.remove_nodes_from(to_remove)
if self.G.number_of_nodes() > 0:
self.matrix = nx.to_scipy_sparse_matrix(self.G, weight='w', dtype=np.float64)
self.matrix = normalize(self.matrix, norm='l1', axis=1)
for i, uri in enumerate(self.G.nodes()):
self.index_map[uri] = i
def _get_entity_teleport_v(self, i):
teleport_vector = np.zeros((self.matrix.shape[0], 1), dtype=np.float64)
teleport_vector[i] = 1-ALPHA
return np.matrix(teleport_vector)
def _get_doc_teleport_v(self):
teleport_vector = np.zeros((self.matrix.shape[0], 1), dtype=np.float64)
resolved = [self.index_map[_mention_uri(x.resolved_true_entity, x.cand_string)] for x in self.candidates
if x.resolved_true_entity is not None]
if len(resolved) > 0:
for i in resolved:
teleport_vector[i] = 1-ALPHA
else:
# assign according to prior probabilities
for candidate in self.candidates:
total_uri_count = sum([e.count for e in candidate.entities], 1)
for e in candidate.entities:
                    teleport_vector[self.index_map.get(_mention_uri(e.uri, candidate.cand_string))] = e.count/total_uri_count
return np.matrix(teleport_vector)
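    # _learn_eigenvector below runs a power iteration of the form
    #     pi <- ALPHA * matrix * pi + teleport_vector   (normalised each step),
    # i.e. a personalised-PageRank-style random walk with restart, where the
    # teleport vector (built by the helpers above) concentrates the restart
    # probability on the entity or document nodes of interest.  Iteration stops
    # once the vector norm changes by less than 1e-5, or after 10000 rounds.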
def _learn_eigenvector(self, teleport_vector):
pi = np.matrix(np.zeros(teleport_vector.shape))
prev_norm = 0
for _ in range(10000):
            pi = self.matrix*pi*ALPHA + teleport_vector
cur_norm = np.linalg.norm(pi)
pi /= cur_norm
if prev_norm and abs(cur_norm - prev_norm) < 0.00001:
break
prev_norm = cur_norm
return np.ravel(pi/pi.sum())
def doc_signature(self):
"""compute document signature"""
return Signature(self._learn_eigenvector(self._get_doc_teleport_v()), self.G, self.candidate_uris)
def compute_signature(self, mention_uri):
sig = Signature(self._learn_eigenvector(self._get_entity_teleport_v(self.index_map[mention_uri])), self.G, self.candidate_uris)
return sig
def _zero_kl_score(self, p, q):
"""
:type p: Signature
:type q: Signature
:return: Zero Kullback-Leiber divergence score
"""
total = 0
for p_i, q_i in zip(p.vector, q.vector):
if q_i == 0:
total += p_i*20
elif p_i > 0:
total += p_i*math.log(p_i/q_i)
return total
def do_linking(self):
# link unambiguous first
for candidate in self.candidates:
if len(candidate.entities) == 1:
candidate.resolved_true_entity = candidate.entities[0].uri
for candidate in sorted(self.candidates, key=lambda x: len(x.entities)):
if candidate.truth_data['uri'] is None:
continue
if not candidate.entities or candidate.resolved_true_entity:
continue
doc_sign = self.doc_signature()
cand_scores = []
for e in candidate.entities:
e_sign = self.compute_signature(_mention_uri(e.uri, candidate.cand_string))
# global similarity + local (prior prob)
sem_sim = 1/self._zero_kl_score(e_sign, doc_sign)
cand_scores.append((e.uri, sem_sim))
max_uri, score = max(cand_scores, key=lambda x: x[1])
candidate.resolved_true_entity = max_uri
if candidate.resolved_true_entity != candidate.truth_data['uri']:
print candidate, candidate.truth_data['uri']
|
weyw/eulerproject
|
wey/p1.py
|
Python
|
gpl-2.0
| 458
| 0.00655
|
# https://projecteuler.net/problem=1
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
# = 233168
import sys
def sum(n):
total = 0
for i in range(n):
if (i % 3 == 0) or (i % 5 == 0):
            total += i
    return total
n = 20
if len(sys.argv) == 2:
n = int(sys.argv[1])
print(sum(n))
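# Cross-check (illustrative, not part of the original solution): the same sum
# can be computed in closed form via inclusion-exclusion, using the
# arithmetic-series sum of the multiples of k below n.
def sum_multiples(k, n):
    m = (n - 1) // k                # number of multiples of k below n
    return k * m * (m + 1) // 2     # k * (1 + 2 + ... + m)

def sum_closed_form(n):
    return sum_multiples(3, n) + sum_multiples(5, n) - sum_multiples(15, n)

assert sum_closed_form(10) == 23
assert sum_closed_form(1000) == 233168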
|
aykol/pymatgen
|
pymatgen/io/tests/test_qchem.py
|
Python
|
mit
| 81,842
| 0.00033
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import copy
import glob
import json
import os
import unittest
from pymatgen import Molecule
from pymatgen.io.qchem import QcTask, QcInput, QcOutput
from pymatgen.util.testing import PymatgenTest
__author__ = 'xiaohuiqu'
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', "molecules")
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
mol = Molecule(["C", "H", "H", "H", "Cl"], coords)
coords2 = [[0.0, 0.0, -2.4],
[0.0, 0.0, 0.0],
[0.0, 0.0, 2.4]]
heavy_mol = Molecule(["Br", "Cd", "Br"], coords2)
coords3 = [[2.632273, -0.313504, -0.750376],
[3.268182, -0.937310, -0.431464],
[2.184198, -0.753305, -1.469059]]
water_mol = Molecule(["O", "H", "H"], coords3)
class QcTaskTest(PymatgenTest):
def elementary_io_verify(self, text, qctask):
self.to_and_from_dict_verify(qctask)
self.from_string_verify(contents=text, ref_dict=qctask.as_dict())
def to_and_from_dict_verify(self, qctask):
"""
        Helper function. This function should be called in each specific test.
"""
d1 = qctask.as_dict()
qc2 = QcTask.from_dict(d1)
d2 = qc2.as_dict()
self.assertEqual(d1, d2)
def from_string_verify(self, contents, ref_dict):
qctask = QcTask.from_string(contents)
d2 = qctask.as_dict()
self.assertEqual(ref_dict, d2)
def test_read_zmatrix(self):
contents = '''$moLEcule
1 2
S
C 1 1.726563
H 2 1.085845 1 119.580615
C 2 1.423404 1 114.230851 3 -180.000000 0
H 4 1.084884 2 122.286346 1 -180.000000 0
C 4 1.381259 2 112.717365 1 0.000000 0
H 6 1.084731 4 127.143779 2 -180.000000 0
C 6 1.415867 4 110.076147 2 0.000000 0
F 8 1.292591 6 124.884374 4 -180.000000 0
$end
$reM
BASIS = 6-31+G*
EXCHANGE = B3LYP
jobtype = freq
$end
'''
qctask = QcTask.from_string(contents)
ans = '''$molecule
1 2
S 0.00000000 0.00000000 0.00000000
C 0.00000000 0.00000000 1.72656300
H -0.94431813 0.00000000 2.26258784
C 1.29800105 -0.00000002 2.31074808
H 1.45002821 -0.00000002 3.38492732
C 2.30733813 -0.00000003 1.36781908
H 3.37622632 -0.00000005 1.55253338
C 1.75466906 -0.00000003 0.06427152
F 2.44231414 -0.00000004 -1.03023099
$end
$rem
jobtype = freq
exchange = b3lyp
basis = 6-31+g*
$end
'''
ans_tokens = ans.split('\n')
ans_text_part = ans_tokens[:2] + ans_tokens[11:]
ans_coords_part = ans_tokens[2:11]
converted_tokens = str(qctask).split('\n')
converted_text_part = converted_tokens[:2] + converted_tokens[11:]
converted_coords_part = converted_tokens[2:11]
self.assertEqual(ans_text_part, converted_text_part)
for ans_coords, converted_coords in zip(ans_coords_part,
converted_coords_part):
ans_coords_tokens = ans_coords.split()
converted_coords_tokens = converted_coords.split()
self.assertEqual(ans_coords_tokens[0], converted_coords_tokens[0])
xyz1 = ans_coords_tokens[1:]
xyz2 = converted_coords_tokens[1:]
for t1, t2 in zip(xyz1, xyz2):
self.assertTrue(abs(float(t1)-float(t2)) < 0.0001)
def test_no_mol(self):
ans = '''$comment
Test Methane
$end
$molecule
-1 2
read
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
$end
'''
qctask = QcTask(molecule="READ", title="Test Methane",
exchange="B3LYP", jobtype="SP", charge=-1,
spin_multiplicity=2,
basis_set="6-31+G*")
self.assertEqual(str(qctask), ans)
self.elementary_io_verify(ans, qctask)
def test_simple_basis_str(self):
ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
$end
'''
qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
jobtype="SP",
basis_set="6-31+G*")
self.assertEqual(str(qctask), ans)
self.elementary_io_verify(ans, qctask)
def test_fragmented_molecule(self):
mol1 = copy.deepcopy(mol)
mol1.set_charge_and_spin(1, 2)
mol2 = copy.deepcopy(water_mol)
mol2.set_charge_and_spin(-1, 2)
qctask = QcTask([mol1, mol2], title="Test Fragments", exchange="B3LYP",
jobtype="bsse", charge=0, spin_multiplicity=3, basis_set="6-31++G**")
ans = """$comment
Test Fragments
$end
$molecule
0 3
--
1 2
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
--
-1 2
O 2.63227300 -0.31350400 -0.75037600
H 3.26818200 -0.93731000 -0.43146400
H 2.18419800 -0.75330500 -1.46905900
$end
$rem
jobtype = bsse
exchange = b3lyp
basis = 6-31++g**
$end
"""
self.assertEqual(str(qctask), ans)
self.elementary_io_verify(ans, qctask)
def test_mixed_basis_str(self):
qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
jobtype="SP",
basis_set=[("C", "6-311G*"), ("H", "6-31g(d,p)"), ("H", "6-31g(d,p)"),
("H", "6-31g*"), ("cl", "6-31+g*")])
ans_mixed = """$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = mixed
$end
$basis
C 1
6-311g*
****
H 2
6-31g(d,p)
****
H 3
6-31g(d,p)
****
H 4
6-31g*
****
Cl 5
6-31+g*
****
$end
"""
self.assertEqual(ans_mixed, str(qctask))
self.elementary_io_verify(ans_mixed, qctask)
qctask.set_basis_set("6-31+G*")
ans_simple = """$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
$end
"""
self.assertEqual(str(qctask), ans_simple)
qctask.set_basis_set([("C", "6-311G*"), ("H", "6-31g(d,p)"), ("H", "6-31g(d,p)"),
("H", "6-31g*"), ("cl", "6-31+g*")])
self.assertEqual(str(qctask), ans_mixed)
self.elementary_io_verify(ans_mixed, qctask)
def test_opt_constraint_str(self):
opt_coords = [[-1.8438708, 1.7639844, 0.0036111],
[-0.3186117, 1.7258535, 0.0241264],
[0.1990523, 0.2841796, -0.0277432],
[1.7243049, 0.2460376, -0.0067397],
|
andreaso/ansible
|
lib/ansible/modules/remote_management/wakeonlan.py
|
Python
|
gpl-3.0
| 4,077
| 0.004415
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: wakeonlan
version_added: '2.2'
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
description:
- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
options:
mac:
description:
- MAC address to send Wake-on-LAN broadcast packet for.
required: true
broadcast:
description:
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
default: 255.255.255.255
port:
description:
- UDP port to use for magic Wake-on-LAN packet.
default: 7
author: "Dag Wieers (@dagwieers)"
todo:
- Add arping support to check whether the system is up (before and after)
- Enable check-mode support (when we have arping support)
- Does not have SecureOn password support
notes:
- This module sends a magic packet, without knowing whether it worked
  - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first) when turned off
'''
EXAMPLES = '''
- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
wakeonlan:
mac: '00:00:5E:00:53:66'
broadcast: 192.0.2.23
delegate_to: localhost
- wakeonlan:
mac: 00:00:5E:00:53:66
port: 9
delegate_to: localhost
'''
RETURN='''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import socket
import struct
def wakeonlan(module, mac, broadcast, port):
""" Send a magic Wake-on-LAN packet. """
mac_orig = mac
# Remove possible separator from MAC address
if len(mac) == 12 + 5:
mac = mac.replace(mac[2], '')
# If we don't end up with 12 hexadecimal characters, fail
if len(mac) != 12:
module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
# Test if it converts to an integer, otherwise fail
try:
int(mac, 16)
except ValueError:
module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
# Create payload for magic packet
data = ''
padding = ''.join(['FFFFFFFFFFFF', mac * 20])
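    # i.e. six 0xFF bytes followed by the MAC address repeated 20 times; the
    # hex string is converted to raw bytes two characters at a time below.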
for i in range(0, len(padding), 2):
data = ''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
# Broadcast payload to network
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
try:
sock.sendto(data, (broadcast, port))
except socket.error:
e = get_exception()
sock.close()
module.fail_json(msg=str(e))
sock.close()
def main():
module = AnsibleModule(
argument_spec = dict(
mac = dict(type='str', required=True),
broadcast = dict(type='str', default='255.255.255.255'),
port = dict(type='int', default=7),
),
supports_check_mode = True,
)
mac = module.params['mac']
broadcast = module.params['broadcast']
port = module.params['port']
if not module.check_mode:
wakeonlan(module, mac, broadcast, port)
module.exit_json(changed=True)
if __name__ == '__main__':
main()
|
google/python_portpicker
|
src/tests/portpicker_test.py
|
Python
|
apache-2.0
| 16,155
| 0.000371
|
#!/usr/bin/python
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests for the portpicker module."""
from __future__ import print_function
import errno
import os
import random
import socket
import sys
import unittest
from contextlib import ExitStack
if sys.platform == 'win32':
import _winapi
else:
_winapi = None
try:
# pylint: disable=no-name-in-module
from unittest import mock # Python >= 3.3.
except ImportError:
import mock # https://pypi.python.org/pypi/mock
import portpicker
class PickUnusedPortTest(unittest.TestCase):
def IsUnusedTCPPort(self, port):
return self._bind(port, socket.SOCK_STREAM, socket.IPPROTO_TCP)
def IsUnusedUDPPort(self, port):
return self._bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
def setUp(self):
# So we can Bind even if portpicker.bind is stubbed out.
self._bind = portpicker.bind
portpicker._owned_ports.clear()
portpicker._free_ports.clear()
portpicker._random_ports.clear()
def testPickUnusedPortActuallyWorks(self):
"""This test can be flaky."""
for _ in range(10):
port = portpicker.pick_unused_port()
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
@unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
'no port server to test against')
def testPickUnusedCanSuccessfullyUsePortServer(self):
with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
portpicker._pick_unused_port_without_server.side_effect = (
Exception('eek!')
)
# Since _PickUnusedPortWithoutServer() raises an exception, if we
# can successfully obtain a port, the portserver must be working.
port = portpicker.pick_unused_port()
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
@unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
'no port server to test against')
def testPickUnusedCanSuccessfullyUsePortServerAddressKwarg(self):
with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
portpicker._pick_unused_port_without_server.side_effect = (
Exception('eek!')
)
# Since _PickUnusedPortWithoutServer() raises an exception, and
            # we've temporarily removed PORTSERVER_ADDRESS from os.environ, if
# we can successfully obtain a port, the portserver must be working.
addr = os.environ.pop('PORTSERVER_ADDRESS')
try:
port = portpicker.pick_unused_port(portserver_address=addr)
self.assertTrue(self.IsUnusedTCPPort(port))
                self.assertTrue(self.IsUnusedUDPPort(port))
finally:
os.environ['PORTSERVER_ADDRESS'] = addr
@unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
'no port server to test against')
def testGetPortFromPortServer(self):
"""Exercise the get_port_from_port_server() helper function."""
for _ in range(10):
port = portpicker.get_port_from_port_server(
os.environ['PORTSERVER_ADDRESS'])
self.assertTrue(self.IsUnusedTCPPort(port))
self.assertTrue(self.IsUnusedUDPPort(port))
def testSendsPidToPortServer(self):
with ExitStack() as stack:
if _winapi:
create_file_mock = mock.Mock()
create_file_mock.return_value = 0
read_file_mock = mock.Mock()
write_file_mock = mock.Mock()
read_file_mock.return_value = (b'42768\n', 0)
stack.enter_context(
mock.patch('_winapi.CreateFile', new=create_file_mock))
stack.enter_context(
mock.patch('_winapi.WriteFile', new=write_file_mock))
stack.enter_context(
mock.patch('_winapi.ReadFile', new=read_file_mock))
port = portpicker.get_port_from_port_server(
'portserver', pid=1234)
write_file_mock.assert_called_once_with(0, b'1234\n')
else:
server = mock.Mock()
server.recv.return_value = b'42768\n'
stack.enter_context(
mock.patch.object(socket, 'socket', return_value=server))
port = portpicker.get_port_from_port_server(
'portserver', pid=1234)
server.sendall.assert_called_once_with(b'1234\n')
self.assertEqual(port, 42768)
def testPidDefaultsToOwnPid(self):
with ExitStack() as stack:
stack.enter_context(
mock.patch.object(os, 'getpid', return_value=9876))
if _winapi:
create_file_mock = mock.Mock()
create_file_mock.return_value = 0
read_file_mock = mock.Mock()
write_file_mock = mock.Mock()
read_file_mock.return_value = (b'52768\n', 0)
stack.enter_context(
mock.patch('_winapi.CreateFile', new=create_file_mock))
stack.enter_context(
mock.patch('_winapi.WriteFile', new=write_file_mock))
stack.enter_context(
mock.patch('_winapi.ReadFile', new=read_file_mock))
port = portpicker.get_port_from_port_server('portserver')
write_file_mock.assert_called_once_with(0, b'9876\n')
else:
server = mock.Mock()
server.recv.return_value = b'52768\n'
stack.enter_context(
mock.patch.object(socket, 'socket', return_value=server))
port = portpicker.get_port_from_port_server('portserver')
server.sendall.assert_called_once_with(b'9876\n')
self.assertEqual(port, 52768)
@mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': 'portserver'})
def testReusesPortServerPorts(self):
with ExitStack() as stack:
if _winapi:
read_file_mock = mock.Mock()
read_file_mock.side_effect = [
(b'12345\n', 0),
(b'23456\n', 0),
(b'34567\n', 0),
]
stack.enter_context(mock.patch('_winapi.CreateFile'))
stack.enter_context(mock.patch('_winapi.WriteFile'))
stack.enter_context(
mock.patch('_winapi.ReadFile', new=read_file_mock))
else:
server = mock.Mock()
server.recv.side_effect = [b'12345\n', b'23456\n', b'34567\n']
stack.enter_context(
mock.patch.object(socket, 'socket', return_value=server))
self.assertEqual(portpicker.pick_unused_port(), 12345)
self.assertEqual(portpicker.pick_unused_port(), 23456)
portpicker.return_port(12345)
self.assertEqual(portpicker.pick_unused_port(), 12345)
@mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': ''})
def testDoesntReuseRandomPorts(self):
ports = set()
for _ in range(10):
try:
port = portpicker.pick_unused_port()
except portpicker.NoFreePortFoundError:
# This sometimes happens when not using portserver. Just
# skip to the next attempt.
continue
p
|
vmg/hg-stable
|
mercurial/copies.py
|
Python
|
gpl-2.0
| 12,819
| 0.001872
|
# copies.py - copy detection for Mercurial
#
# Copyright 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import util
import heapq
def _nonoverlap(d1, d2, d3):
"Return list of elements in d1 not in d2 or d3"
return sorted([d for d in d1 if d not in d3 and d not in d2])
def _dirname(f):
s = f.rfind("/")
if s == -1:
return ""
return f[:s]
def _findlimit(repo, a, b):
"""Find the earliest revision that's an ancestor of a or b but not both,
None if no such revision exists.
"""
# basic idea:
# - mark a and b with different sides
# - if a parent's children are all on the same side, the parent is
# on that side, otherwise it is on no side
# - walk the graph in topological order with the help of a heap;
# - add unseen parents to side map
# - clear side of any parent that has children on different sides
# - track number of interesting revs that might still be on a side
# - track the lowest interesting rev seen
# - quit when interesting revs is zero
cl = repo.changelog
working = len(cl) # pseudo rev for the working directory
if a is None:
a = working
if b is None:
b = working
side = {a: -1, b: 1}
visit = [-a, -b]
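    # Revisions are pushed negated so the min-heap pops the highest revision
    # first, i.e. the walk proceeds from the tips toward the root.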
heapq.heapify(visit)
interesting = len(visit)
hascommonancestor = False
limit = working
while interesting:
r = -heapq.heappop(visit)
if r == working:
parents = [cl.rev(p) for p in repo.dirstate.parents()]
else:
parents = cl.parentrevs(r)
for p in parents:
if p < 0:
continue
if p not in side:
# first time we see p; add it to visit
side[p] = side[r]
if side[p]:
interesting += 1
heapq.heappush(visit, -p)
elif side[p] and side[p] != side[r]:
# p was interesting but now we know better
side[p] = 0
interesting -= 1
hascommonancestor = True
if side[r]:
limit = r # lowest rev visited
interesting -= 1
if not hascommonancestor:
return None
return limit
def _chain(src, dst, a, b):
'''chain two sets of copies a->b'''
t = a.copy()
for k, v in b.iteritems():
if v in t:
# found a chain
if t[v] != k:
# file wasn't renamed back to itself
t[k] = t[v]
if v not in dst:
# chain was a rename, not a copy
del t[v]
if v in src:
# file is a copy of an existing file
t[k] = v
# remove criss-crossed copies
|
for k, v in t.items():
if k in src and v in dst:
del t[k]
return t
def _tracefile(fctx, actx):
'''return file context that is the ancestor of fctx present in actx'''
stop = actx.rev()
am = actx.manifest()
for f in fctx.ancestors():
        if am.get(f.path(), None) == f.filenode():
return f
if f.rev() < stop:
return None
def _dirstatecopies(d):
ds = d._repo.dirstate
c = ds.copies().copy()
for k in c.keys():
if ds[k] not in 'anm':
del c[k]
return c
def _forwardcopies(a, b):
'''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
# check for working copy
w = None
if b.rev() is None:
w = b
b = w.p1()
if a == b:
# short-circuit to avoid issues with merge states
return _dirstatecopies(w)
# find where new files came from
# we currently don't try to find where old files went, too expensive
# this means we can miss a case like 'hg rm b; hg cp a b'
cm = {}
missing = set(b.manifest().iterkeys())
missing.difference_update(a.manifest().iterkeys())
for f in missing:
ofctx = _tracefile(b[f], a)
if ofctx:
cm[f] = ofctx.path()
# combine copies from dirstate if necessary
if w is not None:
cm = _chain(a, w, cm, _dirstatecopies(w))
return cm
def _backwardrenames(a, b):
# Even though we're not taking copies into account, 1:n rename situations
# can still exist (e.g. hg cp a b; hg mv a c). In those cases we
# arbitrarily pick one of the renames.
f = _forwardcopies(b, a)
r = {}
for k, v in sorted(f.iteritems()):
# remove copies
if v in a:
continue
r[v] = k
return r
def pathcopies(x, y):
'''find {dst@y: src@x} copy mapping for directed compare'''
if x == y or not x or not y:
return {}
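    # If the common ancestor is x we only need forward copies; if it is y we
    # only need backward renames; otherwise walk back from x to the ancestor
    # and then forward to y, chaining the two mappings.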
a = y.ancestor(x)
if a == x:
return _forwardcopies(x, y)
if a == y:
return _backwardrenames(x, y)
return _chain(x, y, _backwardrenames(x, a), _forwardcopies(a, y))
def mergecopies(repo, c1, c2, ca):
"""
Find moves and copies between context c1 and c2 that are relevant
for merging.
Returns four dicts: "copy", "movewithdir", "diverge", and
"renamedelete".
"copy" is a mapping from destination name -> source name,
where source is in c1 and destination is in c2 or vice-versa.
"movewithdir" is a mapping from source name -> destination name,
where the file at source present in one context but not the other
needs to be moved to destination by the merge process, because the
other context moved the directory it is in.
"diverge" is a mapping of source name -> list of destination names
for divergent renames.
"renamedelete" is a mapping of source name -> list of destination
names for files deleted in c1 that were renamed in c2 or vice-versa.
"""
# avoid silly behavior for update from empty dir
if not c1 or not c2 or c1 == c2:
return {}, {}, {}, {}
# avoid silly behavior for parent -> working dir
if c2.node() is None and c1.node() == repo.dirstate.p1():
return repo.dirstate.copies(), {}, {}, {}
limit = _findlimit(repo, c1.rev(), c2.rev())
if limit is None:
# no common ancestor, no copies
return {}, {}, {}, {}
m1 = c1.manifest()
m2 = c2.manifest()
ma = ca.manifest()
def makectx(f, n):
if len(n) != 20: # in a working context?
if c1.rev() is None:
return c1.filectx(f)
return c2.filectx(f)
return repo.filectx(f, fileid=n)
ctx = util.lrucachefunc(makectx)
copy = {}
movewithdir = {}
fullcopy = {}
diverge = {}
def _checkcopies(f, m1, m2):
checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)
repo.ui.debug(" searching for copies back to rev %d\n" % limit)
u1 = _nonoverlap(m1, m2, ma)
u2 = _nonoverlap(m2, m1, ma)
if u1:
repo.ui.debug(" unmatched files in local:\n %s\n"
% "\n ".join(u1))
if u2:
repo.ui.debug(" unmatched files in other:\n %s\n"
% "\n ".join(u2))
for f in u1:
_checkcopies(f, m1, m2)
for f in u2:
_checkcopies(f, m2, m1)
renamedelete = {}
renamedelete2 = set()
diverge2 = set()
for of, fl in diverge.items():
if len(fl) == 1 or of in c1 or of in c2:
del diverge[of] # not actually divergent, or not a rename
if of not in c1 and of not in c2:
# renamed on one side, deleted on the other side, but filter
# out files that have been renamed and then deleted
renamedelete[of] = [f for f in fl if f in c1 or f in c2]
renamedelete2.update(fl) # reverse map for below
else:
diverge2.update(fl) # reverse map for below
if fullcopy:
repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
"% = renamed and deleted):\n")
for f in sorted(fullcopy):
note = ""
if f in copy:
note += "*"
if f in diverge2:
note += "!"
|
douban/dpark
|
dpark/broadcast.py
|
Python
|
bsd-3-clause
| 24,223
| 0.001238
|
from __future__ import absolute_import
import os
import zmq
import uuid as uuid_pkg
import time
import binascii
import random
import socket
import struct
import marshal
import mmap
from multiprocessing import Manager, Condition
from mmap import ACCESS_WRITE, ACCESS_READ
from dpark.utils.log import get_logger
from dpark.utils import compress, decompress, spawn
from dpark.cache import Cache
from dpark.serialize import marshalable
from dpark.env import env
import six
from six.moves import range, map, cPickle
try:
from itertools import izip
except ImportError:
izip = zip
logger = get_logger(__name__)
MARSHAL_TYPE, PICKLE_TYPE = list(range(2))
BLOCK_SHIFT = 20
BLOCK_SIZE = 1 << BLOCK_SHIFT
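# 1 << 20 bytes, i.e. broadcast payloads are chunked into 1 MiB blocks.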
GUIDE_ADDR = 'NewBroadcastGuideAddr'
DOWNLOAD_ADDR = 'NewDownloadAddr'
BATCHED_BLOCKS = 3
GUIDE_STOP, GUIDE_GET_SOURCES, GUIDE_SET_SOURCES, GUIDE_REPORT_BAD = list(range(4))
SERVER_STOP, SERVER_FETCH, SERVER_FETCH_FAIL, SERVER_FETCH_OK, \
DATA_GET, DATA_GET_OK, DATA_GET_FAIL, DATA_DOWNLOADING, SERVER_CLEAR_ITEM = list(range(9))
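# GUIDE_* messages track which hosts hold which blocks; the SERVER_* and
# DATA_* messages below drive block fetching between download servers.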
class GuideManager(object):
def __init__(self):
self._started = False
self.guides = {}
self.host = socket.gethostname()
self.guide_thread = None
self.guide_addr = None
self.register_addr = {}
self.ctx = zmq.Context()
def start(self):
if self._started:
return
self._started = True
self.guide_thread = self.start_guide()
env.register(GUIDE_ADDR, self.guide_addr)
def start_guide(self):
sock = self.ctx.socket(zmq.REP)
port = sock.bind_to_random_port('tcp://0.0.0.0')
self.guide_addr = 'tcp://%s:%d' % (self.host, port)
def run():
logger.debug("guide start at %s", self.guide_addr)
while self._started:
if not sock.poll(1000, zmq.POLLIN):
continue
type_, msg = sock.recv_pyobj()
if type_ == GUIDE_STOP:
sock.send_pyobj(0)
break
elif type_ == GUIDE_GET_SOURCES:
uuid = msg
sources = None
if uuid in self.guides:
sources = self.guides[uuid]
else:
logger.warning('uuid %s NOT REGISTERED in guide server', uuid)
sock.send_pyobj(sources)
elif type_ == GUIDE_SET_SOURCES:
uuid, addr, bitmap = msg
if any(bitmap):
sources = None
if uuid in self.guides:
sources = self.guides[uuid]
if sources:
sources[addr] = bitmap
else:
|
self.guides[uuid] = {addr: bitmap}
self.register_addr[uuid] = addr
sock.send_pyobj(None)
                elif type_ == GUIDE_REPORT_BAD:
uuid, addr = msg
sources = self.guides[uuid]
if addr in sources:
if addr != self.register_addr[uuid]:
del sources[addr]
else:
logger.warning('The addr %s to delete is the register Quit!!!', addr)
sock.send_pyobj(None)
else:
logger.error('Unknown guide message: %s %s', type_, msg)
sock.send_pyobj(None)
return spawn(run)
def shutdown(self):
if not self._started:
return
self._started = False
if self.guide_thread and self.guide_addr. \
startswith('tcp://%s:' % socket.gethostname()):
self.guide_thread.join(timeout=1)
if self.guide_thread.is_alive():
logger.warning("guide_thread not stopped.")
self.guide_addr = None
def check_memory(location):
try:
import psutil
pid = os.getpid()
p = psutil.Process(pid)
rss = p.memory_info().rss >> 20
        logger.info('memory rss %d MB in host %s at %s',
                    rss, socket.gethostname(), location)
except ImportError:
logger.warning('import psutil failed')
class DownloadManager(object):
def __init__(self):
self._started = False
self.server_thread = None
self.download_threads = {}
self.uuid_state_dict = None
self.uuid_map_dict = None
self.guide_addr = None
self.server_addr = None
self.host = None
self.ctx = None
self.random_inst = None
self.master_broadcast_blocks = {}
def start(self):
if self._started:
return
self.manager = manager = Manager()
self.shared_uuid_fn_dict = manager.dict()
self.shared_uuid_map_dict = manager.dict()
self.shared_master_blocks = manager.dict()
self.download_cond = Condition()
self._started = True
self.ctx = zmq.Context()
self.host = socket.gethostname()
if GUIDE_ADDR not in env.environ:
start_guide_manager()
self.guide_addr = env.get(GUIDE_ADDR)
self.random_inst = random.SystemRandom()
self.server_addr, self.server_thread = self.start_server()
self.uuid_state_dict = {}
self.uuid_map_dict = {}
self.master_broadcast_blocks = {}
env.register(DOWNLOAD_ADDR, self.server_addr)
def start_server(self):
sock = self.ctx.socket(zmq.REP)
sock.setsockopt(zmq.LINGER, 0)
port = sock.bind_to_random_port("tcp://0.0.0.0")
server_addr = 'tcp://%s:%d' % (self.host, port)
guide_sock = self.ctx.socket(zmq.REQ)
guide_sock.setsockopt(zmq.LINGER, 0)
guide_sock.connect(self.guide_addr)
def run():
logger.debug("server started at %s", server_addr)
while self._started:
if not sock.poll(1000, zmq.POLLIN):
continue
type_, msg = sock.recv_pyobj()
logger.debug('server recv: %s %s', type_, msg)
if type_ == SERVER_STOP:
sock.send_pyobj(None)
break
elif type_ == SERVER_FETCH:
uuid, indices, client_addr = msg
if uuid in self.master_broadcast_blocks:
block_num = len(self.master_broadcast_blocks[uuid])
bls = []
for index in indices:
if index >= block_num:
logger.warning('input index too big %s for '
'len of blocks %d from host %s',
str(indices), block_num, client_addr)
sock.send_pyobj((SERVER_FETCH_FAIL, None))
else:
bls.append(self.master_broadcast_blocks[uuid][index])
sock.send_pyobj((SERVER_FETCH_OK, (indices, bls)))
elif uuid in self.uuid_state_dict:
fd = os.open(self.uuid_state_dict[uuid][0], os.O_RDONLY)
mmfp = mmap.mmap(fd, 0, access=ACCESS_READ)
os.close(fd)
bitmap = self.uuid_map_dict[uuid]
block_num = len(bitmap)
bls = []
for index in indices:
if index >= block_num:
logger.warning('input index too big %s for '
'len of blocks %d from host %s',
str(indices), block_num, client_addr)
sock.send_pyobj((SERVER_FETCH_FAIL, None))
else:
mmfp.seek(bitmap[index][0])
block = mmfp.read(bitmap[index][1])
|
tkrotoff/QuarkPlayer
|
buildbot/upload_package.py
|
Python
|
gpl-3.0
| 2,173
| 0.018408
|
"""
QuarkPlayer, a Phonon media player
Copyright (C) 2008-2009 Tanguy Krotoff <tkrotoff@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, os, glob
import ftplib
def ftp_upload_files(host, login, password, host_path, files_to_upload):
"""
Uploads files onto a FTP server in binary mode.
"""
ftp = ftplib.FTP(host)
ftp.login(login, password)
for file_to_upload in files_to_upload:
print 'upload {0} to ftp://{1}@{2}:{3}'.format(file_to_upload, login, host, host_path)
|
file = open(file_to_upload, 'rb')
destpath = os.path.join(host_path, os.path.basename(file_to_upload))
ftp.storbinary('STOR ' + destpath, file)
mode = '644'
print 'chmod {0} {1}'.format(mode, file_to_upload)
ftp.voidcmd('SITE CHMOD ' + mode + ' ' + destpath)
ftp.quit()
if __name__ == "__main__":
loginfile = 'login.txt'
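    # login.txt is expected to hold the FTP username on the first line and the
    # password on the second; it is deleted right after being read.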
file = open(loginfile, 'r')
login = file.readline().strip()
password = file.readline().strip()
    file.close()
os.remove(loginfile)
files_to_upload = []
for i, pattern in enumerate(sys.argv):
if i > 0:
# Fix a bug under Windows,
# this script gets called with these arguments:
# ['upload_package.py', "'*.exe'", "'*.deb'", "'*.rpm'"]
# instead of ['upload_package.py', '*.exe', '*.deb', '*.rpm']
pattern = pattern.replace('\'', '')
pattern = pattern.replace('\"', '')
files_to_upload.extend(glob.glob(pattern))
ftp_upload_files('192.168.0.12', login, password, '/var/www/snapshots/', files_to_upload)
for file_to_upload in files_to_upload:
print 'rm {0}'.format(file_to_upload)
os.remove(file_to_upload)
|
DjangoNYC/squid
|
squid/core/forms.py
|
Python
|
mit
| 352
| 0
|
from django import forms
from .models import MemberRSVP
class EventAttendeeForm(forms.ModelForm):
id = forms.IntegerField(widget=forms.HiddenInput)
worked_on = forms.CharField(widget=forms.Textarea(attrs={
'cols': '35',
'rows': '5'
}))
class Meta:
model = MemberRSVP
fields = ('id', 'worked_on',)
| |
alogg/dolfin
|
demo/undocumented/tensor-weighted-poisson/python/generate_data.py
|
Python
|
gpl-3.0
| 1,642
| 0
|
"""This program is used to generate the coefficients c00, c01 and c11
used in the demo."""
# Copyright (C) 2007-2009 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2009-12-16
# Last changed: 2009-12-16
from dolfin import *
# Create mesh
mesh = UnitSquareMesh(32, 32)
# Create mesh functions for c00, c01, c11
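# Each mesh function holds one entry of the symmetric 2x2 coefficient tensor
# (c10 is implied by symmetry), stored per cell.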
c00 = MeshFunction("double", mesh, 2)
c01 = MeshFunction("double", mesh, 2)
c11 = MeshFunction("double", mesh, 2)
# Iterate over mesh and set values
for cell in cells(mesh):
if cell.midpoint().x() < 0.5:
c00[cell] = 1.0
c01[cell] = 0.3
c11[cell] = 2.0
else:
c00[cell] = 3.0
c01[cell] = 0.5
c11[cell] = 4.0
# Store to file
mesh_file = File("mesh.xml.gz")
c00_file = File("c00.xml.gz")
c01_file = File("c01.xml.gz")
c11_file = File("c11.xml.gz")
mesh_file << mesh
c00_file << c00
c01_file << c01
c11_file << c11
# Plot mesh functions
plot(c00, title="C00")
plot(c01, title="C01")
plot(c11, title="C11")
interactive()
|
helix84/activae
|
src/Type.py
|
Python
|
bsd-3-clause
| 2,833
| 0.003883
|
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the CENATIC nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Edificio
# de Servicios Sociales: C/ Vistahermosa, 1, 3ra planta, 06200
# Almendralejo (Badajoz), Spain
from DBSlayer import Query
def get_type_name (type_id):
l = get_type (type_id)
if not l:
return None
return l['name']
def get_type (type_id):
q = "SELECT id, type "\
"FROM asset_types WHERE id=%(type_id)s;" % locals()
query = Query(q)
if len(query) != 1:
return None
ret = {'id': type_id,
'name': query['type'][0]}
return ret
def get_types ():
q = "SELECT id, type "\
"FROM asset_types;" % locals()
query = Query(q)
if not len(query):
return None
ret = []
for x in query:
d={'id': query[x]['id'],
'name': query[x]['type']}
ret.append(d)
return ret
def test ():
import sys
try:
type_id = sys.argv[1]
except IndexError:
print 'Required test parameters: type_id'
sys.exit(1)
print 'Types:', get_types()
print 'type_id %s, type_name %s' % (type_id, get_type_name(type_id))
print get_type(type_id),
if __name__ == '__main__':
test()
|
Galithil/genologics_sql
|
doc/source/conf.py
|
Python
|
mit
| 9,443
| 0.006036
|
# -*- coding: utf-8 -*-
#
# genologics-sql documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 27 15:17:17 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'genologics-sql'
copyright = u'2016, Denis Moreno'
author = u'Denis Moreno'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'genologics-sqldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'genologics-sql.tex', u'genologics-sql Documentation',
u'Denis Moreno', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents
|
bamueh/dark-matter
|
test/test_simplify.py
|
Python
|
mit
| 1,892
| 0
|
from unittest import TestCase
from dark.simplify import simplifyTitle
class SimplifyTitle(TestCase):
"""
Tests for the dark.simplify.simplifyTitle function.
"""
def testEmptyTitle(self):
"""
Simplifying an empty title with a non-empty target should return
an empty title.
"""
self.assertEqual('', simplifyTitle('', 'xxx'))
def testEmtpyTitleWithEmptyTarget(self):
"""
Simplifying an empty title should return an empty title.
"""
self.assertEqual('', simplifyTitle('', ''))
def testPrefix(self):
"""
When the target is a prefix, the title up to the target (including the
whole word that has the prefix) should be returned.
"""
self.assertEqual(
'Funny sea lion polyoma',
simplifyTitle('Funny sea lion polyomavirus 1 CSL6994', 'polyoma'))
def testSuffix(self):
"""
When the target is a suffix, the title up to the target (including the
        whole word that has the suffix) should be returned.
"""
self.assertEqual(
'Funny sea lion polyomavirus',
            simplifyTitle('Funny sea lion polyomavirus 1 CSL6994', 'virus'))
def testContained(self):
"""
When the target is contained, the title up to the target (including the
prefix of the word that has the target) should be returned.
"""
self.assertEqual(
'Funny sea lion polyoma',
simplifyTitle('Funny sea lion polyomavirus 1 CSL6994', 'yoma'))
def testExact(self):
"""
When the target is the same as a word in the title, the title up to
and including the target should be returned.
"""
self.assertEqual(
'Funny sea lion',
simplifyTitle('Funny sea lion polyomavirus 1 CSL6994', 'lion'))
|
javiergarridomellado/ej5
|
apu/forms.py
|
Python
|
gpl-2.0
| 745
| 0.02953
|
from django import forms
from apu.models import Persona
class FormularioContactos(forms.Form):
asunto=forms.CharField()
email=forms.EmailField(required=False)
mensaje=forms.CharField()
class PersonaForm(forms.ModelForm):
nombre = forms.CharField(max_length=50,help_text="nombre Persona")
dni = forms.CharField(max_length=9,help_text="dni Persona")
    pais = forms.CharField(max_length=20,help_text="pais Persona")
equipo = forms.CharField(max_length=10,help_text="equipo Persona")
    hobbies = forms.CharField(widget=forms.Textarea, max_length=200,help_text="hobbies Persona")
#password = models.PasswordField(max_length=15)
fondo = forms.IntegerField()
class Meta:
        model = Persona
fields = ('nombre','dni','pais','equipo','hobbies','fondo')
|
nathanielvarona/airflow
|
airflow/providers/google/cloud/example_dags/example_dataflow_flex_template.py
|
Python
|
apache-2.0
| 2,774
| 0.001802
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG for Google Cloud Dataflow service
"""
import os
from airflow import models
from airflow.providers.google.cloud.operators.dataflow import DataflowStartFlexTemplateOperator
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
DATAFLOW_FLEX_TEMPLATE_JOB_NAME = os.environ.get(
'GCP_DATAFLOW_FLEX_TEMPLATE_JOB_NAME', "dataflow-flex-template"
)
# For simplicity we use the same topic name as the subscription name.
PUBSUB_FLEX_TEMPLATE_TOPIC = os.environ.get(
'GCP_DATAFLOW_PUBSUB_FLEX_TEMPLATE_TOPIC', "dataflow-flex-template"
)
PUBSUB_FLEX_TEMPLATE_SUBSCRIPTION = PUBSUB_FLEX_TEMPLATE_TOPIC
GCS_FLEX_TEMPLATE_TEMPLATE_PATH = os.environ.get(
'GCP_DATAFLOW_GCS_FLEX_TEMPLATE_TEMPLATE_PATH',
"gs://INVALID BUCKET NAME/samples/dataflow/templates/streaming-beam-sql.json",
)
BQ_FLEX_TEMPLATE_DATASET = os.environ.get('GCP_DATAFLOW_BQ_FLEX_TEMPLATE_DATASET', 'airflow_dataflow_samples')
BQ_FLEX_TEMPLATE_LOCATION = os.environ.get('GCP_DATAFLOW_BQ_FLEX_TEMPLATE_LOCATION', 'us-west1')
with models.DAG(
dag_id="example_gcp_dataflow_flex_template_java",
start_date=days_ago(1),
schedule_interval=None, # Override to match your needs
) as dag_flex_template:
# [START howto_operator_start_template_job]
start_flex_template = DataflowStartFlexTemplateOperator(
task_id="start_flex_template_streaming_beam_sql",
body={
"launchParameter": {
"containerSpecGcsPath": GCS_FLEX_TEMPLATE_TEMPLATE_PATH,
"jobName": DATAFLOW_FLEX_TEMPLATE_JOB_NAME,
"parameters": {
"inputSubscription": PUBSUB_FLEX_TEMPLATE_SUBSCRIPTION,
"outputTable": f"{GCP_PROJECT_ID}:{BQ_FLEX_TEMPLATE_DATASET}.streaming_beam_sql",
},
}
},
do_xcom_push=True,
location=BQ_FLEX_TEMPLATE_LOCATION,
)
# [END howto_operator_start_template_job]
|
dscorbett/pygments
|
pygments/lexers/_postgres_builtins.py
|
Python
|
bsd-2-clause
| 12,184
| 0.000246
|
"""
pygments.lexers._postgres_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Self-updating data files for PostgreSQL lexer.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated: please edit them if you like wasting your time.
KEYWORDS = (
'ABORT',
'ABSOLUTE',
'ACCESS',
'ACTION',
'ADD',
'ADMIN',
|
'AFTER',
'AGGREGATE',
'ALL',
'ALSO',
'ALTER',
'ALWAYS',
'ANALYSE',
'ANALYZE',
'AND',
'ANY',
'ARRAY',
'AS',
'ASC',
'ASSERTION',
'ASSIGNMENT',
'ASYMMETRIC',
'AT',
'ATTACH',
'ATTRIBUTE',
'AUTHORIZATION',
'BACKWARD',
'BEFORE',
'BEGIN',
'BETWEEN',
'BIGINT',
'BINARY',
'BIT',
'BOOLEAN',
'BOTH',
'BY',
'CACHE',
'CALL',
'CALLED',
'CASCADE',
'CASCADED',
'CASE',
'CAST',
'CATALOG',
'CHAIN',
'CHAR',
'CHARACTER',
'CHARACTERISTICS',
'CHECK',
'CHECKPOINT',
'CLASS',
'CLOSE',
'CLUSTER',
'COALESCE',
'COLLATE',
'COLLATION',
'COLUMN',
'COLUMNS',
'COMMENT',
'COMMENTS',
'COMMIT',
'COMMITTED',
'CONCURRENTLY',
'CONFIGURATION',
'CONFLICT',
'CONNECTION',
'CONSTRAINT',
'CONSTRAINTS',
'CONTENT',
'CONTINUE',
'CONVERSION',
'COPY',
'COST',
'CREATE',
'CROSS',
'CSV',
'CUBE',
'CURRENT',
'CURRENT_CATALOG',
'CURRENT_DATE',
'CURRENT_ROLE',
'CURRENT_SCHEMA',
'CURRENT_TIME',
'CURRENT_TIMESTAMP',
'CURRENT_USER',
'CURSOR',
'CYCLE',
'DATA',
'DATABASE',
'DAY',
'DEALLOCATE',
'DEC',
'DECIMAL',
'DECLARE',
'DEFAULT',
'DEFAULTS',
'DEFERRABLE',
'DEFERRED',
'DEFINER',
'DELETE',
'DELIMITER',
'DELIMITERS',
'DEPENDS',
'DESC',
'DETACH',
'DICTIONARY',
'DISABLE',
'DISCARD',
'DISTINCT',
'DO',
'DOCUMENT',
'DOMAIN',
'DOUBLE',
'DROP',
'EACH',
'ELSE',
'ENABLE',
'ENCODING',
'ENCRYPTED',
'END',
'ENUM',
'ESCAPE',
'EVENT',
'EXCEPT',
'EXCLUDE',
'EXCLUDING',
'EXCLUSIVE',
'EXECUTE',
'EXISTS',
'EXPLAIN',
'EXPRESSION',
'EXTENSION',
'EXTERNAL',
'EXTRACT',
'FALSE',
'FAMILY',
'FETCH',
'FILTER',
'FIRST',
'FLOAT',
'FOLLOWING',
'FOR',
'FORCE',
'FOREIGN',
'FORWARD',
'FREEZE',
'FROM',
'FULL',
'FUNCTION',
'FUNCTIONS',
'GENERATED',
'GLOBAL',
'GRANT',
'GRANTED',
'GREATEST',
'GROUP',
'GROUPING',
'GROUPS',
'HANDLER',
'HAVING',
'HEADER',
'HOLD',
'HOUR',
'IDENTITY',
'IF',
'ILIKE',
'IMMEDIATE',
'IMMUTABLE',
'IMPLICIT',
'IMPORT',
'IN',
'INCLUDE',
'INCLUDING',
'INCREMENT',
'INDEX',
'INDEXES',
'INHERIT',
'INHERITS',
'INITIALLY',
'INLINE',
'INNER',
'INOUT',
'INPUT',
'INSENSITIVE',
'INSERT',
'INSTEAD',
'INT',
'INTEGER',
'INTERSECT',
'INTERVAL',
'INTO',
'INVOKER',
'IS',
'ISNULL',
'ISOLATION',
'JOIN',
'KEY',
'LABEL',
'LANGUAGE',
'LARGE',
'LAST',
'LATERAL',
'LEADING',
'LEAKPROOF',
'LEAST',
'LEFT',
'LEVEL',
'LIKE',
'LIMIT',
'LISTEN',
'LOAD',
'LOCAL',
'LOCALTIME',
'LOCALTIMESTAMP',
'LOCATION',
'LOCK',
'LOCKED',
'LOGGED',
'MAPPING',
'MATCH',
'MATERIALIZED',
'MAXVALUE',
'METHOD',
'MINUTE',
'MINVALUE',
'MODE',
'MONTH',
'MOVE',
'NAME',
'NAMES',
'NATIONAL',
'NATURAL',
'NCHAR',
'NEW',
'NEXT',
'NFC',
'NFD',
'NFKC',
'NFKD',
'NO',
'NONE',
'NORMALIZE',
'NORMALIZED',
'NOT',
'NOTHING',
'NOTIFY',
'NOTNULL',
'NOWAIT',
'NULL',
'NULLIF',
'NULLS',
'NUMERIC',
'OBJECT',
'OF',
'OFF',
'OFFSET',
'OIDS',
'OLD',
'ON',
'ONLY',
'OPERATOR',
'OPTION',
'OPTIONS',
'OR',
'ORDER',
'ORDINALITY',
'OTHERS',
'OUT',
'OUTER',
'OVER',
'OVERLAPS',
'OVERLAY',
'OVERRIDING',
'OWNED',
'OWNER',
'PARALLEL',
'PARSER',
'PARTIAL',
'PARTITION',
'PASSING',
'PASSWORD',
'PLACING',
'PLANS',
'POLICY',
'POSITION',
'PRECEDING',
'PRECISION',
'PREPARE',
'PREPARED',
'PRESERVE',
'PRIMARY',
'PRIOR',
'PRIVILEGES',
'PROCEDURAL',
'PROCEDURE',
'PROCEDURES',
'PROGRAM',
'PUBLICATION',
'QUOTE',
'RANGE',
'READ',
'REAL',
'REASSIGN',
'RECHECK',
'RECURSIVE',
'REF',
'REFERENCES',
'REFERENCING',
'REFRESH',
'REINDEX',
'RELATIVE',
'RELEASE',
'RENAME',
'REPEATABLE',
'REPLACE',
'REPLICA',
'RESET',
'RESTART',
'RESTRICT',
'RETURNING',
'RETURNS',
'REVOKE',
'RIGHT',
'ROLE',
'ROLLBACK',
'ROLLUP',
'ROUTINE',
'ROUTINES',
'ROW',
'ROWS',
'RULE',
'SAVEPOINT',
'SCHEMA',
'SCHEMAS',
'SCROLL',
'SEARCH',
'SECOND',
'SECURITY',
'SELECT',
'SEQUENCE',
'SEQUENCES',
'SERIALIZABLE',
'SERVER',
'SESSION',
'SESSION_USER',
'SET',
'SETOF',
'SETS',
'SHARE',
'SHOW',
'SIMILAR',
'SIMPLE',
'SKIP',
'SMALLINT',
'SNAPSHOT',
'SOME',
'SQL',
'STABLE',
'STANDALONE',
'START',
'STATEMENT',
'STATISTICS',
'STDIN',
'STDOUT',
'STORAGE',
'STORED',
'STRICT',
'STRIP',
'SUBSCRIPTION',
'SUBSTRING',
'SUPPORT',
'SYMMETRIC',
'SYSID',
'SYSTEM',
'TABLE',
'TABLES',
'TABLESAMPLE',
'TABLESPACE',
'TEMP',
'TEMPLATE',
'TEMPORARY',
'TEXT',
'THEN',
'TIES',
'TIME',
'TIMESTAMP',
'TO',
'TRAILING',
'TRANSACTION',
'TRANSFORM',
'TREAT',
'TRIGGER',
'TRIM',
'TRUE',
'TRUNCATE',
'TRUSTED',
'TYPE',
'TYPES',
'UESCAPE',
'UNBOUNDED',
'UNCOMMITTED',
'UNENCRYPTED',
'UNION',
'UNIQUE',
'UNKNOWN',
'UNLISTEN',
'UNLOGGED',
'UNTIL',
'UPDATE',
'USER',
'USING',
'VACUUM',
'VALID',
'VALIDATE',
'VALIDATOR',
'VALUE',
'VALUES',
'VARCHAR',
'VARIADIC',
'VARYING',
'VERBOSE',
'VERSION',
'VIEW',
'VIEWS',
'VOLATILE',
'WHEN',
'WHERE',
'WHITESPACE',
'WINDOW',
'WITH',
'WITHIN',
'WITHOUT',
'WORK',
'WRAPPER',
'WRITE',
'XML',
'XMLATTRIBUTES',
'XMLCONCAT',
'XMLELEMENT',
'XMLEXISTS',
'XMLFOREST',
'XMLNAMESPACES',
'XMLPARSE',
'XMLPI',
'XMLROOT',
'XMLSERIALIZE',
'XMLTABLE',
'YEAR',
'YES',
'ZONE',
)
DATATYPES = (
'bigint',
'bigserial',
'bit',
'bit varying',
'bool',
'boolean',
'box',
'bytea',
'char',
'character',
'character varying',
'cidr',
'circle',
'date',
'decimal',
'double precision',
'float4',
'float8',
'inet',
'int',
'int2',
'int4',
'int8',
'integer',
'interval',
'json',
'jsonb',
'line',
'lseg',
'macaddr',
'macaddr8',
'money',
'numeric',
'path',
'pg_lsn',
'pg_snapshot',
'point',
'polygon',
'real',
'serial',
'serial2',
'serial4',
'serial8',
'smallint',
'smallserial',
'text',
'time',
'timestamp',
'timestamptz',
'timetz',
'tsquery',
'tsvector',
'txid_snapshot',
'uuid',
'varbit',
'varchar',
'with time zone',
'without time zone',
'xml',
)
PSEUDO_TYPES = (
'any',
'anyarray',
'anycompatible',
'anycompatiblearray',
'anycompatiblenonarray',
'anycompatiblerange',
'anyelement',
'anyenum',
'anynonarray',
'anyrange',
'cstring',
'event_trigger',
'fdw_handler',
'index_am_handler',
'internal',
'language_handler',
'pg_ddl_command',
'record',
'table_am_handler
|
pombreda/swarming
|
appengine/auth_service/common/importer.py
|
Python
|
apache-2.0
| 14,804
| 0.010875
|
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Imports groups from some external tar.gz bundle or plain text list.
External URL should serve *.tar.gz file with the following file structure:
<external group system name>/<group name>:
userid
userid
...
For example ldap.tar.gz may look like:
ldap/trusted-users:
jane
joe
...
ldap/all:
jane
joe
...
Each tarball may have groups from multiple external systems, but groups from
some external system must not be split between multiple tarballs. When importer
sees <external group system name>/* in a tarball, it modifies group list from
that system on the server to match group list in the tarball _exactly_,
including removal of groups that are on the server, but no longer present in
the tarball.
Plain list format should have one userid per line and can only describe a single
group in a single system. Such groups will be added to 'external/*' groups
namespace. Removing such group from importer config will remove it from
service too.
"""
import collections
import contextlib
import logging
import StringIO
import tarfile
from google.appengine.api import app_identity
from google.appengine.ext import ndb
from components import auth
from components import utils
from components.auth import model
class BundleImportError(Exception):
"""Base class for errors while fetching external bundle."""
class BundleFetchError(BundleImportError):
"""Failed to fetch the archive from remote URL."""
def __init__(self, url, status_code, content):
super(BundleFetchError, self).__init__()
self.url = url
self.status_code = status_code
self.content = content
def __str__(self):
return 'Request to %s failed with code %d:\n%r' % (
self.url, self.status_code, self.content)
class BundleUnpackError(BundleImportError):
"""Failed to untar the archive."""
def __init__(self, inner_exc):
super(BundleUnpackError, self).__init__()
self.inner_exc = inner_exc
def __str__(self):
return 'Not a valid tar archive: %s' % self.inner_exc
class BundleBadFormatError(BundleImportError):
"""Group file in bundle has invalid format."""
def __init__(self, inner_exc):
super(BundleBadFormatError, self).__init__()
self.inner_exc = inner_exc
def __str__(self):
return 'Bundle contains invalid group file: %s' % self.inner_exc
def config_key():
"""Key of GroupImporterConfig singleton entity."""
return ndb.Key('GroupImporterConfig', 'config')
class GroupImporterConfig(ndb.Model):
"""Singleton entity with group importer configuration JSON."""
config = ndb.JsonProperty()
modified_by = auth.IdentityProperty(indexed=False)
  modified_ts = ndb.DateTimeProperty(auto_now=True, indexed=False)
def is_valid_config(config):
"""Checks config for correctness."""
if not isinstance(config, list):
return False
seen_systems = set(['external'])
seen_groups = set()
for item in config:
if not isinstance(item, dict):
return False
# 'format' is an optional string describing the format of the imported
# source. The default format is 'tarball'.
fmt = item.get('format', 'tarball')
if fmt not in ['tarball', 'plainlist']:
return False
# 'url' is a required string: where to fetch groups from.
url = item.get('url')
if not url or not isinstance(url, basestring):
return False
# 'oauth_scopes' is an optional list of strings: used when generating OAuth
# access_token to put in Authorization header.
oauth_scopes = item.get('oauth_scopes')
if oauth_scopes is not None:
if not all(isinstance(x, basestring) for x in oauth_scopes):
return False
# 'domain' is an optional string: will be used when constructing emails from
# naked usernames found in imported groups.
domain = item.get('domain')
if domain and not isinstance(domain, basestring):
return False
# 'tarball' format uses 'systems' and 'groups' fields.
if fmt == 'tarball':
# 'systems' is a required list of strings: group systems expected to be
# found in the archive (they act as prefixes to group names, e.g 'ldap').
systems = item.get('systems')
if not systems or not isinstance(systems, list):
return False
if not all(isinstance(x, basestring) for x in systems):
return False
# There should be no overlap in systems between different bundles.
if set(systems) & seen_systems:
return False
seen_systems.update(systems)
# 'groups' is an optional list of strings: if given, filters imported
# groups only to this list.
groups = item.get('groups')
if groups and not all(isinstance(x, basestring) for x in groups):
return False
elif fmt == 'plainlist':
# 'group' is a required name of imported group. The full group name will
# be 'external/<group>'.
group = item.get('group')
if not group or not isinstance(group, basestring) or group in seen_groups:
return False
seen_groups.add(group)
else:
assert False, 'Unreachable'
return True
def read_config():
"""Returns currently stored config or [] if not set."""
e = config_key().get()
return (e.config if e else []) or []
def write_config(config):
"""Updates stored configuration."""
if not is_valid_config(config):
raise ValueError('Invalid config')
e = GroupImporterConfig(
key=config_key(),
config=config,
modified_by=auth.get_current_identity())
e.put()
def import_external_groups():
"""Refetches all external groups.
Runs as a cron task. Raises BundleImportError in case of import errors.
"""
  # Missing config is not an error.
config = read_config()
if not config:
logging.info('Not configured')
return
if not is_valid_config(config):
raise BundleImportError('Bad config')
# Fetch all files specified in config in parallel.
futures = [fetch_file_async(p['url'], p.get('oauth_scopes')) for p in config]
# {system name -> group name -> list of identities}
bundles = {}
for p, future in zip(config, futures):
fmt = p.get('format', 'tarball')
# Unpack tarball into {system name -> group name -> list of identities}.
if fmt == 'tarball':
fetched = load_tarball(
future.get_result(), p['systems'], p.get('groups'), p.get('domain'))
assert not (
set(fetched) & set(bundles)), (fetched.keys(), bundles.keys())
bundles.update(fetched)
continue
# Add plainlist group to 'external/*' bundle.
if fmt == 'plainlist':
group = load_group_file(future.get_result(), p.get('domain'))
name = 'external/%s' % p['group']
if 'external' not in bundles:
bundles['external'] = {}
assert name not in bundles['external'], name
bundles['external'][name] = group
continue
assert False, 'Unreachable'
# Nothing to process?
if not bundles:
return
@ndb.transactional
def snapshot_groups():
"""Fetches all existing groups and AuthDB revision number."""
groups = model.AuthGroup.query(ancestor=model.root_key()).fetch_async()
return auth.get_auth_db_revision(), groups.get_result()
@ndb.transactional
def apply_import(revision, entities_to_put, keys_to_delete):
"""Transactionally puts and deletes a bunch of entities."""
# DB changed between transactions, retry.
if auth.get_auth_db_revision() != revision:
return False
# Apply mutations, bump revision number.
futures = []
futures.extend(ndb.put_multi_async(entities_to_put))
futures.extend(ndb.delete_multi_async(keys_to_delete))
ndb.Future.wait_all(futures)
if any(f.get_exception() for f in futures):
raise ndb.Rollback()
auth.replicate_auth_db()
return True
# Try to apply the change until success or deadline. Split transaction into
# two (assuming AuthDB changes infrequently) to avoid reading and writing too
# much stuff from within a single transaction (and to avoid keeping the
# transaction
|
taigaio/taiga-back
|
taiga/users/migrations/0029_user_verified_email.py
|
Python
|
agpl-3.0
| 391
| 0
|
# Generated by Django 2.2.14 on 2020-07-30 12:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0028_auto_20200615_0811'),
]
operations = [
migrations.AddField(
model_name='user',
name='verified_email',
            field=models.BooleanField(default=True),
),
]
|
Micronaet/micronaet-quality
|
quality/etl/import.py
|
Python
|
agpl-3.0
| 101,183
| 0.009172
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Modules used for ETL - Create User
# Modules required:
import os
import xmlrpclib, sys, csv, ConfigParser
from openerp.tools.status_history import status
from datetime import datetime
# -----------------------------------------------------------------------------
# Set up parameters (for connection to Open ERP Database)
# -----------------------------------------------------------------------------
# Startup from config file:
config = ConfigParser.ConfigParser()
file_config = os.path.expanduser('~/ETL/generalfood/openerp.cfg')
config.read([file_config])
dbname = config.get('dbaccess','dbname')
user = config.get('dbaccess','user')
pwd = config.get('dbaccess','pwd')
server = config.get('dbaccess','server')
port = config.get('dbaccess','port') # verify if it's necessary: getint
separator = eval(config.get('dbaccess','separator')) # test
log_only_error = eval(config.get('log','error')) # log only error in function
# Startup from code:
default_error_data = "2014/07/30"
default_product_id = 1921 # for lot creation (acceptation)
default_lot_id = 92710 # ERR
log_file = os.path.expanduser("~/ETL/generalfood/log/%s.txt" % (datetime.now()))
|
log = open(log_file, 'w')
# -----------------------------------------------------------------------------
# XMLRPC connection
# -----------------------------------------------------------------------------
sock = xmlrpclib.ServerProxy(
'http://%s:%s/xmlrpc/common' % (server, port), allow_none=True)
uid = sock.login(dbname ,user ,pwd)
sock = xmlrpclib.ServerProxy(
'http://%s:%s/xmlrpc/object' % (server, port), allow_none=True)
# -----------------------------------------------------------------------------
# Utility function
# -----------------------------------------------------------------------------
def format_string(valore):
try:
valore = valore.decode('cp1252')
except:
tmp = ""
for i in valore:
try:
tmp += i.decode('cp1252')
except:
pass # jump char
valore = tmp
valore = valore.encode('utf-8')
return valore.strip()
def format_date(valore,date=True):
    ''' Format the value as a PG-style date
    '''
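    # Input is expected as "month/day/year"; two-digit years pivot at 50
    # (51-99 -> 1900s, 00-50 -> 2000s).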
try:
if date:
mga = valore.strip().split(' ')[0].split('/') # only date (not time)
year = int(mga[2])
if year < 100:
if year > 50:
year += 1900
else:
year += 2000
return '%4d-%02d-%02d' % (year, int(mga[0]), int(mga[1]))
except:
return False
def format_currency(valore):
    ''' Format as a float for currency values
    '''
try:
return float(valore.strip().split(' ')[-1].replace(',','.'))
except:
return 0.0
def format_boolean(value):
    ''' Convert the strings '0' and '1' into the booleans False and True
    '''
return value == '1'
def log_event(*event):
''' Log event and comunicate with print
'''
if log_only_error and event[0][:5] == "[INFO":
return
log.write("%s. %s\r\n" % (datetime.now(), event))
print event
return
def create_partner(partner_code, type_of_partner, default_dict):
''' Create simple element for partner not found
(write after in default_dict new element)
'''
try:
field = "sql_%s_code" % type_of_partner
partner_ids = sock.execute(dbname, uid, pwd, "res.partner", "search",
[(field, '=', partner_code)])
if partner_ids:
partner_id = partner_ids[0]
else:
data = {
'name': "Partner %s (from migration)" % (partner_code),
field: partner_code,
'sql_import': True,
}
if type_of_partner == 'customer':
data['ref'] = partner_code
data['customer'] = True
elif type_of_partner == 'supplier':
data['supplier'] = True
elif type_of_partner == 'destination':
data['is_address'] = True
partner_id = sock.execute(dbname, uid, pwd, "res.partner",
'create', data)
log_event("[WARN] %s partner created: %s" % (type_of_partner, partner_code))
default_dict[partner_code] = partner_id
return partner_id
except:
log_event("[ERROR] Error creating %s partner: %s" % (type_of_partner, partner_code))
return False
def get_or_create_partner(partner_code, type_of_partner, mandatory, res_partner_customer, res_partner_supplier):
''' Try to get partner element or create a simple element if not present
'''
if type_of_partner == 'customer':
default_dict = res_partner_customer
elif type_of_partner == 'supplier':
default_dict = res_partner_supplier
elif type_of_partner == 'destination':
default_dict = res_partner_customer # search in customer dict
else:
default_dict = {} # nothing
partner_id = default_dict.get(partner_code, False)
if not partner_id: # create e simple element
partner_id = create_partner(partner_code, type_of_partner, default_dict)
if mandatory and not partner_id:
log_event("[ERROR] %s partner not found: %s" % (
type_of_partner, partner_code))
return partner_id
# -----------------------------------------------------------------------------
# Supplier qualification imports
# -----------------------------------------------------------------------------
qualifications = {
    '1': 'full', # Full qualification
    '2': 'reserve', # With reservations
    '3': 'discarded', # Discarded
    '4': 'uneventful', # No movements
    '5': 'test', # On trial
    '6': 'occasional', # Occasional
}
# -----------------------------------------------------------------------------
# Communication imports
# -----------------------------------------------------------------------------
comunications = {
    '1': 1, # Customer
    '2': 2, # Supplier
'3': 3, # ASL
}
# -----------------------------------------------------------------------------
# Severity imports
# -----------------------------------------------------------------------------
gravity = {
    '1': 2, # Severe
    '2': 3, # Important
    '3': 1, # Minor
}
# -----------------------------------------------------------------------------
# Origin imports
# -----------------------------------------------------------------------------
origin = {
    '1': 1, # Order
    '2': 2, # Warehouse
    '3': 3, # Supplier
    '4': 4, # Customer
    '5': 5, # Transport
    '6': 6, # Invoicing
    '7': 7, # Not definable
    '8': 8, # Sales
    '9': 9, # Logistics
    '10': 10, # Packaging
    '11': 11, # Purchasing
}
# -----------------------------------------------------------------------------
# Cause imports
# -----------------------------------------------------------------------------
cause = {
    '1': 1, # Hygiene
    '2': 2, # Quality
    '3': 3, # Quantity
    '4': 4, # Delay
    '5': 5, # Wrong product
    '6': 6, # Packaging
    '7': 7, # Customer error
    '8': 8, # Price
    '9': 9, # Not definable
    '10': 10, # Glazing
    '11': 11, # Temperature
    '12': 12, # Size grading
    '13': 13, # Foreign bodies / contaminated
    '14': 14, # Product missing compared to delivery note
    '15': 15, # Stock-out
}
# -----------------------------------------------------------------------------
# Import of sampling plans
# -----------------------------------------------------------------------------
plan = {
'1': 1, # Swiss chard (leaf beet)
'3': 2, # Calabrian broccoli IGF
'4': 3, # Baby carrots and carrot rounds
'6': 4,
|
tsarnowski/hamster
|
wafadmin/Options.py
|
Python
|
gpl-3.0
| 6,022
| 0.06277
|
#! /usr/bin/env python
# encoding: utf-8
import os,sys,imp,types,tempfile,optparse
import Logs,Utils
from Constants import*
cmds='distclean configure build install clean uninstall check dist distcheck'.split()
commands={}
is_install=False
options={}
arg_line=[]
launch_dir=''
tooldir=''
lockfile=os.environ.get('WAFLOCK','.lock-wscript')
try:cache_global=os.path.abspath(os.environ['WAFCACHE'])
except KeyError:cache_global=''
platform=Utils.unversioned_sys_platform()
conf_file='conf-runs-%s-%d.pickle'%(platform,ABI)
remote_repo=['http://waf.googlecode.com/svn/']
default_prefix=os.environ.get('PREFIX')
if not default_prefix:
if platform=='win32':
d=tempfile.gettempdir()
default_prefix=d[0].upper()+d[1:]
else:default_prefix='/usr/local/'
default_jobs=os.environ.get('JOBS',-1)
if default_jobs<1:
try:
if'SC_NPROCESSORS_ONLN'in os.sysconf_names:
default_jobs=os.sysconf('SC_NPROCESSORS_ONLN')
else:
default_jobs=int(Utils.cmd_output(['sysctl','-n','hw.ncpu']))
except:
if os.name=='java':
from java.lang import Runtime
default_jobs=Runtime.getRuntime().availableProcessors()
else:
default_jobs=int(os.environ.get('NUMBER_OF_PROCESSORS',1))
default_destdir=os.environ.get('DESTDIR','')
def get_usage(self):
cmds_str=[]
module=Utils.g_module
if module:
tbl=module.__dict__
keys=list(tbl.keys())
keys.sort()
if'build'in tbl:
if not module.build.__doc__:
module.build.__doc__='builds the project'
if'configure'in tbl:
if not module.configure.__doc__:
module.configure.__doc__='configures the project'
ban=['set_options','init','shutdown']
optlst=[x for x in keys if not x in ban and type(tbl[x])is type(parse_args_impl)and tbl[x].__doc__ and not x.startswith('_')]
just=max([len(x)for x in optlst])
for x in optlst:
cmds_str.append(' %s: %s'%(x.ljust(just),tbl[x].__doc__))
ret='\n'.join(cmds_str)
else:
ret=' '.join(cmds)
return'''waf [command] [options]
Main commands (example: ./waf build -j4)
%s
'''%ret
setattr(optparse.OptionParser,'get_usage',get_usage)
def create_parser(module=None):
Logs.debug('options: create_parser is called')
parser=optparse.OptionParser(conflict_handler="resolve",version='waf %s (%s)'%(WAFVERSION,WAFREVISION))
parser.formatter.width=Utils.get_term_cols()
p=parser.add_option
p('-j','--jobs',type='int',default=default_jobs,help='amount of parallel jobs (%r)'%default_jobs,dest='jobs')
p('-k','--keep',action='store_true',default=False,help='keep running happily on independent task groups',dest='keep')
p('-v','--verbose',action='count',default=0,help='verbosity level -v -vv or -vvv [default: 0]',dest='verbose')
p('--nocache',a
|
ction='store_true',default=False,help='ignore the WAFCACHE (if set)',dest='nocache')
p('--zones',action='store',default='',help='debugging zones (task_gen, deps, tasks, etc)',dest='zones')
p('-p','--progress',action='count',default=0
|
,help='-p: progress bar; -pp: ide output',dest='progress_bar')
p('--targets',action='store',default='',help='build given task generators, e.g. "target1,target2"',dest='compile_targets')
gr=optparse.OptionGroup(parser,'configuration options')
parser.add_option_group(gr)
gr.add_option('-b','--blddir',action='store',default='',help='build dir for the project (configuration)',dest='blddir')
gr.add_option('-s','--srcdir',action='store',default='',help='src dir for the project (configuration)',dest='srcdir')
gr.add_option('--prefix',help='installation prefix (configuration) [default: %r]'%default_prefix,default=default_prefix,dest='prefix')
gr.add_option('--download',action='store_true',default=False,help='try to download the tools if missing',dest='download')
gr=optparse.OptionGroup(parser,'installation options')
parser.add_option_group(gr)
gr.add_option('--destdir',help='installation root [default: %r]'%default_destdir,default=default_destdir,dest='destdir')
gr.add_option('-f','--force',action='store_true',default=False,help='force file installation',dest='force')
return parser
def parse_args_impl(parser,_args=None):
global options,commands,arg_line
(options,args)=parser.parse_args(args=_args)
arg_line=args
commands={}
for var in cmds:commands[var]=0
if not args:
commands['build']=1
args.append('build')
for arg in args:
commands[arg]=True
if'check'in args:
idx=args.index('check')
try:
bidx=args.index('build')
if bidx>idx:
raise ValueError('build before check')
except ValueError as e:
args.insert(idx,'build')
if args[0]!='init':
args.insert(0,'init')
if options.keep:options.jobs=1
if options.jobs<1:options.jobs=1
if'install'in sys.argv or'uninstall'in sys.argv:
options.destdir=options.destdir and os.path.abspath(os.path.expanduser(options.destdir))
Logs.verbose=options.verbose
Logs.init_log()
if options.zones:
Logs.zones=options.zones.split(',')
if not Logs.verbose:Logs.verbose=1
elif Logs.verbose>0:
Logs.zones=['runner']
if Logs.verbose>2:
Logs.zones=['*']
class Handler(Utils.Context):
parser=None
def __init__(self,module=None):
self.parser=create_parser(module)
self.cwd=os.getcwd()
Handler.parser=self
def add_option(self,*k,**kw):
self.parser.add_option(*k,**kw)
def add_option_group(self,*k,**kw):
return self.parser.add_option_group(*k,**kw)
def get_option_group(self,opt_str):
return self.parser.get_option_group(opt_str)
def sub_options(self,*k,**kw):
if not k:raise Utils.WscriptError('folder expected')
self.recurse(k[0],name='set_options')
def tool_options(self,*k,**kw):
if not k[0]:
raise Utils.WscriptError('invalid tool_options call %r %r'%(k,kw))
tools=Utils.to_list(k[0])
path=Utils.to_list(kw.get('tdir',kw.get('tooldir',tooldir)))
for tool in tools:
tool=tool.replace('++','xx')
if tool=='java':tool='javaw'
if tool.lower()=='unittest':tool='unittestw'
module=Utils.load_tool(tool,path)
try:
fun=module.set_options
except AttributeError:
pass
else:
fun(kw.get('option_group',self))
def parse_args(self,args=None):
parse_args_impl(self.parser,args)
|
injectnique/KnuckleHeadedMcSpazatron
|
GenericBytecode.py
|
Python
|
mit
| 46,794
| 0.013506
|
#!C:\Python27\python.exe
# Filename: GenericBytecode.py
# -*- coding: utf-8 -*-
import os
import Settings
'''
Generic Bytecode
Simply add, remove or modify bytecode for use in KHMS
'''
createFrame = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1',
'iconst_1', 'iadd', 'putfield', 'iload_1', 'aload_0', 'getfield', \
'invokevirtual', 'iadd', 'i2b', 'bastore', 'return']
writeDWord = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', 'ishr', \
'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \
'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \
'bipush', 'ishr', 'i2b', 'bastore', 'aload_0', 'getfield', \
'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \
'putfield', 'iload_1', 'bipush', 'ishr', 'i2b', 'bastore', \
'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'i2b', 'bastore', 'return']
# writeWordBigEndian = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \
# 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \
# 'i2b', 'bastore', 'return', 'aload_0', 'dup', 'getfield', \
# 'iconst_3', 'iadd', 'putfield', 'sipush', 'aload_0', \
# 'getfield', 'aload_0', 'getfield', 'iconst_3', 'isub', \
# 'baload', 'bipush', 'ishl', 'iand', 'sipush', 'aload_0', \
# 'getfield', 'aload_0', 'getfield', 'iconst_2', 'isub', \
# 'baload', 'bipush', 'ishl', 'iand', 'iadd', 'sipush', \
# 'aload_0', 'getfield', 'aload_0', 'getfield', 'iconst_1', \
# 'isub', 'baload', 'iand', 'iadd', 'ireturn']
writeWordBigEndian = ['aload_0',
'getfield Stream/buffer [B',
'aload_0',
'dup',
'getfield Stream/currentOffset I',
'dup_x1',
'iconst_1',
'iadd',
'putfield Stream/currentOffset I',
'iload_1',
'i2b',
'bastore',
'return']
writeWord = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', 'ishr', \
'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \
'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \
'i2b', 'bastore', 'return']
writeDWordBigEndian = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \
'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \
'bipush', 'ishr', 'i2b', 'bastore', 'aload_0', \
'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', \
'ishr', 'i2b', 'bastore', 'aload_0', 'getfield', \
'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', \
'iadd', 'putfield', 'iload_1', 'i2b', 'bastore', 'return']
method403 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'i2b', 'bastore', \
'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', 'ishr', \
'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \
'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \
'bipush', 'ishr', 'i2b', 'bastore', 'aload_0', 'getfield', \
'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \
'putfield', 'iload_1', 'bipush', 'ishr', 'i2b', 'bastore', 'return']
writeQWord = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'lload_1', 'bipush', 'lshr', \
'l2i', 'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', \
'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', \
'lload_1', 'bipush', 'lshr', 'l2i', 'i2b', 'bastore', 'aload_0', \
'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', \
'iadd', 'putfield', 'lload_1', 'bipush', 'lshr', 'l2i', 'i2b', \
'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \
'dup_x1', 'iconst_1', 'iadd', 'putfield', 'lload_1', 'bipush', \
'lshr', 'l2i', 'i2b', 'bastore', 'aload_0', 'getfield', \
'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \
'putfield', 'lload_1', 'bipush', 'lshr', 'l2i', 'i2b', \
'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \
'dup_x1', 'iconst_1', 'iadd', 'putfield', 'lload_1', 'bipush', \
'lshr', 'l2i', 'i2b', 'bastore', 'aload_0', 'getfield', \
'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \
'putfield', 'lload_1', 'bipush', 'lshr', 'l2i', 'i2b', \
'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \
'dup_x1', 'iconst_1', 'iadd', 'putfield', 'lload_1', 'l2i', \
'i2b', 'bastore', 'goto', 'astore_3', 'new', 'dup', \
'invokespecial', 'ldc', 'invokevirtual', 'lload_1', \
'invokevirtual', 'ldc', 'invokevirtual', 'aload_3', \
'invokevirtual', 'invokevirtual', 'invokevirtual', \
'invokestatic', 'new', 'dup', 'invokespecial', 'athrow', 'return']
writeString = ['aload_1', 'invokevirtual', 'iconst_0', 'aload_0', 'getfield', \
'aload_0', 'getfield', 'aload_1', 'invokevirtual', \
'invokestatic', 'aload_0', 'dup', 'getfield', 'aload_1', \
'invokevirtual', 'iadd', 'putfield', 'aload_0', 'getfield', \
'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \
'putfield', 'bipush', 'bastore', 'return']
method424 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'ineg', 'i2b', \
'bastore', 'return']
method425 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'sipush', 'iload_1', 'isub', \
'i2b', 'bastore', 'return']
method431 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'i2b', 'bastore', \
'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', '
|
ishr', \
'i2b', 'bastore', 'return']
method432 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield
|
', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', 'ishr', \
'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \
'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \
'sipush', 'iadd', 'i2b', 'bastore', 'return']
method433 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \
'iconst_1', 'iadd', 'putfield', 'iload_1', 'sipush', 'iadd', \
'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \
'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \
'bipush', 'ishr', 'i2b', 'bastore', 'return']
getNextKey = ['aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'isub', \
'putfield', 'ifne', 'aload_0', 'invokespecial', 'aload_0', \
'sipush', 'putfield', 'aload_0', 'getfield', 'aload_0', \
'getfield', 'iaload', 'ireturn']
isaac = ['aload_0', 'dup', 'getfield', 'aload_0', 'dup', 'getfield', \
'iconst_1', 'iadd', 'dup_x1', 'putfield', '
|
F0lha/UJunior-Projects
|
DailyProgrammer/Challenge#318/src.py
|
Python
|
mit
| 2,546
| 0.008641
|
from itertools import permutations
import re
def create_formula(combination,numbers):
formula = ""
index = 0
for op in combination:
formula += str(numbers[index]) + op
index += 1
formula += numbers[index]
return formula
'''
Unnecessary function (kept for reference; countdown() uses eval() instead)
'''
def evaluate(form):
result = 0
for index in range(len(form)):
if form[index] == "+":
result += int(form[index+1])
index +=
|
1
elif form[index] == "-":
result -= int(form[index+1])
index += 1
elif form[index] == "*":
result *= int(form[index+1])
index += 1
elif form[index] == "/":
result //= int(form[index+1])
index += 1
else:
result += int(form[index])
return result
def countdown(numbers):
rightCombinations = []
finalScore = numbers.pop()
combinations = returnAllCombinations(
|
len(numbers) - 1)
perms = list(permutations(numbers))
for combination in combinations:
for permut in perms:
formula = create_formula(combination,permut)
#form = re.split("([*+-/])",formula)
#if int(evaluate(form)) == int(finalScore):
if int(eval(formula)) == int(finalScore):
rightCombinations.append(formula)
return rightCombinations
def returnAllCombinations(size):
listFinal = []
for x in range(0,size):
if len(listFinal) == 0:
for y in range(0,4):
if y == 0:
listFinal.append("+")
elif y == 1:
listFinal.append("-")
elif y == 2:
listFinal.append("*")
else:
listFinal.append("/")
else:
newList = []
for l in listFinal:
for y in range(0,4):
newLine = list(l)
if y == 0:
newLine.append("+")
elif y == 1:
newLine.append("-")
elif y == 2:
newLine.append("*")
else:
newLine.append("/")
newList.append(newLine)
listFinal = list(newList)
return listFinal
out = open("output.txt",'w')
for line in open("input.txt",'r'):
for formula in countdown(line.split(" ")):
out.write(formula)
out.write("\n")
out.write("\n\n")
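# Illustrative example (not part of the original file), following the same input
# convention used above: the last element of the list is the target score and
# the preceding elements are the usable numbers.
example_solutions = countdown(['1', '2', '3', '7'])
# example_solutions now holds formula strings such as '1+2*3' whose eval() is 7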
|
wangjun/BT-Share
|
web/module/module.py
|
Python
|
mit
| 955
| 0.006283
|
#!/usr/bin/env python
# encoding: utf-8
import re
from tornado.web import UIModule
from conf.config import BT_PAGE_SIZE
# TODO: it may not be good to put this here, as it leaves the pager class scattered
class Pagination(UIModule):
def render(self, page, ur
|
i, list_rows=BT_PAGE_SIZE):
def gen_page_list(current_page=1, total_page=1, list_rows=BT_PAGE_SIZE):
#TODO add ajax pager support
return range(1, total_page + 1)
def build_uri(uri, param, value):
regx = re.compile("[\?&](%s=[^\?&]*)" % param)
find = regx.search(uri)
split = "&" if re.search(r"\?", uri) else "?"
if n
|
ot find:
return "%s%s%s=%s" % (uri, split, param, value)
return re.sub(find.group(1), "%s=%s" % (param, value), uri)
return self.render_string("pagination.html", page=page, uri=uri, gen_page_list=gen_page_list, list_rows=list_rows, build_uri=build_uri)
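# Standalone sketch (not part of the original module) of the query-string
# rewriting idea used by the nested build_uri() helper above: rewrite the
# parameter in place when it already occurs, otherwise append it with the
# proper '?' or '&' separator. This variant uses str.replace() instead of
# re.sub() purely for illustration; the helper name is hypothetical.
def _build_uri_sketch(uri, param, value):
    found = re.search(r"[\?&](%s=[^\?&]*)" % param, uri)
    separator = "&" if "?" in uri else "?"
    if not found:
        return "%s%s%s=%s" % (uri, separator, param, value)
    return uri.replace(found.group(1), "%s=%s" % (param, value))
# _build_uri_sketch("/bt/list?page=2&q=linux", "page", 3) -> '/bt/list?page=3&q=linux'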
|
shearichard/django-channels-demo
|
chnnlsdmo/chnnlsdmo/settings_heroku.py
|
Python
|
bsd-3-clause
| 510
| 0.005882
|
import
|
os
from os.path import abspath, basename, dirname, join, normpath
from sys import path
import dj_database_url
from .settings import *
DEBUG = True
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# E
|
xtra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
DATABASES['default'] = dj_database_url.config()
ROOT_URLCONF = 'chnnlsdmo.chnnlsdmo.urls'
|
diegojromerolopez/djanban
|
src/djanban/apps/boards/migrations/0018_list_position.py
|
Python
|
mit
| 505
| 0.00198
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-17 17:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('boards', '0017_card_blocking_cards'),
]
operations = [
migrations.AddField(
m
|
odel_name='list',
name='position',
field=models.PositiveIntegerField(default=0, verbose_name='Position of this list in t
|
he board'),
),
]
|
ctrlaltdel/neutrinator
|
vendor/requestsexceptions/__init__.py
|
Python
|
gpl-3.0
| 2,032
| 0
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestW
|
arning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
try:
from requests.packages.urllib3.exceptions import SubjectAltNameWarning
except ImportError:
try:
from urllib3.exceptions import SubjectAltNameWarning
except ImportError:
SubjectAltNameWarning = None
try:
from requests.packages.urllib3.exceptions import SNIMissingWarning
except I
|
mportError:
try:
from urllib3.exceptions import SNIMissingWarning
except ImportError:
SNIMissingWarning = None
def squelch_warnings(insecure_requests=True):
if SubjectAltNameWarning:
warnings.filterwarnings('ignore', category=SubjectAltNameWarning)
if InsecurePlatformWarning:
warnings.filterwarnings('ignore', category=InsecurePlatformWarning)
if SNIMissingWarning:
warnings.filterwarnings('ignore', category=SNIMissingWarning)
if insecure_requests and InsecureRequestWarning:
warnings.filterwarnings('ignore', category=InsecureRequestWarning)
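# Minimal usage sketch (not part of the original module): callers typically
# import the package once at start-up and squelch the noisy urllib3 warnings;
# passing insecure_requests=False keeps InsecureRequestWarning visible.
if __name__ == '__main__':
    squelch_warnings(insecure_requests=False)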
|
ZenSecurity/nassl
|
src/DebugSslClient.py
|
Python
|
gpl-2.0
| 3,283
| 0.009138
|
#!/usr/bin/python2.7
from nassl._nassl import SSL
from SslClient import SslClient
class DebugSslClient(SslClient):
"""
An SSL client with additional debug methods that no one should ever use (insecure renegotiation, etc.).
"""
def get_secure_renegotiation_support(self):
return self._ssl.get_secure_renegotiation_support()
def get_current_compression_method(self):
return self._ssl.get_current_compression_method()
@staticmethod
def get_available_compression_methods():
"""
Returns the list of SSL compression methods supported by SslClient.
"""
return SSL.get_available_compression_methods()
def do_renegotiate(self):
"""Initiate an SSL renegotiation."""
|
if not self._handshakeDone:
raise IOError('SSL Handshake was not c
|
ompleted; cannot renegotiate.')
self._ssl.renegotiate()
return self.do_handshake()
def get_session(self):
"""Get the SSL connection's Session object."""
return self._ssl.get_session()
def set_session(self, sslSession):
"""Set the SSL connection's Session object."""
return self._ssl.set_session(sslSession)
def set_options(self, options):
return self._ssl.set_options(options)
def get_dh_param(self):
"""Retrieve the negotiated Ephemeral Diffie Helmann parameters."""
d = self._openssl_str_to_dic(self._ssl.get_dh_param())
d['GroupSize'] = d.pop('DH_Parameters').strip('( bit)')
d['Type'] = "DH"
d['Generator'] = d.pop('generator').split(' ')[0]
return d
def get_ecdh_param(self):
"""Retrieve the negotiated Ephemeral EC Diffie Helmann parameters."""
d = self._openssl_str_to_dic(self._ssl.get_ecdh_param(), ' ')
d['GroupSize'] = d.pop('ECDSA_Parameters').strip('( bit)')
d['Type'] = "ECDH"
if 'Cofactor' in d :
d['Cofactor'] = d['Cofactor'].split(' ')[0]
for k in d.keys() :
if k.startswith('Generator') :
d['Generator'] = d.pop(k)
d['GeneratorType'] = k.split('_')[1].strip('()')
break
else :
d['GeneratorType'] = 'Unknown'
return d
@staticmethod
def _openssl_str_to_dic(s, param_tab=' ') :
"""EDH and ECDH parameters pretty-printing."""
d = {}
to_XML = lambda x : "_".join(m for m in x.replace('-', ' ').split(' '))
current_arg = None
for l in s.splitlines() :
if not l.startswith(param_tab) :
if current_arg :
d[current_arg] = "0x"+d[current_arg].replace(':', '')
current_arg = None
args = tuple(arg.strip() for arg in l.split(':') if arg.strip())
if len(args) > 1 :
# one line parameter
d[to_XML(args[0])] = args[1]
else :
# multi-line parameter
current_arg = to_XML(args[0])
d[current_arg] = ''
else :
d[current_arg] += l.strip()
if current_arg :
d[current_arg] = "0x"+d[current_arg].replace(':', '')
return d
|
dburggie/py3D
|
bodies/Sphere.py
|
Python
|
mit
| 2,470
| 0.011741
|
import bounds
from py3D import
|
Vector, Ray, Color, Body
class Sphere(Body):
center = Vector()
radius = 0.0
R = 0.0
color = [0.01,0.01,0.01]
def p(self):
"""Returns the name of the type of body this is."""
return 'Sphere'
def set_position(sel
|
f, c):
self.center = c
return self
def set_radius(self, r):
self.radius = abs(r)
self.R = r ** 2.0
return self
def set_color(self, c):
self.color = c
return self
def get_color(self, point):
"""Returns color of body at given point."""
return self.color.dup()
def normal(self, point):
"""Returns normal vector of body at given point."""
return (point - self.center).scale(1/self.radius)
def set_reflectivity(self, r):
self._r = max(0.0,min(1.0,r))
return self
def reflectivity(self, point):
"""Returns percentage of brightness due to specular reflection."""
return self._r
def __init__(self, center, radius, color = Color()):
Body.__init__(self)
self.set_position(center)
self.set_radius(radius)
self.set_color(color)
self.set_reflectivity(0.2)
# Intersection of ray with a sphere boils down to the solutions to a
# quadratic vector equation.
#
# Let S be the vector from sphere center to ray origin, D be ray direction
# and R be the square of the radius of the sphere
#
# Write SS for S dot S and, similarly, SD for S dot D
#
# Now the intersections occur at the following distances:
# -SD +/- sqrt(SD**2 + R - SS)
def intersection(self, ray):
"""Returns distance from ray to closest intersection with sphere."""
S = ray.o - self.center
SD = S.dot( ray.d )
SS = S.dot(S)
# no hit if sphere is really far away
if SS > bounds.too_far ** 2:
return -1.0
radical = SD ** 2 + self.R - SS
# negative radical implies no solutions
if radical < 0.0:
return -1.0
radical **= 0.5
hit = -1 * SD - radical
if hit < bounds.too_close:
hit = -1 * SD + radical
if hit < bounds.too_small:
return -1.0
else:
return hit
else:
return hit
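# Worked numeric check (not part of the original module) of the closed form used
# in intersection() above: a unit sphere at the origin and a ray starting at
# (0, 0, -5) aimed along +z give S = (0, 0, -5) and D = (0, 0, 1), hence
# SD = -5, SS = 25 and R = 1. The values below are illustrative only.
import math
_SD, _SS, _R = -5.0, 25.0, 1.0
_near_hit = -_SD - math.sqrt(_SD ** 2 + _R - _SS) # 4.0: the ray hits the sphere at (0, 0, -1)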
|
bashu/fluentcms-twitterfeed
|
fluentcms_twitterfeed/south_migrations/0001_initial.py
|
Python
|
apache-2.0
| 7,116
| 0.00801
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TwitterRecentEntriesItem'
db.create_table(u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem', (
(u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('twitter_user', self.gf('django.db.models.fields.CharField')(max_length=75)),
('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)),
('widget_id', self.gf('django.db.models.fields.CharField')(max_length=75)),
('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'fluentcms_twitterfeed', ['TwitterRecentEntriesItem'])
# Adding model 'TwitterSearchItem'
db.create_table(u'contentitem_fluentcms_twitterfeed_twittersearchitem', (
(u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('query', self.gf('django.db.models.fields.CharField')(default='', max_length=200)),
('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)),
('widget_id', self.gf('django.db.models.fields.CharField')(max_length=75)),
('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'fluentcms_twitterfeed', ['TwitterSearchItem'])
def backwards(self, orm):
# Deleting model 'TwitterRecentEntriesItem'
db.delete_table(u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem')
# Deleting model 'TwitterSearchItem'
db.delete_table(u'contentitem_fluentcms_twitterfeed_twittersearchitem')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fluent_contents.contentitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'db_index': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}),
|
'polymorphic_ctype': ('django.db.models
|
.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'fluent_contents.placeholder': {
'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'fluentcms_twitterfeed.twitterrecententriesitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterRecentEntriesItem', 'db_table': "u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem'", '_ormbases': ['fluent_contents.ContentItem']},
'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'include_replies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'widget_id': ('django.db.models.fields.CharField', [], {'max_length': '75'})
},
u'fluentcms_twitterfeed.twittersearchitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterSearchItem', 'db_table': "u'contentitem_fluentcms_twitterfeed_twittersearchitem'", '_ormbases': ['fluent_contents.ContentItem']},
'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'include_replies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'query': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'widget_id': ('django.db.models.fields.CharField', [], {'max_length': '75'})
}
}
complete_apps = ['fluentcms_twitterfeed']
|
Bairdo/faucet
|
tests/faucet_mininet_test_base.py
|
Python
|
apache-2.0
| 72,009
| 0.000667
|
#!/usr/bin/env python
"""Base class for all FAUCET unit tests."""
# pylint: disable=missing-docstring
# pylint: disable=too-many-arguments
import collections
import glob
import ipaddress
import json
import os
import random
import re
import shutil
import subprocess
import time
import unittest
import yaml
import requests
from requests.exceptions import ConnectionError
# pylint: disable=import-error
from mininet.log import error, output
from mininet.net import Mininet
from mininet.node import Intf
from mininet.util import dumpNodeConnections, pmonitor
from ryu.ofproto import ofproto_v1_3 as ofp
import faucet_mininet_test_util
import faucet_mininet_test_topo
class FaucetTestBase(unittest.TestCase):
"""Base class for all FAUCET unit tests."""
ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss'
FAUCET_VIPV4 = ipaddress.ip_interface(u'10.0.0.254/24')
FAUCET_VIPV4_2 = ipaddress.ip_interface(u'172.16.0.254/24')
FAUCET_VIPV6 = ipaddress.ip_interface(u'fc00::1:254/64')
FAUCET_VIPV6_2 = ipaddress.ip_interface(u'fc01::1:254/64')
OFCTL = 'ovs-ofctl -OOpenFlow13'
BOGUS_MAC = '01:02:03:04:05:06'
FAUCET_MAC = '0e:00:00:00:00:01'
LADVD = 'ladvd -e lo -f'
ONEMBPS = (1024 * 1024)
DB_TIMEOUT = 5
ACL_CONFIG = ''
CONFIG = ''
CONFIG_GLOBAL = ''
GAUGE_CONFIG_DBS = ''
N_UNTAGGED = 0
N_TAGGED = 0
NUM_DPS = 1
RUN_GAUGE = True
REQUIRES_METERS = False
PORT_ACL_TABLE = 0
VLAN_TABLE = 1
VLAN_ACL_TABLE = 2
ETH_SRC_TABLE = 3
IPV4_FIB_TABLE = 4
IPV6_FIB_TABLE = 5
VIP_TABLE = 6
FLOOD_TABLE = 8
ETH_DST_TABLE = 7
config = None
dpid = None
hardware = 'Open vSwitch'
hw_switch = False
gauge_controller = None
gauge_of_port = None
prom_port = None
net = None
of_port = None
ctl_privkey = None
ctl_cert = None
ca_certs = None
port_map = {'port_1': 1, 'port_2': 2, 'port_3': 3, 'port_4': 4}
switch_map = {}
tmpdir = None
net = None
topo = None
cpn_intf = None
config_ports = {}
env = collections.defaultdict(dict)
rand_dpids = set()
def __init__(self, name, config, root_tmpdir, ports_sock, max_test_load):
super(FaucetTestBase, self).__init__(name)
self.config = config
self.root_tmpdir = root_tmpdir
self.ports_sock = ports_sock
self.max_test_load = max_test_load
def rand_dpid(self):
reserved_range = 100
while True:
dpid = random.randint(1, (2**32 - reserved_range)) + reserved_range
if dpid not in self.rand_dpids:
self.rand_dpids.add(dpid)
return str(dpid)
def _set_var(self, controller, var, value):
self.env[controller][var] = value
def _set_var_path(self, controller, var, path):
self._set_var(controller, var, os.path.join(self.tmpdir, path))
def _set_prom_port(self, name='faucet'):
self._set_var(name, 'FAUCET_PROMETHEUS_PORT', str(self.prom_port))
self._set_var(name, 'FAUCET_PROMETHEUS_ADDR', faucet_mininet_test_util.LOCALHOST)
def _set_static_vars(self):
self._set_var_path('faucet', 'FAUCET_CONFIG', 'faucet.yaml')
self._set_var_path('faucet', 'FAUCET_ACL_CONFIG', 'faucet-acl.yaml')
self._set_var_path('faucet', 'FAUCET_LOG', 'faucet.log')
self._set_var_path('faucet', 'FAUCET_EXCEPTION_LOG', 'faucet-exception.log')
self._set_var_path('gauge', 'GAUGE_CONFIG', 'gauge.yaml')
self._set_var_path('gauge', 'GAUGE_LOG', 'gauge.log')
self._set_var_path('gauge', 'GAUGE_EXCEPTION_LOG', 'gauge-exception.log')
self.faucet_config_path = self.env['faucet']['FAUCET_CONFIG']
self.faucet_acl_config_path = self.env['faucet']['FAUCET_ACL_CONFIG']
self.gauge_config_path = self.env['gauge']['GAUGE_CONFIG']
self.debug_log_path = os.path.join(
self.tmpdir, 'ofchannel.log')
self.monitor_stats_file = os.path.join(
self.tmpdir, 'ports.txt')
self.monitor_state_file = os.path.join(
self.tmpdir, 'state.txt')
self.monitor_flow_table_file = os.path.join(
self.tmpdir, 'flow.txt')
if self.config is not None:
if 'hw_switch' in self.config:
self.hw_switch = self.config['hw_switch']
if self.hw_switch:
self.dpid = self.config['dpid']
self.cpn_intf = self.config['cpn_intf']
self.hardware = self.config['hardware']
if 'ctl_privkey' in self.config:
self.ctl_privkey = self.config['ctl_privkey']
if 'ctl_cert' in self.config:
self.ctl_cert = self.config['ctl_cert']
if 'ca_certs' in self.config:
self.ca_certs = self.config['ca_certs']
dp_ports = self.config['dp_ports']
self.port_map = {}
s
|
elf.switch_map = {}
for i, switch_port in enumerate(dp_ports):
test_port_name = 'port_%u' % (i + 1)
self.port_map[test_port_name] = switch_port
self.switch_map[test_port_name] = dp_ports[switch_port]
def _set_vars(self):
self._set_prom_port()
def _write_faucet_config(self):
faucet_config = '\n'.join((
self.get_config_header(
self.CONFIG_GLOBAL.format
|
(tmpdir=self.tmpdir), self.debug_log_path, self.dpid, self.hardware),
self.CONFIG % self.port_map))
if self.config_ports:
faucet_config = faucet_config % self.config_ports
with open(self.faucet_config_path, 'w') as faucet_config_file:
faucet_config_file.write(faucet_config)
if self.ACL_CONFIG and self.ACL_CONFIG != '':
with open(self.faucet_acl_config_path, 'w') as faucet_acl_config_file:
faucet_acl_config_file.write(self.ACL_CONFIG % self.port_map)
def _write_gauge_config(self):
gauge_config = self.get_gauge_config(
self.faucet_config_path,
self.monitor_stats_file,
self.monitor_state_file,
self.monitor_flow_table_file)
if self.config_ports:
gauge_config = gauge_config % self.config_ports
with open(self.gauge_config_path, 'w') as gauge_config_file:
gauge_config_file.write(gauge_config)
def _test_name(self):
return faucet_mininet_test_util.flat_test_name(self.id())
def _tmpdir_name(self):
tmpdir = os.path.join(self.root_tmpdir, self._test_name())
os.mkdir(tmpdir)
return tmpdir
def _controller_lognames(self):
lognames = []
for controller in self.net.controllers:
logname = controller.logname()
if os.path.exists(logname) and os.path.getsize(logname) > 0:
lognames.append(logname)
return lognames
def _wait_load(self, load_retries=120):
for _ in range(load_retries):
load = os.getloadavg()[0]
time.sleep(random.randint(1, 7))
if load < self.max_test_load:
return
output('load average too high %f, waiting' % load)
self.fail('load average %f consistently too high' % load)
def _allocate_config_ports(self):
for port_name in list(self.config_ports.keys()):
self.config_ports[port_name] = None
for config in (self.CONFIG, self.CONFIG_GLOBAL, self.GAUGE_CONFIG_DBS):
if re.search(port_name, config):
port = faucet_mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
self.config_ports[port_name] = port
output('allocating port %u for %s' % (port, port_name))
def _allocate_faucet_ports(self):
if self.hw_switch:
self.of_port = self.config['of_port']
else:
self.of_port = faucet_mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
self.prom_port = faucet_mininet_test_ut
|
HeatherHillers/RoamMac
|
src/configmanager/editorwidgets/numberwidget.py
|
Python
|
gpl-2.0
| 1,472
| 0.004076
|
import os
from PyQt4.QtCore import pyqtSignal
from PyQt4.QtGui import QComboBox, QDoubleValidator
from configmanager.editorwidgets.core import ConfigWidget
from configmanager.editorwidgets.uifiles.ui_numberwidget_config import Ui_Form
class NumberWidgetConfig(Ui_Form, ConfigWidget):
description = 'Number entry widget'
def __init__(self, parent=None):
super(NumberWidgetConfig, self).__init__(parent)
self.setupUi(self)
self.minEdit.setValidator( QDoubleValidator() )
self.maxEdit.setValidator( QDoubleValidator() )
self.minEdit.textChanged.connect(self.widgetchanged)
self.maxEdit.textChanged.connect(self.widgetchanged)
self.prefixEdit.textChanged.connect(self.widgetchanged)
self.suffixEdit.textChanged.connect(self.widgetchanged)
def getconfig(sel
|
f):
config = {}
config['max'] = self.maxEdit.text()
config['min'] = self.minEdit.text()
config['prefix'] = self.prefixEdit.text()
config['suffix'] = self.suffixEdit.text()
return config
def setconfig(self, config):
self.blockSignals(True)
max = config.get('max', '')
min = config.get('min', '')
prefix = c
|
onfig.get('prefix', '')
suffix = config.get('suffix', '')
self.minEdit.setText(min)
self.maxEdit.setText(max)
self.prefixEdit.setText(prefix)
self.suffixEdit.setText(suffix)
self.blockSignals(False)
|
chenjiancan/wechatpy
|
wechatpy/client/api/wifi.py
|
Python
|
mit
| 5,576
| 0
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime, date
from optionaldict import optionaldict
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatWiFi(BaseWeChatAPI):
API_BASE_URL = 'https://api.weixin.qq.com/bizwifi/'
def list_shops(self, page_index=1, page_size=20):
"""
Get the list of shops
For details see
http://mp.weixin.qq.com/wiki/15/bcfb5d4578ea818b89913472cf2bbf8f.html
:param page_index: optional, page index, starting from 1 (default)
:param page_size: optional, number of entries per page, default 20, maximum 20
:return: the returned JSON data
"""
res = self._post(
'shop/list',
data={
'pageindex': page_index,
'pagesize': page_size,
}
)
return res['data']
def add_device(self, shop_id, ssid, password, bssid):
"""
Add a device
For details see
http://mp.weixin.qq.com/wiki/10/6232005bdc497f7cf8e19d4e843c70d2.html
:param shop_id: shop ID
:param ssid: SSID of the wireless device. For non-verified official accounts the
SSID must start with the uppercase letters "WX"; verified accounts and third-party platforms have no such restriction. No SSID may contain Chinese characters
:param password: password of the wireless device, more than 8 characters, must not contain Chinese characters
:param bssid: wireless MAC address of the device, colon separated, 17 characters long, letters in lowercase
:return: the returned JSON data
"""
return self._post(
'device/add',
data={
'shop_id': shop_id,
'ssid': ssid,
'password': password,
'bssid': bssid,
}
)
def list_devices(self, shop_id=None, page_index=1, page_size=20):
"""
Query devices
For details see
http://mp.weixin.qq.com/wiki/10/6232005bdc497f7cf8e19d4e843c70d2.html
:param shop_id: optional, shop ID
:param page_index: optional, page index, starting from 1 (default)
:param page_size: optional, number of entries per page, default 20, maximum 20
:return: the returned JSON data
"""
data = optionaldict(
shop_id=shop_id,
pageindex=page_index,
pagesize=page_size
)
res = self._post('device/list', data=data)
return res['data']
def delete_device(self, bssid):
"""
Delete a device
For details see
http://mp.weixin.qq.com/wiki/10/6232005bdc497f7cf8e19d4e843c70d2.html
:param bssid: wireless MAC address of the device, colon separated, 17 characters long, letters in lowercase
:return: the returned JSON data
"""
return self._post('device/delete', data={'bssid': bssid})
def get_qrcode_url(self, shop_id, img_id):
"""
Get the URL of the promotional QR-code image
For details see
http://mp.weixin.qq.com/wiki/7/fcd0378ef00617fc276be2b3baa80973.html
:param shop_id: shop ID
:param img_id: material style number: 0 - plain QR code, for freely designed promotional material;
1 - table sticker (QR code), 100mm x 100mm (width x height), ready to post
:return: URL of the QR-code image
"""
res = self._post(
'qrcode/get',
data={
'shop_id': shop_id,
'img_id': img_id,
}
)
return res['data']['qrcode_url']
def set_homepage(self, shop_id, template_id, url=None):
"""
Set the merchant homepage
For details see
http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html
:param shop_id: shop ID
:param template_id: template ID, 0 - default template, 1 - custom URL
:param url: custom link, required when template_id is 1
:return: the returned JSON data
"""
data = {
'shop_id': shop_id,
'template_id': template_id,
}
if url:
data['struct'] = {'url': url}
return self._post('homepage/set', data=data)
def get_homepage(self, shop_id):
"""
Query the merchant homepage
For details see
http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html
:param shop_id: shop ID
:return: the returned JSON data
"""
res = self._post('homepage/get', data={'shop_id': shop_id})
return res['data']
def list_statistics(self, begin_date, end_da
|
te, shop_id=-1):
"""
Wi-Fi data statistics
For details see
http://mp.weixin.qq.com/wiki/8/dfa2b756b66fca5d9b1211bc18812698.html
:param begin_date: start date; the maximum time span is 30 days
:param end_date: end date; the maximum time span is 30 days
:param shop_id: optional, shop ID to filter by; -1 for overall statistics
:return: the returned JSON data
"""
if isinstance(begin_date, (datetime, date)):
begin_date = begin_date.strft
|
ime('%Y-%m-%d')
if isinstance(end_date, (datetime, date)):
end_date = end_date.strftime('%Y-%m-%d')
res = self._post(
'statistics/list',
data={
'begin_date': begin_date,
'end_date': end_date,
'shop_id': shop_id
}
)
return res['data']
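# Usage sketch (not part of the original module): list_statistics() accepts either
# 'YYYY-MM-DD' strings or datetime/date objects and normalises the latter before
# posting. The client wiring below is an assumption for illustration only --
# credentials are placeholders and the wifi attribute follows the usual wechatpy
# client layout.
# from wechatpy import WeChatClient
# client = WeChatClient('app_id', 'app_secret')
# stats = client.wifi.list_statistics(date(2016, 1, 1), date(2016, 1, 30))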
|
kirklink/udacity-fullstack-p4
|
conference.py
|
Python
|
apache-2.0
| 40,994
| 0.000488
|
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import json
import os
import time
from utils import getUserId
from utils import validate_websafe_key
from utils import ndb_to_message
from utils import message_to_ndb
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import BooleanMessage
from models import ConflictException
from models import StringMessage
from models import Session
from models import SessionForm
from models import SessionCreateForm
from models import SessionForms
from models import SessionType
from models import Speaker
from models import SpeakerForm
from models import SpeakerCreateForm
from models import SpeakerForms
from models import WishList
from models import WishListForm
from settings import WEB_CLIENT_ID
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# - - - Conference Defaults - - - - - - - - - - - - - - - - -
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [
"Default",
"Topic"
],
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT ANNOUNCEMENTS"
CONF_POST_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
register=messages.BooleanField(2))
CONF_GET_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1))
SESSION_POST_REQUEST = endpoints.ResourceContainer(SessionCreateForm,
websafeConferenceKey=messages.StringField(1))
SESSIONS_GET_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
sessionType=messages.StringField(2))
SPEAKER_GET_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
websafeSpeakerKey=messages.StringField(1))
WISHLIST_PUT_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
add=messages.StringField(1), remove=messages.StringField(2))
SPEAKER_QUERY_BY_NAME = endpoints.ResourceContainer(message_types.VoidMessage,
firstName=messages.StringField(1), lastName=messages.StringField(2))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name,
getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
|
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if
non-existent."""
## TODO 2
## step 1: make sure u
|
ser is authed
## uncomment the following lines:
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
## step 2: create a new Profile from logged in user data
## you can use user.nickname() to get displayName
## and user.email() to get mainEmail
if not profile:
profile = Profile(userId=None, key=p_key,
displayName=user.nickname(), mainEmail=user.email(),
teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED), )
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifiable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm, path='profile',
http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
# TODO 1
# 1. change request class
# 2. pass request to _doProfile function
@endpoints.method(ProfileMiniForm, ProfileForm, path='profile',
http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning
ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException(
"Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in
request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound
# Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on
# start_date
if da
|
GeoCat/QGIS
|
python/plugins/processing/algs/qgis/ExportGeometryInfo.py
|
Python
|
gpl-2.0
| 6,865
| 0.001165
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ExportGeometryInfo.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsCoordinateTransform,
QgsField,
QgsFields,
QgsWkbTypes,
QgsFeatureSink,
QgsDistanceArea,
QgsProcessingUtils,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ExportGeometryInfo(QgisAlgorithm):
INPUT = 'INPUT'
METHOD = 'CALC_METHOD'
OUTPUT = 'OUTPUT'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'export_geometry.png'))
def tags(self):
return self.tr('export,add,information,measurements,areas,lengths,perimeters,latitudes,longitudes,x,y,z,extract,points,lines,polygons').split(',')
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
self.export_z = False
self.export_m = False
self.distance_area = None
self.calc_methods = [self.tr('Layer CRS'),
self.tr('Project CRS'),
self.tr('Ellipsoidal')]
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterEnum(self.METHOD,
self.tr('Calculate using'), options=self.calc_methods, defaultValue=0))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Added geom info')))
def name(self):
return 'exportaddgeometrycolumns'
def displayName(self):
return self.tr('Export geometry columns')
def processAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT, context)
method = self.parameterAsEnum(parameters, self.METHOD, context)
wkb_type = source.wkbType()
fields = source.fields()
new_fields = QgsFields()
if QgsWkbTypes.geometryType(wkb_type) == QgsWkbTypes.PolygonGeometry:
new_fields.append(QgsField('area', QVariant.Double))
new_fields.append(QgsField('perimeter', QVariant.Double))
elif QgsWkbTypes.geometryType(wkb_type) == QgsWkbTypes.LineGeometry:
new_fields.append(QgsField('length', QVariant.Double))
else:
new_fields.append(QgsField('xcoord', QVariant.Double))
new_fields.append(QgsField('ycoord', QVariant.Double))
if QgsWkbTypes.hasZ(source.wkbType()):
self.export_z = True
new_fields.append(QgsField('zcoord', QVariant.Double))
if QgsWkbTypes.hasM(source.wkbType()):
self.export_m = True
new_fields.append(QgsField('mvalue', QVariant.Double))
fields = QgsProcessingUtils.combineFields(fields, new_fields)
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, wkb_type, source.sourceCrs())
coordTransform = None
# Calculate with:
# 0 - layer CRS
# 1 - project CRS
# 2 - ellipsoidal
self.distance_area = QgsDistanceArea()
if method == 2:
self.distance_area.setSourceCrs(source.sourceCrs())
self.distance_area.setEllipsoid(context.project().ellipsoid())
elif method == 1:
coordTransform = QgsCoordinateTransform(source.sourceCrs(), context.project().crs())
features = source.getFeatures()
total = 100.0 / source.featureCount() if source.featureCount() else 0
for current, f in enumerate(features):
if feedback.isCanceled():
break
outFeat = f
attrs = f.attributes()
inGeom = f.geometry()
if inGeom:
if coordTransform is not None:
inGeom.transform(coordTransform)
if inGeom.type() == QgsWkbTypes.PointGeo
|
metry:
attrs.extend(self.point_attributes(inGeom))
elif inGeom.type() == QgsWkbTypes.PolygonGeometry:
attrs.extend(self.polygon_attributes(inGeom))
else:
attrs.extend(self
|
.line_attributes(inGeom))
outFeat.setAttributes(attrs)
sink.addFeature(outFeat, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
return {self.OUTPUT: dest_id}
def point_attributes(self, geometry):
pt = None
if not geometry.isMultipart():
pt = geometry.geometry()
else:
if geometry.numGeometries() > 0:
pt = geometry.geometryN(0)
attrs = []
if pt:
attrs.append(pt.x())
attrs.append(pt.y())
# add point z/m
if self.export_z:
attrs.append(pt.z())
if self.export_m:
attrs.append(pt.m())
return attrs
def line_attributes(self, geometry):
return [self.distance_area.measureLength(geometry)]
def polygon_attributes(self, geometry):
area = self.distance_area.measureArea(geometry)
perimeter = self.distance_area.measurePerimeter(geometry)
return [area, perimeter]
|
tcporco/SageBoxModels
|
boxmodel/boxmodel.py
|
Python
|
gpl-2.0
| 26,560
| 0.041679
|
#*****************************************************************************
# Copyright (C) 2017 Lee Worden <worden dot lee at gmail dot com>
#
# Distributed under the terms of the GNU General Public License (GPL) v.2
# http://www.gnu.org/licenses/
#*****************************************************************************
import graph_latex_patched
from sage.all import *
import dynamicalsystems
from sage.misc.latex import _latex_file_
#from sage.symbolic.relation import solve
from sage.symbolic.function_factory import function
# constant 'enum' values for use with indexing
class deps:
index, sumover = range(0,2)
def plot_boxmodel_graph( g, filename=None, inline=False, figsize=(6,6), empty_vertices=(), ellipsis_vertices=(), **options ):
import itertools
#print 'ellipsis vertices:', ellipsis_vertices
lopts = {
'graphic_size': figsize,
'edge_labels': True,
'edge_thickness' : 0.02,
#'edge_fills': True,
#'edge_color': 'white',
#'edge_thickness': 0.05
'vertex_shape': 'rectangle',
'vertices_empty': { x:True for x in empty_vertices },
'vertex_colors': { x:'white' for x in ellipsis_vertices },
#'vertex_label_colors': { x:'white' for x in self._sources | self._sinks }
}
graph_latex_patched.setup_latex_preamble()
gop = graph_latex_patched.GraphLatex(g)
if inline:
lopts['margins'] = (0.5,0.5,0.5,0.5)
lopts.update( options )
#print 'lopts:',lopts
if 'latex_options' in options:
g.set_latex_options( **(options['latex_options']) )
gop.set_options( **lopts )
gl = gop.latex()
xp = ''
if inline:
#LT = '\n\\vspace{24pt}\n' + gl + '\n\\vspace{24pt}\n'
LT = gl
else:
if figsize[0] > 6.75 or figsize[1] > 9:
latex.add_package_to_preamble_if_available('geometry')
xp = '\\geometry{papersize={' + str(figsize[0] + 10) + 'cm,' + str(figsize[1] + 20) + 'cm}}\n'
LT = _latex_file_( dynamicalsystems.wrap_latex( gl ), title='', extra_preamble=xp )
if filename is not None:
#print 'plot to', filename
LF = open( filename, 'w' )
LF.write( LT )
LF.close()
return LT
## see BoxModel.plot_boxes() method below
## this is a transformation that supports plotting a box model
## graph using per capita flow rates rather than absolute rates
def per_capita_rates(g):
def to_per_capita(r,s):
if s in r.variables(): return (r/s).collect_common_factors().expand()
else:
print 'Warning: rate ', str(r), 'not converted to per capita'
return r
return DiGraph(
[ (v,w,to_per_capita(e,v)) for v,w,e in g.edge_iterator() ],
multiedges=True,
pos = g.get_pos()
)
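## Sage-session sketch (not part of the original module): an SI-type edge whose
## absolute flow is beta*S*I is shown per capita as beta*I after the
## transformation above; symbols and compartments here are illustrative only.
## S, I, beta = var('S I beta')
## g = DiGraph([(S, I, beta*S*I)], multiedges=True)
## per_capita_rates(g).edges() # -> [(S, I, I*beta)]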
class BoxModel(SageObject):
"""Parent class for all kinds of box models.
Note that since this gets its variables from a graph's vertices,
rather than from indexers, it can't be used in adaptive dynamics.
Subclasses that generate their boxes, maybe can.
"""
def __init__(self, graph,
vars=None,
parameters=None,
parameter_dependencies={},
sources=(),
sinks=(),
aggregate_names=(),
bindings=dynamicalsystems.Bindings()):
# we are given a directed graph whose vertex labels are state
# variables, representing fractions of total population,
# and whose edge labels are flow rates.
try:
graph.edge_iterator()
except AttributeError:
try:
self.__init__( graph._graph, graph._vars, sources=graph._sources, sinks=graph._sinks, aggregate_names=graph._aggregate_names, bindings=graph._bindings )
return
except AttributeError:
graph = DiGraph(graph)
self._graph = graph
self._graph.set_latex_options( edge_labels=True )
self._sources = Set( sources )
self._sinks = Set( sinks )
self._aggregate_names = aggregate_names
if vars is None:
vars = Set( graph.vertices() ) - self._sources - self._sinks
self._vars = list(vars)
print 'vars', self._vars, 'sources', self._sources, 'sinks', self._sinks
def getvars(r):
try: return r.variables()
except AttributeError: return []
if parameters is None:
# avoid namespace confusion with product.union
#print 'make parameters'; sys.stdout.flush()
parameters = sorted( list(
reduce(
lambda x,y: x.union(y),
(set(getvars(r)) for f,t,r in graph.edges()),
set()
).difference(
self._vars, self._sources, self._sinks, self._aggregate_names
)
), key=str )
#print 'made parameters'; sys.stdout.flush()
self._parameters = parameters
print 'parameters:', parameters
if False:
self._parameter_dependencies = parameter_dependencies
for p in self._parameters:
if p not in self._parameter_dependencies:
# infer connections between parameters and compartmentalization
# for now, simple rule:
# just connect it to the source variable of its arrow
# TODO: inference including defined quantities like N
#print 'infer dependencies for parameter', p
for v,w,e in self._graph.edges():
try: vs = getvars(e)
except AttributeError: vs = []
if p in vs:
pd = [ v ]
#print 'found', p, 'in arrow', e
#print 'infer dependency on', v
if p in self._parameter_dependencies and self._parameter_dependencies[p] != pd:
#print 'but already inferred', self._parameter_dependencies[p]
#print 'dependencies of parameter', p, 'are unclear, inferring no dependencies'
pd = []
self._parameter_dependencies[p] = pd
for p, pd in self._parameter_dependencies.items():
try: [ d[0] for d in pd ]
except: self._parameter_dependencies[p] = [ (d,deps.index) for d in pd ]
#print 'parameter dependencies:', self._parameter_dependencies
self._bindings = bindings
if self._graph.get_pos() is None:
pos = { v:(i,0) for i,v in enumerate(self._vars) }
pos.update( { v:(-1,i) for i,v in enumerate(self._sources) } )
pos.update( { v:(xx,i) for i,v in enumerate(self._sinks) for xx in (max(x for x,y in pos.itervalues()),) } )
self._graph.set_pos( pos )
def bind(self, *args, **vargs):
bindings = dynamicalsystems.Bindings( *args, **vargs )
bound_graph = DiGraph( [
(bindings(v),bindings(w),bindings(e)) for v,w,e in self._graph.edge_iterator()
],
multiedges=True,
pos = { bindings(v):p for v,p in self._graph.get_pos().items() } if self._graph.get_pos() is not None else None
)
return BoxModel(
bound_graph,
vars = [ bindings(v) for v in self._vars ],
sources = Set( bindings(v) for v in self._sources ),
sinks = Set( bindings(v) for v in self._sinks ),
parameters = [ bindings(p) for p in self._parameters ],
parameter_dependencies = {
bindings(p):[(bindings(d),t) for d,t in pd] for p,pd in self._parameter_dependencies.items()
},
aggregate_names = self._aggregate_names,
bindings = self._bindings + bindings
)
def add_transitions( self, trs ):
# We take BoxModel to be an immutable object, so this operation
# returns a new BoxModel. trs is a list of (source,target,rate)
# tuples suitable for adding to self._graph
#print 'add_transitions', trs
#print 'parameters before', self._parameters
nbm = deepcopy(self)
nbm._graph.add_edges( trs )
#print self._vars
for f,t,r in trs:
try:
#print r
#print r.variables()
#print Set( r.variables() ).difference( Set( self._vars ) )
nbm._parameters.update( Set( r.variables() ) - self._vars - self._aggregate_names )
except At
|
flyhigher139/mayblog
|
blog/main/sitemaps.py
|
Python
|
gpl-2.0
| 968
| 0.004132
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import Sitemap
from . import models
class BlogSitemap(Sitemap):
changefreq = "daily"
priority = 0.5
def items(self):
return models.Post.objects.filter(is_draft=False)
def lastmod(self, obj):
return obj.update_time
class PageSitemap(Sitemap):
changefreq = "monthly"
priority = 0.5
def items(self):
return models.Page.objects.filter(is_draft=False)
def lastmod(self, obj):
return obj.update_time
# class CategorySitemap(Sitemap):
# changefreq = "weekly"
# priority = 0.6
# def items(self):
# return models.Category.objects.all()
# class TagSitemap(Sitemap):
# changefreq = "weekly"
# priority = 0.6
# def items(self):
# return models.Tag.objects.all()
sitemaps = {
'blog': BlogSitemap,
'page': PageSitemap,
# 'category': CategorySitemap,
# 'tag': TagSitemap,
}
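# Hedged usage sketch (not part of this file): the sitemaps dict above is normally
# passed to Django's sitemap view from the project urls.py, roughly as below; the
# import path 'main.sitemaps' is an assumption based on this file's location.
#
#     from django.conf.urls import url
#     from django.contrib.sitemaps.views import sitemap
#     from main.sitemaps import sitemaps
#
#     urlpatterns = [
#         url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps}),
#     ]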
|
joke2k/django-options
|
django_options/formset.py
|
Python
|
bsd-3-clause
| 4,546
| 0.00264
|
from django import forms
from django.conf import settings
from django.contrib.admin.helpers import normalize_fieldsets, AdminReadonlyField, AdminField
from django.contrib.admin.templatetags.admin_static import static
from django.utils.safestring import mark_safe
class AdminForm(object):
def __init__(self, form, fieldsets, readonly_fields=None):
self.form, self.fieldsets = form, normalize_fieldsets(fieldsets)
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(self.form, name,
readonly_fields=self.readonly_fields,
**options
)
def first_field(self):
try:
fieldset_name, fieldset_options = self.fieldsets[0]
field_name = fieldset_options['fields'][0]
if not isinstance(field_name, basestring):
field_name = field_name[0]
return self.form[field_name]
except (KeyError, IndexError):
pass
try:
return iter(self.form).next()
except StopIteration:
return None
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None):
self.form = form
self._name, self.fields = name, fields
self.classes = u' '.join(classes)
self._description = description
self.readonly_fields = readonly_fields
@property
def name(self):
return self._name if self._name else self.form.title
@property
def description(self):
return self._description if self._description else self.form.description
def _media(self):
if 'collapse' in self.classes:
extra = '' if settings.DEBUG else '.min'
js = ['jquery%s.js' % extra,
'jquery.init.js',
'collapse%s.js' % extra]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__"):
self.fields = [field]
else:
self.fields = field
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield SingleReadonlyField(self.form, field, is_first=(i == 0))
else:
yield SingleField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe(u'\n'.join([self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields]).strip('\n'))
class SingleField(AdminField):
pass
# def __init__(self, form, field, is_first):
# super(SingleField, self).__init__(form, field, is_first)
# self.field = form[field] # A django.forms.BoundField instance
# self.is_first = is_first # Whether this field is first on the line
# self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
class SingleReadonlyField(AdminReadonlyField):
def __init__(self, form, field, is_first):
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ != '<lambda>' and field.__name__ or ''
else:
class_name = field
self.field = {
'name': class_name,
'label': form[field].label,
'field': field,
'help_text': form[field].help_text
}
self.form = form
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
def contents(self):
return self.form[self.field['field']]  # index by the stored field name, not the dict itself
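# Hedged usage sketch (illustrative, not part of django_options): AdminForm wraps a
# plain form with admin-style fieldsets and yields Fieldset -> Fieldline -> field
# objects when iterated. The form class and fieldset layout below are assumptions,
# and 'forms' is the module-level django.forms import above.
def _example_adminform_usage():
    class ExampleOptionsForm(forms.Form):
        title = forms.CharField()
        enabled = forms.BooleanField(required=False)
    adminform = AdminForm(
        ExampleOptionsForm(),
        fieldsets=((None, {'fields': ('title', 'enabled')}),),
        readonly_fields=('enabled',),
    )
    # materialize the nested iteration: SingleField for 'title',
    # SingleReadonlyField for 'enabled'
    return [[list(line) for line in fieldset] for fieldset in adminform]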
|
hjpwhu/Python
|
src/hjp.edu.nlp.data.task/semeval.py
|
Python
|
mit
| 269
| 0.003717
|
import codecs
f = codecs.open("/Users/hjp/Downloads/task/data/dev.txt", 'r', 'utf-8')
for line in f.readlines():
print(line)
sents = line.split('\t')
print(sents[1] + "\t" + sents[3])
for i in range(len(sents)):
print(sents[i])
f.close()
|
vangalamaheshh/snakemake
|
snakemake/exceptions.py
|
Python
|
mit
| 10,433
| 0.000192
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015, Johannes Köster"
__email__ = "koester@jimmy.harvard.edu"
__license__ = "MIT"
import os
import traceback
from tokenize import TokenError
from snakemake.logging import logger
def format_error(ex, lineno,
linemaps=None,
snakefile=None,
show_traceback=False):
if linemaps is None:
linemaps = dict()
msg = str(ex)
if linemaps and snakefile and snakefile in linemaps:
lineno = linemaps[snakefile][lineno]
if isinstance(ex, SyntaxError):
msg = ex.msg
location = (" in line {} of {}".format(lineno, snakefile) if
lineno and snakefile else "")
tb = ""
if show_traceback:
tb = "\n".join(format_traceback(cut_traceback(ex), linemaps=linemaps))
return '{}{}{}{}'.format(ex.__class__.__name__, location, ":\n" + msg
if msg else ".", "\n{}".format(tb) if
show_traceback and tb else "")
def get_exception_origin(ex, linemaps):
for file, lineno, _, _ in reversed(traceback.extract_tb(ex.__traceback__)):
if file in linemaps:
return lineno, file
def cut_traceback(ex):
snakemake_path = os.path.dirname(__file__)
for line in traceback.extract_tb(ex.__traceback__):
dir = os.path.dirname(line[0])
if not dir:
dir = "."
if not os.path.isdir(dir) or not os.path.samefile(snakemake_path, dir):
yield line
def format_traceback(tb, linemaps):
for file, lineno, function, code in tb:
if file in linemaps:
lineno = linemaps[file][lineno]
if code is not None:
yield ' File "{}", line {}, in {}'.format(file, lineno, function)
def print_exception(ex, linemaps):
"""
Print an error message for a given exception.
Arguments
ex -- the exception
linemaps -- a dict of a dict that maps for each snakefile
the compiled lines to source code lines in the snakefile.
"""
tb = "Full " + "".join(traceback.format_exception(type(ex), ex, ex.__traceback__))
logger.debug(tb)
if isinstance(ex, SyntaxError) or isinstance(ex, IndentationError):
logger.error(format_error(ex, ex.lineno,
linemaps=linemaps,
snakefile=ex.filename,
show_traceback=True))
return
origin = get_exception_origin(ex, linemaps)
if origin is not None:
lineno, file = origin
logger.error(format_error(ex, lineno,
linemaps=linemaps,
snakefile=file,
show_traceback=True))
return
elif isinstance(ex, TokenError):
logger.error(format_error(ex, None, show_traceback=False))
elif isinstance(ex, MissingRuleException):
logger.error(format_error(ex, None,
linemaps=linemaps,
snakefile=ex.filename,
show_traceback=False))
elif isinstance(ex, RuleException):
for e in ex._include + [ex]:
if not e.omit:
logger.error(format_error(e, e.lineno,
linemaps=linemaps,
snakefile=e.filename,
show_traceback=True))
elif isinstance(ex, WorkflowError):
logger.error(format_error(ex, ex.lineno,
linemaps=linemaps,
snakefile=ex.snakefile,
show_traceback=True))
elif isinstance(ex, KeyboardInterrupt):
logger.info("Cancelling snakemake on user request.")
else:
traceback.print_exception(type(ex), ex, ex.__traceback__)
class WorkflowError(Exception):
@staticmethod
def format_args(args):
for arg in args:
if isinstance(arg, str):
yield arg
else:
yield "{}: {}".format(arg.__class__.__name__, str(arg))
def __init__(self, *args, lineno=None, snakefile=None, rule=None):
super().__init__("\n".join(self.format_args(args)))
if rule is not None:
self.lineno = rule.lineno
self.snakefile = rule.snakefile
else:
self.lineno = lineno
self.snakefile = snakefile
self.rule = rule
class WildcardError(WorkflowError):
pass
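# Hedged sketch (illustrative, not part of snakemake): reporting a WorkflowError
# through print_exception. The Snakefile name and the line map are made-up values;
# linemaps translates compiled line numbers back to Snakefile source lines.
def _example_print_exception_usage():
    try:
        raise WorkflowError("example failure", lineno=3, snakefile="Snakefile")
    except WorkflowError as ex:
        print_exception(ex, linemaps={"Snakefile": {3: 3}})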
class RuleException(Exception):
"""
Base class for exceptions occurring within the
execution or definition of rules.
"""
def __init__(self,
message=None,
include=None,
lineno=None,
snakefile=None,
rule=None):
"""
Creates a new instance of RuleException.
Arguments
message -- the exception message
include -- iterable of other exceptions to be included
lineno -- the line the exception originates
snakefile -- the file the exception originates
"""
super(RuleException, self).__init__(message)
self._include = set()
if include:
for ex in include:
self._include.add(ex)
self._include.update(ex._include)
if rule is not None:
if lineno is None:
lineno = rule.lineno
if snakefile is None:
snakefile = rule.snakefile
self._include = list(self._include)
self.lineno = lineno
self.filename = snakefile
self.omit = not message
@property
def messages(self):
return map(str, (ex for ex in self._include + [self] if not ex.omit))
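# Hedged sketch (illustrative, not part of snakemake): chaining errors through the
# 'include' argument described in the RuleException docstring; all values are made up.
def _example_rule_exception_chaining():
    inner = RuleException("missing input", lineno=10, snakefile="Snakefile")
    outer = RuleException("rule all failed", include=[inner],
                          lineno=42, snakefile="Snakefile")
    return list(outer.messages)   # ['missing input', 'rule all failed']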
class InputFunctionException(WorkflowError):
pass
class MissingOutputException(RuleException):
pass
class IOException(RuleException):
def __init__(self, prefix, rule, files,
include=None,
lineno=None,
snakefile=None):
message = ("{} for rule {}:\n{}".format(prefix, rule, "\n".join(files))
if files else "")
super().__init__(message=message,
include=include,
lineno=lineno,
snakefile=snakefile,
rule=rule)
class MissingInputException(IOException):
def __init__(self, rule, files, include=None, lineno=None, snakefile=None):
super().__init__("Missing input files", rule, files, include,
lineno=lineno,
snakefile=snakefile)
class PeriodicWildcardError(RuleException):
pass
class ProtectedOutputException(IOException):
def __init__(self, rule, files, include=None, lineno=None, snakefile=None):
super().__init__("Write-protected output files", rule, files, include,
lineno=lineno,
snakefile=snakefile)
class UnexpectedOutputException(IOException):
def __init__(self, rule, files, include=None, lineno=None, snakefile=None):
super().__init__("Unexpectedly present output files "
"(accidentally created by other rule?)", rule, files,
include,
lineno=lineno,
snakefile=snakefile)
class AmbiguousRuleException(RuleException):
def __init__(self, filename, job_a, job_b, lineno=None, snakefile=None):
super().__init__(
"Rules {job_a} and {job_b} are ambiguous for the file {f}.\n"
"Expected input files:\n"
"\t{job_a}: {job_a.input}\n"
"\t{job_b}: {job_b.input}".format(job_a=job_a,
job_b=job_b,
f=filename),
lineno=lineno,
snakefile=snakefile)
self.rule1, self.rule2 = job_a.rule, job_b.rule
class CyclicGraphException(RuleException):
def __init__(self, repeatedrule,
|
lordtangent/arsenalsuite
|
cpp/apps/absubmit/maya/pumpThread.py
|
Python
|
gpl-2.0
| 504
| 0.049603
|
import maya.cmds as cmds
import maya.utils as utils
import threading
import time
import sys
from PyQt4 import QtCore, QtGui
pumpedThread = None
app = None
def pumpQt():
global app
def processor():
app.processEvents()
while 1:
time.sleep(0.01)
utils.executeDeferred( processor )
def initializePumpThread():
global pumpedThread
global app
if pumpedThread == None:
app = QtGui.QApplication(sys.argv)
pumpedThread = threading.Thread( target = pumpQt, args = () )
pumpedThread.start()
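# Hedged usage sketch (illustrative, not part of the original file): inside a Maya
# session the pump is started once before creating PyQt4 widgets; the dialog below
# is an assumption, and QtGui comes from the module-level import above.
def _example_pump_usage():
    initializePumpThread()
    dialog = QtGui.QDialog()
    dialog.show()   # its events are serviced through Maya's idle loop
    return dialog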
|