repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
glidernet/python-ogn-client | ogn/parser/aprs_comment/tracker_parser.py | Python | agpl-3.0 | 3,367 | 0.012177 | from ogn.parser.pattern import PATTERN_TRACKER_POSITION_COMMENT, PATTERN_TRACKER_STATUS_COMMENT
from ogn.parser.utils import FPM_TO_MS, HPM_TO_DEGS
from .base import BaseParser
class TrackerParser(BaseParser):
def __init__(self):
self.beacon_type = 'tracker'
self.position_pattern = PATTERN_TRACKER_POSITION_COMMENT
self.status_pattern = PATTERN_TRACKER_STATUS_COMMENT
def parse_position(self, aprs_comment):
match = self.position_pattern.match(aprs_comment)
result = {}
if match.group('details'):
result.update({
'address_type': int(match.group('details'), 16) & 0b00000011,
'aircraft_type': (int(match.group('details'), 16) & 0b01111100) >> 2,
'stealth': (int(match.group('details'), 16) & 0b10000000) >> 7 == 1,
'address': match.group('address'),
})
if match.group('climb_rate'): result['climb_rate'] = int(match.group('climb_rate')) * FPM_TO_MS
if match.group('turn_rate'): result['turn_rate'] = float(match.group('turn_rate')) * HPM_TO_DEGS
if match.group('flight_level'): result['flight | level'] = float(match.group('flight_level'))
if match.group('signal_quality'): result['signal_quality'] = float(match.group('signal_quality'))
if match.group('error_count'): result['error_count'] = int(match.group('error_count'))
if match.group('frequency_offset'): result[ | 'frequency_offset'] = float(match.group('frequency_offset'))
if match.group('gps_quality'):
result.update({
'gps_quality': {
'horizontal': int(match.group('gps_quality_horizontal')),
'vertical': int(match.group('gps_quality_vertical'))
}
})
if match.group('signal_power'): result['signal_power'] = float(match.group('signal_power'))
return result
def parse_status(self, aprs_comment):
match = self.status_pattern.match(aprs_comment)
if match:
result = {}
if match.group('hardware_version'): result['hardware_version'] = int(match.group('hardware_version'))
if match.group('software_version'): result['software_version'] = int(match.group('software_version'))
if match.group('gps_satellites'): result['gps_satellites'] = int(match.group('gps_satellites'))
if match.group('gps_quality'): result['gps_quality'] = int(match.group('gps_quality'))
if match.group('gps_altitude'): result['gps_altitude'] = int(match.group('gps_altitude'))
if match.group('pressure'): result['pressure'] = float(match.group('pressure'))
if match.group('temperature'): result['temperature'] = float(match.group('temperature'))
if match.group('humidity'): result['humidity'] = int(match.group('humidity'))
if match.group('voltage'): result['voltage'] = float(match.group('voltage'))
if match.group('transmitter_power'): result['transmitter_power'] = int(match.group('transmitter_power'))
if match.group('noise_level'): result['noise_level'] = float(match.group('noise_level'))
if match.group('relays'): result['relays'] = int(match.group('relays'))
return result
else:
return {'comment': aprs_comment}
|
JR--Chen/flasky | app/spider/decorators.py | Python | mit | 641 | 0.00156 | from functools import wraps
def retry_task(f):
@wraps(f)
def decorated_function(*args, **kwargs):
retry = kwargs.get('retry', False)
if retry == 0:
return f(*args, **kwargs)
elif retry > 0:
for x in range(0, retry):
result = f(*args, **kwargs)
| if result | ['status'] != 500:
return result
return f(*args, **kwargs)
elif retry == -1:
while retry:
result = f(*args, **kwargs)
if result['status'] != 500:
return result
return decorated_function
|
viaict/viaduct | app/models/user.py | Python | mit | 5,021 | 0 | from datetime import datetime
from flask_login import UserMixin, AnonymousUserMixin
from typing import List
from app import db, constants
from app.models.base_model import BaseEntity
from app.models.education import Education
from app.models.group import Group
class AnonymousUser(AnonymousUserMixin):
"""
Has attributes for flask-login.
is_anonymous = True, is_active & is_authenticated = False.
current_user is equal to an instance of this class whenever the user is
not logged in.
Check logged in using:
>>> from flask_login import login_required
>>> from flask import Blueprint
>>>
>>> blueprint = Blueprint("somemodule", __name__)
>>> @blueprint.route("/someroute")
>>> @login_required
Keep in mind, all the user attributes are not available when the user is
not logged in.
"""
id = 0
has_paid = False
groups: List[Group] = []
class User(db.Model, UserMixin, BaseEntity):
"""The groups property is backreferenced from group.py."""
__tablename__ = 'user'
prints = ('id', 'email', 'password', 'first_name', 'last_name',
'student_id', 'receive_information')
email = db.Column(db.String(200), unique=True)
password = db.Column(db.String(60))
first_name = db.Column(db.String(256))
last_name = db.Column(db.String(256))
locale = db.Column(db.Enum(*list(constants.LANGUAGES.keys()),
name='locale'),
default="nl")
# Membership status
# TODO Rename to member
has_paid = db.Column(db.Boolean, default=None)
paid_date = db.Column(db.DateTime)
honorary_member = db.Column(db.Boolean, default=False)
favourer = db.Column(db.Boolean, default=False)
# TODO REMOVE
shirt_size = db.Column(db.Enum('Small', 'Medium', 'Large',
name='user_shirt_size'))
# TODO REMOVE
allergy = db.Column(db.String(1024)) # Allergy / medication
# TODO REMOVE
diet = db.Column(db.Enum('Vegetarisch', 'Veganistisch', 'Fruitarier',
name='user_diet'))
# TODO REMOVE
gender = db.Column(db.Enum('Man', 'Vrouw', 'Geen info',
name='user_sex'))
phone_nr = db.Column(db.String(16))
# TODO REMOVE
emergency_phone_nr = db.Column(db.String(16))
# TODO REMOVE not used.
description = db.Column(db.String(1024)) # Description of user
# Study
student_id = db.Column(db.String(256))
education_id = db.Column(db.Integer, db.ForeignKey('education.id'))
birth_date = db.Column(db.Date)
study_start = db.Column(db.Date)
receive_information = db.Column(db.Boolean, default=False)
disabled = db.Column(db.Boolean, default=False)
# Location
address = db.Column(db.String(256))
zip = db.Column(db.String(8))
city = db.Column(db.String(256))
country = db.Column(db.String(256), default='Nederland')
alumnus = db.Co | lumn(db.Boolean, default=False)
education = db.relationship(Education,
backref=db.backref('user', lazy='dynamic'))
cope | rnica_id = db.Column(db.Integer(), nullable=True)
avatar_file_id = db.Column(db.Integer, db.ForeignKey('file.id'))
student_id_confirmed = db.Column(db.Boolean, default=False, nullable=False)
def __init__(self, email=None, password=None, first_name=None,
last_name=None, student_id=None, education_id=None,
birth_date=None, study_start=None, receive_information=None):
if not email:
self.id = 0
self.email = email
self.password = password
self.first_name = first_name
self.last_name = last_name
self.student_id = student_id
self.education_id = education_id
self.birth_date = birth_date
self.study_start = study_start
self.receive_information = receive_information
def __setattr__(self, name, value):
"""
If has_paid is set to true, we want to store the date that happend.
Because of legacy code and sqlalchemy we do it this way
"""
if name == 'has_paid' and value:
super(User, self).__setattr__(
"paid_date", datetime.now())
super(User, self).__setattr__(name, value)
def __str__(self):
return self.name
def get_user_id(self):
"""Retrieve the unique id of the user for authlib."""
return self.id
def update_email(self, new_email):
if self.email == new_email:
return
old_email = self.email
for group in self.groups:
if not group.maillist:
continue
group.remove_email_from_maillist(old_email)
group.add_email_to_maillist(new_email)
self.email = new_email
@property
def name(self):
if not self.first_name and not self.last_name:
return None
return ' '.join([self.first_name, self.last_name])
|
cjlux/Poppy-ENSAM-Talence | Environnement_programmation/Codes Xavier Mariot/codes test.py | Python | gpl-3.0 | 1,215 | 0.009959 | # -*- coding: utf-8 -*-
"""
Éditeur de Spyder
Ce script temporaire est sauvegardé ici :
/home/poppy/.spyder2/.temp.py
"""
# bibliothèques nécessaires au fonctionnement de Poppy humanoide
import pypot
from poppy.creatures import PoppyHumanoid
import kinematics as kin
# démmarrer la simulation poppy dans vrep. Il faut lancer V-rep avant d'executer le code.
poppy = PoppyHumanoid(simulator='vrep')
# Relancer la simulation
poppy.reset_simulation()
# Arrêter complètement la simulation poppy
poppy.stop_simulation()
pypot.vrep.close_all_connections()
# Affiche la liste des moteurs. Le schema de Poppy avec les moteurs est dans la doc.
print"Réponse:"
print "j'ai", len( poppy.motors ), "moteurs"
print "ils sont tous indexés dans une ", type( poppy.motors ), "qui s'appelle poppy.motors \n\n la voici: "
for m in poppy.motors:
| print "-------------"
print " | nom du moteur: ", m.name
print "position actuelle du moteur: ", m.present_position, "degrès"
# Poppy dit oui
for i in range(2):
poppy.head_y.goal_position = -20
poppy.head_y.goal_position = +20
poppy.head_y.goal_position = 0
poppy.l_shoulder_y.goal_position = -45
poppy.l_shoulder_x.goal_position = 45
|
michaelmior/freudiancommits | freudiancommits/settings.py | Python | gpl-3.0 | 6,595 | 0.00091 | # Django settings for freudiancommits project.
import os
DEBUG = True if os.environ.get('DJANGO_DEBUG', None) == '1' else False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and |
# calendars according | to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
# Don't require email addresses
SOCIALACCOUNT_EMAIL_REQUIRED = False
SOCIALACCOUNT_EMAIL_VERIFICATION = 'none'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth'
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'freudiancommits.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'freudiancommits.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'freudiancommits.main',
'freudiancommits.github',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'south',
'gunicorn',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
if 'AWS_STORAGE_BUCKET_NAME' in os.environ:
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_S3_CUSTOM_DOMAIN = AWS_STORAGE_BUCKET_NAME
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
DEFAULT_FILE_STORAGE = 's3_folder_storage.s3.DefaultStorage'
DEFAULT_S3_PATH = 'media'
STATICFILES_STORAGE = 's3_folder_storage.s3.StaticStorage'
STATIC_S3_PATH = 'static'
AWS_S3_SECURE_URLS = False
AWS_QUERYSTRING_AUTH = False
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
MEDIA_URL = '//%s/%s/' % \
(AWS_STORAGE_BUCKET_NAME, DEFAULT_S3_PATH)
STATIC_ROOT = '/%s/' % STATIC_S3_PATH
STATIC_URL = '//%s/%s/' % \
(AWS_STORAGE_BUCKET_NAME, STATIC_S3_PATH)
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
LOGIN_REDIRECT_URL = '/github/loading/'
|
michaelhowden/eden | modules/s3/s3aaa.py | Python | mit | 357,874 | 0.002582 | # -*- coding: utf-8 -*-
""" Authentication, Authorization, Accouting
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: (c) 2010-2015 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("AuthS3",
"S3Permission",
"S3Audit",
"S3RoleManager",
"S3OrgRoleManager",
"S3PersonRoleManager",
)
import datetime
#import re
from uuid import uuid4
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import *
from gluon.sqlhtml import OptionsWidget
from gluon.storage import Storage
from gluon.tools import Auth, callback, DEFAULT, replace_id
from gluon.utils import web2py_uuid
from s3dal import Row, Rows, Query, Table
from s3datetime import S3DateTime
from s3error import S3PermissionError
from s3fields import S3Represent, s3_uid, s3_timestamp, s3_deletion_status, s3_comments
from s3rest import S3Method
from s3track import S3Tracker
from s3utils import s3_addrow, s3_get_extension, s3_mark_required
DEBUG = False
if DEBUG:
import sys
print >> sys.stderr, "S3AAA: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# =============================================================================
class AuthS3(Auth):
"""
S3 extensions of the gluon.tools.Auth class
- override:
| - __init__
- define_tables
- login_bare
- set_cookie
- login
- register
- email_reset_password
- verify_email
- profile
- has_membership
- requires_membership
- S3 extension for user registration:
- s3_register_validation
- s3_user_register_onaccept
- S3 extension for user administration:
| - configure_user_fields
- s3_verify_user
- s3_approve_user
- s3_link_user
- s3_user_profile_onaccept
- s3_link_to_person
- s3_link_to_organisation
- s3_link_to_human_resource
- s3_link_to_member
- s3_approver
- S3 custom authentication methods:
- s3_impersonate
- s3_logged_in
- S3 user role management:
- get_system_roles
- s3_set_roles
- s3_create_role
- s3_delete_role
- s3_assign_role
- s3_withdraw_role
- s3_has_role
- s3_group_members
- S3 ACL management:
- s3_update_acls
- S3 user identification helpers:
- s3_get_user_id
- s3_user_pe_id
- s3_logged_in_person
- s3_logged_in_human_resource
- S3 core authorization methods:
- s3_has_permission
- s3_accessible_query
- S3 variants of web2py authorization methods:
- s3_has_membership
- s3_requires_membership
- S3 record ownership methods:
- s3_make_session_owner
- s3_session_owns
- s3_set_record_owner
"""
# Configuration of UIDs for system roles
S3_SYSTEM_ROLES = Storage(ADMIN = "ADMIN",
AUTHENTICATED = "AUTHENTICATED",
ANONYMOUS = "ANONYMOUS",
EDITOR = "EDITOR",
MAP_ADMIN = "MAP_ADMIN",
ORG_ADMIN = "ORG_ADMIN",
ORG_GROUP_ADMIN = "ORG_GROUP_ADMIN",
)
def __init__(self):
""" Initialise parent class & make any necessary modifications """
Auth.__init__(self, current.db)
self.settings.lock_keys = False
self.settings.login_userfield = "email"
self.settings.lock_keys = True
messages = self.messages
messages.lock_keys = False
# @ToDo Move these to deployment_settings
messages.approve_user = \
"""Your action is required to approve a New User for %(system_name)s:
%(first_name)s %(last_name)s
%(email)s
Please go to %(url)s to approve this user."""
messages.email_approver_failed = "Failed to send mail to Approver - see if you can notify them manually!"
messages.email_sent = "Verification Email sent - please check your email to validate. If you do not receive this email please check you junk email or spam filters"
messages.email_verification_failed = "Unable to send verification email - either your email is invalid or our email server is down"
messages.email_verified = "Email verified - you can now login"
messages.duplicate_email = "This email address is already in use"
messages.help_utc_offset = "The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones."
messages.help_mobile_phone = "Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages."
messages.help_organisation = "Entering an Organization is optional, but doing so directs you to the appropriate approver & means you automatically get the appropriate permissions."
messages.help_image = "You can either use %(gravatar)s or else upload a picture here. The picture will be resized to 50x50."
messages.label_image = "Profile Image"
messages.label_organisation_id = "Organization"
messages.label_org_group_id = "Coalition"
messages.label_remember_me = "Remember Me"
messages.label_utc_offset = "UTC Offset"
#messages.logged_in = "Signed In"
#messages.logged_out = "Signed Out"
#messages.submit_button = "Signed In"
messages.new_user = \
"""A New User has registered for %(system_name)s:
%(first_name)s %(last_name)s
%(email)s
No action is required."""
messages.password_reset_button='Request password reset'
messages.profile_save_button = "Apply changes"
messages.registration_disabled = "Registration Disabled!"
messages.registration_verifying = "You haven't yet Verified your account - please check your email"
messages.reset_password = "Click on the link %(url)s to reset your password"
messages.verify_email = "Click on the link %(url)s to verify your email"
messages.verify_email_subject = "%(system_name)s - Verify Email"
messages.welcome_email_subject = "Welcome to %(system_name)s"
messages.welcome_email = \
"""Welcome to %(system_name)s
- You can start using %(system_name)s at: %(url)s
- To edit your profile go to: %(url)s%(profile)s
Thank you"""
messages.lock_keys = True
# S3Permission
self.permission = S3P |
shtripat/gluster_bridge | tendrl/gluster_bridge/gevent_util.py | Python | lgpl-2.1 | 1,785 | 0 | from contextlib import contextmanager
from functools import wraps
from gevent import getcurrent
class ForbiddenYield(Exception):
pass
@contextmanager
def nosleep_mgr():
old_switch_out = getattr(getcurrent(), 'switch_out', None)
def asserter():
raise ForbiddenYield("Context switch during `nosleep` region!")
getcurrent().switch_out = asserter
try:
yield
finally:
if old_switch_out is not None:
getcurrent().switch_out = old_switch_out
else:
del getcurrent().switch_out
def nosleep(func):
"""This decorator is used to assert that no geven greenlet yields
occur in the decorated function.
"""
@wraps(func)
def wrapped(*args, **kwargs):
with nosleep_mgr():
return func(*args, **kwargs)
return wrapped
if __name__ == '__main__':
# Tests for nosleep()
# ===================
import gevent.greenlet
import gevent.queue
from gevent import sleep
# This should raise no exception (print doesn't yield)
with nosleep_mgr():
print("test print!")
# Thi | s should raise an exception when we try push to a fixed size queue
try:
smallq = gevent.queue.Queue(1)
with nosleep_mgr():
smallq.put(1)
smallq.put(2)
except ForbiddenYield:
pass
else:
raise AssertionError("Failed")
# This should raise no exception when we try push to an unlimited
# size queue
bigq = gevent.queue.Queue(0)
| with nosleep_mgr():
for i in range(0, 10000):
bigq.put(i)
# This should raise an exception on sleep
# FIXME!!!!
try:
sleep(0.1)
except ForbiddenYield:
pass
else:
raise AssertionError("Failed")
|
andrwng/kudu | build-support/run_dist_test.py | Python | apache-2.0 | 5,863 | 0.013133 | #!/usr/bin/env python2
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script runs on the distributed-test slave and acts
# as a wrapper around run-test.sh.
#
# The distributed testing system can't pass in environment variables
# to commands, so this takes some parameters, turns them into environment
# variables, and then executes the test wrapper.
#
# We also 'cat' the test log upon completion so that the test logs are
# uploaded by the test slave back.
import glob
import optparse
import os
import re
import shutil
import subprocess
import sys
ME = os.path.abspath(__file__)
ROOT = os.path.abspath(os.path.join(os.path.dirname(ME), ".."))
def is_elf_binary(path):
""" Determine if the given path is an ELF binary (executable or shared library) """
if not os.path.isfile(path) or os.path.islink(path):
return False
try:
with file(path, "rb") as f:
magic = f.read(4)
return magic == "\x7fELF"
except:
# Ignore unreadable files
return False
def fix_rpath_component(bin_path, path):
"""
Given an RPATH component 'path' of the binary located at 'bin_path',
fix the thirdparty dir to be relative to the binary rather than absolute.
"""
rel_tp = os.path.relpath(os.path.join(ROOT, "thirdparty/"),
os.path.dirname(bin_path))
path = re.sub(r".*thirdparty/", "$ORIGIN/"+rel_tp + "/", path)
return path
def fix_rpath(path):
"""
Fix the RPATH/RUNPATH of the binary located at 'path' so that
the thirdparty/ directory is properly found, even though we will
run the binary at a different path than it was originally built.
"""
# Fetch the original rpath.
p = subprocess.Popen(["chrpath", path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
return
rpath = re.search("R(?:UN)?PATH=(.+)", stdout.strip()).group(1)
# Fix it to be relative.
new_path = ":".join(fix_rpath_component(path, c) for c in rpath.split(":"))
# Write the new rpath back into the binary.
subprocess.check_call(["chrpath", "-r", new_path, path])
def fixup_rpaths(root):
"""
Recursively walk the directory tree 'root' and fix the RPATH for any
ELF files (binaries/libraries) that are found.
"""
for dirpath, dirnames, filenames in os.walk(root):
for f in filenames:
p = os.path.join(dirpath, f)
if is_elf_binary(p):
fix_rpath(p)
def main():
p = optparse.OptionParser(usage="usage: %prog [options] <test-name>")
p.add_option("-e", "--env", dest="env", type="string", action="append",
help="key=value pairs for environment variables",
default=[])
p.add_option("--collect-tmpdir", dest="collect_tmpdir", action="store_true",
help="whether to collect the test tmpdir as an artifact if the test fails",
default=False)
options, args = | p.parse_args()
if len(args) < 1:
p.print_help(sys.stderr)
sys.exit(1)
test_exe = args[0]
test_name, _ = os.path.splitext(os.path.basename(test_exe))
test_dir = os.path.dirname(test_exe)
env = os.environ.copy()
for env_pair in options.env:
(k, v) = env_pair.split("=", 1)
env[k] = v
# Fix the RPATHs of any binaries. During the build, we end up with
# absolute paths from the build machine. This fixes the paths to be
# binary-relati | ve so that we can run it on the new location.
#
# It's important to do this rather than just putting all of the thirdparty
# lib directories into $LD_LIBRARY_PATH below because we need to make sure
# that non-TSAN-instrumented runtime tools (like 'llvm-symbolizer') do _NOT_
# pick up the TSAN-instrumented libraries, whereas TSAN-instrumented test
# binaries (like 'foo_test' or 'kudu-tserver') _DO_ pick them up.
fixup_rpaths(os.path.join(ROOT, "build"))
fixup_rpaths(os.path.join(ROOT, "thirdparty"))
# Add environment variables for Java dependencies. These environment variables
# are used in mini_hms.cc.
env['HIVE_HOME'] = glob.glob(os.path.join(ROOT, "thirdparty/src/apache-hive-*-bin"))[0]
env['HADOOP_HOME'] = glob.glob(os.path.join(ROOT, "thirdparty/src/hadoop-*"))[0]
env['JAVA_HOME'] = glob.glob("/usr/lib/jvm/java-1.8.0-*")[0]
env['LD_LIBRARY_PATH'] = ":".join(
[os.path.join(ROOT, "build/dist-test-system-libs/"),
os.path.abspath(os.path.join(test_dir, "..", "lib"))])
# Don't pollute /tmp in dist-test setting. If a test crashes, the dist-test slave
# will clear up our working directory but won't be able to find and clean up things
# left in /tmp.
test_tmpdir = os.path.abspath(os.path.join(ROOT, "test-tmp"))
env['TEST_TMPDIR'] = test_tmpdir
env['ASAN_SYMBOLIZER_PATH'] = os.path.join(ROOT, "thirdparty/installed/uninstrumented/bin/llvm-symbolizer")
rc = subprocess.call([os.path.join(ROOT, "build-support/run-test.sh")] + args,
env=env)
if rc != 0 and options.collect_tmpdir:
os.system("tar czf %s %s" % (os.path.join(test_dir, "..", "test-logs", "test_tmpdir.tgz"), test_tmpdir))
sys.exit(rc)
if __name__ == "__main__":
main()
|
jnayak1/cs3240-s16-team16 | upload/models.py | Python | mit | 726 | 0.002755 | from django.db import models
# Create your models here.
class SignUp(models.Model):
| email = models.EmailField()
full_name = models.CharField(max_length=120, blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __str__(self):
return self.email
class PublicKey(models.Model):
file | = models.CharField(max_length=50)
key = models.CharField(max_length=32)
def __str__(self):
return self.file
# class UploadFile(models.Model):
# name = models.CharField(max_length=120, blank=True)
# file = models.FileField()
#
# def __str__(self):
# return self.name
|
EarToEarOak/RTLSDR-Scanner | rtlsdr_scanner/devices.py | Python | gpl-3.0 | 4,603 | 0.000434 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 -2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ctypes import c_ubyte, string_at
import rtlsdr
import serial
class DeviceGPS(object):
    """Configuration for a GPS source: serial NMEA, GPSd or a NMEA server.

    NOTE(review): repaired corrupted tokens in the BYTES and PARITIES
    constant lists ('| serial.EIGHTBITS', 's | erial.PARITY_EVEN').
    """

    # Connection types and their human-readable labels (parallel lists).
    NMEA_SERIAL, GPSD, GPSD_OLD, NMEA_TCP = range(4)
    TYPE = ['NMEA (Serial)', 'GPSd', 'GPSd (Legacy)', 'NMEA (Server)']
    # Valid serial-port framing options (pyserial constants).
    BYTES = [serial.FIVEBITS, serial.SIXBITS, serial.SEVENBITS,
             serial.EIGHTBITS]
    PARITIES = [serial.PARITY_NONE, serial.PARITY_EVEN, serial.PARITY_ODD,
                serial.PARITY_MARK, serial.PARITY_SPACE]
    STOPS = [serial.STOPBITS_ONE, serial.STOPBITS_ONE_POINT_FIVE,
             serial.STOPBITS_TWO]

    def __init__(self):
        self.name = 'GPS'
        self.type = self.GPSD
        # For GPSd this is host:port; for NMEA_SERIAL it is the port path.
        self.resource = 'localhost:2947'
        self.baud = 115200
        self.bytes = serial.EIGHTBITS
        self.parity = serial.PARITY_NONE
        self.stops = serial.STOPBITS_ONE
        self.soft = False

    def get_bauds(self):
        """Return the valid baud rates for serial NMEA, else None."""
        if self.type == DeviceGPS.NMEA_SERIAL:
            return serial.Serial.BAUDRATES
        return None

    def get_serial_desc(self):
        """Return a compact 'port baud-framing' description string."""
        port = self.resource.split('/')
        return '{} {}-{}{}{:g}'.format(port[0], self.baud, self.bytes,
                                       self.parity, self.stops)

    def get_desc(self):
        """Return a description suitable for display in the UI."""
        if self.type == DeviceGPS.NMEA_SERIAL:
            return self.get_serial_desc()
        return self.resource
class DeviceRTL(object):
    """Settings for one rtl-sdr dongle (local USB device or rtl_tcp server)."""

    def __init__(self):
        self.isDevice = True       # True for a local USB dongle, False for rtl_tcp
        self.indexRtl = None       # librtlsdr device index (local devices only)
        self.name = None
        self.serial = ''
        self.server = 'localhost'  # rtl_tcp host (remote devices only)
        self.port = 1234
        self.gains = []            # valid tuner gains in dB
        self.gain = 0
        self.calibration = 0       # frequency correction, ppm
        self.lo = 0                # local-oscillator offset
        self.offset = 250e3        # tuning offset, Hz
        self.tuner = 0
        self.levelOff = 0          # level adjustment, dB

    def set(self, device):
        """Copy the user-tunable settings from another DeviceRTL."""
        self.gain = device.gain
        self.calibration = device.calibration
        self.lo = device.lo
        self.offset = device.offset
        self.tuner = device.tuner
        self.levelOff = device.levelOff

    def get_gains_str(self):
        """Return the valid gains as strings (comprehension replaces the
        original manual append loop)."""
        return [str(gain) for gain in self.gains]

    def get_closest_gain_str(self, desired):
        """Return the valid gain nearest to *desired*, as a string."""
        gain = min(self.gains, key=lambda n: abs(n - desired))
        return str(gain)
def get_devices_rtl(currentDevices=None, statusBar=None):
    """Enumerate attached rtl-sdr dongles and merge in saved settings.

    currentDevices -- previously configured DeviceRTL objects; settings of
                      matching physical devices are copied over, and remote
                      (rtl_tcp) entries are carried through unchanged.
    statusBar      -- optional UI status bar to show progress on.
    Returns the list of DeviceRTL objects.
    """
    if statusBar is not None:
        statusBar.set_general("Refreshing device list...")
    if currentDevices is None:
        currentDevices = []

    devices = []
    count = rtlsdr.librtlsdr.rtlsdr_get_device_count()
    for dev in range(0, count):
        device = DeviceRTL()
        device.indexRtl = dev
        device.name = format_device_rtl_name(rtlsdr.librtlsdr.rtlsdr_get_device_name(dev))
        buffer1 = (c_ubyte * 256)()
        buffer2 = (c_ubyte * 256)()
        # Renamed from 'serial': the original shadowed the imported
        # pyserial module within this function.
        serialBuf = (c_ubyte * 256)()
        rtlsdr.librtlsdr.rtlsdr_get_device_usb_strings(dev, buffer1, buffer2,
                                                       serialBuf)
        device.serial = string_at(serialBuf)
        try:
            sdr = rtlsdr.RtlSdr(dev)
        except IOError:
            # Device busy or inaccessible -- skip it.
            continue
        # NOTE(review): the probe handle 'sdr' is never closed here --
        # presumably released by GC; confirm against librtlsdr usage.
        device.gains = sdr.valid_gains_db
        device.calibration = 0.0
        device.lo = 0.0
        for conf in currentDevices:
            if conf.isDevice and device.name == conf.name and device.serial == conf.serial:
                device.set(conf)
        devices.append(device)

    # Carry through remote (rtl_tcp) device entries unchanged.
    for conf in currentDevices:
        if not conf.isDevice:
            devices.append(conf)

    if statusBar is not None:
        statusBar.set_general("")
    return devices
def format_device_rtl_name(name):
    """Return *name* with path separators ('/' and '\\') turned into spaces."""
    for separator in ("/", "\\"):
        name = name.replace(separator, " ")
    return name
if __name__ == '__main__':
    # This module is a library; direct the user to the launcher script.
    # (Converted the Py2-only print statement to the form valid in both
    # Python 2 and 3.)
    print('Please run rtlsdr_scan.py')
    exit(1)
|
rlowrance/find_best_mall | yelp_crawler/filter.py | Python | mit | 4,773 | 0.020113 | # filter the store data
# remove the redundant store's name
import csv
import re
import unicodedata
import sys
reload(sys)
import operator
sys.setdefaultencoding("utf-8")
root = "/Users/lily/workspace/find_best_mall/yelp_crawler"
dir = root + "/dataset"
file = dir + "/store.csv"
final_file = dir + "/tmp_yelp.csv"
def remove_accent_marks(input_str):
    """Return *input_str* with combining accent marks removed.

    Decomposes to NFKD and drops the combining code points, so e.g.
    u'café' -> u'cafe'.  The original called the Py2-only ``unicode``
    builtin; the ``u"%s" %`` coercion below works on Python 2 and 3.
    """
    nkfd_form = unicodedata.normalize('NFKD', u"%s" % input_str)
    return u"".join([c for c in nkfd_form if not unicodedata.combining(c)])
# --- Pass 1: read raw store rows, normalise each name, de-duplicate ---
# NOTE(review): delete_rules, sub_rules, get_rules(), stores_dic,
# stores_list and store_id_file are defined elsewhere in this script
# (not visible here) -- confirm before refactoring further.
# NOTE(review): repaired two corrupted tokens ('| rule_to' and
# '..., | mall_id') and dropped the close() calls made redundant by the
# with-statements.
with open(file, 'rU') as store_file:
    reader = csv.reader(store_file, delimiter=",")
    for row in reader:
        # lowercase and trim string
        mall_id = row[2].lower().strip()
        name = row[1].lower().strip()
        print(name)
        # collapse runs of whitespace to a single space
        name = re.sub("\s+", " ", name)
        # remove special characters
        name = re.sub("(\s*)[&|'|\\\|\/](\s*)", "", name)
        # replace accent marks in the name
        name = remove_accent_marks(name)
        # filter atm & vending machines
        if (re.search("(\s*atm\s*)|(\s*vending\s*machines\s*)|(^(advanced)\s*)", name)):
            continue
        # filter open EXCEPT ["open advanced", "mri open mobile"]
        if (re.search("(^(open)\s*((?!(mobile))|(?!(advanced mri)))$)", name)):
            continue
        # remove [".co" ",the" "outlet" "and co." "Now Open" "Opening"]
        name = re.sub("(\s*and\s*co\.\s*)|(\s*co\.\s*)|(\s*\,\s*the\s*)|(\s*outlet\s*)|(\s*([-|\(|\*|\~]?)\s*((now\s*open)|(opening)|(reopening))\s*(.*)$)", " ", name)
        # remove ["location", "new location", "two locations", "relocation"]
        name = re.sub("(\s*([-|\(|\*|\~]?)\s*((new\s*location(s?))|(location(s?))|(two location(s?)|(relocation(s?))\s*(.*)$))\s*(.*)$)", "", name)
        # remove " - " and everything after it
        name = re.sub("(\s+\-\s+.*)$", "", name)
        # replace hyphens with spaces
        name = re.sub("\-", " ", name)
        # remove level qualifiers (lower level, upper level, level 2, ...)
        name = re.sub("(^((level)|(next level))\s*(\d?)\s+)", "", name)
        name = re.sub("(\s*((level)|(upper level)|(lower level))\s*(\d?)(.*)$)", "", name)
        # normalise well-known chains to a single canonical name
        common_stores = ["aldo", "starbucks", "att", "aaa", "advance america", "as seen on tv", "sanrio", "hollister", "five guy", "rubios", "ecoatm", "hooter", "joppa", "wasabi", "guitar center", " rainforest cafe", "relax the back", "uno chicago grill", "nys collection", "tmobile", "macdonald", "verizon"]
        for common_store in common_stores:
            if (re.search(common_store, name)):
                name = common_store
        # remove other special characters: * # ! ? ' @ $ + ; % { }
        name = re.sub("(\s*)[\.|\,|\\\%|\\\"|\\\'|\(|\)|\?|\@|\$|\+|\;|\\'|\\\"|\{|\}|\!|\*|\#|\:|\;|\\'|\!|\\\](\s*)", " ", name)
        name = re.sub("\s+", " ", name)
        name = name.strip()
        # delete rule: names matching any delete rule are skipped entirely
        IS_IGNORE = False
        for rule in delete_rules:
            if (re.search(rule, name) != None):
                IS_IGNORE = True
                break
        if (IS_IGNORE):
            continue
        # sub rule: replace the name with the matching rule's text
        for rule in sub_rules:
            if (re.search(rule, name) != None):
                new_rule = re.sub("\^", "", rule)
                name = new_rule
                break
        # change rule: rewrite names from A -> B
        change_rules = get_rules("change")
        for rule in change_rules:
            rule = rule.split("->")
            rule_from = rule[0].strip()
            rule_to = rule[1].strip()
            name = re.sub(rule_from, rule_to, name)
        # if the name is only whitespace after cleaning, ignore it
        if (len(re.sub("\s*", "", name)) <= 0):
            continue
        if (len(re.sub("\s*", "", name)) > 0):
            # assign a stable unique id the first time a name is seen
            if not (name in stores_dic):
                store_id = len(stores_dic)
                stores_dic[name] = store_id
            stores_list.append([name, stores_dic[name], mall_id])

# --- Pass 2: write the per-mall store list, sorted by store id ---
# ('out_file' avoids shadowing the builtin/file-path variable 'file')
with open(final_file, 'wb') as out_file:
    writer = csv.writer(out_file, delimiter=',')
    my_list = sorted(stores_list, key=operator.itemgetter(1))
    writer.writerow(["store_name", "store_new_id", "mall_id"])
    for val in my_list:
        writer.writerow(val)

# --- Pass 3: write the name -> id mapping ---
with open(store_id_file, "wb") as out_file:
    writer = csv.writer(out_file, delimiter=',')
    writer.writerow(["store_name", "store_new_id"])
    my_dic = sorted(stores_dic.items(), key=lambda x: x[1])
    for val in my_dic:
        writer.writerow(val)
|
leeping/mdtraj | tests/test_reporter.py | Python | lgpl-2.1 | 7,439 | 0.001613 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import numpy as np
import pytest
import mdtraj as md
from mdtraj.formats import HDF5TrajectoryFile, NetCDFTrajectoryFile
from mdtraj.reporters import HDF5Reporter, NetCDFReporter, DCDReporter
from mdtraj.testing import eq
# Probe for OpenMM; the whole module is skipped when it is missing.
# NOTE(review): repaired a corrupted token in the import line
# ('Platfo | rm' -> 'Platform').
try:
    from simtk.unit import nanometers, kelvin, picoseconds, femtoseconds
    from simtk.openmm import LangevinIntegrator, Platform
    from simtk.openmm.app import PDBFile, ForceField, Simulation, CutoffNonPeriodic, CutoffPeriodic, HBonds
    HAVE_OPENMM = True
except ImportError:
    HAVE_OPENMM = False

# special pytest global to mark all tests in this module
pytestmark = pytest.mark.skipif(not HAVE_OPENMM, reason='test_reporter.py needs OpenMM.')
def test_reporter(tmpdir, get_fn):
    """Run a short non-periodic OpenMM simulation with HDF5, NetCDF and DCD
    reporters attached, then cross-check the three trajectory files.
    """
    pdb = PDBFile(get_fn('native.pdb'))
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    # NO PERIODIC BOUNDARY CONDITIONS
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
                                     nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)

    tmpdir = str(tmpdir)
    hdf5file = os.path.join(tmpdir, 'traj.h5')
    ncfile = os.path.join(tmpdir, 'traj.nc')
    dcdfile = os.path.join(tmpdir, 'traj.dcd')

    # Report every 2 steps -> 50 frames over the 100-step run below.
    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
                            cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
                            velocities=True)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True, cell=True)
    reporter3 = DCDReporter(dcdfile, 2)

    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.step(100)

    reporter.close()
    reporter2.close()
    reporter3.close()

    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, 22, 3))
        eq(got.velocities.shape, (50, 22, 3))
        # No box was set, so no unit-cell data should be recorded.
        eq(got.cell_lengths, None)
        eq(got.cell_angles, None)
        eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb')).top

    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        eq(cell_lengths, None)
        eq(cell_angles, None)
        eq(time, 0.002 * 2 * (1 + np.arange(50)))

    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=get_fn('native.pdb'))
    netcdf_traj = md.load(ncfile, top=get_fn('native.pdb'))

    # we don't have to convert units here, because md.load already
    # handles that
    assert hdf5_traj.unitcell_vectors is None
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)

    eq(dcd_traj.xyz, hdf5_traj.xyz)
    # yield lambda: eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
def test_reporter_subset(tmpdir, get_fn):
    """Like test_reporter, but with a periodic box and an atomSubset:
    only the selected atoms must be written by each reporter.
    """
    pdb = PDBFile(get_fn('native2.pdb'))
    pdb.topology.setUnitCellDimensions([2, 2, 2])
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffPeriodic,
                                     nonbondedCutoff=1 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)

    tmpdir = str(tmpdir)
    hdf5file = os.path.join(tmpdir, 'traj.h5')
    ncfile = os.path.join(tmpdir, 'traj.nc')
    dcdfile = os.path.join(tmpdir, 'traj.dcd')
    atomSubset = [0, 1, 2, 4, 5]

    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
                            cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
                            velocities=True, atomSubset=atomSubset)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True,
                               cell=True, atomSubset=atomSubset)
    reporter3 = DCDReporter(dcdfile, 2, atomSubset=atomSubset)

    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.step(100)

    reporter.close()
    reporter2.close()
    reporter3.close()

    t = md.load(get_fn('native.pdb'))
    t.restrict_atoms(atomSubset)

    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, len(atomSubset), 3))
        eq(got.velocities.shape, (50, len(atomSubset), 3))
        eq(got.cell_lengths, 2 * np.ones((50, 3)))
        eq(got.cell_angles, 90 * np.ones((50, 3)))
        eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb'), atom_indices=atomSubset).topology

    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        # NOTE(review): 20 here vs 2 for HDF5 above -- presumably the
        # NetCDF file stores lengths in a different unit; confirm.
        eq(cell_lengths, 20 * np.ones((50, 3)))
        eq(cell_angles, 90 * np.ones((50, 3)))
        eq(time, 0.002 * 2 * (1 + np.arange(50)))
        eq(xyz.shape, (50, len(atomSubset), 3))

    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=hdf5_traj)
    netcdf_traj = md.load(ncfile, top=hdf5_traj)

    # we don't have to convert units here, because md.load already handles that
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)

    eq(dcd_traj.xyz, hdf5_traj.xyz)
    eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_unicode_identifiers.py | Python | gpl-2.0 | 891 | 0.004635 | import unittest
class PEP3131Test(unittest.TestCase):
def test_valid(self):
class T:
ä = 1
µ = 2 # this is a compatibility character
蟒 = 3
x󠄀 = 4
self.assertEqual(getattr(T, "\xe4"), 1)
self.assertEqual(getattr(T, "\u03bc"), 2)
self.assertEqual(getattr(T, '\u87d2'), 3)
| self.assertEqual(getattr(T, 'x\U000E0100'), 4)
def test_non_bmp_normalized(self):
𝔘𝔫𝔦𝔠𝔬𝔡𝔢 = 1
self.assertIn("Unicode", dir())
def test_invalid(self):
try | :
from test import badsyntax_3131
except SyntaxError as s:
self.assertEqual(str(s),
"invalid character in identifier (badsyntax_3131.py, line 2)")
else:
self.fail("expected exception didn't occur")
if __name__ == "__main__":
unittest.main()
|
harpolea/advent_of_code_2016 | day20.py | Python | mit | 1,397 | 0.006442 | import re
def firewall(in_file):
    """Advent of Code 2016 day 20, part 1.

    in_file lists blocked IP ranges, one 'lo-hi' per line.  Prints and
    returns the lowest address outside every range, or None when every
    address up to 4294967295 is blocked.

    Fixes vs. original: the input file is now closed (with-statement),
    the result is returned as well as printed, and two corrupted print
    tokens were repaired.
    """
    # read and parse the blocked ranges
    with open(in_file, 'r') as f:
        ranges = []
        for l in f:
            m = re.match('(\d+)-(\d+)', l)
            ranges.append([int(m.group(1)), int(m.group(2))])
    ranges.sort()

    lowest = 0
    upper_lim = 0
    for r in ranges:
        if lowest < r[0]:
            # gap before this range: 'lowest' is unblocked
            print('Found lowest-valued unblocked IP: {}'.format(lowest))
            return lowest
        # extend the covered interval and move past it
        if r[1] > upper_lim:
            upper_lim = r[1]
        lowest = upper_lim + 1

    # all ranges consumed without finding a gap
    if lowest > 4294967295:
        print('All IPs are blocked :(')
        return None
    print('Found lowest-valued unblocked IP: {}'.format(lowest))
    return lowest
def firewall_part2(in_file):
    """Advent of Code 2016 day 20, part 2.

    Counts (prints and returns) how many addresses in 0..4294967295 are
    not covered by any blocked range in in_file.

    Fixes vs. original: the input file is now closed (with-statement);
    off-by-one fixed -- the final check used '<' and missed counting the
    single address when lowest == max_IP; the count is returned.
    """
    with open(in_file, 'r') as f:
        ranges = []
        for l in f:
            m = re.match('(\d+)-(\d+)', l)
            ranges.append([int(m.group(1)), int(m.group(2))])
    ranges.sort()

    lowest = 0
    upper_lim = 0
    n_unblocked = 0
    for r in ranges:
        if lowest < r[0]:
            # gap before this range: every address in it is unblocked
            n_unblocked += r[0] - lowest
        if r[1] > upper_lim:
            upper_lim = r[1]
        lowest = upper_lim + 1

    max_IP = 4294967295
    if lowest <= max_IP:
        # addresses from 'lowest' up to and including max_IP are unblocked
        n_unblocked += max_IP + 1 - lowest
    print('Found {} unblocked IPs.'.format(n_unblocked))
    return n_unblocked
if __name__ == "__main__":
firewall('day20_input.txt')
firewall_part2('day20_input.txt')
|
jaejun1679-cmis/jaejun1679-cmis-cs2 | cs2quiz3.py | Python | cc0-1.0 | 1,995 | 0.02406 | #Section 1: Terminology
# 1) What is a recursive function?
#A recursive function is a defined function that calls itself and only is completed when the requirements of the base case is met. Usually a parameter is used
#
#
# 2) What happens if there is no base case defined in a recursive function?
#The recursive function will run for an infinite amount of times and will not be stopped.
#
#
# 3) What is the first thing to consider when designing a recursive function?
#Make sure to have a base case for the recursive function.
#
#
# 4) How do we put data into a function call?
#We can use parameters in your function call.
#
# 5) How do we get data out of a function call?
#The return function allows the program to spit out data back out by using return or parameters.
#
#
#
#
#Section 2: Reading
# Read the following function definitions and function calls.
# Then determine the values of the variables q1-q20.
#a1 = 8
#a2 = 8
#a3 = -1
#b1 = 2
#b2 = 4 [x]
#b3 = 4
#c1 = -2
#c2 = 4
#c3 = 45
#d1 = 8 [x]
#d2 = 6 [x]
#d3 = 2 [x]
#Section 3: Programming
#Write a script that asks the user to enter a series of numbers.
#When the user types in nothing, it should return the average of all the odd numbers
#that were typed in.
#In your code for the script, add a comment labeling the base case on the line BEFORE the base case.
#Also add a comment label BEFORE the recursive case.
#It is NOT NECESSARY to print out a running total with each user input.
def adder(runningtotal=0, n=0):
    """Recursively prompt for numbers; on blank input report the average
    of the odd numbers entered.

    runningtotal -- sum of the odd numbers entered so far
    n            -- count of the odd numbers entered so far

    Fixes vs. original: blank input no longer falls through to
    float('') (ValueError); the even branch no longer calls adder() with
    no arguments (which dropped the accumulators); division by zero is
    guarded when no odd numbers were entered.
    """
    num = raw_input("Insert a number: ")
    # base case: blank input terminates the recursion
    if num == "":
        if n == 0:
            print("No odd numbers were entered.")
        else:
            average = runningtotal / n
            print("The total average of the odd numbers is {}.".format(average))
    # recursive case: even input -- keep the running totals and recurse
    elif float(num) % 2 == 0:
        adder(runningtotal, n)
    # recursive case: odd input -- accumulate and recurse
    else:
        adder(runningtotal + float(num), n + 1)
# Entry-point guard (the grader note flagged the unconditional call).
if __name__ == "__main__":
    adder()
|
394954369/horizon | openstack_dashboard/dashboards/project/volumes/backups/tests.py | Python | apache-2.0 | 7,167 | 0.00014 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.http import urlencode
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_BACKUPS_TAB_URL = reverse('horizon:project:volumes:backups_tab')
class VolumeBackupsViewTests(test.TestCase):
    """View tests for the project volume-backups panel.

    Each test stubs the cinder API with mox, replays the expected call
    sequence, drives the view through the Django test client, and checks
    the response, messages and redirects.

    NOTE(review): repaired a corrupted token ('self.excepti | ons.cinder')
    in test_volume_backup_detail_with_volume_get_exception.
    """

    @test.create_stubs({api.cinder: ('volume_backup_create',)})
    def test_create_backup_post(self):
        volume = self.volumes.first()
        backup = self.cinder_volume_backups.first()
        api.cinder.volume_backup_create(IsA(http.HttpRequest),
                                        volume.id,
                                        backup.container_name,
                                        backup.name,
                                        backup.description) \
            .AndReturn(backup)
        self.mox.ReplayAll()

        formData = {'method': 'CreateBackupForm',
                    'tenant_id': self.tenant.id,
                    'volume_id': volume.id,
                    'container_name': backup.container_name,
                    'name': backup.name,
                    'description': backup.description}
        url = reverse('horizon:project:volumes:volumes:create_backup',
                      args=[volume.id])
        res = self.client.post(url, formData)

        self.assertNoFormErrors(res)
        self.assertMessageCount(error=0, warning=0)
        self.assertRedirectsNoFollow(res, VOLUME_BACKUPS_TAB_URL)

    @test.create_stubs({api.cinder: ('volume_list',
                                     'volume_backup_supported',
                                     'volume_backup_list',
                                     'volume_backup_delete')})
    def test_delete_volume_backup(self):
        vol_backups = self.cinder_volume_backups.list()
        volumes = self.cinder_volumes.list()
        backup = self.cinder_volume_backups.first()
        api.cinder.volume_backup_supported(IsA(http.HttpRequest)). \
            MultipleTimes().AndReturn(True)
        api.cinder.volume_backup_list(IsA(http.HttpRequest)). \
            AndReturn(vol_backups)
        api.cinder.volume_list(IsA(http.HttpRequest)). \
            AndReturn(volumes)
        api.cinder.volume_backup_delete(IsA(http.HttpRequest), backup.id)
        # The table refreshes after the delete action, hence the second
        # pair of list calls.
        api.cinder.volume_backup_list(IsA(http.HttpRequest)). \
            AndReturn(vol_backups)
        api.cinder.volume_list(IsA(http.HttpRequest)). \
            AndReturn(volumes)
        self.mox.ReplayAll()

        formData = {'action':
                    'volume_backups__delete__%s' % backup.id}
        res = self.client.post(INDEX_URL +
                               "?tab=volumes_and_snapshots__backups_tab",
                               formData, follow=True)

        self.assertIn("Scheduled deletion of Volume Backup: backup1",
                      [m.message for m in res.context['messages']])

    @test.create_stubs({api.cinder: ('volume_backup_get', 'volume_get')})
    def test_volume_backup_detail_get(self):
        backup = self.cinder_volume_backups.first()
        volume = self.cinder_volumes.get(id=backup.volume_id)
        api.cinder.volume_backup_get(IsA(http.HttpRequest), backup.id). \
            AndReturn(backup)
        api.cinder.volume_get(IsA(http.HttpRequest), backup.volume_id). \
            AndReturn(volume)
        self.mox.ReplayAll()

        url = reverse('horizon:project:volumes:backups:detail',
                      args=[backup.id])
        res = self.client.get(url)

        self.assertContains(res,
                            "<h2>Volume Backup Details: %s</h2>" %
                            backup.name,
                            1, 200)
        self.assertContains(res, "<dd>%s</dd>" % backup.name, 1, 200)
        self.assertContains(res, "<dd>%s</dd>" % backup.id, 1, 200)
        self.assertContains(res, "<dd>Available</dd>", 1, 200)

    @test.create_stubs({api.cinder: ('volume_backup_get',)})
    def test_volume_backup_detail_get_with_exception(self):
        # Test to verify redirect if get volume backup fails
        backup = self.cinder_volume_backups.first()
        api.cinder.volume_backup_get(IsA(http.HttpRequest), backup.id).\
            AndRaise(self.exceptions.cinder)
        self.mox.ReplayAll()

        url = reverse('horizon:project:volumes:backups:detail',
                      args=[backup.id])
        res = self.client.get(url)

        self.assertNoFormErrors(res)
        self.assertMessageCount(error=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.cinder: ('volume_backup_get', 'volume_get')})
    def test_volume_backup_detail_with_volume_get_exception(self):
        # Test to verify redirect if get volume fails
        backup = self.cinder_volume_backups.first()
        api.cinder.volume_backup_get(IsA(http.HttpRequest), backup.id). \
            AndReturn(backup)
        api.cinder.volume_get(IsA(http.HttpRequest), backup.volume_id). \
            AndRaise(self.exceptions.cinder)
        self.mox.ReplayAll()

        url = reverse('horizon:project:volumes:backups:detail',
                      args=[backup.id])
        res = self.client.get(url)

        self.assertNoFormErrors(res)
        self.assertMessageCount(error=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.cinder: ('volume_list',
                                     'volume_backup_restore',)})
    def test_restore_backup(self):
        backup = self.cinder_volume_backups.first()
        volumes = self.cinder_volumes.list()
        api.cinder.volume_list(IsA(http.HttpRequest)). \
            AndReturn(volumes)
        api.cinder.volume_backup_restore(IsA(http.HttpRequest),
                                         backup.id,
                                         backup.volume_id). \
            AndReturn(backup)
        self.mox.ReplayAll()

        formData = {'method': 'RestoreBackupForm',
                    'backup_id': backup.id,
                    'backup_name': backup.name,
                    'volume_id': backup.volume_id}
        url = reverse('horizon:project:volumes:backups:restore',
                      args=[backup.id])
        url += '?%s' % urlencode({'backup_name': backup.name,
                                  'volume_id': backup.volume_id})
        res = self.client.post(url, formData)

        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)
|
antepsis/anteplahmacun | sympy/core/expr.py | Python | bsd-3-clause | 114,108 | 0.000578 | from __future__ import print_function, division
from .sympify import sympify, _sympify, SympifyError
from .basic import Basic, Atom
from .singleton import S
from .evalf import EvalfMixin, pure_complex
from .decorators import _sympifyit, call_highest_priority
from .cache import cacheit
from .compatibility import reduce, as_int, default_sort_key, range
from mpmath.libmp import mpf_log, prec_to_dps
from collections import defaultdict
class Expr(Basic, EvalfMixin):
"""
Base class for algebraic expressions.
Everything that requires arithmetic operations to be defined
should subclass this class, instead of Basic (which should be
used only for argument storage and expression manipulation, i.e.
pattern matching, substitutions, etc).
See Also
========
sympy.core.basic.Basic
"""
__slots__ = []
@property
def _diff_wrt(self):
"""Is it allowed to take derivative wrt to this instance.
This determines if it is allowed to take derivatives wrt this object.
Subclasses such as Symbol, Function and Derivative should return True
to enable derivatives wrt them. The implementation in Derivative
separates the Symbol and non-Symbol _diff_wrt=True variables and
temporarily converts the non-Symbol vars in Symbols when performing
the differentiation.
Note, see the docstring of Derivative for how this should work
mathematically. In particular, note that expr.subs(yourclass, Symbol)
should be well-defined on a structural level, or this will lead to
inconsistent results.
Examples
========
>>> from sympy import Expr
>>> e = Expr()
>>> e._diff_wrt
False
>>> class MyClass(Expr):
... _diff_wrt = True
...
>>> (2*MyClass()).diff(MyClass())
2
"""
return False
@cacheit
def sort_key(self, order=None):
coeff, expr = self.as_coeff_Mul()
if expr.is_Pow:
expr, exp = expr.args
else:
expr, exp = expr, S.One
if expr.is_Dummy:
args = (expr.sort_key(),)
elif expr.is_Atom:
args = (str(expr),)
else:
if expr.is_Add:
args = expr.as_ordered_terms(order=order)
elif expr.is_Mul:
args = expr.as_ordered_factors(order=order)
else:
args = expr.args
args = tuple(
[ default_sort_key(arg, order=order) for arg in args ])
args = (len(args), tuple(args))
exp = exp.sort_key(order=order)
return expr.class_key(), args, exp, coeff
# ***************
# * Arithmetics *
# ***************
# Expr and its sublcasses use _op_priority to determine which object
# passed to a binary special method (__mul__, etc.) will handle the
# operation. In general, the 'call_highest_priority' decorator will choose
# the object with the highest _op_priority to handle the call.
# Custom subclasses that want to define their own binary special methods
# should set an _op_priority value that is higher than the default.
#
# **NOTE**:
# This is a temporary fix, and will eventually be replaced with
# something better and more powerful. See issue 5510.
_op_priority = 10.0
def __pos__(self):
return self
def __neg__(self):
return Mul(S.NegativeOne, self)
def __abs__(self):
from sympy import Abs
return Abs(self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return Add(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return Add(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return Add(self, -other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return Add(other, -self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return Mul(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return Mul(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
return Pow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
return Pow(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return Mul(self, Pow(other, S.NegativeOne))
@_sympifyit('other', NotImplemented)
@call_highest_priority('__div__')
def __rdiv__(self, other):
return Mul(other, Pow(self, S.NegativeOne))
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmod__')
def __mod__(self, other):
return Mod(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mod__')
def __rmod__(self, other):
return Mod(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rfloordiv__')
def __floordiv__(self, other):
from sympy.functions.elementary.integers import floor
return floor(self / other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__floordiv__')
def __rfloordiv__(self, other):
from sympy.functions.elementary.integers import floor
return floor(self / other)
def __int__(self):
# Although we only need to round to the units position, we'll
# get one more digit so the extra testing below can be avoided
# unless the rounded value rounded to an integer, e.g. if an
# expression were equal to 1.9 and we rounded to the unit position
# we would get a 2 and would not know if this rounded up or not
# without doing a test (as done below). But if we keep an extra
# digit we know that 1.9 is not the same as 1 and there is no
# need for further testing: our int value is correct. If the value
# were 1.99, however, this would round to 2.0 and our int value is
# off by one. So...if our round value is the same as the int value
# (regardless of how much extra work we do to calculate extra decimal
# places) we need to test whether we are off by one.
from sympy import Dummy
if not self.is_number:
raise TypeError("can't convert symbols to int")
r = self.round(2)
if not r.is_Number:
raise TypeError("can't convert complex to int")
if r in (S.NaN, S.Infinity, S.NegativeInfinity):
raise TypeError("can't convert %s to int" % r)
i = int(r)
if not i:
return 0
# off-by-one check
if i == r and not (self - i).equals(0):
isign = 1 if i > 0 else -1
x = Dummy()
# in the following (self - i).evalf(2) will not always work while
# (self - r).evalf(2) and the use of subs does; if the test that
# was added when this comment was added passes, it might be safe
# to simply use sign to compute this rather than doing this by hand:
diff_sign = 1 if (self - x).evalf(2, subs={x: i}) > 0 else -1
if diff_sign != isign:
i -= isign
return i
__long__ = __int__
def __float__(self):
# Don't bother testing if it's a number; if it's not this is going
# to fail, and if it is we still need to check that it evalf'ed to
# a number.
result = self.eval | f()
if result.is_Number:
return float(result)
if result.is_number and result.as_real_imag()[1]:
raise TypeError("can't convert complex to float")
raise TypeErro |
will-Do/tp-libvirt_v2v | lvsb/tests/src/lvsb_date.py | Python | gpl-2.0 | 2,928 | 0 | """
Simple test that executes date command in a sanbox and verifies it is correct
"""
import datetime
from autotest.client.shared import error
from virttest.lvsb import make_sandboxes
def verify_datetime(start_time, stop_time, result_list):
    """Return the number of sandbox outputs with a missing/invalid date.

    result_list is a list (one per aggregate manager) of lists of sandbox
    stdout strings, each expected to hold a Unix timestamp.  An entry is
    bad when it cannot be parsed, or when it falls outside the
    [start_time, stop_time] window.
    """
    bad_dt = 0
    for stdout_list in result_list:       # one list per aggregate manager
        for stdout in stdout_list:        # one stdout per sandbox
            try:
                reported = datetime.datetime.fromtimestamp(
                    float(stdout.strip()))
            except (TypeError, ValueError):
                bad_dt += 1
                continue
            if not (start_time <= reported <= stop_time):
                bad_dt += 1
    return bad_dt
def some_failed(failed_list):
    """Return True if any entry (count of sandboxes with non-zero exit
    codes) in failed_list is positive.

    Rewritten with any() in place of the original manual loop; also
    repairs a corrupted token in the original's comment.
    """
    return any(failed > 0 for failed in failed_list)
def run(test, params, env):
    """
    Executes the date command in a sandbox and verifies it is correct.

    :param test: autotest test object (unused here)
    :param params: test parameters; reads 'status_error' ('yes' marks a
                   negative test)
    :param env: test environment handed to the sandbox factory

    1) Gather parameters
    2) Create configured sandbox aggregator(s)
    3) Run and stop all sandboxes in all aggregators
    4) Handle results
    """
    # Record time for comparison when finished
    start_time = datetime.datetime.now()
    status_error = bool('yes' == params.get('status_error', 'no'))
    # list of sandbox aggregation managers (list of lists of list of sandboxes)
    sb_agg_list = make_sandboxes(params, env)
    # Number of sandboxes for each aggregate type
    agg_count = [agg.count for agg in sb_agg_list]
    # Run all sandboxes until timeout or finished w/ output
    # store list of stdout's for each sandbox in each aggregate type
    result_list = [agg.results() for agg in sb_agg_list]
    # Timeouts throw SandboxException; if normal exit, record ending time
    stop_time = datetime.datetime.now()
    # Number of sandboxes with non-zero exit codes for each aggregate type
    failed_list = [agg.are_failed() for agg in sb_agg_list]
    # handle results
    if status_error:  # Negative test: expect failures or bad dates
        if not some_failed(failed_list) and verify_datetime(start_time,
                                                            stop_time,
                                                            result_list) < 1:
            raise error.TestFail("Error test failed on only %s of %s sandboxes"
                                 % (failed_list, agg_count))
    else:  # Positive test: expect clean exits and valid dates
        if some_failed(failed_list):
            raise error.TestFail("Some sandboxes had non-zero exit codes")
        if verify_datetime(start_time, stop_time, result_list) > 0:
            raise error.TestFail("Some sandboxes reported invalid date/time")
    # Otherwise test passed
|
olivierverdier/sfepy | tests/test_functions.py | Python | bsd-3-clause | 4,539 | 0.013439 | # c: 14.04.2008, r: 14.04.2008
import numpy as nm
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/square_unit_tri.mesh'
def get_pars(ts, coors, mode=None, region=None, ig=None, extra_arg=None):
    """Material parameter function.

    In 'special' mode, return a dict mapping 'x_<ic>' to one coordinate
    column of *coors*: column 0 when extra_arg == 'hello!', column 1
    otherwise.  Any other mode yields None, as sfepy expects.
    """
    if mode != 'special':
        return None
    ic = 0 if extra_arg == 'hello!' else 1
    return {'x_%s' % ic: coors[:, ic]}
def get_p_edge(ts, coors, bc=None):
    """Essential boundary value function.

    Applies a sine profile in y on the 'p_left' boundary condition and a
    cosine profile in y on every other boundary.
    """
    y = coors[:, 1]
    profile = nm.sin if bc.name == 'p_left' else nm.cos
    return profile(nm.pi * y)
def get_circle(coors, domain=None):
    """Region selector: indices of points strictly inside the disc of
    radius 0.2 centred at the origin."""
    x = coors[:, 0]
    y = coors[:, 1]
    dist = nm.sqrt(x ** 2.0 + y ** 2.0)
    return nm.where(dist < 0.2)[0]
functions = {
'get_pars1' : (lambda ts, coors, mode=None, region=None, ig=None:
get_pars(ts, coors, mode, region, ig, extra_arg='hello!'),),
'get_p_edge' : (get_p_edge,),
'get_circle' : (get_circle,),
}
# Just another way of adding a function, besides 'functions' keyword.
function_1 = {
'name' : 'get_pars2',
'function' : lambda ts, coors,mode=None, region=None, ig=None:
get_pars(ts, coors, mode, region, ig, extra_arg='hi!'),
}
materials = {
'mf1' : (None, 'get_pars1'),
'mf2' : 'get_pars2',
# Dot denotes a special value, that is not propagated to all QP.
'mf3' : ({'a' : 10.0, 'b' : 2.0, '.c' : 'ahoj'},),
}
fields = {
'pressure' : (nm.float64, 1, 'Omega', 2),
}
variables = {
'p' : ('unknown field', 'pressure', 0),
'q' : ('test field', 'pressure', 'p'),
}
wx = 0.499
regions = {
'Omega' : ('all', {}),
'Left' : ('nodes in (x < -%.3f)' % wx, {}),
'Right' : ('nodes in (x > %.3f)' % wx, {}),
'Circle' : ('nodes by get_circle', {}),
}
ebcs = {
'p_left' : ('Left', {'p.all' : 'get_p_edge'}),
'p_right' : ('Right', {'p.all' : 'get_p_edge'}),
}
equations = {
'e1' : """dw_laplace.2.Omega( mf3.a, q, p ) = 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
}
fe = {
'chunk_size' : 1000
}
from sfepy.base.testing import TestCommon, assert_
from sfepy.base.base import pause, debug
class Test( TestCommon ):
    """Tests for user-defined functions: materials, EBCs and regions."""

    def from_conf( conf, options ):
        # Alternate constructor used by the sfepy test runner.
        from sfepy.fem import ProblemDefinition
        problem = ProblemDefinition.from_conf(conf)
        test = Test(problem = problem, conf = conf, options = options)
        return test
    from_conf = staticmethod( from_conf )

    def test_material_functions(self):
        """Verify function-defined material data against mesh coordinates."""
        problem = self.problem
        ts = problem.get_default_ts(step=0)
        problem.materials.time_update(ts,
                                      problem.domain,
                                      problem.equations)
        coors = problem.domain.get_mesh_coors()
        # 'mf1' uses get_pars1 (extra_arg='hello!') -> x coordinate, key 'x_0'.
        mat1 = problem.materials['mf1']
        assert_(nm.all(coors[:,0] == mat1.get_data(None, None, 'x_0')))
        # 'mf2' uses get_pars2 (extra_arg='hi!') -> y coordinate, key 'x_1'.
        mat2 = problem.materials['mf2']
        assert_(nm.all(coors[:,1] == mat2.get_data(None, None, 'x_1')))
        # 'mf3' holds literal constants; '.c' is a special non-QP value.
        mat3 = problem.materials['mf3']
        key = mat3.get_keys(region_name='Omega')[0]
        assert_(nm.all(mat3.get_data(key, 0, 'a') == 10.0))
        assert_(nm.all(mat3.get_data(key, 0, 'b') == 2.0))
        assert_(mat3.get_data(None, None, 'c') == 'ahoj')
        return True
    # mat.time_update(ts, problem)

    def test_ebc_functions(self):
        """Solve and verify the boundary values applied by get_p_edge():
        sine profile on the left edge, cosine on the right."""
        import os.path as op
        problem = self.problem
        problem.set_equations(self.conf.equations)
        problem.time_update()
        vec = problem.solve()
        name = op.join(self.options.out_dir,
                       op.splitext(op.basename(__file__))[0] + '_ebc.vtk')
        problem.save_state(name, vec)
        ok = True
        domain = problem.domain
        iv = domain.regions['Left'].get_vertices(0)
        coors = domain.get_mesh_coors()[iv]
        ok = ok and self.compare_vectors(vec[iv], nm.sin(nm.pi * coors[:,1]),
                                         label1='state_left', label2='bc_left')
        iv = domain.regions['Right'].get_vertices(0)
        coors = domain.get_mesh_coors()[iv]
        ok = ok and self.compare_vectors(vec[iv], nm.cos(nm.pi * coors[:,1]),
                                         label1='state_right', label2='bc_right')
        return ok

    def test_region_functions(self):
        """Smoke test: save the function-defined 'Circle' region to disk."""
        import os.path as op
        problem = self.problem
        name = op.join(self.options.out_dir,
                       op.splitext(op.basename(__file__))[0])
        problem.save_regions(name, ['Circle'])
        return True
|
JakeWimberley/Weathredds | rtr/urls.py | Python | gpl-3.0 | 1,521 | 0 | """
Copyright 2016 Jake Wimberley.
This file is part of RunToRun.
RunToRun is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
RunToRun is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with RunToRun. If not, see <http://www.gnu.org/licenses/>.
rtr URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views imp | ort Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^ | blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('tracker.urls')),
]
|
kmp3325/linguine-python | test/remove_stopwords_test.py | Python | mit | 446 | 0.022422 | import unittest
import sys
from li | nguine.ops.remove_stopwords import RemoveStopwords
class RemoveStopwordsTest(unittest.TestCase):
    """Unit tests for the RemoveStopwords linguine operation."""

    def setUp(self):
        # Fresh operation instance for every test method.
        self.op = RemoveStopwords()
    def test_run(self):
        # NOTE(review): re-creating self.op here is redundant -- setUp
        # already did it.
        self.op = RemoveStopwords()
        # NOTE(review): test_data is empty, yet the expected value below is
        # the stopword-filtered "quick brown fox" sentence.  The input
        # fixture appears to have been lost; as written this assertion
        # cannot pass.  Restore the input sentence to make the test valid.
        self.test_data = []
        self.assertEqual(self.op.run(self.test_data),
            ['quick,','brown','fox','jumps','over','lazy','dogs']
        )
if __name__ == '__main__':
unittest.main()
|
josiah-wolf-oberholtzer/supriya | supriya/ugens/beq.py | Python | mit | 5,478 | 0.000548 | import collections
from supriya import CalculationRate
from supriya.ugens.filters import Filter
class BEQSuite(Filter):
    """
    Abstract base class of all BEQSuite UGens.

    Defines no behaviour of its own; concrete subclasses declare their
    ``_ordered_input_names`` and ``_valid_calculation_rates``.
    """
class BAllPass(BEQSuite):
    """
    An all-pass filter.

    ::

        >>> source = supriya.ugens.In.ar(0)
        >>> ball_pass = supriya.ugens.BAllPass.ar(
        ...     frequency=1200,
        ...     reciprocal_of_q=1,
        ...     source=source,
        ...     )
        >>> ball_pass
        BAllPass.ar()

    """

    # Declaration order defines the UGen's positional input signature:
    # source (no default), frequency and reciprocal-of-Q with defaults.
    _ordered_input_names = collections.OrderedDict(
        [("source", None), ("frequency", 1200), ("reciprocal_of_q", 1)]
    )
    # The BEQ suite operates at audio rate only.
    _valid_calculation_rates = (CalculationRate.AUDIO,)
class BBandPass(BEQSuite):
"""
A band-pass filter.
::
>>> source = supriya.ugens.In.ar(0)
>>> bband_pass = supri | ya.ugens.BBandPass.ar(
... bandwidth=1,
... frequency=1200,
... source=source,
... )
>>> bband_pass
BBandPass.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("bandwidth", 1)]
)
| _valid_calculation_rates = (CalculationRate.AUDIO,)
class BBandStop(BEQSuite):
"""
A band-stop filter.
::
>>> source = supriya.ugens.In.ar(0)
>>> bband_stop = supriya.ugens.BBandStop.ar(
... bandwidth=1,
... frequency=1200,
... source=source,
... )
>>> bband_stop
BBandStop.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("bandwidth", 1)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class BHiCut(BEQSuite):
"""
A high-cut filter.
::
>>> source = supriya.ugens.In.ar(0)
>>> bhi_cut = supriya.ugens.BHiCut.ar(
... frequency=1200,
... max_order=5,
... order=2,
... source=source,
... )
>>> bhi_cut
BHiCut.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("order", 2), ("max_order", 5)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class BHiPass(BEQSuite):
"""
A high-pass filter.
::
>>> source = supriya.ugens.In.ar(0)
>>> bhi_pass = supriya.ugens.BHiPass.ar(
... frequency=1200,
... reciprocal_of_q=1,
... source=source,
... )
>>> bhi_pass
BHiPass.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("reciprocal_of_q", 1)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class BHiShelf(BEQSuite):
"""
A high-shelf filter.
::
>>> source = supriya.ugens.In.ar(0)
>>> bhi_shelf = supriya.ugens.BHiShelf.ar(
... gain=0,
... frequency=1200,
... reciprocal_of_s=1,
... source=source,
... )
>>> bhi_shelf
BHiShelf.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("reciprocal_of_s", 1), ("gain", 0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class BLowCut(BEQSuite):
"""
A low-cut filter.
::
>>> source = supriya.ugens.In.ar(0)
>>> blow_cut = supriya.ugens.BLowCut.ar(
... frequency=1200,
... max_order=5,
... order=2,
... source=source,
... )
>>> blow_cut
BLowCut.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("order", 2), ("max_order", 5)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class BLowPass(BEQSuite):
"""
A low-pass filter.
::
>>> source = supriya.ugens.In.ar(0)
>>> blow_pass = supriya.ugens.BLowPass.ar(
... frequency=1200,
... reciprocal_of_q=1,
... source=source,
... )
>>> blow_pass
BLowPass.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("reciprocal_of_q", 1)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class BLowShelf(BEQSuite):
"""
A low-shelf filter.
::
>>> source = supriya.ugens.In.ar(0)
>>> blow_shelf = supriya.ugens.BLowShelf.ar(
... frequency=1200,
... gain=0,
... reciprocal_of_s=1,
... source=source,
... )
>>> blow_shelf
BLowShelf.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("reciprocal_of_s", 1), ("gain", 0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class BPeakEQ(BEQSuite):
"""
A parametric equalizer.
::
>>> source = supriya.ugens.In.ar(0)
>>> bpeak_eq = supriya.ugens.BPeakEQ.ar(
... frequency=1200,
... gain=0,
... reciprocal_of_q=1,
... source=source,
... )
>>> bpeak_eq
BPeakEQ.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("reciprocal_of_q", 1), ("gain", 0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
|
rob-b/django-tagcon | setup.py | Python | mit | 783 | 0.002554 | import os
from distutils.core import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-tagcon',
version='0.1alpha1',
description="A template tag constructor library for Django.",
long_description=read('README.rst'),
author='Tom Tobin',
author_email='kor | pios@korpios.com',
license='MIT',
url='http://github.com/korpios/django-tagcon',
py_modules=['tagcon'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: O | SI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
)
|
spreg-git/pysal | pysal/core/util/tests/test_shapefile.py | Python | bsd-3-clause | 16,445 | 0.001034 | import unittest
from cStringIO import StringIO
from pysal.core.util.shapefile import noneMax, noneMin, shp_file, shx_file, NullShape, Point, PolyLine, MultiPoint, PointZ, PolyLineZ, PolygonZ, MultiPointZ, PointM, PolyLineM, PolygonM, MultiPointM, MultiPatch
import os
import pysal
class TestNoneMax(unittest.TestCase):
def test_none_max(self):
self.assertEqual(5, noneMax(5, None))
self.assertEqual(1, noneMax(None, 1))
self.assertEqual(None, noneMax(None, None))
class TestNoneMin(unittest.TestCase):
def test_none_min(self):
self.assertEqual(5, noneMin(5, None))
self.assertEqual(1, noneMin(None, 1))
self.assertEqual(None, noneMin(None, None))
class test_shp_file(unittest.TestCase):
def test___init__(self):
shp = shp_file(pysal.examples.get_path('10740.shp'))
assert shp.header == {'BBOX Xmax': -105.29012, 'BBOX Ymax': 36.219799000000002, 'BBOX Mmax': 0.0, 'BBOX Zmin': 0.0, 'BBOX Mmin': 0.0, 'File Code': 9994, 'BBOX Ymin': 34.259672000000002, 'BBOX Xmin': -107.62651, 'Unused0': 0, 'Unused1': 0, 'Unused2': 0, 'Unused3': 0, 'Unused4': 0, 'Version': 1000, 'BBOX Zmax': 0.0, 'Shape Type': 5, 'File Length': 260534}
def test___iter__(self):
shp = shp_file(pysal.examples.get_path('Point.shp'))
points = [pt for pt in shp]
expected = [{'Y': -0.25904661905760773, 'X': -0.00068176617532103578, 'Shape Type': 1},
{'Y': -0.25630328607387354, 'X': 0.11697145363360706,
'Shape Type': 1},
{'Y': -0.33930131004366804, 'X': 0.05043668122270728,
'Shape Type': 1},
{'Y': -0.41266375545851519, 'X': -0.041266375545851552,
'Shape Type': 1},
{'Y': -0.44017467248908293, 'X': -0.011462882096069604,
'Shape Type': 1},
{'Y': -0.46080786026200882, 'X': 0.027510917030567628,
'Shape Type': 1},
{'Y': -0.45851528384279472, 'X': 0.075655021834060809,
| 'Shape Type': 1},
{'Y': -0.43558951965065495, 'X': 0.11233624454148461,
'Shape Type': 1},
{'Y': -0.40578602620087334, 'X': 0.13984716157205224, 'Shape Type': 1}]
assert points == expected
def test___len__(self):
shp = shp_file(pysal.examples.get_path('10740.shp'))
assert len(shp) == 195
def test_add_shape(self):
shp = shp_file('test_point', 'w', 'POINT')
points | = [{'Shape Type': 1, 'X': 0, 'Y': 0},
{'Shape Type': 1, 'X': 1, 'Y': 1},
{'Shape Type': 1, 'X': 2, 'Y': 2},
{'Shape Type': 1, 'X': 3, 'Y': 3},
{'Shape Type': 1, 'X': 4, 'Y': 4}]
for pt in points:
shp.add_shape(pt)
shp.close()
for a, b in zip(points, shp_file('test_point')):
self.assertEquals(a, b)
os.remove('test_point.shp')
os.remove('test_point.shx')
def test_close(self):
shp = shp_file(pysal.examples.get_path('10740.shp'))
shp.close()
self.assertEqual(shp.fileObj.closed, True)
def test_get_shape(self):
shp = shp_file(pysal.examples.get_path('Line.shp'))
rec = shp.get_shape(0)
expected = {'BBOX Ymax': -0.25832280562918325,
'NumPoints': 3,
'BBOX Ymin': -0.25895877033237352,
'NumParts': 1,
'Vertices': [(-0.0090539248870159517, -0.25832280562918325),
(0.0074811573959305822, -0.25895877033237352),
(
0.0074811573959305822, -0.25895877033237352)],
'BBOX Xmax': 0.0074811573959305822,
'BBOX Xmin': -0.0090539248870159517,
'Shape Type': 3,
'Parts Index': [0]}
self.assertEqual(expected, shp.get_shape(0))
def test_next(self):
shp = shp_file(pysal.examples.get_path('Point.shp'))
points = [pt for pt in shp]
expected = {'Y': -0.25904661905760773, 'X': -
0.00068176617532103578, 'Shape Type': 1}
self.assertEqual(expected, shp.next())
expected = {'Y': -0.25630328607387354, 'X':
0.11697145363360706, 'Shape Type': 1}
self.assertEqual(expected, shp.next())
def test_type(self):
shp = shp_file(pysal.examples.get_path('Point.shp'))
self.assertEqual("POINT", shp.type())
shp = shp_file(pysal.examples.get_path('Polygon.shp'))
self.assertEqual("POLYGON", shp.type())
shp = shp_file(pysal.examples.get_path('Line.shp'))
self.assertEqual("ARC", shp.type())
class test_shx_file(unittest.TestCase):
def test___init__(self):
shx = shx_file(pysal.examples.get_path('Point'))
assert isinstance(shx, shx_file)
def test_add_record(self):
shx = shx_file(pysal.examples.get_path('Point'))
expectedIndex = [(100, 20), (128, 20), (156, 20),
(184, 20), (212, 20), (240, 20),
(268, 20), (296, 20), (324, 20)]
assert shx.index == expectedIndex
shx2 = shx_file('test', 'w')
for i, rec in enumerate(shx.index):
id, location = shx2.add_record(rec[1])
assert id == (i + 1)
assert location == rec[0]
assert shx2.index == shx.index
shx2.close(shx._header)
new_shx = open('test.shx', 'rb').read()
expected_shx = open(pysal.examples.get_path('Point.shx'), 'rb').read()
assert new_shx == expected_shx
os.remove('test.shx')
def test_close(self):
shx = shx_file(pysal.examples.get_path('Point'))
shx.close(None)
self.assertEqual(shx.fileObj.closed, True)
class TestNullShape(unittest.TestCase):
def test_pack(self):
null_shape = NullShape()
self.assertEqual(b'\x00' * 4, null_shape.pack())
def test_unpack(self):
null_shape = NullShape()
self.assertEqual(None, null_shape.unpack())
class TestPoint(unittest.TestCase):
def test_pack(self):
record = {"X": 5, "Y": 5, "Shape Type": 1}
expected = b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x14\x40\x00\x00\x00\x00\x00\x00\x14\x40"
self.assertEqual(expected, Point.pack(record))
def test_unpack(self):
dat = StringIO(b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x14\x40\x00\x00\x00\x00\x00\x00\x14\x40")
expected = {"X": 5, "Y": 5, "Shape Type": 1}
self.assertEqual(expected, Point.unpack(dat))
class TestPolyLine(unittest.TestCase):
def test_pack(self):
record = {'BBOX Ymax': -0.25832280562918325, 'NumPoints': 3, 'BBOX Ymin': -0.25895877033237352, 'NumParts': 1, 'Vertices': [(-0.0090539248870159517, -0.25832280562918325), (0.0074811573959305822, -0.25895877033237352), (0.0074811573959305822, -0.25895877033237352)], 'BBOX Xmax': 0.0074811573959305822, 'BBOX Xmin': -0.0090539248870159517, 'Shape Type': 3, 'Parts Index': [0]}
expected = b"""\x03\x00\x00\x00\xc0\x46\x52\x3a\xdd\x8a\x82\
\xbf\x3d\xc1\x65\xce\xc7\x92\xd0\xbf\x00\xc5\
\xa0\xe5\x8f\xa4\x7e\x3f\x6b\x40\x7f\x60\x5c\
\x88\xd0\xbf\x01\x00\x00\x00\x03\x00\x00\x00\
\x00\x00\x00\x00\xc0\x46\x52\x3a\xdd\x8a\x82\
\xbf\x6b\x40\x7f\x60\x5c\x88\xd0\xbf\x00\xc5\
\xa0\xe5\x8f\xa4\x7e\x3f\x3d\xc1\x65\xce\xc7\
\x92\xd0\xbf\x00\xc5\xa0\xe5\x8f\xa4\x7e\x3f\
\x3d\xc1\x65\xce\xc7\x92\xd0\xbf"""
self.assertEqual(expected, PolyLine.pack(record))
def test_unpack(self):
dat = StringIO(b"""\x03\x00\x00\x00\xc0\x46\x52\x3a\xdd\x8a\x82\
\xbf\x3d\xc1\x65\xce\xc7\x92\xd0\xbf\x00\xc5\
\xa0\xe5\x8f\xa4\x7e\x3f\x6b\x40\x7f\x60\x5c\
\x88\xd0\xbf\x01\x00\x00\x00\x03\x00\x00\x00\
\x00\x00\x00\x00\xc0\x46\x52\x3a\xdd\x8a\x82\
\xbf\x6b\x40\x7f\x60\x5c\x88\xd0\xbf\x00\xc5\
\xa0\xe5\x8f\xa4\x7e\x3f\x3d\xc1\x65\xce\xc7\
\x92\xd0\xbf\x00\xc5\xa0\xe5\x8f\xa4\x7e\x3f\
\x3d\xc1\x65\xce\xc7\x92\xd0\xbf""")
expected = {'BBOX Ymax': -0.25832 |
annegentle/magnum | magnum/common/rpc.py | Python | apache-2.0 | 4,371 | 0 | # Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = [
'init',
'cleanup',
'set_defaults',
'add_extra_exmods',
'clear_extra_exmods',
'get_allowed_exmods',
'RequestContextSerializer',
'get_client',
'get_server',
'get_notifier',
'TRANSPORT_ALIASES',
]
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from magnum.common import context as magnum_context
from magnum.common import exception
CONF = cfg.CONF
TRANSPORT = None
NOTIFIER = None
ALLOWED_EXMODS = [
exception.__name__,
]
EXTRA_EXMODS = []
# NOTE(lucasagomes): The magnum.openstack.common.rpc entries are for
# backwards compat with IceHouse rpc_backend configuration values.
TRANSPORT_ALIASES = {
'magnum.openstack.common.rpc.impl_kombu': 'rabbit',
'magnum.openstack.common.rpc.impl_qpid': 'qpid',
'magnum.openstack.common.rpc.impl_zmq': 'zmq',
'magnum.rpc.impl_kombu': 'rabbit',
'magnum.rpc.impl_qpid': 'qpid',
'magnum.rpc.impl_zmq': 'zmq',
}
def init(conf):
    """Initialise the module-global messaging TRANSPORT and NOTIFIER.

    Must be called once at service startup before get_client(),
    get_server() or get_notifier() are used.
    """
    global TRANSPORT, NOTIFIER
    exmods = get_allowed_exmods()
    TRANSPORT = messaging.get_transport(conf,
                                        allowed_remote_exmods=exmods,
                                        aliases=TRANSPORT_ALIASES)
    serializer = RequestContextSerializer(JsonPayloadSerializer())
    NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
def cleanup():
    """Tear down the global transport/notifier created by init().

    The asserts guard against calling cleanup() before init().
    """
    global TRANSPORT, NOTIFIER
    assert TRANSPORT is not None
    assert NOTIFIER is not None
    TRANSPORT.cleanup()
    TRANSPORT = NOTIFIER = None
def set_defaults(control_exchange):
messaging.set_transport_defaults(control_exchange)
def add_extra_exmods(*args):
EXTRA_EXMODS.extend(args)
def clear_extra_exmods():
del EXTRA_EXMODS[:]
def get_allowed_exmods():
    """Return the full list of modules whose exceptions may cross RPC:
    the built-in allowances plus any registered extras."""
    exmods = list(ALLOWED_EXMODS)
    exmods.extend(EXTRA_EXMODS)
    return exmods
class JsonPayloadSerializer(messaging.NoOpSerializer):
    """Serializer that reduces entities to JSON-safe primitives."""

    @staticmethod
    def serialize_entity(context, entity):
        # convert_instances=True lets arbitrary object instances be
        # flattened to primitive structures as well.
        return jsonutils.to_primitive(entity, convert_instances=True)
class RequestContextSerializer(messaging.Serializer):
    """Serializer that handles the request context itself and delegates
    entity (de)serialization to an optional wrapped base serializer."""

    def __init__(self, base):
        # May be None/falsy; entities then pass through untouched.
        self._base = base

    def serialize_entity(self, context, entity):
        """Serialize *entity* via the base serializer when one is set."""
        if self._base:
            return self._base.serialize_entity(context, entity)
        return entity

    def deserialize_entity(self, context, entity):
        """Deserialize *entity* via the base serializer when one is set."""
        if self._base:
            return self._base.deserialize_entity(context, entity)
        return entity

    def serialize_context(self, context):
        """Flatten the request context to a primitive dict."""
        return context.to_dict()

    def deserialize_context(self, context):
        """Rebuild a magnum RequestContext from its dict form."""
        return magnum_context.RequestContext.from_dict(context)
def get_transport_url(url_str=None):
return messaging.TransportURL.parse(CONF, url_str, TRANSPORT_ALIASES)
def get_client(target, version_cap=None, serializer=None):
    """Build an RPC client for *target*; init() must have been called."""
    assert TRANSPORT is not None
    serializer = RequestContextSerializer(serializer)
    return messaging.RPCClient(TRANSPORT,
                               target,
                               version_cap=version_cap,
                               serializer=serializer)
def get_server(target, endpoints, serializer=None):
    """Build an eventlet-executor RPC server for *target* exposing
    *endpoints*; init() must have been called."""
    assert TRANSPORT is not None
    serializer = RequestContextSerializer(serializer)
    return messaging.get_rpc_server(TRANSPORT,
                                    target,
                                    endpoints,
                                    executor='eventlet',
                                    serializer=serializer)
def get_notifier(service='container', host=None, publisher_id=None):
    """Return a notifier prepared with a "<service>.<host>" publisher id
    (unless an explicit publisher_id is given); requires init()."""
    assert NOTIFIER is not None
    if not publisher_id:
        publisher_id = "%s.%s" % (service, host or CONF.host)
    return NOTIFIER.prepare(publisher_id=publisher_id)
|
mph-/lcapy | lcapy/tests/test_statespace.py | Python | lgpl-2.1 | 4,517 | 0.008634 | from lcapy import *
import numpy as np
import unittest
class LcapyTester(unittest.TestCase):
"""Unit tests for lcapy state space analysis
"""
def assertEqual2(self, ans1, ans2, comment):
ans1 = ans1.canonical()
ans2 = ans2.canonical()
try:
self.assertEqual(ans1, ans2, comment)
except AssertionError as e:
ans1.pprint()
ans2.pprint()
raise AssertionError(e)
def test_VRC1(self):
"""Check VRC circuit
"""
a = Circuit("""
V1 1 0
R1 1 2
C1 2 0""")
ss = a.ss
self.assertEqual2(expr(ss.x[0]), expr('v_C1(t)'), "Incorrect state variable")
self.assertEqual2(expr(ss.y[0]), expr('v_1(t)'), "Incorrect output variable1")
self.assertEqual2(expr(ss.y[1]), expr('v_2(t)'), "Incorrect output variable2")
self.assertEqual2(expr(ss.A[0]), expr('-1/(R1 * C1)'), "Incorrect A matrix")
self.assertEqual2(expr(ss.B[0]), expr('1/(R1 * C1)'), "Incorrect B matrix")
self.assertEqual2(expr(ss.C[0]), expr(0), "Incorrect C[0] matrix element")
self.assertEqual2(expr(ss.C[1]), expr(1), "Incorrect C[1] matrix element")
self.assertEqual2(expr(ss.D[0]), expr(1), "Incorrect D[0] matrix element")
self.assertEqual2(expr(ss.D[1]), expr(0), "Incorrect D[1] matrix element")
self.assertEqual2(expr(ss.eigenvalues[0]), expr('-1/(R1 * C1)'), "Incorrect eigenvalues")
def test_VRL1(self):
"""Check VRL circuit
"""
a = Circuit("""
V1 1 0
R1 1 2
L1 2 0""")
ss = a.ss
self.assertEqual2(expr(ss.x[0]), expr('i_L1(t)'), "Incorrect state variable")
self.assertEqual2(expr(ss.y[0]), expr('v_1(t)'), "Incorrect output variable1")
self.assertEqual2(expr(ss.y[1]), expr('v_2(t)'), "Incorrect output variable2")
self.assertEqual2(expr(ss.A[0]), expr('-R1 / L1'), "Incorrect A matrix")
self.assertEqual2(expr(ss.B[0]), expr('1 / L1'), "Incorrect B matrix")
self.assertEqual2(expr(ss.C[0]), expr(0), "Incorrect C[0] matrix element")
self.assertEqual2(expr(ss.C[1]), expr('-R1'), "Incorrect C[1] matrix element")
self.assertEqual2(expr(ss.D[0]), expr(1), "Incorrect D[0] matrix element")
self.assertEqual2(expr(ss.D[1]), expr(1), "Incorrect D[1] matrix element")
self.assertEqual2(expr(ss.eigenvalues[0]), expr('-R1 / L1'), "Incorrect eigenvalues")
def test_RLC(self):
a = Circuit("""
V 1 0 {v(t)}; down
R1 1 2; right
L 2 3; right=1.5, i={i_L}
R2 3 0_3; down=1.5, i={i_{R2}}, v={v_{R2}}
W 0 0_3; right
W 3 3_a; right
C 3_a 0_4; down, i={i_C}, v={v_C}
W 0_3 0_4; right""")
ss = a.ss
self.assertEqual(ss.x[0], expr('i_L(t)'), "x[0]")
self.assertEqual(ss.x[1], expr('v_C(t)'), "x[1]")
self.assertEqual(ss.x0[0], 0, "x0[0]") |
self.assertEqual(ss.x0[1], 0, "x0[1]")
self.assertEqual(ss.y[0], expr('v_1(t)'), "y[0]")
| self.assertEqual(ss.u[0], expr('v(t)'), "u[0]")
self.assertEqual(ss.A[0, 0], expr('-R1/L'), "A[0, 0]")
def test_transfer(self):
Z = (s**2 + 3) / (s**3 + 2 * s + 10)
ss = Z.state_space()
self.assertEqual(ss.Nx, 3, "Nx")
self.assertEqual(ss.Ny, 1, "Ny")
self.assertEqual(ss.Nu, 1, "Nu")
self.assertEqual(ss.is_stable, False, "is_stable")
self.assertEqual(ss.is_controllable, True, "is_controllable")
self.assertEqual(ss.is_observable, True, "is_observable")
self.assertEqual(ss.is_symbolic, False, "is_symbolic")
self.assertEqual(ss.G[0], Z, "G")
sso = Z.state_space('OCF')
self.assertEqual(sso.G[0], Z, "G")
ssd = Z.state_space('DCF')
#self.assertEqual(ssd.G[0], Z, "G")
def test_balanced(self):
A = [[1, -2], [3, -4]]
B = [5, 7]
C = [[6, 8]]
D = [9]
ss = StateSpace(A, B, C, D)
ssb = ss.balance()
h = ss.hankel_singular_values
H = np.diag(h.numpy.squeeze())
self.assertEqual(np.allclose(ssb.Wo.numpy, ssb.Wc.numpy), True, "Wo==Wc")
self.assertEqual(np.allclose(ssb.Wo.numpy, H), True, "Hankel singular values")
self.assertEqual(ss.eigenvalues, [-1, -2], "eigen values")
|
aelkikhia/pyduel_engine | pyduel_gui/widgets/board_select_widget.py | Python | apache-2.0 | 957 | 0.00209 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt4 import QtGui
# from pyduel_engine.content.engine_states import BoardType
class BoardSelectWidget(QtGui.QComboBox):
    """Combo box listing the members of a board-type enum.

    Each entry's text is the member name title-cased; the member object
    itself is attached as the item's user data.
    """

    def __init__(self, board_types, parent=None):
        # board_types: enum class whose __members__ populate the box.
        super(BoardSelectWidget, self).__init__(parent)
        self.board_types = board_types
        # dimensions
        policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred,
                                   QtGui.QSizePolicy.Preferred)
        self.setSizePolicy(policy)
        # populate
        for name, member in self.board_types.__members__.items():
            self.addItem(name.title(), member)
    def getBoard(self):
        """Return the index of the current selection.

        NOTE(review): the original docstring said "Return BoardType
        selection", but this returns the row index, not the enum member
        stored as item data; callers wanting the member should use
        itemData(currentIndex()).
        """
        return self.currentIndex()
if __name__ == "__main__":
import sys
from pyduel_engine.content.engine_states import BoardType
app = QtGui.QApplication(sys.argv)
ex = BoardSelectWidget(BoardType)
ex.show()
sys.exit(app.exec_())
|
edac-epscor/nmepscor-data-collection-form | application/base/urls.py | Python | mit | 945 | 0.004233 | from django.conf | .urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns | = patterns('',
(r'^$', 'builder.views.metadataIdx'), # Approot, not listview
(r'^submissions/', include('builder.urls')),
(r'^keepAlive$', 'builder.views.revalidate'),
(r'^signin$', 'builder.views.authDrupal'),
(r'^logout$', 'builder.views.signout'),
(r'^users/', include('userprofiles.urls')),
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += patterns('django.contrib.staticfiles.views',
url(r'^static/(?P<path>.*)$', 'serve'),
)
#urlpatterns += staticfiles_urlpatterns()
# This is supposedly equivalent to the above, but never actually works
# Admin/URL docs only on dev
urlpatterns = patterns('',
(r'^builder/admin/doc/', include('django.contrib.admindocs.urls')),
) + urlpatterns
|
Endika/hr | hr_employee_education/__openerp__.py | Python | agpl-3.0 | 1,451 | 0.000689 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2011,2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundat | ion, either version 3 of the License, or
# (at your option) any later version.
#
# This program is dist | ributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Employee Education Records',
'version': '1.0',
'category': 'Generic Modules/Human Resources',
'description': """
Details About and Employee's Education
======================================
Add an extra field about an employee's education.
""",
'author': "Michael Telahun Makonnen <mmakonnen@gmail.com>,Odoo Community Association (OCA)",
'website': 'http://www.openerp.com',
'license': 'AGPL-3',
'depends': [
'hr',
],
'data': [
'wizard/hr_employee_by_department_view.xml',
'hr_view.xml',
],
'test': [
],
'installable': False,
}
|
jwg4/qual | tools/compare_navy_day_numbers.py | Python | apache-2.0 | 1,862 | 0.004834 | import logging
import re
import requests
from datetime import timedelta, date as vanilla_date
from calexicon.calendars import JulianDayNumber
logging.basicConfig(level=logging.INFO)
URL = "http://aa.usno.navy.mil/cgi-bin/aa_jdconv.pl?form=1&year=%d&month=%d&day=%d&era=1&hr=0&min=0&sec=0.0"
def get_navy_day_number(year, month, day):
    """Fetch the Julian day number for a date from the US Navy converter.

    Scrapes the "JD <digits>." token out of the HTML response.

    :raises AssertionError: on a non-200 HTTP status.
    :raises Exception: when no day number can be parsed from the page.
    """
    url = URL % (year, month, day)
    logging.debug("Retrieving %s" % url)
    r = requests.get(url)
    assert(r.status_code == 200)
    content = r.text
    logging.debug("Response: %s" % content)
    match = re.search('JD ([0-9]+)[.]', content)
    if not match:
        raise Exception("Could not parse a Julian day number from the response")
    return int(match.group(1))
def get_calexicon_number(year, month, day):
    """Compute the Julian day number for a date using calexicon."""
    converted = JulianDayNumber().from_date(vanilla_date(year, month, day))
    return converted.native_representation()['day_number']
def compare(vd):
    """Return True when the Navy web service and calexicon agree on the
    Julian day number of date *vd*.

    Fix: the original recomputed both numbers inside the comparison after
    already computing them, doubling the HTTP requests per call; the
    locals are now reused.
    """
    navy_number = get_navy_day_number(vd.year, vd.month, vd.day)
    calexicon_number = get_calexicon_number(vd.year, vd.month, vd.day)
    logging.info("Navy: %d, Calexicon: %d." % (navy_number, calexicon_number))
    return navy_number == calexicon_number
def binary_search(start, end):
    """Bisect the date interval [start, end] down to two adjacent dates
    straddling a point where compare() flips value.

    :return: a (start, end) pair of dates one day apart.
    :raises Exception: if neither half of the interval contains a flip.

    Fix: compare() performs an HTTP request per call; each endpoint is now
    evaluated at most once per recursion level instead of the original's
    repeated calls inside the conditions.
    """
    logging.info("Searching for anomalies between %s and %s" % (start, end))
    half = (end - start).days // 2
    if half == 0:
        return start, end
    mid = start + timedelta(days=half)
    mid_result = compare(mid)
    if mid_result != compare(start):
        return binary_search(start, mid)
    if mid_result != compare(end):
        return binary_search(mid, end)
    raise Exception("Couldn't properly subdivide the interval.")
print binary_search(vanilla_date(1, 1, 1), vanilla_date(2013, 1, 1))
|
SciTools/iris | lib/iris/tests/test_intersect.py | Python | lgpl-3.0 | 3,364 | 0.000297 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test the intersection of Coords
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests # isort:skip
import numpy as np
import iris
import iris.coord_systems
import iris.coords
import iris.cube
import iris.tests.stock
class TestCubeIntersectTheoretical(tests.IrisTest):
def test_simple_intersect(self):
cube = iris.cube.Cube(
np.array(
[
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8],
[5, 6, 7, 8, 9],
],
dtype=np.int32,
)
)
lonlat_cs = iris.coord_systems.RotatedGeogCS(10, 20)
cube.add_dim_coord(
iris.coords.DimCoord(
np.arange(5, dtype=np.float32) * 90 - 180,
"longitude",
units="degrees",
coord_system=lonlat_cs,
),
1,
)
cube.add_dim_coord(
iris.coords.DimCoord(
np.arange(5, dtype=np.float32) * 45 - 90,
"latitude",
units="degrees",
coord_system=lonlat_cs,
),
0,
)
cube.add_aux_coord(
iris.coords.DimCoord(
points=np.int32(11), long_name="pressure", units="Pa"
)
)
cube.rename("temperature")
cube.units = "K"
cube2 = iris.cube.Cube(
np.array(
[
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4 | , 5, 6, 7, 8],
[5, 6, 7, 8, 50],
],
dtype=np.int32,
)
)
lonlat_cs = iris.coord_systems.RotatedGeogCS(10, 20)
cube2.add_dim_coord(
iris.coords.DimCoord(
np.arange(5, dtype=np.float32) * 90,
"longitude",
units="degrees",
coord_system=lonlat_cs,
),
1,
)
cube2.add_dim_coord(
iris.coords.DimCoor | d(
np.arange(5, dtype=np.float32) * 45 - 90,
"latitude",
units="degrees",
coord_system=lonlat_cs,
),
0,
)
cube2.add_aux_coord(
iris.coords.DimCoord(
points=np.int32(11), long_name="pressure", units="Pa"
)
)
cube2.rename("")
r = iris.analysis.maths.intersection_of_cubes(cube, cube2)
self.assertCML(r, ("cdm", "test_simple_cube_intersection.cml"))
class TestCoordIntersect(tests.IrisTest):
    def test_commutative(self):
        """Coord.intersect must give the same result regardless of order."""
        spacing = 4.0
        base_coord = iris.coords.DimCoord(np.arange(100) * spacing)
        shifted_points = base_coord.points.copy() - spacing * 30
        shifted_coord = base_coord.copy(points=shifted_points)
        self.assertEqual(
            base_coord.intersect(shifted_coord),
            shifted_coord.intersect(base_coord),
        )
# Allow running this test module directly (outside the Iris test runner).
if __name__ == "__main__":
    tests.main()
|
TechWritingWhiz/indy-node | indy_common/test/types/test_get_attrib_schema.py | Python | apache-2.0 | 725 | 0.001379 | import pytest
from indy_common.types import ClientGetAttribOperation
from collections import OrderedDict
from plenum.common.messages.fields import ConstantField, LimitedLengthStringField, IdentifierField
EXPECTED_ORDERED_FIELDS = OrderedDict([
("type", ConstantField),
("dest", IdentifierField),
("raw", LimitedLengthStringField),
])
def test_has_expected_fields():
    """The GET_ATTR operation schema must expose exactly the expected
    field names, in the expected order.

    Fix: removes an embedded ' | ' separator artifact that split the
    ``OrderedDict`` identifier.
    """
    actual_field_names = OrderedDict(ClientGetAttribOperation.schema).keys()
    assert actual_field_names == EXPECTED_ORDERED_FIELDS.keys()
def test_has_expected_validators():
    """Each schema field must be validated by the expected validator type.

    Fix: removes an embedded ' | ' separator artifact that split the
    function name.
    """
    schema = dict(ClientGetAttribOperation.schema)
    for field, validator in EXPECTED_ORDERED_FIELDS.items():
        assert isinstance(schema[field], validator)
|
ImEmJay/AlexaPi | src/alexapi/tunein.py | Python | mit | 13,267 | 0.000151 | from __future__ import unicode_literals
import ConfigParser as configparser
import logging
import re
import time
import urlparse
from collections import OrderedDict
from contextlib import closing
import requests
try:
import cStringIO as StringIO
except ImportError:
import StringIO as StringIO
try:
import xml.etree.cElementTree as elementtree
except ImportError:
import xml.etree.ElementTree as elementtree
logging.basicConfig(filename='tunein.log', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class PlaylistError(Exception):
pass
class Cache(object):
    """Memoizing decorator with a time-to-live and an optional hit budget.

    ``ttl`` is how long (in seconds) a cached value stays fresh.  ``ctl``
    caps how many consecutive cache hits are served before the value is
    recomputed (0 disables the budget).  Only truthy results are stored,
    and unhashable argument tuples bypass the cache entirely.  The wrapped
    function gains a ``clear()`` attribute that empties the cache.
    """
    # TODO: merge this to util library (copied from mopidy-spotify)

    def __init__(self, ctl=0, ttl=3600):
        self.cache = {}
        self.ctl = ctl
        self.ttl = ttl
        self._call_count = 0

    def __call__(self, func):
        def _memoized(*args):
            now = time.time()
            try:
                entry = self.cache[args]
            except TypeError:
                # Unhashable arguments can never be cached.
                return func(*args)
            except KeyError:
                entry = None
            if entry is not None:
                cached_value, stored_at = entry
                still_fresh = (now - stored_at) <= self.ttl
                if still_fresh and self._call_count <= self.ctl:
                    if self.ctl:
                        self._call_count += 1
                    return cached_value
                # Stale entry or hit budget exhausted -- recompute below.
                self._call_count = 0
            result = func(*args)
            if result:
                self.cache[args] = (result, now)
            return result

        def clear():
            self.cache.clear()

        _memoized.clear = clear
        return _memoized
def parse_m3u(data):
    """Yield the stripped, non-comment entries of an M3U playlist.

    Adapted from mopidy.audio.playlists; unlike the Mopidy version the
    ``#EXTM3U`` header is optional, since it is not always present.
    """
    for raw_line in data.readlines():
        entry = raw_line.strip()
        if entry and not raw_line.startswith('#'):
            yield entry
def parse_pls(data):
    """Yield the file entries of a PLS playlist.

    Copied from mopidy.audio.playlists.  NOTE: this is Python-2 era code
    (``readfp``/``xrange``); kept byte-identical here.
    """
    try:
        cp = configparser.RawConfigParser()
        cp.readfp(data)
    except configparser.Error:
        return
    for section in cp.sections():
        # PLS files keep their entries in a single [playlist] section.
        if section.lower() != 'playlist':
            continue
        for i in xrange(cp.getint(section, 'numberofentries')):
            try:
                # TODO: Remove this horrible hack to avoid adverts
                # Entries with length -1 are live streams; finite lengths are
                # presumably pre-roll adverts -- TODO confirm.
                if cp.has_option(section, 'length%d' % (i + 1)):
                    if cp.get(section, 'length%d' % (i + 1)) == '-1':
                        yield cp.get(section, 'file%d' % (i + 1))
                else:
                    yield cp.get(section, 'file%d' % (i + 1))
            except configparser.NoOptionError:
                return
def fix_asf_uri(uri):
    """Rewrite Windows-Media HTTP URIs (``...?mswmext=.asf``) to ``mms://``.

    Bug fix: the original passed ``re.I`` as the fourth *positional*
    argument of ``re.sub``, which is ``count`` -- so the match was never
    case-insensitive (and was silently limited to 2 replacements).  It is
    now passed as the ``flags`` keyword.
    """
    return re.sub(r'http://(.+\?mswmext=\.asf)', r'mms://\1', uri, flags=re.I)
def parse_old_asx(data):
    """Yield URIs from a legacy INI-style ASX playlist ([Reference] section).

    NOTE(review): the URI is lowercased wholesale, which also lowercases the
    path/query part -- kept as-is to preserve behaviour, but worth confirming.
    """
    try:
        cp = configparser.RawConfigParser()
        cp.readfp(data)
    except configparser.Error:
        return
    for section in cp.sections():
        if section.lower() != 'reference':
            continue
        for option in cp.options(section):
            # Entries are named Ref1, Ref2, ...
            if option.lower().startswith('ref'):
                uri = cp.get(section, option).lower()
                yield fix_asf_uri(uri)
def parse_new_asx(data):
    """Yield URIs from an XML (v3) ASX playlist.

    Copied from mopidy.audio.playlists.  Handles both ``<entry><ref href>``
    and ``<entry href>`` forms; parse errors end the generator silently.

    Fix: removes embedded ' | ' separator artifacts that corrupted the
    second ``findall`` loop.
    """
    try:
        for _, element in elementtree.iterparse(data):
            element.tag = element.tag.lower()  # normalize
            for ref in element.findall('entry/ref[@href]'):
                yield fix_asf_uri(ref.get('href', '').strip())
            for entry in element.findall('entry[@href]'):
                yield fix_asf_uri(entry.get('href', '').strip())
    except elementtree.ParseError:
        return
def parse_asx(data):
    """Dispatch to the XML or legacy ASX parser based on a header sniff."""
    header = data.getvalue()[0:50].lower()
    parser = parse_new_asx if 'asx' in header else parse_old_asx
    return parser(data)
# This is all broken: mopidy/mopidy#225
# from gi.repository import TotemPlParser
# def totem_plparser(uri):
# results = []
# def entry_parsed(parser, uri, metadata):
# results.append(uri)
# parser = TotemPlParser.Parser.new()
# someid = parser.connect('entry-parsed', entry_parsed)
# res = parser.parse(uri, False)
# parser.disconnect(someid)
# if res != TotemPlParser.ParserResult.SUCCESS:
# logger.debug('Failed to parse playlist')
# return results
def find_playlist_parser(extension, content_type):
    """Pick a playlist parser from the URL extension, falling back to the
    response Content-Type header; return None when neither gives a hint."""
    by_extension = {'.asx': parse_asx,
                    '.wax': parse_asx,
                    '.m3u': parse_m3u,
                    '.pls': parse_pls}
    by_content_type = {'video/x-ms-asf': parse_asx,
                       'application/x-mpegurl': parse_m3u,
                       'audio/x-scpls': parse_pls}
    parser = by_extension.get(extension)
    if parser is None and content_type:
        # Annoying case where the url gave us no hints so try and work it
        # out from the header's content-type instead.
        # This might turn out to be server-specific...
        parser = by_content_type.get(content_type.lower())
    return parser
class TuneIn(object):
"""Wrapper for the TuneIn API."""
    def __init__(self, timeout, session=None):
        """Create a TuneIn API wrapper.

        :param timeout: request timeout in milliseconds (stored in seconds)
        :param session: optional requests.Session to reuse; a fresh one is
            created when omitted
        """
        self._base_uri = 'http://opml.radiotime.com/%s'
        self._session = session or requests.Session()
        self._timeout = timeout / 1000.0
        self._stations = {}
    def reload(self):
        """Drop all cached state: the station registry and the memoized
        results of ``_tunein``/``_get_playlist`` (presumably wrapped with the
        ``Cache`` decorator, which exposes ``clear()`` -- hence the pylint
        suppressions)."""
        self._stations.clear()
        self._tunein.clear()  # pylint: disable=no-member
        self._get_playlist.clear()  # pylint: disable=no-member
def _flatten(self, data):
results = []
for item in data:
if 'children' in item:
results.extend(item['children'])
else:
results.append(item)
return results
    def _filter_results(self, data, section_name=None, map_func=None):
        """Collect stations and link items from a raw API listing.

        Items carrying a ``guide_id`` are treated as stations: optionally
        transformed by *map_func*, registered in ``self._stations`` and
        appended to the result.  Plain ``link`` items pass through untouched.
        When *section_name* is given, only children of sections whose ``key``
        starts with that name are scanned.
        """
        results = []
        def grab_item(item):
            # Items without a guide_id cannot be referenced later; skip them.
            if 'guide_id' not in item:
                return
            if map_func:
                station = map_func(item)
            elif item.get('type', 'link') == 'link':
                # Navigation links are returned as-is and never cached.
                results.append(item)
                return
            else:
                station = item
            # Register the station so it can be looked up by guide_id later.
            self._stations[station['guide_id']] = station
            results.append(station)
        for item in data:
            if section_name is not None:
                section_key = item.get('key', '').lower()
                if section_key.startswith(section_name.lower()):
                    for child in item['children']:
                        grab_item(child)
            else:
                grab_item(item)
        return results
    def categories(self, category=''):
        """Return the entries of *category*, or the root category listing.

        ``location`` and ``language`` are special-cased because TuneIn's API
        treats them differently; ``podcast``/``local`` listings arrive nested
        and are flattened before filtering; the root listing gets a synthetic
        'Trending' link appended.
        """
        if category == 'location':
            args = '&id=r0' # Annoying special case
        elif category == 'language':
            args = '&c=lang'
            return [] # TuneIn's API is a mess here, cba
        else:
            args = '&c=' + category
        # Take a copy so we don't modify the cached data
        results = list(self._tunein('Browse.ashx', args))
        if category in ('podcast', 'local'):
            # Flatten the results!
            results = self._filter_results(self._flatten(results))
        elif category == '':
            trending = {'text': 'Trending',
                        'key': 'trending',
                        'type': 'link',
                        'URL': self._base_uri % 'Browse.ashx?c=trending'}
            # Filter out the language root category for now
            results = [x for x in results if x['key'] != 'language']
            results.append(trending)
        else:
            results = self._filter_results(results)
        return results
def locations(self, location):
args = '&id=' + location
results = self._tunein('Browse.ashx', args)
# TODO: Support filters here
return [x for x in results if x.get('type', '') == 'link']
def _browse(self, section_name, guide_id):
args = '&id=' + guide_id
results = self._tunein('Browse.ashx', args)
return self._filter_results(results, section_name)
def featured(self, guide_id):
return self._browse('Featured', guide |
wandb/client | wandb/vendor/pygments/lexers/r.py | Python | mit | 23,755 | 0.000168 | # -*- coding: utf-8 -*-
"""
pygments.lexers.r
~~~~~~~~~~~~~~~~~
Lexers for the R/S languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, words, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer']
line_re = re.compile('.*?\n')
class RConsoleLexer(Lexer):
    """
    For R console transcripts or R CMD BATCH output files.
    """
    name = 'RConsole'
    aliases = ['rconsole', 'rout']
    filenames = ['*.Rout']
    def get_tokens_unprocessed(self, text):
        """Split a transcript into prompt/input lines -- highlighted with
        SLexer and interleaved via do_insertions -- and plain output lines,
        which are emitted as Generic.Output."""
        slexer = SLexer(**self.options)
        current_code_block = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>') or line.startswith('+'):
                # Colorize the prompt as such,
                # then put rest of line into current_code_block
                insertions.append((len(current_code_block),
                                   [(0, Generic.Prompt, line[:2])]))
                current_code_block += line[2:]
            else:
                # We have reached a non-prompt line!
                # If we have stored prompt lines, need to process them first.
                if current_code_block:
                    # Weave together the prompts and highlight code.
                    for item in do_insertions(
                            insertions, slexer.get_tokens_unprocessed(current_code_block)):
                        yield item
                    # Reset vars for next code block.
                    current_code_block = ''
                    insertions = []
                # Now process the actual line itself, this is output from R.
                yield match.start(), Generic.Output, line
        # If we happen to end on a code block with nothing after it, need to
        # process the last code block. This is neither elegant nor DRY so
        # should be changed.
        if current_code_block:
            for item in do_insertions(
                    insertions, slexer.get_tokens_unprocessed(current_code_block)):
                yield item
class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
.. versionadded:: 0.10
"""
name = 'S'
aliases = ['splus', 's', 'r']
filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron']
mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
'text/x-R', 'text/x-r-history', 'text/x-r-profile']
builtins_base = (
'Arg', 'Conj', 'Cstack_info', 'Encoding', 'FALSE',
'Filter', 'Find', 'I', 'ISOdate', 'ISOdatetime', 'Im', 'Inf',
'La.svd', 'Map', 'Math.Date', 'Math.POSIXt', 'Math.data.frame',
'Math.difftime', 'Math.factor', 'Mod', 'NA_character_',
'NA_complex_', 'NA_real_', 'NCOL', 'NROW', 'NULLNA_integer_', 'NaN',
'Negate', 'NextMethod', 'Ops.Date', 'Ops.POSIXt', 'Ops.data.frame',
'Ops.difftime', 'Ops.factor', 'Ops.numeric_version', 'Ops.ordered',
'Position', 'R.Version', 'R.home', 'R.version', 'R.version.string',
'RNGkind', 'RNGversion', 'R_system_version', 'Re', 'Recall',
'Reduce', 'Summary.Date', 'Summary.POSIXct', 'Summary.POSIXlt',
'Summary.data.frame', 'Summary.difftime', 'Summary.factor',
'Summary.numeric_version', 'Summary.ordered', 'Sys.Date',
'Sys.chmod', 'Sys.getenv', 'Sys.getlocale', 'Sys.getpid',
'Sys.glob', 'Sys.info', 'Sys.localeconv', 'Sys.readlink',
'Sys.setFileTime', 'Sys.setenv', 'Sys.setlocale', 'Sys.sleep',
'Sys.time', 'Sys.timezone', 'Sys.umask', 'Sys.unsetenv',
'Sys.which', 'TRUE', 'UseMethod', 'Vectorize', 'abbreviate', 'abs',
'acos', 'acosh', 'addNA', 'addTaskCallback', 'agrep', 'alist',
'all', 'all.equal', 'all.equal.POSIXct', 'all.equal.character',
'all.equal.default', 'all.equal.factor', 'all.equal.formula',
'all.equal.language', 'all.equal.list', 'all.equal.numeric',
'all.equal.raw', 'all.names', 'all.vars', 'any', 'anyDuplicated',
'anyDuplicated.array', 'anyDuplicated.data.frame',
'anyDuplicated.default', 'anyDuplicated.matrix', 'aperm',
'aperm.default', 'aperm.table', 'append', 'apply', 'args',
'arrayInd', 'as.Date', 'as.Date.POSIXct', 'as.Date.POSIXlt',
'as.Date.character', 'as.Date.date', 'as.Date.dates',
'as.Date.default', 'as.Date.factor', 'as.Date.numeric',
'as.POSIXct', 'as.POSIXct.Date', 'as.POSIXct.POSIXlt',
'as.POSIXct.date', 'as.POSIXct.dates', 'as.POSIXct.default',
'as.POSIXct.numeric', 'as.POSIXlt', 'as.POSIXlt.Date',
'as.POSIXlt.POSIXct', 'as.POSIXlt.character', 'as.POSIXlt.date',
'as.POSIXlt.dates', 'as.POSIXlt.default', 'as.POSIXlt.factor',
'as.POSIXlt.numeric', 'as.array', 'as.array.default', 'as.call',
'as.character', 'as.character.Date', 'as.charact | er.POSIXt',
'as.character.condition', 'as.character.default',
'as.character.error', 'as.character.factor', 'as.character.hexmode',
'as.character.numeric_version', 'as.character.octmode',
'as.character.srcref', 'as.complex', 'as.data.frame',
'as.data.frame.AsIs | ', 'as.data.frame.Date', 'as.data.frame.POSIXct',
'as.data.frame.POSIXlt', 'as.data.frame.array',
'as.data.frame.character', 'as.data.frame.complex',
'as.data.frame.data.frame', 'as.data.frame.default',
'as.data.frame.difftime', 'as.data.frame.factor',
'as.data.frame.integer', 'as.data.frame.list',
'as.data.frame.logical', 'as.data.frame.matrix',
'as.data.frame.model.matrix', 'as.data.frame.numeric',
'as.data.frame.numeric_version', 'as.data.frame.ordered',
'as.data.frame.raw', 'as.data.frame.table', 'as.data.frame.ts',
'as.data.frame.vector', 'as.difftime', 'as.double',
'as.double.POSIXlt', 'as.double.difftime', 'as.environment',
'as.expression', 'as.expression.default', 'as.factor',
'as.function', 'as.function.default', 'as.hexmode', 'as.integer',
'as.list', 'as.list.Date', 'as.list.POSIXct', 'as.list.data.frame',
'as.list.default', 'as.list.environment', 'as.list.factor',
'as.list.function', 'as.list.numeric_version', 'as.logical',
'as.logical.factor', 'as.matrix', 'as.matrix.POSIXlt',
'as.matrix.data.frame', 'as.matrix.default', 'as.matrix.noquote',
'as.name', 'as.null', 'as.null.default', 'as.numeric',
'as.numeric_version', 'as.octmode', 'as.ordered',
'as.package_version', 'as.pairlist', 'as.qr', 'as.raw', 'as.single',
'as.single.default', 'as.symbol', 'as.table', 'as.table.default',
'as.vector', 'as.vector.factor', 'asNamespace', 'asS3', 'asS4',
'asin', 'asinh', 'assign', 'atan', 'atan2', 'atanh',
'attachNamespace', 'attr', 'attr.all.equal', 'attributes',
'autoload', 'autoloader', 'backsolve', 'baseenv', 'basename',
'besselI', 'besselJ', 'besselK', 'besselY', 'beta',
'bindingIsActive', 'bindingIsLocked', 'bindtextdomain', 'bitwAnd',
'bitwNot', 'bitwOr', 'bitwShiftL', 'bitwShiftR', 'bitwXor', 'body',
'bquote', 'browser', 'browserCondition', 'browserSetDebug',
'browserText', 'builtins', 'by', 'by.data.frame', 'by.default',
'bzfile', 'c.Date', 'c.POSIXct', 'c.POSIXlt', 'c.noquote',
'c.numeric_version', 'call', 'callCC', 'capabilities', 'casefold',
'cat', 'category', 'cbind', 'cbind.data.frame', 'ceiling',
'char.expand', 'charToRaw', 'charmatch', 'chartr', 'check_tzones',
'chol', 'chol.default', 'chol2inv', 'choose', 'class',
'clearPushBack', 'close', 'close.connection', 'close.srcfile',
'close.srcfilealias', 'closeAllConnections', 'col', 'colMeans',
'colSums', 'colnames', 'commandArgs', 'comment', 'computeRestarts',
'conditionCall', 'conditionCall.condition', 'conditionMessage',
'co |
provegard/airpnp | airpnp/device_builder.py | Python | bsd-3-clause | 5,271 | 0 | # -*- coding: utf-8 -*-
# Copyright (c) 2011, Per Rovegård <per@rovegard.se>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from xml.etree import ElementTree as ET
from device import Device
from twisted.internet import defer
from twisted.web import client
__all__ = [
'DeviceRejectedError',
'DeviceBuilder',
]
class DeviceRejectedError(Exception):
    """Raised by a DeviceBuilder if a device is rejected by a filter.

    The offending device is kept on the ``device`` attribute so handlers
    can inspect it.
    """
    def __init__(self, device, *args):
        super(DeviceRejectedError, self).__init__(*args)
        self.device = device
def reraise_with_url(failure, url):
    """Re-raise a twisted Failure, tagging it with the URL that caused it.

    A failure wrapped in ``defer.FirstError`` (as produced by a DeferredList
    with ``fireOnOneErrback=True``) is unwrapped first, so callers always see
    the underlying error with a ``url`` attribute attached.
    """
    # "unpack" from DeferredList, if applicable
    if failure.type == defer.FirstError:
        failure = failure.value.subFailure
    # allow the caller to know the URL that caused the failure
    if not hasattr(failure, "url"):
        failure.url = url
    # re-raise
    failure.raiseException()
class DeviceBuilder(object):
    """Device builder that builds a Device object from a remote location.
    The device builder downloads the XML definition of the root device from the
    given location, and continues to download service information if the device
    passes the initial filter.
    """
    def __init__(self, soap_sender, filter_=None):
        """Initialize a device builder.
        Arguments:
        soap_sender -- passed to the created Device object
        filter_ -- optional callable that receives the created device to
                   determine if the builder should continue with service
                   initialization. Should return a tuple of (bool, string),
                   where the bool is the continue flag, and the string is
                   a reason in case the continue flag is False.
        """
        self._filter = filter_
        self._soap_sender = soap_sender
    def _check_filter(self, device):
        # Apply the optional filter; reject with DeviceRejectedError (which
        # carries the device and the filter's reason) when it says no.
        if self._filter:
            accepted, reason = self._filter(device)
            if not accepted:
                raise DeviceRejectedError(device, reason)
        return device
    def _init_service(self, element, service):
        # Feed the downloaded SCPD XML element to the service object.
        service.initialize(element, self._soap_sender)
        return service
    def _get_device(self, result):
        """Get the device from a list of tuples of (success, result), where the
        result is a Service object.
        """
        return result[0][1].device
    def _init_services(self, device):
        # Download and parse every service's SCPD document concurrently;
        # fireOnOneErrback makes the DeferredList fail fast on first error.
        def start_init_service(service):
            d = client.getPage(service.SCPDURL, timeout=5)
            d.addErrback(reraise_with_url, service.SCPDURL)
            d.addCallback(ET.fromstring)
            d.addCallback(self._init_service, service)
            return d
        dl = [start_init_service(s) for s in device]
        return defer.DeferredList(dl, fireOnOneErrback=True)
    def build(self, location):
        """Build a Device object from a remote location.
        Arguments:
        location -- the HTTP URL where the root device XML can be found
        Return a Deferred which will callback when the Device object is ready.
        The caller must add an errback to handle errors raised during the build
        process.
        """
        d = defer.succeed(location)
        # get the device XML
        d.addCallback(client.getPage, timeout=5)
        # parse it to an element
        d.addCallback(ET.fromstring)
        # create a new Device object
        d.addCallback(Device, location)
        # check if the device passes the filter
        d.addCallback(self._check_filter)
        # initialize services
        d.addCallback(self._init_services)
        # error handling for the service initialization
        d.addErrback(reraise_with_url, location)
        # make sure the Device object is returned
        d.addCallback(self._get_device)
        return d
|
coxmediagroup/googleads-python-lib | examples/dfp/v201411/placement_service/update_placements.py | Python | apache-2.0 | 2,284 | 0.007443 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a single placement to allow for AdSense targeting.
To determine which placements exist, run get_all_placements.py.
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
PLACEMENT_ID = 'INSERT_PLACEMENT_ID_HERE'
def main(client, placement_id):
  """Enable AdSense targeting on the placement with the given id."""
  # Initialize appropriate service.
  placement_service = client.GetService('PlacementService', version='v201411')
  # Create query.
  values = [{
      'key': 'placementId',
      'value': {
          'xsi_type': 'NumberValue',
          'value': placement_id
      }
  }]
  query = 'WHERE id = :placementId'
  statement = dfp.FilterStatement(query, values, 1)
  # Get placements by statement.
  placements = placement_service.getPlacementsByStatement(
      statement.ToStatement())
  for placement in placements:
    if not placement['targetingDescription']:
      placement['targetingDescription'] = 'Generic description'
    placement['targetingAdLocation'] = 'All images on sports pages.'
    placement['targetingSiteName'] = 'http://code.google.com'
    placement['isAdSenseTargetingEnabled'] = 'true'
  # Update placements remotely.
  placements = placement_service.updatePlacements(placements)
  for placement in placements:
    print ('Placement with id \'%s\', name \'%s\', and AdSense targeting '
           'enabled \'%s\' was updated.'
           % (placement['id'], placement['name'],
              placement['isAdSenseTargetingEnabled']))
if __name__ == '__main__':
  # Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  # Update the hard-coded placement (see PLACEMENT_ID above).
  main(dfp_client, PLACEMENT_ID)
|
ramezquitao/pyoptools | pyoptools/raytrace/calc/calc.py | Python | gpl-3.0 | 25,857 | 0.026221 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Method collection to obtain optical system information
This module contains a collection of methods to obtain information about, and
to analyze, optical systems.
'''
__all__=["intersection", "nearest_points", "chief_ray_search", "pupil_location",
"paraxial_location", "find_apperture", "find_ppp",
"get_optical_path_ep", "find_reference_sphere_radius",
"parallel_propagate", "parallel_propagate_ns", "ray_paths" ]
from pyoptools.raytrace.ray import Ray
from pyoptools.misc.pmisc import cross
from pyoptools.raytrace.system import System
from pyoptools.raytrace.component import Component
from pyoptools.raytrace.comp_lib import CCD
from pyoptools.raytrace.surface import Spherical
#from gui.plot_frame import PlotFrame
from pyoptools.raytrace.shape import Circular
from numpy import inf, sqrt, square, pi, dot, array, arctan2, alltrue, isnan,\
nan, mgrid,where
from scipy.optimize.minpack import fsolve
from numpy.random import normal
import multiprocessing as mp
#******Logger definition *******#
#import logging
#log= logging.getLogger("ray_trace.calc")
def intersection(r1,r2):
    '''
    Return the point of intersection between the rays r1 and r2.
    Parameters
    ----------
    r1,r2 : :class:`~pyoptools.raytrace.ray.Ray`
        Rays to test for intersection.
    Returns
    -------
    ip : tuple(float, float, float)
        Intersection point coordinates. If the rays do not intersect
        ip=(nan,nan,nan)
    rv : bool
        Indicates if the intersection point represent a real image (rv=true),
        or a virtual image (rv=false). In this case virtual has the same meaning
        as in virtual image i.e. intersection point is not in the actual path,
        or is behind the ray's origin.
    '''
    d1=r1.dir
    d2=r2.dir
    p1=r1.pos
    p2=r2.pos
    d1xd2=cross(d1,d2)
    # check if the rays are parallel
    #log.info("Vector cross product:"+str(d1xd2))
    if dot(d1xd2,d1xd2)==0. :
        return array((nan,nan,nan)),False
    p2p1xv2=cross(p2-p1,d2)
    p2p1xv1=cross(p2-p1,d1)
    # Component-wise ratios give candidate line parameters; components where
    # d1xd2 is zero produce nan (0/0) and are discarded below.
    a=p2p1xv2/d1xd2
    b=p2p1xv1/d1xd2
    # Remove the nan from the list
    keep=~isnan(a)
    an=a[keep]
    keep=~isnan(b)
    bn=b[keep]
    ip=array((nan,nan,nan))
    rv=False
    #print an,bn
    if len(an)>0:
        # The lines truly intersect only if every finite component agrees on
        # the same parameter value.
        if alltrue(an==an[0]) :
            ip=p1+an[0]*d1
            # check if all the solutions are equal
            # Real (not virtual) only when both parameters are non-negative,
            # i.e. the point lies forward of both ray origins.
            if alltrue(an>=0) and alltrue(bn>=0):
                rv=True
    #log.info("Intersection point found at:"+str(ip)+" "+str(rv))
    return ip,rv
def nearest_points(ray1, ray2):
    '''
    Return the pair of mutually nearest points on two rays.

    In aberrated systems rays from one object point rarely intersect
    exactly, so the image location is taken where the rays pass closest to
    each other; when the rays do intersect, both returned points coincide
    with the intersection.
    Solution taken from:
    http://homepage.univie.ac.at/Franz.Vesely/notes/hard_sticks/hst/hst.html
    Parameters
    ----------
    ray1,ray2 : :class:`~pyoptools.raytrace.ray.Ray`
        Rays to test for intersection.
    Returns
    -------
    p1 : tuple(float, float, float)
        Point on ray1 closest to ray2
    p2 : tuple(float, float, float)
        Point on ray2 closest to ray1
    d : float
        Distance between p1 and p2
    rv : bool
        True when both closest points lie forward of their ray origins
        (a real image); False otherwise (virtual).
    '''
    origin1, direction1 = ray1.pos, ray1.dir
    origin2, direction2 = ray2.pos, ray2.dir
    separation = origin2 - origin1
    cos_angle = dot(direction1, direction2)
    denominator = 1 - cos_angle ** 2
    t1 = (dot(separation, direction1)
          - dot(separation, direction2) * cos_angle) / denominator
    t2 = -(dot(separation, direction2)
           - dot(separation, direction1) * cos_angle) / denominator
    closest1 = origin1 + t1 * direction1
    closest2 = origin2 + t2 * direction2
    gap = closest1 - closest2
    is_real = bool(t1 >= 0 and t2 >= 0)
    return closest1, closest2, sqrt(dot(gap, gap)), is_real
def chief_ray_search(opsys,ccds,o=(0.,0.,0.),rt=(0.,0.,0.),er=0.1,w=pi/2.,maxiter=1000,wavelength=.58929):
'''
This function uses a random search algorithm to find the chief_ray for a
given optical system and object point.
Parameters
----------
opsys : :class:`pyoptools.raytrace.system.System`
Optical system that will be used to find the chief ray
ccds : :class:`pyoptools.raytrace.comp_lib.CCD`
Detector placed in the aperture plane. Must be centred in the optical
axis
o : tuple(float, flo0at, float)
coordinates of the object point used to find the chief ray
rt : tuple(float, float, float)
rotations made to a ray propagating in the z direction to obtain the
first test ray
er : float
Maximum acceptable distance between the ray and the center of the
aperture
w : float
Gaussian width in radians
wavelength : float
Wavelength of the ray used to find the principal ray given in
micrometers (.58929 by default).
Returns
-------
:class:`~pyoptools.raytrace.ray.Ray`
Chief ray found. (Ray instance)
Notes
-----
The algorithm starts using a given ray, propagating it in the optical
system, and finding the intersection point of this test ray and the
aperture plane. The distance from this point and the optical axis is
recorded.
Using a gaussian random generator, two rotation angles are calculated,
to generate a new test ray that is propagated in the optical system,
and its distance to the optical axis is found at the aperture plane.
If this distance is less than the distance found for the previous ray,
this ray is taken as the new *chief ray* candidate, and the algorithm
is repeated until the number of iterations reaches *maxiter*, or until
the distance is less than *er*.
the *rt* parameter gives the rotations made to a ray originating in
*o*, and propagating in the *Z* direction, to find the first test ray.
A detector object *ccds* should be placed at the aperture plane. It is used
to find the point where the ray intersects the aperture. To increase the
convergense speed of the algorithm, it is better to make sure that the first
test ray intersects the detector.
.. todo::
Implement a function similar to this one, using a minimization
algorithm
'''
#log.info("Entering chief_ray_search function")
test_ray=Ray(wavelength=wavelength)
opsys.clear_ray_list()
btx,bty,btz=rt #btz is not used
ntry=0
nt=0
#Check the initial test ray
retray=test_ray.ch_coord_sys_inv(o,(btx,bty,0))
#log.info("Calculating test_ray")
opsys.clear_ray_list()
opsys.reset()
opsys.ray_add(retray)
opsys.propagate()
try:
x,y,z=ccds.hit_list[0][0]
dist=sqrt(square(x)+square(y))
except:
dist=inf
p_dist=dist
while (p_dist> er)and (ntry<maxiter):
ntry=ntry+1
nt=nt+1
rx=normal(btx,w)
ry=normal(bty,w)
tray=test_ray.ch_coord_sys_inv(o,(rx,ry,0))
opsys.clear_ray_list()
opsys.reset()
opsys.ray_add(tray)
opsys.propagate()
try:
x,y,z=ccds.hit_list[0][0] |
dist=sqrt(square(x)+square(y))
| except:
#log.info("CCD not hitted by ray")
dist=inf
if p_dist>dist:
#Select this ray as new generator ray
btx=rx
bty=ry
p_dist=dist
nt=0
retray=tray
#log.info("distance to aperture ce |
klebercode/pmsal | pmsal/bid/models.py | Python | mit | 1,391 | 0 | # coding: utf-8
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import uuid
import os
class Entry(models.Model):
    """Public-bid ("Licitação") record with an attached tender document.

    Fix: removes embedded ' | ' separator artifacts that split the
    ``get_file_path`` definition and the ``DateTimeField`` call.
    """

    def get_file_path(instance, filename):
        # upload_to callback: store uploads under 'licitacao/' with a random
        # UUID name, preserving the original file extension.
        ext = filename.split('.')[-1]
        filename = "%s.%s" % (uuid.uuid4(), ext)
        return os.path.join('licitacao', filename)

    created = models.DateTimeField(_(u'Data de Criação'), auto_now_add=True)
    modified = models.DateTimeField(_(u'Data de Modificação'), auto_now=True)
    description = models.TextField(_(u'Objeto da Licitação'))
    process = models.CharField(_(u'Processo Licitatório Nº'), max_length=20)
    price = models.CharField(_(u'Tomada de Preços Nº'), max_length=20)
    attach = models.FileField(_(u'Arquivo'), upload_to=get_file_path,
                              help_text='Selecione um arquivo')

    def admin_attach(self):
        # Rendered in the Django admin: download link, or a fallback message.
        if self.attach:
            return "<a href='%s'>Baixar</a>" % self.attach.url
        else:
            return "Nenhum arquivo encontrado"
    admin_attach.allow_tags = True
    admin_attach.short_description = _(u'Arquivo')

    def __unicode__(self):
        return unicode(self.process)

    class Meta:
        verbose_name = _(u'Licitação')
        verbose_name_plural = _(u'Licitações')
        ordering = ['-created', 'description', 'process', 'price']
|
survey-methods/samplics | src/samplics/utils/hadamard.py | Python | mit | 6,813 | 0.002643 | """computes Hadamard matrices.
A Hadamard matrix is a square matrix whose entries are either +1 or −1 and whose rows are mutually orthogonal.The Hadamard matrix is used to derive the BRR replicate weights. It is conjectured that
a Hadamard matrix exist for all n divisible by 4. However, the *hadarmard(n)* functions from
*scipy.linalg* only provides the matrix for n that are power of 2. Hence, in this module,
additional Hadamard matrices are implemented. For example, *scipy.linalg.hadamard()* can provide a matrix for n = 4, 8, 16, 32, 64, 128, etc. The module add Hadamard matrices for n = 12, 20, 24, 28, and some additional to come.
In appendix A, Wolter, K. M. (1985) [#w1985]_ provides a list of Hadamard matrices for all
n multiple of 4 up to 100 which should be sufficient for most applications. Note that this is
this reference is for the first edition of the book which explicitly provides the hadamard
matrices. Above that, the scipy function can be used. Also, more Hadamard matrices can be
found at: http://neilsloane.com/hadamard/
.. [#w1985] Wolter, K. M. (1985), *Introduction to Variance Estimation*, Springer-Verlag New York, Inc
TODO: implements Hadamard matrices of order higher than 28.
"""
import math
import numpy as np
from scipy.linalg import hadamard as hdd
def hadamard(n: int) -> np.ndarray:
    """Return a Hadamard matrix of order ``n``.

    Powers of two are delegated to ``scipy.linalg.hadamard`` (Sylvester
    construction); other multiples of four use the hand-coded
    ``_hadamard<n>`` builders defined in this module.

    Fixes: dispatch via ``eval`` replaced by a ``globals()`` lookup, and the
    float-based ``math.log`` power-of-two test replaced by an exact bit
    test.  A multiple of four with no implemented builder now raises
    ``ValueError`` (consistent with other invalid ``n``) instead of
    ``NameError``.

    Raises
    ------
    ValueError
        If ``n`` is not a supported Hadamard order.
    """
    if n >= 1 and n & (n - 1) == 0:
        # Exact power-of-two check; avoids math.log float rounding.
        return np.asarray(hdd(n))
    if n % 4 == 0:
        builder = globals().get("_hadamard" + str(n))
        if builder is None:
            raise ValueError("n is not valid!")
        return np.asarray(builder())
    raise ValueError("n is not valid!")
def _hadamard2() -> np.ndarray:
hadamard2 = np.ones((2, 2))
hadamard2[1, 1] = -1
return hadamard2
def _hadamard12() -> np.ndarray:
hadamard12 = np.ones((12, 12))
for c in [1, 3, 7, 8, 9, 11]:
hadamard12[1, c] = -1
for c in [1, 2, 4, 8, 9, 10]:
hadamard12[2, c] = -1
for c in [2, 3, 5, 9, 10, 11]:
hadamard12[3, c] = -1
for c in [1, 3, 4, 6, 10, 11]:
hadamard12[4, c] = -1
for c in [1, 2, 4, 5, 7, 11]:
hadamard12[5, c] = -1
for c in [1, 2, 3, 5, 6, 8]:
hadamard12[6, c] = -1
for c in [2, 3, 4, 6, 7, 9]:
hadamard12[7, c] = -1
for c in [3, 4, 5, 7, 8, 10]:
hadamard12[8, c] = -1
for c in [4, 5, 6, 8, 9, 11]:
hadamard12[9, c] = -1
for c in [1, 5, 6, 7, 9, 10]:
hadamard12[10, c] = -1
for c in [2, 6, 7, 8, 10, 11]:
hadamard12[11, c] = -1
return hadamard12
def _hadamard20() -> np.ndarray:
    """Return a Hadamard matrix of order 20.

    Row 0 and column 0 stay all +1; the remaining 19x19 core is built by
    cyclically shifting an initial set of "-1" column positions through
    columns 1..19.  Column 0 is never touched: when a shifted position
    wraps to 0 it is mapped to 1 instead.
    """
    hadamard20 = np.ones((20, 20))
    # Columns receiving -1 in row 1; the set is shifted by one for each
    # subsequent row.
    row = np.array([1, 3, 4, 9, 11, 13, 14, 15, 16, 19])
    for r in range(1, 20):
        for c in row:
            hadamard20[r, c] = -1
        # Advance every position by one, wrapping 19 -> 1 (skip column 0).
        # After sorting, index 9 holds the largest entry, so it is the
        # only one that can have wrapped to 0.
        row = (row + 1) % 20
        if row[9] == 0:
            row[9] = 1
        row = np.sort(row)
    return hadamard20
def _hadamard24() -> np.ndarray:
    """Return a Hadamard matrix of order 24.

    The matrix has the Kronecker-like block structure
    ``[[H12, H12], [H12, -H12]]``: the two column sets below are the
    order-12 cyclic pattern (columns 1..11) and its shifted copy in
    columns 13..23; the second half of the rows uses the complementary
    right-hand pattern, with row 12 and column 12 negated explicitly at
    the end.

    Note: the original dump of this function was corrupted mid-token
    (``hadamard2 | 4`` and ``row | 1_c2``); the obvious intended code is
    restored here.
    """
    hadamard24 = np.ones((24, 24))
    # Rows 1..11: same -1 pattern in the left (mod 12) and right
    # (mod 24, offset 12) halves, each cyclically shifted per row.
    row1_c1 = np.array([1, 3, 7, 8, 9, 11])
    row1_c2 = np.array([13, 15, 19, 20, 21, 23])
    for r1 in range(0, 11):
        for c1 in row1_c1:
            hadamard24[r1 + 1, c1] = -1
        row1_c1 = (row1_c1 + 1) % 12
        if row1_c1[5] == 0:
            # Wrap 11 -> 1, never touching column 0.
            row1_c1[5] = 1
        row1_c1 = np.sort(row1_c1)
        for c2 in row1_c2:
            hadamard24[r1 + 1, c2] = -1
        row1_c2 = (row1_c2 + 1) % 24
        if row1_c2[5] == 0:
            # Wrap 23 -> 13, never touching column 12.
            row1_c2[5] = 13
        row1_c2 = np.sort(row1_c2)
    # Rows 13..23: same left half, complementary right half (-H12 block).
    row2_c1 = np.array([1, 3, 7, 8, 9, 11])
    row2_c2 = np.array([14, 16, 17, 18, 22])
    for r2 in range(12, 23):
        for c1 in row2_c1:
            hadamard24[r2 + 1, c1] = -1
        row2_c1 = (row2_c1 + 1) % 12
        if row2_c1[5] == 0:
            row2_c1[5] = 1
        row2_c1 = np.sort(row2_c1)
        for c2 in row2_c2:
            hadamard24[r2 + 1, c2] = -1
        row2_c2 = (row2_c2 + 1) % 24
        if row2_c2[4] == 0:
            row2_c2[4] = 13
        row2_c2 = np.sort(row2_c2)
    # Row 12 and column 12 of the lower-right block are all -1.
    hadamard24[12:24, 12] = -1
    hadamard24[12, 12:24] = -1
    return hadamard24
def _hadamard28() -> np.ndarray:
    """Return a Hadamard matrix of order 28.

    The -1 positions are transcribed row by row from a published table
    (Wolter 1985, appendix A); every entry not listed below stays +1.
    """
    hadamard28 = np.ones((28, 28))
    hadamard28[0, 1] = -1
    for c in [0, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27]:
        hadamard28[1, c] = -1
    for c in [3, 6, 7, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25]:
        hadamard28[2, c] = -1
    for c in [1, 2, 3, 5, 6, 9, 11, 12, 14, 16, 18, 21, 23, 24, 27]:
        hadamard28[3, c] = -1
    for c in [5, 8, 9, 14, 15, 16, 17, 18, 19, 20, 21, 26, 27]:
        hadamard28[4, c] = -1
    for c in [1, 3, 4, 5, 7, 8, 11, 13, 14, 16, 18, 20, 23, 25, 26]:
        hadamard28[5, c] = -1
    for c in [2, 3, 7, 10, 11, 16, 17, 18, 19, 20, 21, 22, 23]:
        hadamard28[6, c] = -1
    for c in [1, 2, 5, 6, 7, 9, 10, 13, 15, 16, 18, 20, 22, 25, 27]:
        hadamard28[7, c] = -1
    for c in [4, 5, 9, 12, 13, 18, 19, 20, 21, 22, 23, 24, 25]:
        hadamard28[8, c] = -1
    for c in [1, 3, 4, 7, 8, 9, 11, 12, 15, 17, 18, 20, 22, 24, 27]:
        hadamard28[9, c] = -1
    for c in [6, 7, 11, 14, 15, 20, 21, 22, 23, 24, 25, 26, 27]:
        hadamard28[10, c] = -1
    for c in [1, 3, 5, 6, 9, 10, 11, 13, 14, 17, 19, 20, 22, 24, 26]:
        hadamard28[11, c] = -1
    for c in [2, 3, 8, 9, 13, 16, 17, 22, 23, 24, 25, 26, 27]:
        hadamard28[12, c] = -1
    for c in [1, 2, 5, 7, 8, 11, 12, 13, 15, 16, 19, 21, 22, 24, 26]:
        hadamard28[13, c] = -1
    for c in [2, 3, 4, 5, 10, 11, 15, 18, 19, 24, 25, 26, 27]:
        hadamard28[14, c] = -1
    for c in [1, 2, 4, 7, 9, 10, 13, 14, 15, 17, 18, 21, 23, 24, 26]:
        hadamard28[15, c] = -1
    for c in [2, 3, 4, 5, 6, 7, 12, 13, 17, 20, 21, 26, 27]:
        hadamard28[16, c] = -1
    for c in [1, 2, 4, 6, 9, 11, 12, 15, 16, 17, 19, 20, 23, 25, 26]:
        hadamard28[17, c] = -1
    for c in [2, 3, 4, 5, 6, 7, 8, 9, 14, 15, 19, 22, 23]:
        hadamard28[18, c] = -1
    for c in [1, 2, 4, 6, 8, 11, 13, 14, 17, 18, 19, 21, 22, 25, 27]:
        hadamard28[19, c] = -1
    for c in [4, 5, 6, 7, 8, 9, 10, 11, 16, 17, 21, 24, 25]:
        hadamard28[20, c] = -1
    for c in [1, 3, 4, 6, 8, 10, 13, 15, 16, 19, 20, 21, 23, 24, 27]:
        hadamard28[21, c] = -1
    for c in [6, 7, 8, 9, 10, 11, 12, 13, 18, 19, 23, 26, 27]:
        hadamard28[22, c] = -1
    for c in [1, 3, 5, 6, 8, 10, 12, 15, 17, 18, 21, 22, 23, 25, 26]:
        hadamard28[23, c] = -1
    for c in [2, 3, 8, 9, 10, 11, 12, 13, 14, 15, 20, 21, 25]:
        hadamard28[24, c] = -1
    for c in [1, 2, 5, 7, 8, 10, 12, 14, 17, 19, 20, 23, 24, 25, 27]:
        hadamard28[25, c] = -1
    for c in [4, 5, 10, 11, 12, 13, 14, 15, 16, 17, 22, 23, 27]:
        hadamard28[26, c] = -1
    for c in [1, 3, 4, 7, 9, 10, 12, 14, 16, 19, 21, 22, 25, 26, 27]:
        hadamard28[27, c] = -1
    return hadamard28
|
hcs/mailman | src/mailman/rest/templates.py | Python | gpl-3.0 | 2,043 | 0.000489 | # Copyright (C) 2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Template finder."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'TemplateFinder',
]
import os
from restish import http, resource
from mailman.config import config
from mailman.utilities.i18n import TemplateNotFoundError, find
|
# Map of the content types we can serve to their file extensions.
# XXX: consider mimetypes.guess_all_extensions() instead of hard-coding.
EXTENSIONS = {
    'text/plain': '.txt',
    'text/html': '.html',
    }
class TemplateFinder(resource.Resource):
    """Template finder resource.

    Looks up a named template for a mailing list in a given language and
    returns its contents, or a 404 response when the requested content
    type is unsupported or no matching template exists.

    Note: the class header of the original dump was corrupted mid-token
    (``resource. | Resource``); the obvious intended base class is
    restored here.
    """

    def __init__(self, mlist, template, language, content_type):
        self.mlist = mlist
        self.template = template
        self.language = language
        self.content_type = content_type

    @resource.GET()
    def find_template(self, request):
        # XXX We currently only support .txt and .html files.
        extension = EXTENSIONS.get(self.content_type)
        if extension is None:
            return http.not_found()
        template = self.template + extension
        fp = None
        # Unified try/except/else/finally (Python 2.5+) replaces the
        # original nested try blocks; behavior is unchanged.
        try:
            path, fp = find(template, self.mlist, self.language)
        except TemplateNotFoundError:
            return http.not_found()
        else:
            return fp.read()
        finally:
            # find() opened the file for us; always close it.
            if fp is not None:
                fp.close()
|
cp16net/trove | trove/extensions/mgmt/instances/service.py | Python | apache-2.0 | 8,014 | 0 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.backup.models import Backup
import trove.common.apischema as apischema
from trove.common.auth import admin_context
from trove.common import exception
from trove.common.i18n import _
from trove.common import wsgi
from trove.extensions.mgmt.instances import models
from trove.extensions.mgmt.instances import views
from trove.extensions.mgmt.instances.views import DiagnosticsView
from trove.extensions.mgmt.instances.views import HwInfoView
from trove.extensions.mysql import models as mysql_models
from trove.instance import models as instance_models
from trove.instance.service import InstanceController
# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
class MgmtInstanceController(InstanceController):
    """Controller for management (admin-only) instance functionality."""
    schemas = apischema.mgmt_instance

    @classmethod
    def get_action_schema(cls, body, action_schema):
        """Return the sub-schema for the single action named in *body*."""
        # list(body)[0] works on both Python 2 and Python 3;
        # body.keys()[0] would fail on Python 3 where keys() is a view.
        action_type = list(body)[0]
        return action_schema.get(action_type, {})

    @admin_context
    def index(self, req, tenant_id, detailed=False):
        """Return all instances."""
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("Indexing a database instance for tenant '%s'") % tenant_id)
        context = req.environ[wsgi.CONTEXT_KEY]
        # 'deleted' is tri-state: True/False filters, None means "either".
        deleted = None
        deleted_q = req.GET.get('deleted', '').lower()
        if deleted_q in ['true']:
            deleted = True
        elif deleted_q in ['false']:
            deleted = False
        clustered_q = req.GET.get('include_clustered', '').lower()
        include_clustered = clustered_q == 'true'
        try:
            instances = models.load_mgmt_instances(
                context, deleted=deleted, include_clustered=include_clustered)
        except nova_exceptions.ClientException as e:
            LOG.error(e)
            return wsgi.Result(str(e), 403)
        view_cls = views.MgmtInstancesView
        return wsgi.Result(view_cls(instances, req=req).data(), 200)

    @admin_context
    def show(self, req, tenant_id, id):
        """Return a single instance."""
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("Showing a database instance for tenant '%s'") % tenant_id)
        LOG.info(_("id : '%s'\n\n") % id)
        context = req.environ[wsgi.CONTEXT_KEY]
        deleted_q = req.GET.get('deleted', '').lower()
        include_deleted = deleted_q == 'true'
        server = models.DetailedMgmtInstance.load(context, id,
                                                  include_deleted)
        root_history = mysql_models.RootHistory.load(context=context,
                                                     instance_id=id)
        return wsgi.Result(
            views.MgmtInstanceDetailView(
                server,
                req=req,
                root_history=root_history).data(),
            200)

    @admin_context
    def action(self, req, body, tenant_id, id):
        """Run exactly one management action against an instance.

        Rejects empty bodies, unknown action names, and requests that
        specify more than one action.
        """
        LOG.info("req : '%s'\n\n" % req)
        LOG.info("Committing an ACTION against instance %s for tenant '%s'"
                 % (id, tenant_id))
        if not body:
            raise exception.BadRequest(_("Invalid request body."))
        context = req.environ[wsgi.CONTEXT_KEY]
        instance = models.MgmtInstance.load(context=context, id=id)
        _actions = {
            'stop': self._action_stop,
            'reboot': self._action_reboot,
            'migrate': self._action_migrate,
            'reset-task-status': self._action_reset_task_status
        }
        selected_action = None
        for key in body:
            if key in _actions:
                if selected_action is not None:
                    msg = _("Only one action can be specified per request.")
                    raise exception.BadRequest(msg)
                selected_action = _actions[key]
            else:
                msg = _("Invalid instance action: %s") % key
                raise exception.BadRequest(msg)
        if selected_action:
            return selected_action(context, instance, body)
        else:
            raise exception.BadRequest(_("Invalid request body."))

    def _action_stop(self, context, instance, body):
        """Stop the database service on the instance."""
        LOG.debug("Stopping MySQL on instance %s." % instance.id)
        instance.stop_db()
        return wsgi.Result(None, 202)

    def _action_reboot(self, context, instance, body):
        """Reboot the instance."""
        LOG.debug("Rebooting instance %s." % instance.id)
        instance.reboot()
        return wsgi.Result(None, 202)

    def _action_migrate(self, context, instance, body):
        """Migrate the instance, optionally to a specific host."""
        LOG.debug("Migrating instance %s." % instance.id)
        LOG.debug("body['migrate']= %s" % body['migrate'])
        host = body['migrate'].get('host', None)
        instance.migrate(host)
        return wsgi.Result(None, 202)

    def _action_reset_task_status(self, context, instance, body):
        """Clear the instance task status and fail any pending backups."""
        LOG.debug("Setting Task-Status to NONE on instance %s." %
                  instance.id)
        instance.reset_task_status()
        LOG.debug("Failing backups for instance %s." % instance.id)
        Backup.fail_for_instance(instance.id)
        return wsgi.Result(None, 202)

    @admin_context
    def root(self, req, tenant_id, id):
        """Return the date and time root was enabled on an instance,
        if ever.
        """
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("Showing root history for tenant '%s'") % tenant_id)
        LOG.info(_("id : '%s'\n\n") % id)
        context = req.environ[wsgi.CONTEXT_KEY]
        try:
            instance_models.Instance.load(context=context, id=id)
        except exception.TroveError as e:
            LOG.error(e)
            return wsgi.Result(str(e), 404)
        rhv = views.RootHistoryView(id)
        reh = mysql_models.RootHistory.load(context=context, instance_id=id)
        if reh:
            rhv = views.RootHistoryView(reh.id, enabled=reh.created,
                                        user_id=reh.user)
        return wsgi.Result(rhv.data(), 200)

    @admin_context
    def hwinfo(self, req, tenant_id, id):
        """Return a single instance hardware info."""
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("Showing hardware info for instance '%s'") % id)
        context = req.environ[wsgi.CONTEXT_KEY]
        instance = models.MgmtInstance.load(context=context, id=id)
        hwinfo = instance.get_hwinfo()
        return wsgi.Result(HwInfoView(id, hwinfo).data(), 200)

    @admin_context
    def diagnostics(self, req, tenant_id, id):
        """Return a single instance diagnostics."""
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("Showing a instance diagnostics for instance '%s'") % id)
        LOG.info(_("id : '%s'\n\n") % id)
        context = req.environ[wsgi.CONTEXT_KEY]
        instance = models.MgmtInstance.load(context=context, id=id)
        diagnostics = instance.get_diagnostics()
        return wsgi.Result(DiagnosticsView(id, diagnostics).data(), 200)

    @admin_context
    def rpc_ping(self, req, tenant_id, id):
        """Checks if instance is reachable via rpc."""
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("id : '%s'\n\n") % id)
        context = req.environ[wsgi.CONTEXT_KEY]
        instance = models.MgmtInstance.load(context=context, id=id)
        instance.rpc_ping()
        return wsgi.Result(None, 204)
|
richardcornish/timgorin | jobboardscraper/jobboardscraper/wsgi.py | Python | bsd-3-clause | 408 | 0 | """
WSGI config for jobboardscraper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before the application
# object is created (only if not already set in the environment).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jobboardscraper.settings")
# Module-level WSGI callable picked up by WSGI servers (gunicorn, mod_wsgi).
application = get_wsgi_application()
|
opnsense/core | src/opnsense/scripts/shaper/lib/__init__.py | Python | bsd-2-clause | 7,689 | 0.004292 | """
Copyright (c) 2020 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import subprocess
import ujson
import re
import datetime
def parse_flow(flow_line):
tmp = flow_line.split()
if flow_line.find(':') > 0 and len(tmp) > 8:
# IPv6 layout
return {
'BKT':tmp[0],
'Prot':tmp[1],
'flowid':tmp[2],
'Source':tmp[3],
'Destination':tmp[4],
'pkt':int(tmp[5]) if tmp[5].isdigit() else 0,
'bytes':int(tmp[6]) if tmp[6].isdigit() else 0,
'drop_pkt':int(tmp[7]) if tmp[7].isdigit() else 0,
'drop_bytes':int(tmp[8]) if tmp[8].isdigit() else 0,
}
elif len(tmp) > 7:
return {
'BKT':tmp[0],
'Prot':tmp[1],
'Source':tmp[2],
'Destination':tmp[3],
'pkt':int(tmp[4]) if tmp[4].isdigit() else 0,
'bytes':int(tmp[5]) if tmp[5].isdigit() else 0,
'drop_pkt':int(tmp[6]) if tmp[6].isdigit() else 0,
'drop_bytes':int(tmp[7]) if tmp[7].isdigit() else 0
}
def parse_flowset_params(line):
return re.match(
r"q(?P<flow_set_nr>[0-9]*)(?P<queue_size>.*) (?P<flows>[0-9]*) flows"
" \((?P<buckets>[0-9]*) buckets\) sched (?P<sched_nr>[0-9]*)"
" weight (?P<weight>[0-9]*)"
" lmax (?P<lmax>[0-9]*)"
" pri (?P<pri>[0-9]*)"
"(?P<queue_params>.*)",
line
)
def trim_dict(payload):
for key in payload:
if type(payload[key]) == str:
payload[key] = payload[key].strip()
elif type(payload[key]) == dict:
trim_dict(payload[key])
return payload
def parse_ipfw_pipes():
result = dict()
pipetxt = subprocess.run(['/sbin/ipfw', 'pipe', 'show'], capture_output=True, text=True).stdout.strip()
current_pipe = None
current_pipe_header = False
for line in ("%s\n000000X" % pipetxt).split('\n'):
if len(line) == 0:
continue
if line[0].isdigit():
if current_pipe:
result[current_pipe['pipe']] = current_pipe
current_pipe_header = False
if line.find('burst') > -1:
current_pipe = {
'pipe': line[0:5],
'bw': line[7:line.find('ms') - 5].strip(),
'delay': line[line.find('ms') - 5: line.find('ms')].strip(),
'burst': line[line.find('burst ')+6:].strip(),
'flows': []
}
elif line[0] == 'q' and current_pipe is not None:
m = parse_flowset_params(line)
if m:
current_pipe['flowset'] = m.groupdict()
elif line.find("RED") > -1 and current_pipe is not None:
current_pipe['flowset']['queue_params'] = line.strip()
elif line.startswith(' sched'):
#
m = re.match(
r" sched (?P<sched_nr>[0-9]*) type (?P<sched_type>.*) flags (?P<sched_flags>0x[0-9a-fA-F]*)"
" (?P<sched_buckets>[0-9]*) buckets (?P<sched_active>[0-9]*) active",
line
)
if m:
current_pipe['scheduler'] = m.groupdict()
elif line.find('__Source') > 0:
current_pipe_header = True
elif current_pipe_header:
flow_stats = parse_flow(line)
if flow_stats:
current_pipe['flows'].append(flow_stats)
return trim_dict(result)
def parse_ipfw_queues():
result = dict()
queuetxt = subprocess.run(['/sbin/ipfw', 'queue', 'show'], capture_output=True, text=True).stdout.strip()
current_queue = None
current_queue_header = False
for line in ("%s\nq000000X" % queuetxt).split('\n'):
if len(line) == 0:
continue
if line[0] == 'q':
m = parse_flowset_params(line)
if current_queue:
result[current_queue['flow_set_nr']] = current_queue
if m:
current_queue = m.groupdict()
current_queue['flows'] = list()
elif line.find('__Source') > 0:
current_queue_header = True
else:
flow_stats = parse_flow(line)
if flow_stats:
current_queue['flows'].append(flow_stats)
return trim_dict(result)
def parse_ipfw_scheds():
result = dict()
schedtxt = subprocess.run(['/sbin/ipfw', 'sched', 'show'], capture_output=True, text=True).stdout.strip()
current_sched = None
for line in ("%s\n000000X" % schedtxt).split('\n'):
if len(line) == 0:
continue
if line[0].isdigit():
if current_sched:
result[current_sched['pipe']] = current_sched
if line.find('burst') > 0:
current_sched = {
'pipe': line[0:5]
}
elif line.startswith(' sched'):
m = re.match(
r" sched (?P<sched_nr>[0-9]*) type (?P<sched_type>.*) flags (?P<sched_flags>0x[0-9a-fA-F]*)"
" (?P<sched_buckets>[0-9]*) buckets (?P<sched_active>[0-9]*) active",
line
)
if m:
current_sched.update(m.groupdict())
elif line.find('Children flowsets') > 0:
current_sched['children'] = line[22:].split()
return trim_dict(result)
def parse_ipfw_rules():
result = {'queues': list(), 'pipes': list()}
ruletxt = subprocess.run(['/sbin/ipfw', '-aT', 'list'], capture_output=True, text=True).stdout.strip()
for line in ruletxt.split('\n'):
parts = line.split()
if len(parts) > 5 and parts[4] in ['queue', 'pipe']:
rule = {
'rule': parts[0],
'pkts': | int(parts[1]) if parts[1].isdigit() else 0,
'bytes': int(parts[2]) if parts[2].isdigit() else 0,
'accessed': datetime.datetime.fromtimestamp(int(parts[3])).isoformat() if parts[3].isdigit() else '',
'accessed_epoch': int(par | ts[3]) if parts[3].isdigit() else 0,
'attached_to': parts[5],
'rule_uuid': None
}
if line.find('//') > -1:
rule_uuid = line[line.find('//')+3:].strip().split()[0]
if rule_uuid.count('-') == 4:
rule['rule_uuid'] = rule_uuid
result["%ss" % parts[4]].append(rule)
return result
|
XerxesDGreat/library-app | library/urls.py | Python | apache-2.0 | 2,235 | 0.008054 | from django.conf.urls import patterns, url
from library import views
from django.views.generic.base import TemplateView
# URL routing for the library app.  Note: this dump was corrupted
# mid-token in two places (views.author_search and the book title search
# pattern); the obvious intended code is restored here.
urlpatterns = patterns('',
    # author urls
    url(r'^authors/create/', views.AuthorCreateView.as_view(), name='author_add'),
    url(r'^authors/search/$', views.author_search, name='author_search'),
    url(r'^authors/$', views.AuthorIndexView.as_view(), name='author_index'),

    # book urls
    url(r'^books/(?P<pk>\d+)/update/$', views.BookUpdateView.as_view(), name='book_update'),
    url(r'^books/(?P<pk>\d+)/$', views.BookDetailView.as_view(), name='book_detail'),
    url(r'^books/create/$', views.BookCreateView.as_view(), name='book_create'),
    url(r'^books/search/title/$', views.book_title_search, name='book_search'),
    url(r'^books/$', views.BookIndexView.as_view(), name='book_index'),

    # patron urls
    url(r'^patrons/(?P<pk>\d+)/$', views.PatronDetailView.as_view(), name='patron_detail'),
    url(r'^patrons/(?P<pk>\d+)/update/$', views.PatronUpdateView.as_view(), name='patron_update'),
    url(r'^patrons/create/$', views.PatronCreateView.as_view(), name='patron_create'),
    url(r'^patrons/search/$', views.patron_search, name='patron_search'),
    url(r'^patrons/$', views.PatronIndexView.as_view(), name='patron_index'),

    # circulation urls
    url(r'^circulation/top/$', views.highest_rated_titles, name='circulation_top'),
    url(r'^circulation/(?P<pk>\d+)/$', views.CirculationDetailView.as_view(), name='circulation_detail'),
    url(r'^circulation/create/$', views.CirculationCreateView.as_view(), name='circulation_create'),
    url(r'^circulation/patron/(?P<pk>\d+)/$', views.CirculationIndexByPatronView.as_view(), name='circulation_patron_index'),
    url(r'^circulation/all/$', views.CirculationIndexView.as_view(), name='circulation_index'),
    url(r'^circulation/$', TemplateView.as_view(template_name='circulation/home.html'), name='circulation_home'),

    # report urls
    url(r'^reports/(?P<patron_id>\d+)/others_read/$', views.others_read_report, name='reports_others_read'),
    url(r'^reports/(?P<patron_id>\d+)/$', views.reports_patron_index, name='reports_patron_index'),
    url(r'^reports/$', views.reports_home_select_patron, name='reports_home'),
)
pywikibot-catfiles/file-metadata | tests/image/jpeg_file_test.py | Python | mit | 1,837 | 0 | # -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, unicode_literals,
print_function)
import os
from file_metadata.image.jpeg_file import JPEGFile
from tests import fetch_file, unittest
class JPEGFileTest(unittest.TestCase):
    """Lifecycle tests for temporary files created by JPEGFile."""

    def test_filename_zxing(self):
        jpeg = JPEGFile(fetch_file('cmyk.jpg'))
        self.assertIn('tmp_file_metadata', jpeg.fetch('filename_zxing'))
        self.assertEqual(len(jpeg.temp_filenames), 1)
        temp_name = next(iter(jpeg.temp_filenames))
        # The temp file exists until close(), then must be cleaned up.
        self.assertTrue(os.path.exists(temp_name))
        jpeg.close()
        self.assertFalse(os.path.exists(temp_name))
class JPEGFileBarcodeZXingTest(unittest.TestCase):
    """Barcode analysis (via zxing) on various flavours of JPEG files."""

    def test_jpeg_qrcode(self):
        # A JPEG containing a QR code should yield exactly one barcode
        # with the expected format and payload.
        with JPEGFile(fetch_file('qrcode.jpg')) as uut:
            data = uut.analyze_barcode_zxing()
            self.assertIn('zxing:Barcodes', data)
            self.assertEqual(len(data['zxing:Barcodes']), 1)
            self.assertEqual(data['zxing:Barcodes'][0]['format'], 'QR_CODE')
            self.assertEqual(data['zxing:Barcodes'][0]['data'],
                             'http://www.wikipedia.com')

    def test_jpeg_cmyk(self):
        with JPEGFile(fetch_file('cmyk.jpg')) as uut:
            data = uut.analyze_barcode_zxing()
            self.assertNotIn('zxing:Barcodes', data)
            # Although no barcode is detected, this test is to ensure that the
            # "Unsupported File Format" error doesn't occur for CMYK files.

    def test_jpeg_unknown_cmyk(self):
        with JPEGFile(fetch_file('unknown_cmyk.jpg')) as uut:
            data = uut.analyze_barcode_zxing()
            self.assertNotIn('zxing:Barcodes', data)
            # Although no barcode is detected, this test is to ensure that the
            # "Unsupported File Format" error doesn't occur for CMYK files.
|
kingvuplus/Test-OBH | lib/python/Components/Ipkg.py | Python | gpl-2.0 | 5,412 | 0.032336 | import os
from enigma import eConsoleAppContainer
from Components.Harddisk import harddiskmanager
# Extra mountpoints opkg should treat as additional package destinations.
opkgDestinations = []
# Relative path (no leading '/') of the opkg status file; detected lazily
# the first time a partition is added in onPartitionChange().
opkgStatusPath = ''
def opkgExtraDestinations():
	"""Return ' --dest <mp>:<mp>' options for every extra OPKG destination."""
	global opkgDestinations
	options = []
	for mountpoint in opkgDestinations:
		options.append(" --dest %s:%s" % (mountpoint, mountpoint))
	return ''.join(options)
def opkgAddDestination(mountpoint):
	"""Register *mountpoint* as an extra OPKG destination.

	Deliberately disabled (no-op): the original implementation is kept
	below, commented out, so installing to removable media stays off.
	"""
	pass
	#global opkgDestinations
	#if mountpoint not in opkgDestinations:
	#opkgDestinations.append(mountpoint)
	#print "[Ipkg] Added to OPKG destinations:", mountpoint
def onPartitionChange(why, part):
global opkgDestinations
global opkgStatusPath
mountpoint = os.path.normpath(part.mountpoint)
if mountpoint and mountpoint != '/':
if why == 'add':
if opkgStatusPath == '':
# older opkg versions
opkgStatusPath = 'usr/lib/opkg/status'
if not os.path.exists(os.path.join('/', opkgStatusPath)):
# recent opkg versions
opkgStatusPath = 'var/lib/opkg/status'
if os.path.exists(os.path.join(mountpoint, opkgStatusPath)):
opkgAddDestination(mountpoint)
elif why == 'remove':
try:
opkgDestinations.remove(mountpoint)
print "[Ipkg] Removed from OPKG destinations:", mountpoint
except:
pass
# Register for partition hotplug events, then replay the currently mounted
# partitions so media already present at module load time are picked up.
harddiskmanager.on_partition_list_change.append(onPartitionChange)
for part in harddiskmanager.getMountedPartitions():
	onPartitionChange('add', part)
class IpkgComponent:
EVENT_INSTALL = 0
EVENT_DOWNLOAD = 1
EVENT_INFLATING = 2
EVENT_CONFIGURING = 3
EVENT_REMOVE = 4
EVENT_UPGRADE = 5
EVENT_LISTITEM = 9
EVENT_DONE = 10
EVENT_ERROR = 11
EVENT_MODIFIED = 12
CMD_INSTALL = 0
CMD_LIST = 1
CMD_REMOVE = 2
CMD_UPDATE = 3
CMD_UPGRADE = 4
CMD_UPGRADE_LIST = 5
def __init__(self, ipkg = 'opkg'):
self.ipkg = ipkg
self.cmd = eConsoleAppContainer()
self.cache = None
self.callbackList = []
self.setCurrentCommand()
def setCurrentCommand(self, command = None):
self.currentCommand = command
def runCmdEx(self, cmd):
self.runCmd(opkgExtraDestinations() + ' ' + cmd)
def runCmd(self, cmd):
print "executing", self.ipkg, cmd
self.cmd.appClosed.append(self.cmdFinished)
self.cmd.dataAvail.append(self.cmdData)
if self.cmd.execute(self.ipkg + " " + cmd):
self.cmdFinished(-1)
def startCmd(self, cmd, args = None):
if cmd is self.CMD_UPDATE:
self.runCmdEx("update")
elif cmd is self.CMD_UPGRADE:
append = ""
if args["test_only"]:
append = " -test"
self.runCmdEx("upgrade" + append)
elif cmd is self.CMD_LIST:
self.fetchedList = []
if args['installed_only']:
self.runCmdEx("list_installed")
else:
self.runCmd("list")
elif cmd is self.CMD_INSTALL:
self.runCmd("install " + args['package'])
elif cmd is self.CMD_REMOVE:
self.runCmd("remove " + args['package'])
elif cmd is self.CMD_UPGRADE_LIST:
self.fetchedList = []
self.runCmdEx("list_upgradable")
self.setCurrentCommand(cmd)
def cmdFinished(self, retval):
self.callCallbacks(self.EVENT_DONE)
self.cmd.appClosed.remove(self.cmdFinished)
self.cmd.dataAvail.remove(self.cmdData)
def cmdData(self, data):
print "data:", data
if self.cache is None:
self. | cache = data
else:
| self.cache += data
if '\n' in data:
splitcache = self.cache.split('\n')
if self.cache[-1] == '\n':
iteration = splitcache
self.cache = None
else:
iteration = splitcache[:-1]
self.cache = splitcache[-1]
for mydata in iteration:
if mydata != '':
self.parseLine(mydata)
def parseLine(self, data):
if self.currentCommand in (self.CMD_LIST, self.CMD_UPGRADE_LIST):
item = data.split(' - ', 2)
if len(item) < 3:
self.callCallbacks(self.EVENT_ERROR, None)
return
self.fetchedList.append(item)
self.callCallbacks(self.EVENT_LISTITEM, item)
return
try:
if data[:11] == 'Downloading':
self.callCallbacks(self.EVENT_DOWNLOAD, data.split(' ', 5)[1].strip())
elif data[:9] == 'Upgrading':
self.callCallbacks(self.EVENT_UPGRADE, data.split(' ', 2)[1])
elif data[:10] == 'Installing':
self.callCallbacks(self.EVENT_INSTALL, data.split(' ', 2)[1])
elif data[:8] == 'Removing':
self.callCallbacks(self.EVENT_REMOVE, data.split(' ', 3)[2])
elif data[:11] == 'Configuring':
self.callCallbacks(self.EVENT_CONFIGURING, data.split(' ', 2)[1])
elif data[:17] == 'An error occurred':
self.callCallbacks(self.EVENT_ERROR, None)
elif data[:18] == 'Failed to download':
self.callCallbacks(self.EVENT_ERROR, None)
elif data[:21] == 'ipkg_download: ERROR:':
self.callCallbacks(self.EVENT_ERROR, None)
elif 'Configuration file \'' in data:
# Note: the config file update question doesn't end with a newline, so
# if we get multiple config file update questions, the next ones
# don't necessarily start at the beginning of a line
self.callCallbacks(self.EVENT_MODIFIED, data.split(' \'', 3)[1][:-1])
except Exception, ex:
print "[Ipkg] Failed to parse: '%s'" % data
print "[Ipkg]", ex
def callCallbacks(self, event, param = None):
for callback in self.callbackList:
callback(event, param)
def addCallback(self, callback):
self.callbackList.append(callback)
def removeCallback(self, callback):
self.callbackList.remove(callback)
def getFetchedList(self):
return self.fetchedList
def stop(self):
self.cmd.kill()
def isRunning(self):
return self.cmd.running()
def write(self, what):
if what:
# We except unterminated commands
what += "\n"
self.cmd.write(what, len(what))
|
chromium/chromium | third_party/android_deps/libs/com_android_support_documentfile/3pp/fetch.py | Python | bsd-3-clause | 2,497 | 0 | #!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
import argparse
import json
import os
import re
import urllib.request
# Maven repository hosting the com.android.support artifacts.
_REPO_URL = 'https://dl.google.com/dl/android/maven2'
# Maven group id, expressed as a path fragment.
_GROUP_NAME = 'com/android/support'
_MODULE_NAME = 'documentfile'
_FILE_EXT = 'aar'
# When set, pin to this upstream version instead of querying maven-metadata.
_OVERRIDE_LATEST = None
# Chromium-local patch revision appended to the upstream version string.
_PATCH_VERSION = 'cr1'
def do_latest():
    """Print the latest upstream version plus our local patch suffix.

    Note: the original dump of this function was corrupted mid-token
    (``l | atest``); the obvious intended code is restored here.
    """
    if _OVERRIDE_LATEST is not None:
        print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
        return
    maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
        'utf-8')
    # Do not parse xml with the python included parser since it is susceptible
    # to maliciously crafted xmls. Only use regular expression parsing to be
    # safe. RE should be enough to handle what we need to extract.
    match = re.search('<latest>([^<]+)</latest>', metadata)
    if match:
        latest = match.group(1)
    else:
        # if no latest info was found just hope the versions are sorted and the
        # last one is the latest (as is commonly the case).
        latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
    print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
    """Print a 3pp partial manifest (JSON) for the given package version."""
    # The trailing '.crN' patch suffix is Chromium-local; strip it before
    # building the upstream Maven URL.
    upstream, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = upstream
    file_name = '{}-{}.{}'.format(_MODULE_NAME, version, _FILE_EXT)
    file_url = '/'.join(
        [_REPO_URL, _GROUP_NAME, _MODULE_NAME, version, file_name])
    partial_manifest = {
        'url': [file_url],
        'name': [file_name],
        'ext': '.' + _FILE_EXT,
    }
    print(json.dumps(partial_manifest))
def main():
    """Entry point: dispatch the 'latest' and 'get_url' subcommands."""
    parser = argparse.ArgumentParser()
    subcommands = parser.add_subparsers()

    latest_cmd = subcommands.add_parser('latest')
    latest_cmd.set_defaults(func=lambda _opts: do_latest())

    get_url_cmd = subcommands.add_parser('get_url')
    get_url_cmd.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    options = parser.parse_args()
    options.func(options)


if __name__ == '__main__':
    main()
|
carlosmccosta/Electric-Dipole | Source code/Electric dipole field lines.py | Python | mit | 5,205 | 0.013336 | from __future__ import division #Para não truncar a divisão de inteiros
from visual import * #Module with VPython's graphical functions
from math import *
# Scene (window) configuration.
scene_range = 15
scene.width = 1920
scene.height = 1080
scene.fullscreen = True
scene.autoscale = False
scene.range = (scene_range, scene_range, scene_range)
scene.center = (0,0,0)
scene.forward = (-1,-0.7,-1)
# Simulation parameters.
dt = 10
rate_emf = 1000
# Number of longitude planes of field lines traced around the dipole axis.
numero_planos_linhas_campo = 24
# Test particle charge and mass (value matches the proton mass in kg —
# TODO confirm the intended unit system).
carga_particula = 1
massa_particula = 1.673*10**-27
# Dipole pole charges and positions (positive pole above, negative below).
carga_polo_pos = 5*10**7
pos_polo_pos = vector(0,2,0)
carga_polo_neg = -5*10**7
pos_polo_neg = vector(0,-2,0)
def criacao_emf():
#polos pos e neg
global pos_polo_pos
global pos_polo_neg
polo_pos = sphere(pos=pos_polo_pos, radius=1, material = materials.marble, opacity=0.25)
polo_neg = sphere(pos=pos_polo_neg, radius=1, material = materials.marble, opacity=0.25)
#criacao do referencial dentro da esfera positiva (sendo o vec_y_polo_pos paralelo ao vector que une os dois centros das esferas)
#os vectores serão usados nas rotações (eixos)
norm_vec_conect_center_spheres = norm(polo_pos.pos - polo_neg.pos)
vec_norm_polo_pos = vector(norm_vec_conect_center_spheres.y, norm_vec_conect_center_spheres.x, 0)
vec_x_polo_pos = arrow(pos=polo_pos.pos, axis=vec_norm_polo_pos, opacity=0.25, color = color.red)
vec_y_polo_pos = arrow(pos=polo_pos.pos, axis=norm_vec_conect_center_spheres, opacity=0.25, color = color.green)
vec_z_polo_pos = arrow(pos=polo_pos.pos, axis=cross(vec_y_polo_pos.axis, vec_x_polo_pos.axis), opacity=0.25, color = color.cyan)
#listas com os dados
lista_particulas_emf = []
lista_trajectos = []
#ângulos de rotação
latitude = 0
longitude = 0
#criação das particulas
while (longitude < 180):
dir_longitude = vec_x_polo_pos.axis.rotate(angle=radians(longitude), axis=vec_y_polo_pos.axis)
latitude_axis = vec_z_polo_pos.axis.rotate(angle=radians(longitude), axis=vec_y_polo_pos.axis)
while (latitude < 360):
dir_particula = dir_longitude.rotate(angle=radians(latitude), axis=latitude_axis)
pos_particula = polo_pos.pos + dir_particula
particula = sphere(pos=pos_particula, radius=0.05, opacity=0.25)
trajecto = curve(pos=pos_particula, color=color.yellow)
lista_particulas_emf.append(particula)
lista_trajectos.append(trajecto)
latitude += 360 / numero_planos_linhas_campo
latitude = 0
longitude += 360 / numero_planos_linhas_campo
#criação de arrays a partir das listas
array_particulas_emf = array(lista_particulas_emf)
array_trajectos = array(lista_trajectos)
#cálculo das linhas do campo magnético
continuar = True
picked_pole = None
while continuar:
rate(rate_emf)
#Caso o utilizador altere a posição de uma das partículas, reconstroi as linhas de campo
if scene.mouse.events:
m = scene.mouse.getevent()
if m.drag:
if (m.pick == polo_pos or m.pick == polo_neg):
picked_pole = m.pick
elif m.drop:
if picked_pole:
continuar = False
pos_polo_pos = polo_pos.pos
pos_polo_neg = polo_neg.pos
#Limpa os objectos e linhas de campo actuais
while(len(scene.objects) > 0):
scene.objects[0].visible = False
if picked_pole:
current_pos = scene.mouse.pos
offset = current_pos - picked_pole.pos
if (offset != 0):
picked_pole.pos += offset
for i in range(array_particulas_emf.size):
#Se as particulas se afastarem consideravelmento do centro dos polos ou quando entrarem dentro do polo neg, são imobilizadas
if ((mag(array_particulas_emf[i].pos) < scene_range) and (mag(array_particulas_emf[i].pos - polo_neg.pos) > polo_neg.radius)):
#cálculo dos dados
#Fe = k |q1|*|q1| / K r^2 -> Lei de Coulomb
#E = Fe / q
#E = k * q1 / K r^2
dist_particulas_pos = array_particulas_emf[i].pos - p | olo_pos.pos
dist_particulas_neg = array_particulas_emf[i].pos - polo_neg.pos
Eqp = ((9*10**9 * carga_polo_pos * 1.602*10**-19) / mag(dist_particulas_pos)**2) * norm(dist_particulas_pos)
Eqn = ((9*10**9 * carga_polo_neg * 1.602*10**-19) / mag(dist_particulas_n | eg)**2) * norm(dist_particulas_neg)
E = Eqp + Eqn
#x = x0 + v*t
#Como se está a desenhar as linhas de campo, está-se a percorrer o espaço usando E como vector director (análogo à velocidade de uma partícula)
pos = array_particulas_emf[i].pos + E * dt
#update dos dados
#array_campo_mag_emf[i] = E
array_particulas_emf[i].pos = pos
array_trajectos[i].append(pos)
while True:
criacao_emf()
|
adalke/rdkit | rdkit/Chem/AtomPairs/UnitTestDescriptors.py | Python | bsd-3-clause | 2,805 | 0.029947 | # $Id$
#
# Copyright (C) 2007 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from __future__ import print_function
import unittest,os,gzip
from rdkit.six.moves import cPickle #@UnresolvedImport #pylint: disable=F0401
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem.AtomPairs import Pairs,Torsions,Utils
class TestCase(unittest.TestCase):
def setUp(self):
self.testDataPath=os.path.join(RDConfig.RDCodeDir,'Chem','AtomPairs','test_data')
inF = gzip.open(os.path.join(self.testDataPath,'mols1000.pkl.gz'),'rb')
self.mols=cPickle.load(inF, encoding='bytes')
def testPairsRegression(self):
inF = gzip.open(os.path.join(self.testDataPath,'mols1000.aps.pkl.gz'),'rb')
atomPairs = cPickle.load(inF, encoding='bytes')
for i,m in enumerate(self.mols):
ap = Pairs.GetAtomPairFingerprint(m)
#if ap!=atomPairs[i]:
# print Chem.MolToSmiles(m)
# pd=ap.GetNonzeroElements()
# rd=atomPairs[i].GetNonzeroElements()
# for k,v in pd.iteritems():
# if rd.has_key(k):
# if rd[k]!=v: print '>>>1',k,v,rd[k]
# else:
# print '>>>2',k,v
# for k,v in rd.iteritems():
# if pd.has_key(k):
# if pd[k]!=v: print '>>>3',k,v,pd[k]
# else:
# print '>>>4',k,v
self.assertTrue(ap==atomPairs[i])
self.assertTrue(ap!=atomPairs[i-1])
def testTorsionsRegression(self):
inF = gzip.open(os.path.join(self.testDataPath,'mols1000.tts.pkl.gz'),'rb')
torsions = cPickle.load(inF, encoding='bytes')
for i,m in enumerate(self.mols):
tt = Torsions.GetTopologicalTorsionFingerprintAsIntVect(m)
if tt!=torsions[i]:
print(Chem.MolToSmiles(m))
pd=tt.GetNonzeroElements()
rd=torsions[i].GetNonzeroElements()
for k,v in pd.iteritems():
if rd.has_key(k):
if rd[k]!=v: print('>>>1',k,v,rd[k])
else:
print('>>>2',k,v)
for k,v in rd.iteritems():
if pd.has_key(k):
if pd[k]!=v: print('>>>3',k,v,pd[k])
else:
print('>>>4',k,v)
self.assertTrue(tt==torsions | [i])
self.assertTrue(tt!=torsions[i-1])
def testGithub334(self):
m1 = Chem.MolFromSmiles('N#C')
self.assertEqual(Utils.NumPiElectrons(m1.GetAtomWithIdx(0)),2)
self.assertEqual(Utils.NumPiElectrons(m1.GetAtomWithIdx(1)),2)
m1 = Chem.Mol | FromSmiles('N#[CH]')
self.assertEqual(Utils.NumPiElectrons(m1.GetAtomWithIdx(0)),2)
self.assertEqual(Utils.NumPiElectrons(m1.GetAtomWithIdx(1)),2)
if __name__ == '__main__':
unittest.main()
|
iulian787/spack | var/spack/repos/builtin/packages/r-rgexf/package.py | Python | lgpl-2.1 | 1,241 | 0.001612 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRgexf(RPackage):
"""Create, read and write GEXF (Graph Exchange | XML Format) graph files
(used in Gephi and others). Using the XML package, it allows the user to
easily build/read graph files including attributes, GEXF viz attributes
(such as color, size, and position), network dynamics (for both edges and
nodes) and edge weighting. Users can build/handle graphs element-by-element
or massively through data-frames, visualize the graph on a web browser
through "sigmajs" (a javascript library) and | interact with the igraph
package."""
homepage = "http://bitbucket.org/gvegayon/rgexf"
url = "https://cloud.r-project.org/src/contrib/rgexf_0.15.3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/rgexf"
version('0.15.3', sha256='2e8a7978d1fb977318e6310ba65b70a9c8890185c819a7951ac23425c6dc8147')
depends_on('r-xml', type=('build', 'run'))
depends_on('r-rook', type=('build', 'run'))
depends_on('r-igraph', type=('build', 'run'))
|
mcxiaoke/python-labs | labs/photos_walker_00.py | Python | apache-2.0 | 1,959 | 0.001585 | # -*- coding: UTF-8 -*-
# Created by mcxiaoke on 2015/7/6 22:20.
__author__ = 'mcxiaoke'
import sys, os
from os import path
from datetime import datetime
print 'curren dir is', os.getcwd()
print 'command line args is', sys.argv
if len(sys.argv) < 2:
sys.exit(1)
# 批量重命名照片文件
# 根据文件修改日期重命名文件,然后移动到目标文件夹
FILE_NAME_FORMAT = "IMG_%Y%m%d_%H%M%S"
start_dir = path.abspath(sys.argv[1])
output_dir = path.join(path.dirname(start_dir), 'output')
if not path.exists(output_dir):
os.mkdir(output_dir)
print 'start dir is %s' % start_dir
print 'output dir is %s' % output_dir
bn = []
an = []
def handler(arg, dirname, names):
dir_path = path.join(dirname, dirname)
print ("current dir is %s" % dir_path)
for file in names:
file_path = path.abspath(path.join(dirname, file))
print "processing file: %s" % file
# print 'path is file: ', path.isfile(file | _path)
if not path.isfile(file_path):
continue
_, ext = path.splitext(file)
| file_st = os.stat(file_path)
fm = datetime.fromtimestamp(file_st.st_mtime)
print 'file modified time is', fm.strftime("%Y-%m-%d %H:%M:%S"), fm.microsecond
src_name = file
dest_name = fm.strftime(FILE_NAME_FORMAT) + ext
print 'src name is %s' % src_name
print 'dest name is %s' % dest_name
if src_name != dest_name:
bn.append(path.abspath(path.join(dirname, src_name)))
an.append(path.abspath(path.join(output_dir, dest_name)))
return 0
os.path.walk(start_dir, handler, ())
if bn and an:
for src, dest in zip(bn, an):
print src, dest
if path.exists(src) and path.isfile(src) and not path.exists(dest):
ret = os.rename(src, dest)
print 'rename result=', ret
print 'rename %s to %s' % (src, dest)
else:
print "%s not changed" % src
|
rlucioni/project-euler | euler/solutions/solution_11.py | Python | mit | 6,618 | 0.002419 | """Largest product in a grid
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 8 | 7 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 | 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 (26) 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 (63) 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 (78) 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 (14) 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid?
"""
from euler import utils
GRID = '''
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
'''
def is_greater(numbers, adjacent, greatest_product):
"""Check if the product of the given numbers is greater than the greatest product.
Arguments:
numbers (list of int): A list of numbers to multiply together.
adjacent (int): The required count of (adjacent) numbers.
greatest_product (int): The current greatest product.
Returns:
int: If the count of numbers is equal to the required value, and the product
of the numbers is greater than the greatest product.
None: Otherwise.
"""
if len(numbers) == adjacent:
current_product = utils.product(numbers)
if greatest_product is None or current_product > greatest_product:
return current_product
# TODO: This could be more cleanly implemented as a class.
def search_grid(serialized, adjacent):
"""Search a grid for the greatest product of adjacent numbers.
Arguments:
serialized (str): Serialized representation of a grid of integers.
adjacent (int): How many adjacent numbers to consider.
Returns:
int: Greatest product of adjacent numbers.
"""
greatest_product = None
grid = utils.deserialize_grid(serialized)
# We only need to search right, down, and diagonally (upper right and
# lower right) as we visit each element to traverse the entire grid.
for row_index, row in enumerate(grid):
for column_index, column in enumerate(row):
# Look right
right = row[column_index:column_index + adjacent]
current_product = is_greater(right, adjacent, greatest_product)
if current_product is not None:
greatest_product = current_product
# Look down
down = []
for i in range(adjacent):
try:
down.append(grid[row_index + i][column_index])
# Index might be out of range, which means there isn't the required
# count of numbers vertically-adjacent to the current number.
except:
break
current_product = is_greater(down, adjacent, greatest_product)
if current_product is not None:
greatest_product = current_product
# Look diagonally, upper right
upper_diagonal = []
for i in range(adjacent):
working_row_index = row_index - i
# We don't want to be using negative indices, which would wrap around to
# the bottom of the grid.
if row_index < 0:
break
try:
upper_diagonal.append(grid[working_row_index][column_index + i])
# Index might be out of range, which means there isn't the required
# count of numbers diagonally-adjacent to the current number.
except:
break
current_product = is_greater(upper_diagonal, adjacent, greatest_product)
if current_product is not None:
greatest_product = current_product
# Look diagonally, lower right
lower_diagonal = []
for i in range(adjacent):
try:
lower_diagonal.append(grid[row_index + i][column_index + i])
# Index might be out of range, which means there isn't the required
# count of numbers diagonally-adjacent to the current number.
except:
break
current_product = is_greater(lower_diagonal, adjacent, greatest_product)
if current_product is not None:
greatest_product = current_product
return greatest_product
|
RyanWolfe/cloud-custodian | tests/test_vpc.py | Python | apache-2.0 | 16,157 | 0.000124 | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common import BaseTest
class NetworkInterfaceTest(BaseTest):
def test_interface_subnet(self):
factory = self.replay_flight_data(
'test_network_interface_filter')
client = factory().client('ec2')
vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")['Vpc']['VpcId']
self.addCleanup(client.delete_vpc, VpcId=vpc_id)
sub_id = client.create_subnet(
VpcId=vpc_id, CidrBlock="10.4.8.0/24")[
'Subnet']['SubnetId']
self.addCleanup(client.delete_subnet, SubnetId=sub_id)
sg_id = client.create_security_group(
GroupName="web-tier",
VpcId=vpc_id,
Description="for apps")['GroupId']
self.addCleanup(client.delete_security_group, GroupId=sg_id)
qsg_id = client.create_security_group(
GroupName="quarantine-group",
VpcId=vpc_id,
Description="for quarantine")['GroupId']
self.addCleanup(client.delete_security_group, GroupId=qsg_id)
net = client.create_network_interface(
SubnetId=sub_id, Groups=[sg_id])['NetworkInterface']
net_id = net['NetworkInterfaceId']
self.addCleanup(
client.delete_network_interface, NetworkInterfaceId=net_id)
p = self.load_policy({
'name': 'net-find',
'resource': 'eni',
'filters': [
{'type': 'subnet',
'key': 'SubnetId',
'value': sub_id},
{'type': 'security-group',
'key': 'Description',
'value': 'for apps'}
],
'actions': [{
'type': 'remove-groups',
'groups': 'matched',
'isolation-group': qsg_id}]
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['NetworkInterfaceId'], net_id)
self.assertEqual(resources[0]['c7n.matched-security-groups'], [sg_id])
results = client.describe_network_interfaces(
NetworkInterfaceIds=[net_id])['NetworkInterfaces']
self.assertEqual([g['GroupId'] for g in results[0]['Groups']], [qsg_id])
class SecurityGroupTest(BaseTest):
def test_used(self):
factory = self.replay_flight_data(
'test_security_group_used')
p = self.load_policy({
'name': 'sg-used',
'resource': 'security-group',
'filters': ['used']
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 3)
self.assertEqual(
set(['sg-f9cc4d9f', 'sg-13de8f75', 'sg-ce548cb7']),
set([r['GroupId'] for r in resources]))
def test_unused(self):
factory = self.replay_flight_data(
'test_security_group_unused')
p = self.load_policy({
'name': 'sg-unused',
'resource': 'security-group',
'filters': ['unused'],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_only_ports(self):
factory = self.replay_flight_data(
'test_security_group_only_ports')
client = factory().client('ec2')
vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")['Vpc']['VpcId']
self.addCleanup(client.delete_vpc, VpcId=vpc_id)
sg_id = client.create_security_group(
GroupName="web-tier",
VpcId=vpc_id,
Description="for apps")['GroupId']
self.addCleanup(client.delete_security_group, GroupId=sg_id)
client.authorize_security_group_ingress(
GroupId=sg_id,
IpProtocol='tcp',
FromPort=60000,
ToPort=62000,
CidrIp='10.2.0.0/16')
client.authorize_security_group_ingress(
GroupId=sg_id,
IpProtocol='tcp',
FromPort=61000,
ToPort=61000,
CidrIp='10.2.0.0/16')
p = self.load_policy({
'name': 'sg-find',
'resource': 'security-group',
'filters': [
{'type': 'ingress',
'OnlyPorts': [61000]},
{'GroupName': 'web-tier'}]
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['MatchedIpPermissions'],
[{u'FromPort': 60000,
u'IpProtocol': u'tcp',
u'IpRanges': [{u'CidrIp': u'10.2.0.0/16'}],
u'PrefixListIds': [],
u'ToPort': 62000,
u'UserIdGroupPairs': []}])
def test_security_group_delete(self):
factory = self.replay_flight_data(
'test_security_group_delete')
client = factory().client('ec2')
vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")['Vpc']['VpcId']
self.addCleanup(client.delete_vpc, VpcId=vpc_id)
sg_id = client.create_security_group(
GroupName="web-tier",
VpcId=vpc_id,
Description="for apps")['GroupId']
def delete_sg():
try:
client.delete_security_group(GroupId=sg_id)
except Exception:
pass
self.addCleanup(delete_sg)
p = self.load_policy({
'name': 'sg-delete',
'resource': 'security-group',
'filters': [
{'GroupId': sg_id}],
'actions': [
'delete']}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['GroupId'], sg_id)
try:
group_info = client.describe_security_groups(GroupIds=[sg_id])
except:
pass
else:
self.fail("group not deleted")
def test_port_within_range(self):
factory = self.replay_flight_data(
'test_security_group_port_in_range')
client = factory().client('ec2')
vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")['Vpc']['VpcId']
self.addCleanup(client.delete_vpc, VpcId=vpc_id)
sg_id = client.create_security_group(
GroupName="web-tier",
VpcId=vpc_id,
Description="for apps")['GroupId']
self.addCleanup(client.delete_security_group, GroupId=sg_id)
client.authorize_security_group_ingress(
GroupId=sg_id,
IpProtocol='tcp',
FromPort=60000,
| ToPort=62000,
CidrIp='10.2.0.0/16')
p = self.load_policy({
'name': 'sg-find',
'resource': 'security-group',
'filters': [
{'type': 'ingress',
' | IpProtocol': 'tcp',
'FromPort': 60000},
{'GroupName': 'web-tier'}]
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['GroupName'], 'web-tier')
self.assertEqual(
resources[0]['MatchedIpPermissions'],
[{u'FromPort': 60000,
u'IpProtocol': u'tcp',
u'IpRanges': [{u'CidrIp': u'10.2.0.0/16'}],
u'PrefixListIds': [],
u'ToPort': 62000,
u'UserIdGroupPairs': []}])
def test_ingress_remove(self):
factory = self.replay_flight_data(
'test_security_group_ingress_filter')
client = factory().client('ec2')
|
Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-search-visualsearch/azure/cognitiveservices/search/visualsearch/models/filters_py3.py | Python | mit | 1,058 | 0.002836 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved. |
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msr | est.serialization import Model
class Filters(Model):
"""A key-value object consisting of filters that may be specified to limit the
results returned by the API. Current available filters: site.
:param site: The URL of the site to return similar images and similar
products from. (e.g., "www.bing.com", "bing.com").
:type site: str
"""
_attribute_map = {
'site': {'key': 'site', 'type': 'str'},
}
def __init__(self, *, site: str=None, **kwargs) -> None:
super(Filters, self).__init__(**kwargs)
self.site = site
|
Alaxe/judgeSystem | users/migrations/0015_auto_20151217_1932.py | Python | gpl-2.0 | 694 | 0.002882 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-17 17:32
from __future__ import un | icode_literals
from django.db import migrations, models
import users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0014_auto_20150930_2125'),
]
operations = [
migrations.AlterField(
model_name='confirmati | on',
name='code',
field=models.CharField(default=users.models.gen_randcode, max_length=32),
),
migrations.AlterField(
model_name='passreset',
name='code',
field=models.CharField(default=users.models.gen_randcode, max_length=32),
),
]
|
gwn/dsql | setup.py | Python | mit | 832 | 0 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with ope | n(path.join(here, 'README.rst'), encod | ing='utf-8') as f:
long_description = f.read()
setup(
name='dsql',
version='0.4.1',
description='Dead simple RDBMS handling lib',
long_description=long_description,
url='https://github.com/gwn/dsql',
author='gwn',
author_email='egeavunc@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Database',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
keywords='sql db query builder simple',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
)
|
sheeshmohsin/FaceDetection | webapp/manage.py | Python | mit | 804 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webapp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
| # The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except I | mportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
jessekl/flixr | venv/lib/python2.7/site-packages/webassets/filter/rjsmin/__init__.py | Python | mit | 631 | 0 | from __future__ import absolute_import
try:
import rjsmin
except ImportError:
from . import rjsmin
from webassets.filter import Filter
__all__ = ('RJSMin',)
class RJSMin(Filter):
"""Minifies Javascript by removing whitespace, comments, etc.
Uses the `rJSmin library <http://opensource.perlig.de/rjsmin/>`_,
which is included with webassets. However, if you have the external
package installed, it will be | used instead. You may want to do this
to get access to the faster C-extension.
"""
name = 'rjsmin'
def output(self, _in, out, **kw):
| out.write(rjsmin.jsmin(_in.read()))
|
NHebrard/ham-multisite | ham/utils/hamtask.py | Python | gpl-3.0 | 1,998 | 0.005506 | # -*- coding: utf-8 -*-
from ..provider.g5k import G5K
from constants import SYMLINK_NAME
from functools import wraps
|
import os
import yaml
import logging
def load_env():
env = {
| 'config' : {}, # The config
'resultdir': '', # Path to the result directory
'config_file' : '', # The initial config file
'nodes' : {}, # Roles with nodes
'phase' : '', # Last phase that have been run
'user' : '', # User id for this job
'kolla_repo': 'https://git.openstack.org/openstack/kolla',
'kolla_branch': 'stable/newton'
}
# Loads the previously saved environment (if any)
env_path = os.path.join(SYMLINK_NAME, 'env')
if os.path.isfile(env_path):
with open(env_path, 'r') as f:
env.update(yaml.load(f))
logging.debug("Reloaded config %s", env['config'])
# Resets the configuration of the environment
if os.path.isfile(env['config_file']):
with open(env['config_file'], 'r') as f:
env['config'].update(yaml.load(f))
logging.debug("Reloaded config %s", env['config'])
return env
def save_env(env):
env_path = os.path.join(env['resultdir'], 'env')
if os.path.isdir(env['resultdir']):
with open(env_path, 'w') as f:
yaml.dump(env, f)
def hamtask(doc):
"""Decorator for a Ham Task."""
def decorator(fn):
fn.__doc__ = doc
@wraps(fn)
def decorated(*args, **kwargs):
# TODO: Dynamically loads the provider
if kwargs.has_key('--provider'):
provider_name = kwargs['--provider']
kwargs['provider'] = G5K()
# Loads the environment & set the config
env = load_env()
kwargs['env'] = env
# Proceeds with the function executio
fn(*args, **kwargs)
# Save the environment
save_env(env)
return decorated
return decorator
|
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2016_01_01/models/_models_py3.py | Python | mit | 35,290 | 0.004307 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._storage_management_client_enums import *
class CheckNameAvailabilityResult(msrest.serialization.Model):
"""The CheckNameAvailability operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name_available: Gets a boolean value that indicates whether the name is available for you
to use. If true, the name is available. If false, the name has already been taken or is invalid
and cannot be used.
:vartype name_available: bool
:ivar reason: Gets the reason that a storage account name could not be used. The Reason element
is only returned if NameAvailable is false. Possible values include: "AccountNameInvalid",
"AlreadyExists".
:vartype reason: str or ~azure.mgmt.storage.v2016_01_01.models.Reason
:ivar message: Gets an error message explaining the Reason value in more detail.
:vartype message: str
"""
_validation = {
'name_available': {'readonly': True},
'reason': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(CheckNameAvailabilityResult, self).__init__(**kwargs)
self.name_available = | None
self.reason = None
self.message = None
class CustomDomain(msrest.serialization.Model):
"""The custom domain assigned to this storage account. This can be set via Update.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. Gets or sets the custo | m domain name assigned to the storage account. Name
is the CNAME source.
:vartype name: str
:ivar use_sub_domain_name: Indicates whether indirect CName validation is enabled. Default
value is false. This should only be set on updates.
:vartype use_sub_domain_name: bool
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'use_sub_domain_name': {'key': 'useSubDomainName', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
use_sub_domain_name: Optional[bool] = None,
**kwargs
):
"""
:keyword name: Required. Gets or sets the custom domain name assigned to the storage account.
Name is the CNAME source.
:paramtype name: str
:keyword use_sub_domain_name: Indicates whether indirect CName validation is enabled. Default
value is false. This should only be set on updates.
:paramtype use_sub_domain_name: bool
"""
super(CustomDomain, self).__init__(**kwargs)
self.name = name
self.use_sub_domain_name = use_sub_domain_name
class Encryption(msrest.serialization.Model):
"""The encryption settings on the storage account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar services: List of services which support encryption.
:vartype services: ~azure.mgmt.storage.v2016_01_01.models.EncryptionServices
:ivar key_source: The encryption keySource (provider). Possible values (case-insensitive):
Microsoft.Storage. Has constant value: "Microsoft.Storage".
:vartype key_source: str
"""
_validation = {
'key_source': {'required': True, 'constant': True},
}
_attribute_map = {
'services': {'key': 'services', 'type': 'EncryptionServices'},
'key_source': {'key': 'keySource', 'type': 'str'},
}
key_source = "Microsoft.Storage"
def __init__(
self,
*,
services: Optional["EncryptionServices"] = None,
**kwargs
):
"""
:keyword services: List of services which support encryption.
:paramtype services: ~azure.mgmt.storage.v2016_01_01.models.EncryptionServices
"""
super(Encryption, self).__init__(**kwargs)
self.services = services
class EncryptionService(msrest.serialization.Model):
"""A service that allows server-side encryption to be used.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar enabled: A boolean indicating whether or not the service encrypts the data as it is
stored.
:vartype enabled: bool
:ivar last_enabled_time: Gets a rough estimate of the date/time when the encryption was last
enabled by the user. Only returned when encryption is enabled. There might be some unencrypted
blobs which were written after this time, as it is just a rough estimate.
:vartype last_enabled_time: ~datetime.datetime
"""
_validation = {
'last_enabled_time': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'last_enabled_time': {'key': 'lastEnabledTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
**kwargs
):
"""
:keyword enabled: A boolean indicating whether or not the service encrypts the data as it is
stored.
:paramtype enabled: bool
"""
super(EncryptionService, self).__init__(**kwargs)
self.enabled = enabled
self.last_enabled_time = None
class EncryptionServices(msrest.serialization.Model):
"""A list of services that support encryption.
:ivar blob: The encryption function of the blob storage service.
:vartype blob: ~azure.mgmt.storage.v2016_01_01.models.EncryptionService
"""
_attribute_map = {
'blob': {'key': 'blob', 'type': 'EncryptionService'},
}
def __init__(
self,
*,
blob: Optional["EncryptionService"] = None,
**kwargs
):
"""
:keyword blob: The encryption function of the blob storage service.
:paramtype blob: ~azure.mgmt.storage.v2016_01_01.models.EncryptionService
"""
super(EncryptionServices, self).__init__(**kwargs)
self.blob = blob
class Endpoints(msrest.serialization.Model):
    """The URIs that are used to perform a retrieval of a public blob, queue,
    or table object.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar blob: Gets the blob endpoint.
    :vartype blob: str
    :ivar queue: Gets the queue endpoint.
    :vartype queue: str
    :ivar table: Gets the table endpoint.
    :vartype table: str
    :ivar file: Gets the file endpoint.
    :vartype file: str
    """
    # Every endpoint is server-populated, hence read-only, and serialized
    # as a plain string under the same key.
    _validation = {
        endpoint: {'readonly': True}
        for endpoint in ('blob', 'queue', 'table', 'file')
    }
    _attribute_map = {
        endpoint: {'key': endpoint, 'type': 'str'}
        for endpoint in ('blob', 'queue', 'table', 'file')
    }
    def __init__(self, **kwargs):
        """Initialize an empty Endpoints; the server fills in all fields."""
        super().__init__(**kwargs)
        for endpoint in ('blob', 'queue', 'table', 'file'):
            setattr(self, endpoint, None)
class Resource(msrest.serialization.Model):
"""Resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Res |
shaurz/ome | ome/instructions.py | Python | mit | 4,080 | 0.004902 | # ome - Object Message Expressions
# Copyright (c) 2015-2016 Luke McCarthy <luke@iogopro.co.uk>
def format_instruction_args(args):
    """Render *args* as a comma-separated list of '%'-prefixed operands."""
    rendered = ('%{}'.format(arg) for arg in args)
    return ', '.join(rendered)
class Instruction(object):
    """Base class for all IR instructions.

    Subclasses override the class-level defaults below; code generation
    dispatches on the concrete class name via :meth:`emit`.
    """
    # Defaults consumed by the code generator; subclasses override as needed.
    args = ()
    is_leaf = True
    check_error = False
    dest_from_heap = False
    load_list = ()
    save_list = ()
    clear_list = ()
    def emit(self, codegen):
        """Invoke the codegen handler whose name matches this class."""
        handler = getattr(codegen, type(self).__name__)
        handler(self)
class ALLOC(Instruction):
    """Allocation instruction: %dest = ALLOC(size, tag)."""
    is_leaf = False
    dest_from_heap = True
    def __init__(self, dest, size, tag):
        self.dest, self.size, self.tag = dest, size, tag
    def __str__(self):
        return '%{} = ALLOC(size:{}, tag:{})'.format(self.dest, self.size, self.tag)
class ARRAY(Instruction):
    """Array allocation instruction: %dest = ARRAY(size, tag)."""
    is_leaf = False
    dest_from_heap = True
    def __init__(self, dest, size, tag):
        self.dest, self.size, self.tag = dest, size, tag
    def __str__(self):
        return '%{} = ARRAY(size: {}, tag: {})'.format(self.dest, self.size, self.tag)
class CALL(Instruction):
    """Call instruction: [%dest = ]CALL label(%args...)."""
    is_leaf = False
    dest_from_heap = True
    def __init__(self, dest, args, call_label, traceback_info,
                 check_error=True, check_tag=None):
        self.dest = dest
        self.args = args
        self.call_label = call_label
        # Kept for error reporting when the call fails at runtime.
        self.traceback_info = traceback_info
        self.check_error = check_error
        self.check_tag = check_tag
    def __str__(self):
        prefix = '%{} = '.format(self.dest) if self.dest else ''
        return '{}CALL {}({})'.format(
            prefix, self.call_label, format_instruction_args(self.args))
class CONCAT(Instruction):
    """Concatenation instruction: %dest = CONCAT(%args...)."""
    is_leaf = False
    dest_from_heap = True
    def __init__(self, dest, args, traceback_info):
        self.dest, self.args, self.traceback_info = dest, args, traceback_info
    def __str__(self):
        return '%{} = CONCAT({})'.format(
            self.dest, format_instruction_args(self.args))
class LOAD_VALUE(Instruction):
    """Load a tagged immediate: %dest = TAG(tag, value)."""
    def __init__(self, dest, tag, value):
        self.dest, self.tag, self.value = dest, tag, value
    def __str__(self):
        return '%{} = TAG({}, {})'.format(self.dest, self.tag, self.value)
class LOAD_LABEL(Instruction):
    """Load a tagged label address: %dest = TAG(tag, label)."""
    def __init__(self, dest, tag, label):
        self.dest, self.tag, self.label = dest, tag, label
    def __str__(self):
        return '%{} = TAG({}, {})'.format(self.dest, self.tag, self.label)
class GET_SLOT(Instruction):
    """Read an object slot: %dest = GETSLOT(%object, slot_index)."""
    dest_from_heap = True
    def __init__(self, dest, object, slot_index):
        self.dest = dest
        # The object operand lives in args, the generic operand list.
        self.args = [object]
        self.slot_index = slot_index
    @property
    def object(self):
        # Convenience accessor for the single operand.
        return self.args[0]
    def __str__(self):
        return '%{} = GETSLOT(%{}, {})'.format(self.dest, self.object, self.slot_index)
class SET_SLOT(Instruction):
    """Write an object slot: SETSLOT(%object, slot_index, %value)."""
    def __init__(self, object, slot_index, value):
        # Both operands live in args, the generic operand list.
        self.args = [object, value]
        self.slot_index = slot_index
    @property
    def object(self):
        return self.args[0]
    @property
    def value(self):
        return self.args[1]
    def __str__(self):
        return 'SETSLOT(%{}, {}, %{})'.format(self.object, self.slot_index, self.value)
class SET_ELEM(Instruction):
    """Write an array element: SETELEM(%array, elem_index, %value)."""
    def __init__(self, array, elem_index, value):
        # Both operands live in args, the generic operand list.
        self.args = [array, value]
        self.elem_index = elem_index
    @property
    def array(self):
        return self.args[0]
    @property
    def value(self):
        return self.args[1]
    def __str__(self):
        return 'SETELEM(%{}, {}, %{})'.format(self.array, self.elem_index, self.value)
class RETURN(Instruction):
    """Return from the current function: RETURN %source."""
    def __init__(self, source):
        self.args = [source]
    @property
    def source(self):
        return self.args[0]
    def __str__(self):
        return 'RETURN %{}'.format(self.source)
class ALIAS(Instruction):
    """Register copy/alias: %dest = %source."""
    def __init__(self, dest, source):
        self.dest = dest
        self.args = [source]
    @property
    def source(self):
        return self.args[0]
    def __str__(self):
        return '%{} = %{}'.format(self.dest, self.source)
|
mollyproject/mollyproject | molly/apps/places/migrations/0009_auto__add_journey__add_scheduledstop.py | Python | apache-2.0 | 13,531 | 0.007686 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the 'Journey' and 'ScheduledStop' tables."""
        # NOTE: South auto-generated DDL — the column order and definitions
        # must match the model state below; do not edit by hand.
        # Adding model 'Journey'
        db.create_table('places_journey', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('route', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Route'])),
            ('external_ref', self.gf('django.db.models.fields.TextField')()),
            ('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            # Day-of-week / term / holiday flags controlling when the journey runs.
            ('runs_on_monday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_tuesday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_wednesday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_thursday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_friday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_saturday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_sunday', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_in_termtime', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_in_school_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_bank_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_on_non_bank_holidays', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('runs_from', self.gf('django.db.models.fields.DateField')()),
            ('runs_until', self.gf('django.db.models.fields.DateField')()),
            ('vehicle', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('places', ['Journey'])
        # Adding model 'ScheduledStop'
        db.create_table('places_scheduledstop', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('entity', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'])),
            ('journey', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Journey'])),
            ('order', self.gf('django.db.models.fields.IntegerField')()),
            # sta/std: scheduled time of arrival / departure (either may be absent).
            ('sta', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
            ('std', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
            ('times_estimated', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('fare_stage', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('activity', self.gf('django.db.models.fields.CharField')(default='B', max_length=1)),
        ))
        db.send_create_signal('places', ['ScheduledStop'])
    def backwards(self, orm):
        """Reverse the migration: drop the tables created in forwards()."""
        # Deleting model 'Journey'
        db.delete_table('places_journey')
        # Deleting model 'ScheduledStop'
        db.delete_table('places_scheduledstop')
models = {
'places.entity': {
'Meta': {'object_name': 'Entity'},
'_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Identifier']", 'symmetrical': 'False'}),
'_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'absolute_url': ('django.db.models.fields.TextField', [], {}),
'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': | 'False', 'related_name': "'entities'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
'groups': ('django.db.models.fields.related.Man | yToManyField', [], {'to': "orm['places.EntityGroup']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']", 'null': 'True'}),
'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityType']", 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"})
},
'places.entitygroup': {
'Meta': {'object_name': 'EntityGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ref_code': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"})
},
'places.entitygroupname': {
'Meta': {'unique_together': "(('entity_group', 'language_code'),)", 'object_name': 'EntityGroupName'},
'entity_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.EntityGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'places.entityname': {
'Meta': {'unique_together': "(('entity', 'language_code'),)", 'object_name': 'EntityName'},
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['places.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'places.entitytype': {
'Meta': {'object_name': 'EntityType'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityTypeCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"})
},
'places.entitytypecategory': {
'Meta': {'object_name': 'EntityTypeCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
'places.entitytypename': {
'Meta': {'unique_together': "(('entity_type', 'language_code'),)", 'object_name': 'EntityTypeName'},
|
madhusudancs/pytask | pytask/profile/tests.py | Python | agpl-3.0 | 1,307 | 0.002295 | #!/usr/bin/env python
#
# Copyright 2011 Authors of PyTask.
#
# This file is part of PyTask.
#
# PyTask is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyTask is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyTask. If not, see <http://www.gnu.org/licenses/>.
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@fossee.in>',
]
from django.test import TestCase
class SimpleTest(TestCase):
    """Minimal sanity-check test case for the profile app."""
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # failUnlessEqual is a long-deprecated alias (removed from unittest in
        # Python 3.12); use the canonical assertEqual instead.
        self.assertEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
|
anomaly/vishnu | tests/backend/client/memcache/test_pymemcache.py | Python | apache-2.0 | 1,393 | 0 | from vishnu.session import Session
from vishnu.backend import PyMemcache
def memcache_client(sid=None):
    """Build a PyMemcache-backed session client for *sid* (generated if None)."""
    if sid is None:
        sid = Session.generate_sid()
    return PyMemcache().client_from_config(sid)
def test_load():
    """load() is False until save() starts the session, then True for any client."""
    sid = Session.generate_sid()
    first = memcache_client(sid)
    # Nothing saved yet, so the session is not started.
    assert first.load() is False
    first.save()
    # Now the session exists in memcache.
    assert first.load() is True
    # A fresh client for the same sid observes the started session.
    second = memcache_client(sid)
    assert second.load() is True
def test_clear():
    """clear() removes a started session so load() reports False again."""
    client = memcache_client()
    # Saving starts the session...
    client.save()
    assert client.load() is True
    # ...and clearing removes it again.
    client.clear()
    assert client.load() is False
def test_save():
    """save() persists session data that a second client can read back."""
    import time
    sid = Session.generate_sid()
    writer = memcache_client(sid)
    assert writer.load() is False
    writer.save()
    assert writer.load() is True
    # Store some data in the session and persist it.
    writer["key"] = "value"
    writer.save()
    # Give memcache a moment before reading from a second client.
    time.sleep(1)
    reader = memcache_client(sid)
    assert reader.load() is True
    assert reader.get("key") == "value"
|
tristanfisher/ffi4wd | config.py | Python | agpl-3.0 | 755 | 0.006623 | from easyos import easyos
import random
import string
def return_or_make_secret_key(secret_key_file):
    """Return the contents of *secret_key_file*, creating the file if missing.

    On first use a random key of 25-50 printable characters is generated,
    written to the file and returned.

    :param secret_key_file: path of the file holding the secret key.
    :returns: the secret key as a string.
    """
    # If we don't have a secret access key for signing sessions, make one
    try:
        with open(secret_key_file, "r") as key_file:
            return key_file.read()
    except IOError:
        print("Creating secret_key file")
        length = random.randint(25, 50)
        secret = "".join(random.choice(string.printable) for _ in range(length))
        with open(secret_key_file, "w+") as key_file:
            key_file.write(secret)
        # Bug fix: the previous code called f.read() right after the write,
        # which returned "" because the file position was left at EOF.
        return secret
class BaseConfig:
    # Default bind address and port shared by all configurations.
    LISTEN_HOST = '127.0.0.1'
    LISTEN_PORT = 5000
class DevelopmentConfig(BaseConfig):
    """Development settings: debug on, CSRF secret persisted in the OS tmp dir."""
    DEBUG = True
    SECRET_KEY = return_or_make_secret_key(easyos["tmp_dir"] + "ffi4wd_development_csrf_key")
deepmind/deepmind-research | physics_inspired_models/models/deterministic_vae.py | Python | apache-2.0 | 23,158 | 0.00652 | # Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the main models code."""
import functools
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Union
import distrax
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
import haiku as hk
import jax.numpy as jnp
import jax.random as jnr
import numpy as np
from physics_inspired_models import metrics
from physics_inspired_models import utils
from physics_inspired_models.models import base
from physics_inspired_models.models import dynamics
_ArrayOrPhase = Union[jnp.ndarray, phase_space.PhaseSpace]
class DeterministicLatentsGenerativeModel(base.SequenceModel[_ArrayOrPhase]):
"""Common class for generative models with deterministic latent dynamics."""
  def __init__(
      self,
      latent_system_dim: int,
      latent_system_net_type: str,
      latent_system_kwargs: Dict[str, Any],
      latent_dynamics_type: str,
      encoder_aggregation_type: Optional[str],
      decoder_de_aggregation_type: Optional[str],
      encoder_kwargs: Dict[str, Any],
      decoder_kwargs: Dict[str, Any],
      num_inference_steps: int,
      num_target_steps: int,
      latent_training_type: str,
      training_data_split: str,
      objective_type: str,
      dt: float = 0.125,
      render_from_q_only: bool = True,
      prior_type: str = "standard_normal",
      use_analytical_kl: bool = True,
      geco_kappa: float = 0.001,
      geco_alpha: Optional[float] = 0.0,
      elbo_beta_delay: int = 0,
      elbo_beta_final: float = 1.0,
      name: Optional[str] = None,
      **kwargs
  ):
    """Initializes the model.

    Validates the objective/training configuration, forwards the shared
    arguments to the base SequenceModel, and constructs the latent dynamics
    network ('Physics', 'ODE' or 'Discrete').
    """
    # Only continuous-time dynamics can be integrated backwards in time.
    can_run_backwards = latent_dynamics_type in ("ODE", "Physics")
    # Verify arguments
    if objective_type not in ("GECO", "ELBO", "NON-PROB"):
      raise ValueError(f"Unrecognized training type - {objective_type}")
    if geco_alpha is None:
      geco_alpha = 0
    if geco_alpha < 0 or geco_alpha >= 1:
      raise ValueError("GECO alpha parameter must be in [0, 1).")
    if prior_type not in ("standard_normal", "made", "made_gated"):
      raise ValueError(f"Unrecognized prior_type='{prior_type}.")
    if (latent_training_type == "forward_backward" and
        training_data_split != "include_inference"):
      raise ValueError("Training forward_backward works only when "
                       "training_data_split=include_inference.")
    if (latent_training_type == "forward_backward" and
        num_inference_steps % 2 == 0):
      raise ValueError("Training forward_backward works only when "
                       "num_inference_steps are odd.")
    if latent_training_type == "forward_backward" and not can_run_backwards:
      raise ValueError("Training forward_backward works only when the model can"
                       " be run backwards.")
    if prior_type != "standard_normal":
      raise ValueError("For now we support only `standard_normal`.")
    super().__init__(
        can_run_backwards=can_run_backwards,
        latent_system_dim=latent_system_dim,
        latent_system_net_type=latent_system_net_type,
        latent_system_kwargs=latent_system_kwargs,
        encoder_aggregation_type=encoder_aggregation_type,
        decoder_de_aggregation_type=decoder_de_aggregation_type,
        encoder_kwargs=encoder_kwargs,
        decoder_kwargs=decoder_kwargs,
        num_inference_steps=num_inference_steps,
        num_target_steps=num_target_steps,
        name=name,
        **kwargs
    )
    # VAE specific arguments
    self.prior_type = prior_type
    self.objective_type = objective_type
    self.use_analytical_kl = use_analytical_kl
    self.geco_kappa = geco_kappa
    self.geco_alpha = geco_alpha
    self.elbo_beta_delay = elbo_beta_delay
    self.elbo_beta_final = jnp.asarray(elbo_beta_final)
    # The dynamics module and arguments
    self.latent_dynamics_type = latent_dynamics_type
    self.latent_training_type = latent_training_type
    self.training_data_split = training_data_split
    self.dt = dt
    self.render_from_q_only = render_from_q_only
    # Copy before mutating: the caller's kwargs dict must not be changed.
    latent_system_kwargs["net_kwargs"] = dict(
        latent_system_kwargs["net_kwargs"])
    latent_system_kwargs["net_kwargs"]["net_type"] = self.latent_system_net_type
    if self.latent_dynamics_type == "Physics":
      # Note that here system_dim means the dimensionality of `q` and `p`.
      model_constructor = functools.partial(
          dynamics.PhysicsSimulationNetwork,
          system_dim=self.latent_system_dim // 2,
          name="Physics",
          **latent_system_kwargs
      )
    elif self.latent_dynamics_type == "ODE":
      model_constructor = functools.partial(
          dynamics.OdeNetwork,
          system_dim=self.latent_system_dim,
          name="ODE",
          **latent_system_kwargs
      )
    elif self.latent_dynamics_type == "Discrete":
      model_constructor = functools.partial(
          dynamics.DiscreteDynamicsNetwork,
          system_dim=self.latent_system_dim,
          name="Discrete",
          **latent_system_kwargs
      )
    else:
      raise NotImplementedError()
    self.dynamics = hk.transform(
        lambda *args, **kwargs_: model_constructor()(*args, **kwargs_))  # pylint: disable=unnecessary-lambda
  def process_inputs_for_encoder(self, x: jnp.ndarray) -> jnp.ndarray:
    """Stacks the time axis of `x` into the channel axis before encoding."""
    return utils.stack_time_into_channels(x, self.data_format)
  def process_latents_for_dynamics(self, z: jnp.ndarray) -> _ArrayOrPhase:
    """Wraps latents into a PhaseSpace when using the 'Physics' dynamics."""
    if self.latent_dynamics_type == "Physics":
      return phase_space.PhaseSpace.from_state(z)
    return z
  def process_latents_for_decoder(self, z: _ArrayOrPhase) -> jnp.ndarray:
    """Extracts the array to decode from the latent representation.

    For 'Physics' dynamics this is either only the position `q` or the full
    state, depending on `render_from_q_only`.
    """
    if self.latent_dynamics_type == "Physics":
      return z.q if self.render_from_q_only else z.single_state
    return z
@property
def inferred_index(self) -> int:
if self.latent_training_type == "forward":
return self.num_inference_steps - 1
elif self.latent_training_type == "forward_backward":
assert self.num_inference_steps % 2 == 1
return self.num_inference_steps // 2
else:
raise NotImplementedError()
@property
def targets_index_offset(self) -> int:
if self.training_data_split == "overlap_by_one":
return -1
elif self.training_data_split == "no_overlap":
return 0
elif self.training_data_split == "include_inference":
return - self.num_inference_steps
else:
raise NotImplementedError()
@property
def targets_length(self) -> int:
if self.training_data_split == "include_inference":
return self.num_inference_steps + self.num_target_steps
return self.num_target_steps
@property
def train_sequence_length(self) -> int:
"""Computes the total length of a sequence needed for training."""
if self.training_data_split == "overlap_by_one":
# Input - [-------------------------------------------------]
# Inference - [---------------]
# Targets - [---------------------------------]
return self.num_inference_steps + self.num_target_steps - 1
elif self.training_data_split == "no_overlap":
# Input - [-------------------------------------------------]
# Inference - [---------------]
# Targets - [--------------------------------]
return self.num_inference_steps + self.num_target_steps
elif self.training_data_split == "include_inference":
# Input - [-------------------------------------------------]
# Inference - [---------------]
# Targets - [-------------------------------------------------]
return self.num_inference_steps + self.num_target_steps
else:
raise NotImplementedErro |
Wyn10/Cnchi | cnchi/installation/download/metalink.py | Python | gpl-3.0 | 16,922 | 0.001123 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# metalink.py
#
# Code from pm2ml Copyright (C) 2012-2013 Xyne
# Copyright © 2013-2016 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Operations with metalinks """
import argparse
import hashlib
import logging
import os
import re
import tempfile
import xml.dom.minidom as minidom
from collections import deque
try:
import pyalpm
except ImportError:
pass
try:
import xml.etree.cElementTree as eTree
except ImportError:
import xml.etree.ElementTree as eTree
MAX_URLS = 15


def get_info(metalink):
    """Parse a metalink document and return a dict of file entries.

    Returns {identity: info} where each info dict carries 'filename',
    'identity', 'size', 'version', 'description', 'hash' (when present)
    and at most MAX_URLS 'urls'.
    """
    metalink_info = {}
    element = {}
    # iterparse wants a file on disk, so spill the XML out first.
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        temp_file.write(str(metalink).encode('UTF-8'))
        temp_file.close()
        for event, elem in eTree.iterparse(temp_file.name, events=('start', 'end')):
            if event == "start":
                if elem.tag.endswith("file"):
                    element['filename'] = elem.attrib['name']
            else:
                # Read text on the 'end' event only: at 'start' the element's
                # text may not have been parsed yet.
                if elem.tag.endswith("identity"):
                    element['identity'] = elem.text
                elif elem.tag.endswith("size"):
                    element['size'] = elem.text
                elif elem.tag.endswith("version"):
                    element['version'] = elem.text
                elif elem.tag.endswith("description"):
                    element['description'] = elem.text
                elif elem.tag.endswith("hash"):
                    element['hash'] = elem.text
                elif elem.tag.endswith("url"):
                    element.setdefault('urls', []).append(elem.text)
                elif elem.tag.endswith("file"):
                    # Limit to MAX_URLS per file; tolerate entries with no urls
                    # (the old code raised KeyError for those).
                    element['urls'] = element.get('urls', [])[:MAX_URLS]
                    metalink_info[element['identity']] = element.copy()
                    element.clear()
                elem.clear()
    finally:
        # Always clean up the temporary file, even if parsing raises.
        temp_file.close()
        if os.path.exists(temp_file.name):
            os.remove(temp_file.name)
    return metalink_info
def create(alpm, package_name, pacman_conf_file):
    """Creates a metalink to download package_name and its dependencies.

    :param alpm: libalpm handle used to resolve the download queue.
    :param package_name: package to download, or the sentinel "databases"
        to refresh the sync databases instead.
    :param pacman_conf_file: path to the pacman configuration to use.
    :returns: a Metalink instance, or None when the queue could not be built
        or packages/dependencies could not be resolved.
    """
    # options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps", "--needed"]
    options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps"]
    # Bug fix: the original used `package_name is "databases"` — identity
    # comparison against a string literal only works by accident of interning.
    if package_name == "databases":
        options.append("--refresh")
    else:
        options.append(package_name)
    try:
        download_queue, not_found, missing_deps = build_download_queue(alpm, args=options)
    except Exception as ex:
        template = "Unable to create download queue for package {0}. An exception of type {1} occured. Arguments:\n{2!r}"
        message = template.format(package_name, type(ex).__name__, ex.args)
        logging.error(message)
        return None
    if not_found:
        msg = "Can't find these packages: "
        for pkg_not_found in sorted(not_found):
            msg = msg + pkg_not_found + " "
        logging.error(msg)
        return None
    if missing_deps:
        msg = "Can't resolve these dependencies: "
        for missing in sorted(missing_deps):
            msg = msg + missing + " "
        logging.error(msg)
        return None
    metalink = download_queue_to_metalink(download_queue)
    return metalink
""" From here comes modified code from pm2ml
pm2ml is Copyright (C) 2012-2013 Xyne
More info: http://xyne.archlinux.ca/projects/pm2ml """
def download_queue_to_metalink(download_queue):
    """Build a Metalink document describing everything in *download_queue*."""
    meta = Metalink()
    # Sync databases first, then the individual packages.
    for database, sigs in download_queue.dbs:
        meta.add_db(database, sigs)
    for pkg, urls, sigs in download_queue.sync_pkgs:
        meta.add_sync_pkg(pkg, urls, sigs)
    return meta
class Metalink(object):
    """Builder for a metalink XML document (urn:ietf:params:xml:ns:metalink)."""

    def __init__(self):
        self.doc = minidom.getDOMImplementation().createDocument(None, "metalink", None)
        self.doc.documentElement.setAttribute('xmlns', "urn:ietf:params:xml:ns:metalink")
        # All <file> entries hang directly off the document element.
        self.files = self.doc.documentElement

    # Dead commented-out __del__ removed: minidom documents do not need an
    # explicit unlink() for garbage collection on CPython.

    def __str__(self):
        """Get a string representation of a metalink."""
        # Collapse the whitespace minidom's pretty-printer adds around text nodes.
        return re.sub(
            r'(?<=>)\n\s*([^\s<].*?)\s*\n\s*',
            r'\1',
            self.doc.toprettyxml(indent=' ')
        )

    def add_urls(self, element, urls):
        """Add URL elements to the given element."""
        for url in urls:
            url_tag = self.doc.createElement('url')
            element.appendChild(url_tag)
            url_val = self.doc.createTextNode(url)
            url_tag.appendChild(url_val)

    def add_sync_pkg(self, pkg, urls, sigs=False):
        """Add a sync db package, copying its metadata from the pyalpm object."""
        file_ = self.doc.createElement("file")
        file_.setAttribute("name", pkg.filename)
        self.files.appendChild(file_)
        # (xml tag, pyalpm attribute, extra xml attributes) triples.
        for tag, db_attr, attrs in (
                ('identity', 'name', ()),
                ('size', 'size', ()),
                ('version', 'version', ()),
                ('description', 'desc', ()),
                ('hash', 'sha256sum', (('type', 'sha256'),)),
                ('hash', 'md5sum', (('type', 'md5'),))):
            tag = self.doc.createElement(tag)
            file_.appendChild(tag)
            val = self.doc.createTextNode(str(getattr(pkg, db_attr)))
            tag.appendChild(val)
            for key, val in attrs:
                tag.setAttribute(key, val)
        urls = list(urls)
        self.add_urls(file_, urls)
        if sigs:
            # Mirror every package URL with its detached signature file.
            self.add_file(pkg.filename + '.sig', (u + '.sig' for u in urls))

    def add_file(self, name, urls):
        """Add a signature file."""
        file_ = self.doc.createElement("file")
        file_.setAttribute("name", name)
        self.files.appendChild(file_)
        self.add_urls(file_, urls)

    def add_db(self, db, sigs=False):
        """Add a sync db."""
        file_ = self.doc.createElement("file")
        name = db.name + '.db'
        file_.setAttribute("name", name)
        self.files.appendChild(file_)
        urls = list(os.path.join(url, db.name + '.db') for url in db.servers)
        self.add_urls(file_, urls)
        if sigs:
            self.add_file(name + '.sig', (u + '.sig' for u in urls))
class PkgSet(object):
""" Represents a set of packages """
def __init__(self, pkgs=None):
""" Init our internal self.pkgs dict with all given packages in pkgs """
self.pkgs = dict()
if pkgs:
for pkg in pkgs:
self.pkgs[pkg.name] = pkg
def __repr__(self):
return 'PkgSet({0})'.format(repr(self.pkgs))
    def add(self, pkg):
        # Insert (or replace) a package, keyed by its name.
        self.pkgs[pkg.name] = pkg
def __and__(self, other):
new = PkgSet(set(self.pkgs.values()) & set(other.pkgs.values()))
return new
def __iand__(self, other):
self.pkgs = self.__and__(other).pkgs
return self
def __or__(self, other):
copy = PkgSet(list(self.pkgs.values()))
return copy.__ior__(other)
def __ior__(self, other):
self.pkgs.update(o |
skuda/client-python | kubernetes/test/test_v1_subject_access_review_spec.py | Python | apache-2.0 | 943 | 0.004242 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import | ApiException
from kubernetes.client.models.v1_subject_acce | ss_review_spec import V1SubjectAccessReviewSpec
class TestV1SubjectAccessReviewSpec(unittest.TestCase):
    """ V1SubjectAccessReviewSpec unit test stubs """
    # NOTE: auto-generated by swagger-codegen; setUp/tearDown are intentional no-ops.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV1SubjectAccessReviewSpec(self):
        """
        Test V1SubjectAccessReviewSpec

        Smoke test: constructing the generated model must not raise.
        """
        model = kubernetes.client.models.v1_subject_access_review_spec.V1SubjectAccessReviewSpec()
if __name__ == '__main__':
unittest.main()
|
qstokkink/py-ipv8 | ipv8/dht/provider.py | Python | lgpl-3.0 | 2,972 | 0.004038 | import logging
from binascii import hexlify
from socket import inet_aton, inet_ntoa
from struct import pack, unpack_from
from . import DHTError
from ..messaging.anonymization.tunnel import IntroductionPoint, PEER_SOURCE_DHT
from ..peer import Peer
from ..util import cast_to_bin
class DHTCommunityProvider(object):
    """
    This class is a wrapper around the DHTCommunity and is used to discover introduction points
    for hidden services.
    """
    def __init__(self, dht_community, port):
        self.dht_community = dht_community
        # Local listen port — presumably used by callers of this provider; not
        # referenced by the methods below (TODO confirm it is still needed).
        self.port = port
        self.logger = logging.getLogger(self.__class__.__name__)
    async def peer_lookup(self, mid):
        """Best-effort connection to the peer with member id *mid*; failures are logged."""
        try:
            await self.dht_community.connect_peer(mid)
        except DHTError as e:
            self.logger.debug("Failed to connect %s using the DHTCommunity (error: %s)", hexlify(mid), e)
            return
    async def lookup(self, info_hash):
        """Find introduction points stored under *info_hash* on the DHT.

        Returns (info_hash, [IntroductionPoint, ...]), or None when the DHT
        lookup itself failed. Malformed stored values are skipped.
        """
        try:
            values = await self.dht_community.find_values(info_hash)
        except DHTError as e:
            self.logger.info("Failed to lookup %s on the DHTCommunity (error: %s)", hexlify(info_hash), e)
            return
        results = []
        for value, _ in values:
            try:
                # Stored value layout (see announce): ip(4) port(2) last_seen(4)
                # intro_key_len(2) intro_key seeder_key_len(2) seeder_key.
                ip_bin, port, last_seen, intro_key_len = unpack_from('!4sHIH', value)
                ip = inet_ntoa(ip_bin)
                # Restore the 'LibNaCLPK:' prefix that announce() strips off.
                intro_pk = b'LibNaCLPK:' + value[12:12 + intro_key_len]
                intro_peer = Peer(intro_pk, address=(ip, port))
                # Second length field sits right after the intro key bytes.
                seeder_key_len, = unpack_from('!H', value, 12 + intro_key_len)
                seeder_pk = b'LibNaCLPK:' + value[14 + intro_key_len:14 + intro_key_len + seeder_key_len]
                results.append(IntroductionPoint(intro_peer, seeder_pk, PEER_SOURCE_DHT, last_seen))
            except Exception as e:
                self.logger.info("Error encountered during lookup %s on the DHTCommunity (error: %s)", hexlify(info_hash), e)
        self.logger.info("Looked up %s in the DHTCommunity, got %d results", hexlify(info_hash), len(results))
        return info_hash, results
    async def announce(self, info_hash, intro_point):
        """Publish *intro_point* under *info_hash* on the DHT (layout as read by lookup)."""
        # We strip away the LibNaCLPK part of the public key to avoid going over the DHT size limit.
        intro_pk = intro_point.peer.public_key.key_to_bin()[10:]
        seeder_pk = intro_point.seeder_pk[10:]
        value = inet_aton(intro_point.peer.address[0]) + pack("!H", intro_point.peer.address[1])
        value += pack('!I', intro_point.last_seen)
        value += pack('!H', len(intro_pk)) + cast_to_bin(intro_pk)
        value += pack('!H', len(seeder_pk)) + cast_to_bin(seeder_pk)
        try:
            await self.dht_community.store_value(info_hash, value)
        except DHTError as e:
            self.logger.info("Failed to announce %s to the DHTCommunity (error: %s)", hexlify(info_hash), e)
        else:
            self.logger.info("Announced %s to the DHTCommunity", hexlify(info_hash))
|
gooddata/zuul | zuul/reporter/gerrit.py | Python | apache-2.0 | 1,929 | 0 | # Copyright 2013 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import voluptuous as v
from zuul.reporter import BaseReporter
class GerritReporter(BaseReporter):
    """Sends off reports to Gerrit."""

    name = 'gerrit'
    log = logging.getLogger("zuul.reporter.gerrit.Reporter")

    def report(self, source, pipeline, item, message=None):
        """Send a message to gerrit."""
        if not message:
            message = self._formatItemReport(pipeline, item)
        self.log.debug("Report change %s, params %s, message: %s" %
                       (item.change, self.reporter_config, message))
        # Gerrit identifies a revision as "<change-number>,<patchset-number>".
        changeid = '%s,%s' % (item.change.number, item.change.patchset)
        item.change._ref_sha = source.getRefSha(
            item.change.project.name, 'refs/heads/' + item.change.branch)
        return self.connection.review(item.change.project.name, changeid,
                                      message, self.reporter_config)

    def getSubmitAllowNeeds(self):
        """Get a list of code review labels that are allowed to be
        "needed" in the submit records for a change, with respect
        to this queue.  In other words, the list of review labels
        this reporter itself is likely to set before submitting.
        """
        return self.reporter_config
def getSchema():
    """Return the voluptuous schema fragment for this reporter's configuration."""
    return v.Any(str, v.Schema(dict))
|
saymedia/python-simpledb | django/management/commands/sdb_syncdomains.py | Python | bsd-3-clause | 914 | 0.001094 | from django.core.management.base import BaseCommand, CommandError
from djang | o.db.models.loading import AppCache
from django.conf import settings
import simpledb
class Command(BaseCommand):
help = ("Sync all of the SimpleDB domains.")
def handle(self, *args, **options):
apps = AppCache()
check = []
for module in apps.get_apps():
for d in module.__dict__:
ref = getattr(module, d)
if isinstance(ref, simpledb.models.ModelMe | taclass):
domain = ref.Meta.domain.name
if domain not in check:
check.append(domain)
sdb = simpledb.SimpleDB(settings.AWS_KEY, settings.AWS_SECRET)
domains = [d.name for d in list(sdb)]
for c in check:
if c not in domains:
sdb.create_domain(c)
print "Creating domain %s ..." % c
|
fnp/edumed | wtem/admin.py | Python | agpl-3.0 | 8,331 | 0.010447 | # -*- coding: utf-8 -*-
import os
from django.contrib import admin
from django import forms
from django.utils import simplejson
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.conf.urls import url, patterns
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.template.loader import render_to_string
from .models import Submission, Assignment, Attachment, exercises
from .middleware import get_current_request
def get_user_exercises(user):
    """Return the exercise definitions assigned to *user*; empty list when unassigned."""
    try:
        assigned_ids = Assignment.objects.get(user=user).exercises
    except Assignment.DoesNotExist:
        return []
    return [exercise for exercise in exercises if exercise['id'] in assigned_ids]
readonly_fields = ('submitted_by', 'first_name', 'last_name', 'email', 'key', 'key_sent')
class AttachmentWidget(forms.Widget):
    """Read-only widget: a hidden input carrying the attachment URL plus a visible link."""

    def render(self, name, value, *args, **kwargs):
        # 'brak' (Polish: "none") is shown when no attachment URL is present.
        link = '<a href="%s">%s</a>' % (value, value) if value else 'brak'
        hidden = '<input type="hidden" name="%s" value="%s"/>' % (name, value)
        return mark_safe(hidden + link)
class SubmissionFormBase(forms.ModelForm):
    """Base ModelForm for Submission; get_form() builds per-request subclasses from it."""
    class Meta:
        model = Submission
        # 'answers'/'marks' are exposed through dynamically generated per-exercise
        # fields instead of raw model fields; readonly_fields are rendered by the admin.
        exclude = ('answers', 'marks', 'contact', 'end_time') + readonly_fields
def get_open_answer(answers, exercise):
    """Render the submitted answer for *exercise* as plain text for graders.

    :param answers: dict mapping str(exercise id) -> submitted answer payload
    :param exercise: exercise definition dict (keys: 'id', 'type', ...)
    :return: unicode text; empty string for exercise types with no text rendering
             (the original raised UnboundLocalError for those).
    """
    def get_option(options, option_id):
        # Resolve an option/field dict by numeric id (ids may arrive as strings).
        for option in options:
            if option['id'] == int(option_id):
                return option

    answer = answers[str(exercise['id'])]
    toret = ''
    if exercise['type'] == 'open':
        if isinstance(answer, list):
            # Multi-field open exercise: one "- caption:\n\ntext" section per field.
            sections = []
            for part in answer:
                field = get_option(exercise['fields'], part['id'])
                sections.append('- %s:\n\n%s\n\n' % (field['caption'], part['text']))
            toret = ''.join(sections)
        else:
            toret = answer
    elif exercise['type'] == 'edumed_wybor':
        # Closed part is correct when the selected ids match the answer key exactly.
        ok = set(map(str, exercise['answer'])) == set(map(str, answer['closed_part']))
        toret = u'Czesc testowa [%s]:\n' % ('poprawna' if ok else 'niepoprawna')
        if len(answer['closed_part']):
            for selected in answer['closed_part']:
                option = get_option(exercise['options'], selected)
                toret += '%s: %s\n' % (selected, option['text'])
        else:
            toret += u'<nie wybrano odpowiedzi>\n'
        toret += u'\nCzesc otwarta (%s):\n\n' % ' '.join(exercise['open_part'])
        toret += answer['open_part']
    return toret
def get_form(request, submission):
    """Build a SubmissionForm subclass tailored to this examiner and submission.

    For every exercise assigned to request.user, adds a read-only answer field
    (textarea or attachment link) and a mark ChoiceField named
    'markof_<exercise>_by_<user>'.
    """
    fields = dict()
    if submission.answers:
        answers = simplejson.loads(submission.answers)
        user_exercises = get_user_exercises(request.user)
        for exercise in exercises:
            # Only render exercises this examiner is assigned to grade.
            if exercise not in user_exercises:
                continue
            answer_field_name = 'exercise_%s' % exercise['id']
            mark_field_name = 'markof_%s_by_%s' % (exercise['id'], request.user.id)
            if exercise['type'] in ('open', 'file_upload') or exercise.get('open_part', None):
                if exercise['type'] == 'file_upload':
                    try:
                        attachment = Attachment.objects.get(submission = submission, exercise_id = exercise['id'])
                    except Attachment.DoesNotExist:
                        attachment = None
                    widget = AttachmentWidget
                    initial = attachment.file.url if attachment else None
                else:
                    # Textual answers are shown read-only for grading.
                    widget = forms.Textarea(attrs={'readonly':True})
                    initial = get_open_answer(answers, exercise)
                fields[answer_field_name] = forms.CharField(
                    widget = widget,
                    initial = initial,
                    label = 'Rozwiązanie zadania %s' % exercise['id']
                )
            # Mark selector: '-' (None) plus 0..max_points.
            fields[mark_field_name] = forms.ChoiceField(
                choices = [(None, '-')] + [(i,i) for i in range(exercise['max_points']+1)],
                initial = submission.get_mark(user_id = request.user.id, exercise_id = exercise['id']),
                label = u'Twoja ocena zadania %s' % exercise['id']
            )
    if not request.user.is_superuser:
        # Non-superusers must not edit examiner assignments; extend the base Meta.
        class Meta(SubmissionFormBase.Meta):
            pass
        Meta.exclude += ('examiners',)
        fields['Meta'] = Meta
    return type('SubmissionForm', (SubmissionFormBase,), fields)
class SubmissionAdmin(admin.ModelAdmin):
    """Admin for Submission: per-examiner dynamic forms, marks, and grading overview."""

    list_display = ('__unicode__', 'todo', 'examiners_repr')
    readonly_fields = readonly_fields

    def get_form(self, request, obj, **kwargs):
        # Delegates to the module-level get_form(), which builds a per-user form class.
        return get_form(request, obj)

    def submitted_by(self, instance):
        """Link to the contact that submitted this entry, or '-' when unknown."""
        if instance.contact:
            return '<a href="%s">%s</a>' % (
                reverse('admin:contact_contact_change', args = [instance.contact.id]),
                instance.contact.contact
            )
        return '-'
    submitted_by.allow_tags = True
    submitted_by.short_description = "Zgłoszony/a przez"

    def todo(self, submission):
        """Comma-separated ids of this user's assigned exercises still unmarked."""
        user = get_current_request().user
        user_exercises = get_user_exercises(user)
        user_marks = submission.marks.get(str(user.id), {})
        return ','.join([str(e['id']) for e in user_exercises if str(e['id']) not in user_marks.keys()])
    todo.short_description = 'Twoje nieocenione zadania'

    def examiners_repr(self, submission):
        return ', '.join([u.username for u in submission.examiners.all()])
    examiners_repr.short_description = 'Przypisani do zgłoszenia'

    def save_model(self, request, submission, form, change):
        """Persist mark fields ('markof_<exercise>_by_<user>') onto the submission."""
        for name, value in form.cleaned_data.items():
            if name.startswith('markof_'):
                parts = name.split('_')
                exercise_id = parts[1]
                user_id = parts[3]
                submission.set_mark(user_id = user_id, exercise_id = exercise_id, mark = value)
        submission.save()

    def changelist_view(self, request, extra_context=None):
        """Add a per-examiner count of unmarked exercises to the changelist context."""
        context = dict(examiners = [])
        assignments = Assignment.objects.all()
        if not request.user.is_superuser:
            assignments = assignments.filter(user = request.user)
        for assignment in assignments:
            examiner = dict(name = assignment.user.username, todo = 0)
            for submission in Submission.objects.filter(examiners = assignment.user):
                for exercise_id in assignment.exercises:
                    if submission.get_mark(user_id = assignment.user.id, exercise_id = exercise_id) is None:
                        examiner['todo'] += 1
            context['examiners'].append(examiner)
        return super(SubmissionAdmin, self).changelist_view(request, extra_context = context)

    def queryset(self, request):
        # Non-superusers only see submissions they are assigned to examine.
        qs = super(SubmissionAdmin, self).queryset(request)
        if not request.user.is_superuser:
            qs = qs.filter(examiners = request.user)
        return qs

    def get_urls(self):
        """Prepend the /report/ view to the default admin urls."""
        urls = super(SubmissionAdmin, self).get_urls()
        # Reuse the urls computed above instead of calling super().get_urls() twice.
        return patterns('',
            url(r'^report/$', self.admin_site.admin_view(report_view), name='wtem_admin_report')
        ) + urls
class SubmissionsSet:
    """Wraps a list of submissions with a map: exercise id -> examiners who marked it."""
    def __init__(self, submissions):
        self.submissions = submissions
        # exercise id (str) -> list of unique User objects that graded that exercise
        self.examiners_by_exercise = dict()
        for submission in submissions:
            for user_id, marks in submission.marks.items():
                user = User.objects.get(pk=user_id)
                for exercise_id in marks.keys():
                    examiners = self.examiners_by_exercise.setdefault(exercise_id, [])
                    if not user in examiners:
                        examiners.append(user)
def report_view(request):
submissions = sorted(Submission.objects.all(), key = lambda s: -s.final_result)
toret = render_to_string('wtem/admin_report.csv', dict(
submissionsSet = SubmissionsSet(submissions),
exercise_ids = map(str, range(1,len(exercises)+1))
))
response = HttpResponse(toret, content_type = 'text/csv')
response['Content-Dispo |
saintdragon2/python-3-lecture-2015 | pygame_study/first_race_game.py | Python | mit | 3,830 | 0.001571 | __author__ = 'saintdragon2'
import pygame
import time
import random
pygame.init()
display_width = 800
display_height = 600
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
block_color = (53, 115, 255)
carImg = pygame.image.load('car.png')
car_width = carImg.get_rect().size[0]
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('My very first racing game')
clock = pygame.time.Clock()
def things_dodged(count):
    """Draw the running dodge counter in the top-left corner of the display."""
    font = pygame.font.SysFont(None, 25)
    text = font.render("Dodged: " + str(count), True, black)
    gameDisplay.blit(text, (0, 0))
def things(x, y, w, h, color):
    """Draw one rectangular obstacle at (x, y) with size (w, h)."""
    pygame.draw.rect(gameDisplay, color, [x, y, w, h])
def car(x, y):
    """Blit the player's car sprite with its top-left corner at (x, y)."""
    gameDisplay.blit(carImg, (x, y))
def text_objects(text, font):
    """Render *text* with *font* in black; return (surface, bounding rect)."""
    rendered = font.render(text, True, black)
    return rendered, rendered.get_rect()
def message_display(text):
    """Flash *text* centered on screen for 2 seconds, then restart the game.

    NOTE(review): calls game_loop() directly, so every crash adds a stack frame
    (mutual recursion crash -> message_display -> game_loop) — confirm intended.
    """
    # large_text = pygame.font.Font('freescanbold.ttf', 115)
    large_text = pygame.font.Font(pygame.font.get_default_font(), 117)
    TextSurf, TextRect = text_objects(text, large_text)
    TextRect.center = ((display_width/2), (display_height/2))
    gameDisplay.blit(TextSurf, TextRect)
    pygame.display.update()
    time.sleep(2)
    game_loop()
def crash():
    """Show the crash banner and restart the game (via message_display)."""
    message_display('You Crashed')
def game_intro():
    """Title screen loop: draws the game title and two buttons at 15 FPS.

    NOTE(review): `intro` is never set to False and the green/red rectangles
    are not wired to any click handler, so the only exit is closing the window.
    (The call site is commented out — see the bottom of the file.)
    """
    intro = True
    while intro:
        for event in pygame.event.get():
            print(event)
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        gameDisplay.fill(white)
        # Korean title text: "분노의 질주" ("The Fast and the Furious").
        large_text = pygame.font.Font('NanumGothicExtraBold.ttf', 115)
        TextSurf, TextRect = text_objects('분노의 질주', large_text)
        TextRect.center = ((display_width/2), (display_height/2))
        gameDisplay.blit(TextSurf, TextRect)
        pygame.draw.rect(gameDisplay, green, (150, 450, 100, 50))
        pygame.draw.rect(gameDisplay, red, (550, 450, 100, 50))
        pygame.display.update()
        clock.tick(15)
def game_loop():
    """Main game loop: move the car with arrow keys, scroll one obstacle, detect crashes."""
    x = display_width * 0.45
    y = display_height * 0.7
    x_change = 0
    thing_startx = random.randrange(0, display_width)
    thing_starty = -600
    thing_speed = 7
    thing_width = 100
    thing_height = 100
    dodged = 0
    game_exit = False
    while not game_exit:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            # Move left and right
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x_change = -5
                elif event.key == pygame.K_RIGHT:
                    x_change = 5
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    x_change = 0
        x += x_change
        gameDisplay.fill(white)
        things(thing_startx, thing_starty, thing_width, thing_height, block_color)
        thing_starty += thing_speed
        car(x, y)
        things_dodged(dodged)
        # Crash when the car leaves the road on either side.
        if x > display_width - car_width or x < 0:
            crash()
        # Obstacle scrolled off the bottom: respawn it faster and wider.
        if thing_starty > display_height:
            thing_starty = 0 - thing_height
            thing_startx = random.randrange(0, display_width)
            dodged += 1
            thing_speed += 1
            thing_width += (dodged * 1.2)
        # Axis-aligned overlap test between the car and the obstacle.
        if y < thing_starty + thing_height:
            print('y crossover')
            if (x > thing_startx and x < thing_startx + thing_width) or (x + car_width > thing_startx and x + car_width < thing_startx + thing_width):
                print('x crossover')
                crash()
        pygame.display.update()
        clock.tick(60)
# game_intro()  # intro screen currently disabled; launch straight into the game
game_loop()
pygame.quit()
quit()
kubeflow/pipelines | samples/core/resource_spec/resource_spec_test.py | Python | apache-2.0 | 1,529 | 0 | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.deprecated as kfp
from .resource_spec import my_pipeline
from .resource_spec_v2 import my_pipeline as my_pipeline_v2
from kfp.samples.test.utils import run_pipeline_func, TestCase
def EXPECTED_OOM(run_id, run, **kwargs):
    """confirms a sample test case is failing, because of OOM.

    Verify callback for run_pipeline_func: an AssertionError here fails the test.
    """
    assert run.status == 'Failed'
# Execute the sample pipelines in both engines; the large-n variants must OOM.
run_pipeline_func([
    TestCase(
        pipeline_func=my_pipeline_v2,
        mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE,
    ),
    TestCase(
        pipeline_func=my_pipeline_v2,
        mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE,
        arguments={'n': 21234567},
        verify_func=EXPECTED_OOM,
    ),
    TestCase(
        pipeline_func=my_pipeline,
        mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY,
    ),
    TestCase(
        pipeline_func=my_pipeline,
        mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY,
        arguments={'n': 21234567},
        verify_func=EXPECTED_OOM,
    ),
])
|
sdispater/eloquent | eloquent/utils/__init__.py | Python | mit | 773 | 0 | # -*- coding: utf-8 -*-
import sys
PY2 = sys.version_info[0] == 2
if PY2:
long = long
unicode = unicode
basestring = basestring
from urllib import quote_plus, unquote_plus, quote, unquote
from urlparse import parse_qsl
else:
long = int
unicode = str
basestring = str
from urllib.parse import (quote_plus, unquote_plus,
parse_q | sl, quote, unquote)
class Null(object):
    """Falsy sentinel that compares equal only to ``None``."""

    def __bool__(self):
        return False

    # Python 2 uses __nonzero__ for truthiness; alias it so Null is falsy there too.
    __nonzero__ = __bool__

    def __eq__(self, other):
        return other is None

    def __ne__(self, other):
        # Python 2 does not derive != from __eq__, so define it explicitly.
        return other is not None

    # Defining __eq__ would otherwise make instances unhashable on Python 3.
    __hash__ = object.__hash__
def decode(s, encodings=('utf8', 'ascii', 'latin1')):
    """Decode byte string *s*, trying each codec in *encodings* in order.

    Falls back to utf8 with undecodable bytes dropped when no codec succeeds.
    """
    for encoding in encodings:
        try:
            return s.decode(encoding)
        except UnicodeDecodeError:
            pass
    # Nothing decoded cleanly: force utf8 and silently skip bad bytes.
    return s.decode('utf8', 'ignore')
|
zhangyage/Python-oldboy | python-auto/p_ssh2/baolei_command.py | Python | apache-2.0 | 2,237 | 0.019566 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Relay a command to a business host by hopping through a bastion ("fortress") host.
import paramiko
import os
import sys
import time
blip = "121.42.191.190" # bastion host connection info
bluser = "zhangyage"
blpasswd = "MCya9B7ewPoTeNT8"
hostname = "116.196.69.47" # target business host connection info
username = "root"
password = "Zhangyage"
port = 22
passinfo = '\'s password: ' # marker string preceding the server password prompt
paramiko.util.log_to_file('syslogin') # send paramiko logs to the 'syslogin' file
ssh = paramiko.SSHClient() # SSH login to the bastion host
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=blip, port=22, username=bluser, password=blpasswd) # open the SSH connection
channel = ssh.invoke_shell() # start a new session so commands can be issued
channel.settimeout(10) # session command timeout, in seconds
buff = ''
resp = ''
channel.send('ssh ' +username+'@'+hostname+'\n') # send the ssh hop command to the target host
while not buff.endswith(passinfo): # wait until the ssh login output ends with the password prompt
    try:
        resp = channel.recv(9999)
        print resp
    except Exception,e:
        print 'Error info:%s connection time' % (str(e))
        channel.close()
        ssh.close()
        sys.exit()
    buff +=resp
    if not buff.find('yes/no') ==-1: # output contains "yes/no": accept the host key and continue
        channel.send('yes\n')
        buff=''
channel.send(password+'\n') # send the business host password
buff = ''
while not buff.endswith('# '): # a trailing '# ' prompt means authentication succeeded; exit the loop
    resp = channel.recv(9999)
    if not resp.find(passinfo) ==-1: # a repeated password prompt means the password was wrong
        print 'Error info:Autentical failed.'
        channel.close() # close the connection objects before exiting
        ssh.close()
        sys.exit()
    buff += resp
channel.send('ifconfig\n') # authenticated: send ifconfig and inspect the result
buff = ''
try:
    while buff.find('# ') == -1:
        resp = channel.recv(9999)
        buff += resp
except Exception,e:
    print "error info:" +str(e)
print buff # print the collected command output
channel.close()
ssh.close() |
profgiuseppe/graph-tool-tests | src/graph_tool/all.py | Python | gpl-3.0 | 2,008 | 0 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# graph_tool -- a general graph manipulation python module
#
# Copyright (C) 2006-2013 Tiago de Paula Peixoto <tiago@skewed.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utility module which includes all the sub-modules in graph_tool
"""
from __future__ import division, absolute_im | port, print_function
from graph_tool import *
import graph_tool
from graph_tool.correlations import *
import graph_tool.correlations
from graph_tool.centrality import *
import graph_tool.centrality
try:
from graph_tool.draw import *
import graph_tool.draw
except ImportError:
# Proceed despite errors with cairo, matplotlib, etc.
pass
from graph_tool.stats import *
import graph_tool.stats
from graph_tool.generation impor | t *
import graph_tool.generation
from graph_tool.stats import *
import graph_tool.stats
from graph_tool.clustering import *
import graph_tool.clustering
from graph_tool.community import *
import graph_tool.community
from graph_tool.run_action import *
import graph_tool.run_action
from graph_tool.topology import *
import graph_tool.topology
from graph_tool.flow import *
import graph_tool.flow
from graph_tool.spectral import *
import graph_tool.spectral
from graph_tool.search import *
import graph_tool.search
from graph_tool.util import *
import graph_tool.util
import graph_tool.collection
import graph_tool.collection as collection
|
azariven/BioSig_SEAS | bin/test/test_SEAS_import.py | Python | gpl-3.0 | 1,357 | 0.018423 | #!/usr/bin/env python
#
# Copyright (C) 2017 - Massachusetts Institute of Technology (MIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This Code tests module import from SEAS
"""
import os
import sys
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
#plotting
import SEAS_Utils.common_utils.data_plotter as plt
#timer
from S | EAS_Utils.common_utils.timer import si | mple_timer
#dbm
import SEAS_Utils.common_utils.db_management2 as dbm
#config
import SEAS_Utils.common_utils.configurable as config
#DIR
from SEAS_Utils.common_utils.DIRs import Simulation_DB
#constants
from SEAS_Utils.common_utils.constants import *
if __name__ == "__main__":
pass
|
clemense/cortex-py | cortex-py/test/test_cortex.py | Python | mit | 2,711 | 0.004795 | import time
from cortex import *
class MyDataHandler:
def __init__(self):
self.alldata = []
def MyErrorHandler(self, iLevel, msg):
print("ERROR: ")
print(iLevel, msg.contents)
return 0
def MyDataHandler(self, Frame):
print("got called")
try:
print("Received multi-cast frame no %d\n"%(Frame.contents.iFrame))
print "Bodies: ", Frame.con | tents.nBodies
print "BodyData: ", Frame.contents.BodyData[0].szName
print "Number of Markers of Body[0]: ", Frame.contents.BodyData[0].nMarkers
for i in range(Frame.contents.BodyData[0].nMarkers):
| print "MarkerX ", Frame.contents.BodyData[0].Markers[i][0]
print "MarkerY ", Frame.contents.BodyData[0].Markers[i][1]
print "MarkerZ ", Frame.contents.BodyData[0].Markers[i][2]
print "BodyMarker[2].x: ", Frame.contents.BodyData[0].Markers[3][0]
print "Unidentified markers: ", Frame.contents.nUnidentifiedMarkers
print "Delay: ", Frame.contents.fDelay
print "", Frame.contents.UnidentifiedMarkers[0][0]
self.alldata.append(Frame.contents.UnidentifiedMarkers[0][0])
except:
print("Frame empty")
return 0
if __name__ == "__main__":
    # Register callbacks, pull the body definitions once, then briefly run live mode.
    my_obj = MyDataHandler()
    Cortex_SetErrorMsgHandlerFunc(my_obj.MyErrorHandler)
    Cortex_SetDataHandlerFunc(my_obj.MyDataHandler)
    if Cortex_Initialize() != 0:
        print("ERROR: unable to initialize")
        Cortex_Exit()
        exit(0)
    pBodyDefs = Cortex_GetBodyDefs()
    if pBodyDefs == None:
        print("Failed to get body defs")
    else:
        print("Got body defs")
        print("bodydefs: ", pBodyDefs.contents.nBodyDefs)
        print "Marker names: "
        print "", pBodyDefs.contents.BodyDefs[0].szName
        for i in range(pBodyDefs.contents.BodyDefs[0].nMarkers):
            print "Marker: ", pBodyDefs.contents.BodyDefs[0].szMarkerNames[i]
        # Free the SDK-owned structure as soon as we are done with it.
        Cortex_FreeBodyDefs(pBodyDefs)
        pBodyDefs = None
    # NOTE(review): this binds the ctypes *classes*, not instances — presumably
    # Cortex_Request expects c_void_p()/c_int() out-params; confirm against the SDK.
    pResponse = c_void_p
    nBytes = c_int
    retval = Cortex_Request("GetContextFrameRate", pResponse, nBytes)
    if retval != 0:
        print("ERROR, GetContextFrameRate")
    #contextFrameRate = cast(pResponse, POINTER(c_float))
    #print("ContextFrameRate = %3.1f Hz", contextFrameRate)
    print("*** Starting live mode ***")
    retval = Cortex_Request("LiveMode", pResponse, nBytes)
    time.sleep(1.0)   # let frame callbacks arrive for one second
    retval = Cortex_Request("Pause", pResponse, nBytes)
    print("*** Paused live mode ***")
    print("****** Cortex_Exit ******")
    retval = Cortex_Exit();
    print my_obj.alldata
|
saltstack/salt | tests/pytests/unit/pillar/test_sql_base.py | Python | apache-2.0 | 1,158 | 0 | import pytest
import salt.pillar.sql_base as sql_base
from tests.support.mock import MagicMock
class FakeExtPillar(sql_base.SqlBaseExtPillar):
    """
    Mock SqlBaseExtPillar implementation for testing purpose
    """
    @classmethod
    def _db_name(cls):
        # Identifies this fake backend in log messages/config lookups.
        return "fake"
    def _get_cursor(self):
        # No real database: a MagicMock absorbs any cursor calls.
        return MagicMock()
@pytest.mark.parametrize("as_list", [True, False])
def test_process_results_as_json(as_list):
    """
    Validates merging of dict values returned from JSON datatype.
    """
    return_data = FakeExtPillar()
    return_data.as_list = as_list
    return_data.as_json = True
    return_data.with_lists = None
    return_data.enter_root(None)
    return_data.process_fields(["json_data"], 0)
    # Each single-column row is a decoded JSON dict; later rows merge into earlier ones.
    test_dicts = [
        ({"a": [1]},),
        ({"b": [2, 3]},),
        ({"a": [4]},),
        ({"c": {"d": [4, 5], "e": 6}},),
        ({"f": [{"g": 7, "h": "test"}], "c": {"g": 8}},),
    ]
    return_data.process_results(test_dicts)
    assert return_data.result == {
        # With as_list, repeated list keys accumulate; otherwise the last value wins.
        "a": [1, 4] if as_list else [4],
        "b": [2, 3],
        "c": {"d": [4, 5], "e": 6, "g": 8},
        "f": [{"g": 7, "h": "test"}],
    }
|
luotao1/Paddle | python/paddle/fluid/dygraph/dygraph_to_static/static_analysis.py | Python | apache-2.0 | 15,858 | 0.000694 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from paddle.utils import gast
from .logging_utils import warn
from .utils import is_paddle_api, is_dygraph_api, is_numpy_api, index_in_list, ast_to_source_code
__all__ = ['AstNodeWrapper', 'NodeVarType', 'StaticAnalysisVisitor']
class NodeVarType(object):
    """
    Enum class of python variable types. We have to know some variable types
    during compile time to transfer AST. For example, a string variable and a
    tensor variable in if clause may lead to different conversion from dygraph
    to static graph.
    """
    ERROR = -1  # Returns when static analysis gets error
    UNKNOWN = 0  # Reserve for AST nodes have not known the type
    STATEMENT = 1  # For nodes representing statement (non-variable type)
    CALLABLE = 2

    # python data types
    NONE = 100
    BOOLEAN = 101
    INT = 102
    FLOAT = 103
    STRING = 104
    TENSOR = 105
    NUMPY_NDARRAY = 106

    # python collections
    LIST = 200
    SET = 201
    DICT = 202

    PADDLE_DYGRAPH_API = 300
    PADDLE_CONTROL_IF = 301
    PADDLE_CONTROL_WHILE = 302
    PADDLE_CONTROL_FOR = 303
    # Paddle API may not be visible to get source code.
    # We use this enum value to denote the type return by a Paddle API
    PADDLE_RETURN_TYPES = 304

    # If node.node_var_type in TENSOR_TYPES, it can be considered as tensor-dependent.
    TENSOR_TYPES = {TENSOR, PADDLE_RETURN_TYPES}

    Annotation_map = {
        "Tensor": TENSOR,
        "paddle.Tensor": TENSOR,
        "int": INT,
        "float": FLOAT,
        "bool": BOOLEAN,
        "str": STRING
    }

    @staticmethod
    def binary_op_output_type(in_type1, in_type2):
        """Infer the result type of a binary op over two operand types."""
        # Identical operand types trivially keep their type.
        if in_type1 == in_type2:
            return in_type1

        # Exactly one UNKNOWN operand: the other operand decides.
        if in_type1 == NodeVarType.UNKNOWN:
            return in_type2
        if in_type2 == NodeVarType.UNKNOWN:
            return in_type1

        supported = {
            NodeVarType.BOOLEAN, NodeVarType.INT, NodeVarType.FLOAT,
            NodeVarType.NUMPY_NDARRAY, NodeVarType.TENSOR,
            NodeVarType.PADDLE_RETURN_TYPES
        }
        if in_type1 not in supported or in_type2 not in supported:
            return NodeVarType.UNKNOWN

        # Mixing ndarray with tensor is ambiguous; refuse to guess.
        if {in_type1, in_type2} <= {NodeVarType.NUMPY_NDARRAY, NodeVarType.TENSOR}:
            return NodeVarType.UNKNOWN

        # Otherwise the "wider" (numerically larger) enum value wins.
        return max(in_type1, in_type2)

    @staticmethod
    def type_from_annotation(annotation):
        """Map a type annotation AST node to a NodeVarType value."""
        annotation_str = ast_to_source_code(annotation).strip()
        mapped = NodeVarType.Annotation_map.get(annotation_str)
        if mapped is not None:
            return mapped

        # raise warning if not found
        warn("Currently we don't support annotation: %s" % annotation_str)
        return NodeVarType.UNKNOWN
class AstNodeWrapper(object):
    """
    Wrapper for python gast.node. We need a node wrapper because gast.node
    doesn't store all required information when we are transforming AST.
    We should collect additional information which the actual transformation
    needs.
    """

    def __init__(self, node):
        self.node = node
        # Tree links are filled in later by the visitor (dfs traversal).
        self.parent = None
        self.children = []
        # Set of possible NodeVarType values; starts as {UNKNOWN}.
        self.node_var_type = {NodeVarType.UNKNOWN}
class AstVarScope(object):
    """
    AstVarScope is a class holding the map from current scope variable to its
    type.
    """
    SCOPE_TYPE_SCRIPT = 0
    SCOPE_TYPE_FUNCTION = 1
    SCOPE_TYPE_CLASS = 2

    def __init__(self,
                 scope_name='',
                 scope_type=SCOPE_TYPE_SCRIPT,
                 parent_scope=None):
        self.sub_scopes = []
        # var name -> numeric slot id; slot id -> set of NodeVarType values.
        self.name_to_id = {}
        self.id_to_type = {}
        self.cur_id = 0

        self.scope_name = scope_name
        self.scope_type = scope_type
        self.parent_scope = parent_scope
        if parent_scope is not None:
            parent_scope.sub_scopes.append(self)

    def add_var_type(self, var_name, node_var_type):
        """Merge *node_var_type* into the types already recorded for *var_name*."""
        var_type = self.get_var_type(var_name)
        if var_type == {NodeVarType.UNKNOWN}:
            # Nothing concrete recorded yet: just overwrite.
            self.set_var_type(var_name, node_var_type)
        else:
            if isinstance(node_var_type, set):
                var_type.update(node_var_type)
            else:
                var_type.add(node_var_type)

    def set_var_type(self, var_name, node_var_type):
        """Overwrite the recorded types for *var_name* (always stored as a set)."""
        if var_name in self.name_to_id:
            num_id = self.name_to_id[var_name]
        else:
            num_id = self.cur_id
            self.cur_id += 1
            self.name_to_id[var_name] = num_id
        self.id_to_type[num_id] = node_var_type if isinstance(
            node_var_type, set) else {node_var_type}

    def get_var_type(self, var_name):
        """Look up types for *var_name*, walking parent scopes; {UNKNOWN} if absent."""
        if var_name in self.name_to_id:
            num_id = self.name_to_id[var_name]
            return self.id_to_type[num_id]
        if self.parent_scope is None:
            return {NodeVarType.UNKNOWN}
        return self.parent_scope.get_var_type(var_name)
class AstVarEnv(object):
    """
    A class maintains scopes and mapping from name strings to type.
    """

    def __init__(self):
        # Start at an implicit script-level scope.
        self.cur_scope = AstVarScope()

    def enter_scope(self, scope_name, scope_type):
        """Push a new child scope and make it current."""
        self.cur_scope = AstVarScope(
            scope_name, scope_type, parent_scope=self.cur_scope)
        return self.cur_scope

    def exit_scope(self):
        """Pop back to the parent scope; asserts we are not at the root."""
        assert self.cur_scope.parent_scope is not None, "Call exit_scope in "\
            "AstVarEnv when current scope doesn't have parent scope."
        self.cur_scope = self.cur_scope.parent_scope
        return self.cur_scope

    def get_parent_scope(self):
        """Return the parent of the current scope; asserts we are not at the root."""
        assert self.cur_scope.parent_scope is not None, "Call parent_scope in "\
            "AstVarEnv when current scope doesn't have parent scope."
        return self.cur_scope.parent_scope

    def add_var_type(self, var_name, node_var_type):
        # Merge a type into the current scope's record for var_name.
        self.cur_scope.add_var_type(var_name, node_var_type)

    def set_var_type(self, var_name, node_var_type):
        # Overwrite the current scope's record for var_name.
        self.cur_scope.set_var_type(var_name, node_var_type)

    def get_var_type(self, var_name):
        # Lookup walks up through parent scopes (see AstVarScope.get_var_type).
        return self.cur_scope.get_var_type(var_name)

    def get_scope_var_type(self):
        '''
        Returns a dict mapping from variable name to type. Used for debug and
        test.
        '''
        cur_scope_dict = {}
        for name in self.cur_scope.name_to_id:
            node_var_type = self.cur_scope.get_var_type(name)
            cur_scope_dict[name] = node_var_type
        return cur_scope_dict
class StaticAnalysisVisitor(object):
"""
A class that does static analysis
"""
def __init__(self, ast_root=None):
if ast_root is not None:
self.run(ast_root)
def run(self, ast_root):
self.node_wrapper_root = None
self.ancestor_wrappers = []
self.node_to_wrapper_map = {}
self.var_env = AstVarEnv()
self.dfs_visit(ast_root)
def dfs_visit(self, node):
# AST reuses some gast.nodes, such as Param node of expr_context
if node not in self.node_to_wrapper_map:
cur_wrapper = AstNodeWrapper(node)
self.node_to_wrapper_map[node] = cur_wrapper
else:
cur_wrapper = self.node_to_wrapper_map[node]
if self.node_wrapper_root is None:
self.node_wrapper_root = cur_wrapper
if len(self.ancestor_wrappers) != 0:
last_wrapper = self.ancestor_wrappers[-1]
last_wrapper.children.append(cur_wrapper)
cur_wrap |
ejona86/grpc | tools/run_tests/xds_k8s_test_driver/tests/authz_test.py | Python | apache-2.0 | 12,850 | 0.000078 | # Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import Optional
from absl import flags
from absl.testing import absltest
import grpc
from framework import xds_k8s_testcase
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_SecurityMode = xds_k8s_testcase.SecurityXdsKubernetesTestCase.SecurityMode
_SAMPLE_DURATION = datetime.timedelta(seconds=0.5)
class AuthzTest(xds_k8s_testcase.SecurityXdsKubernetesTestCase):
    """End-to-end tests for Traffic Director authorization (authz) policies."""

    # Each sub-test alternates between these two RPC types so that the
    # per-method stats of consecutive sub-tests do not mix; the dict maps
    # each type to the one used next.
    RPC_TYPE_CYCLE = {
        'UNARY_CALL': 'EMPTY_CALL',
        'EMPTY_CALL': 'UNARY_CALL',
    }
    def setUp(self):
        """Reset the RPC-type rotation before every test method."""
        super().setUp()
        # Holds an RPC type name from RPC_TYPE_CYCLE (was annotated int,
        # but configure_and_assert stores strings like 'UNARY_CALL').
        self.next_rpc_type: Optional[str] = None
    def authz_rules(self):
        """Return the list of authorization rule dicts installed in the TD
        authz policy.  Each rule matches on the ``test`` request header so
        individual sub-tests can trigger a specific rule.
        """
        return [
            # Allowed: any host on the xDS port (wildcard host match).
            {
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "host-wildcard",
                    },
                },
            },
            # Allowed: exercises regex matching on the header value itself.
            {
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "header-regex-a+",
                    },
                },
            },
            # Allowed: exact host matches; the second destination mixes the
            # real host/port in with decoys that must not prevent a match.
            {
                "destinations": [{
                    "hosts": [f"{self.server_xds_host}:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "host-match1",
                    },
                }, {
                    "hosts": [
                        f"a-not-it.com:{self.server_xds_port}",
                        f"{self.server_xds_host}:{self.server_xds_port}",
                        "z-not-it.com:1",
                    ],
                    "ports": [1, self.server_port, 65535],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "host-match2",
                    },
                }],
            },
            # Never matches: wrong hosts, so requests with this header
            # value are expected to be denied.
            {
                "destinations": {
                    "hosts": [
                        f"not-the-host:{self.server_xds_port}",
                        "not-the-host",
                    ],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "never-match-host",
                    },
                },
            },
            # Never matches: wrong port (1).
            {
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [1],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "never-match-port",
                    },
                },
            },
            # Allowed: any authenticated principal ("*" presence check).
            {
                "sources": {
                    "principals": ["*"],
                },
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "principal-present",
                    },
                },
            },
            # Allowed: the client's real SPIFFE id appears in the second
            # source entry, alongside non-matching decoys.
            {
                "sources": [{
                    "principals": [
                        f"spiffe://{self.project}.svc.id.goog/not/the/client",
                    ],
                }, {
                    "principals": [
                        f"spiffe://{self.project}.svc.id.goog/not/the/client",
                        f"spiffe://{self.project}.svc.id.goog/ns/"
                        f"{self.client_namespace}/sa/{self.client_name}",
                    ],
                }],
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "match-principal",
                    },
                },
            },
            # Never matches: principal is not the test client's identity.
            {
                "sources": {
                    "principals": [
                        f"spiffe://{self.project}.svc.id.goog/not/the/client",
                    ],
                },
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "never-match-principal",
                    },
                },
            },
        ]
    def configure_and_assert(self, test_client: _XdsTestClient,
                             test_metadata_val: Optional[str],
                             status_code: grpc.StatusCode) -> None:
        """Send RPCs carrying the ``test`` header value and assert they all
        finish with ``status_code``.

        ``test_metadata_val`` of None sends RPCs without the header.
        """
        # Swap method type every sub-test to avoid mixing results
        rpc_type = self.next_rpc_type
        if rpc_type is None:
            # First call of the test: pick a type with no started RPCs yet.
            # Note there is no break, so the LAST unused type in
            # RPC_TYPE_CYCLE's iteration order is the one selected.
            stats = test_client.get_load_balancer_accumulated_stats()
            for t in self.RPC_TYPE_CYCLE:
                if not stats.stats_per_method[t].rpcs_started:
                    rpc_type = t
            self.assertIsNotNone(rpc_type, "All RPC types already used")
        # Remember which type the next sub-test must use.
        self.next_rpc_type = self.RPC_TYPE_CYCLE[rpc_type]
        metadata = None
        if test_metadata_val is not None:
            # (rpc_type, header name, header value) triple per test client API.
            metadata = ((rpc_type, "test", test_metadata_val),)
        test_client.update_config.configure(rpc_types=[rpc_type],
                                            metadata=metadata)
        self.assertRpcStatusCodes(test_client,
                                  status_code=status_code,
                                  duration=_SAMPLE_DURATION,
                                  method=rpc_type)
def test_plaintext_allow(self) -> None:
self.setupTrafficDirectorGrpc()
self.td.create_authz_policy(action='ALLOW', rules=self.authz_rules())
self.setupSecurityPolicies(server_tls=False,
server_mtls=False,
client_tls=False,
client_mtls=False)
test_server: _XdsTestServer = self.startSecureTestServer()
self.setupServerBackends()
test_client: _XdsTestClient = self.startSecureTestClient(test_server)
with self.subTest('01_host_wildcard'):
self.configure_and_assert(test_client, 'host-wildcard',
grpc.StatusCode.OK)
with self.subTest('02_no_match'):
self.configure_and_assert(test_client, 'no-such-rule',
grpc.StatusCode.PERMISSION_DENIED)
self.configure_and_assert(test_client, None,
grpc.StatusCode.PERMISSION_DENIED)
with self.subTest('03_header_regex'):
self.configure_and_assert(test_client, 'header-regex-a',
grpc.StatusCode.OK)
self.configure_and_assert(test_client, 'header-regex-aa',
grpc.StatusCode.OK)
self.configure_and_assert(test_client, 'header-regex-',
grpc.StatusCode.PERMISSION_D |
migonzalvar/mfs2011-practicum-saas | server/agenda.py | Python | isc | 10,947 | 0.001096 | import json
import redis
from interval import interval_overlaps, slots_in_interval, Interval
from dataobjects import (Agenda, Shift, Appointment,
CollectionDataobjectMixin, ParentkeyDataobjectMixin)
class ConcurrencyWarning(Exception):
    """Raised when a concurrent write invalidates a redis WATCH."""
    pass
class OverlappingIntervalWarning(Exception):
    """Raised when a new appointment's interval overlaps an existing one."""
    pass
class NotEmptyError(Exception):
    """Base error for deleting a container that still holds items."""
    pass
class ShiftNotEmptyError(NotEmptyError):
    """Raised when deleting a shift that still contains appointments."""
    pass
class NotAvailableSlotError(Exception):
    """Raised when a requested time slot is not available."""
    pass
class Datastore(object):
    """In-memory datastore for Agenda/Shift/Appointment objects.

    ``_items`` maps class name -> {key: object}.
    ``_collection`` maps a parent's class name -> {parent key:
    {child key: child object}} and backs the per-object iterators
    installed by ``_set_collection_methods``.
    """
    def __init__(self):
        self._items = {'Agenda': {}, 'Shift': {}, 'Appointment': {}}
        self._collection = {'Agenda': {}, 'Shift': {}, 'Appointment': {}}
    def put(self, obj):
        """Store *obj*, assign it a key and register parent/child links."""
        # NOTE(review): the key is the CPython object id -- only unique
        # while the object stays alive; fine for an in-memory store.
        obj.key = id(obj)
        self._items[obj.__class__.__name__][obj.key] = obj
        if issubclass(obj.__class__, ParentkeyDataobjectMixin):
            # Child object: index it under its parent's collection.
            parent_obj = self.get(obj.parent_class, obj.parent_key)
            self._collection[parent_obj.__class__.__name__][parent_obj.key][obj.key] = obj
        if issubclass(obj.__class__, CollectionDataobjectMixin):
            # Container object: make sure its (possibly empty) child map
            # exists and install the iteration helpers on the instance.
            if obj.key not in self._collection[obj.__class__.__name__]:
                self._collection[obj.__class__.__name__][obj.key] = {}
            self._set_collection_methods(obj)
        return obj
    def delete(self, cls, key):
        """Remove the object of class *cls* under *key*.

        Raises ShiftNotEmptyError if a Shift still has appointments.
        """
        obj = self.get(cls, key)
        if cls == Shift:
            if list(obj.iteritems()):
                raise ShiftNotEmptyError
        if issubclass(cls, ParentkeyDataobjectMixin):
            # Unlink from the parent's collection; tolerate a missing link.
            parent_obj = self.get(obj.parent_class, obj.parent_key)
            try:
                del self._collection[parent_obj.__class__.__name__][parent_obj.key][obj.key]
            except KeyError:
                pass
        del self._items[cls.__name__][key]
        return
    def get(self, cls, key):
        """Return the stored object; raises KeyError when absent."""
        obj = self._items[cls.__name__][key]
        if issubclass(obj.__class__, CollectionDataobjectMixin):
            self._set_collection_methods(obj)
        return obj
    def _set_collection_methods(self, obj):
        """Install closure-based child iterators on a container object."""
        def _iter():
            # Yield every (key, child) of this container.
            for key, item in self._collection[obj.__class__.__name__][obj.key].iteritems():
                yield key, item
        obj.set_iterator(_iter)
        def _filter(start=None, end=None):
            # Yield children whose interval overlaps [start, end);
            # 2147483647 (2**31 - 1) acts as an "end of time" sentinel.
            start = start or 0
            end = end or 2147483647
            for key, item in self._collection[obj.__class__.__name__][obj.key].iteritems():
                if item.interval.start < end and item.interval.end > start:
                    yield key, item
        obj.set_iteritems_filter(_filter)
def to_key(obj):
    """Return the datastore key fragment for *obj*: the bare class name
    for a class, otherwise its string representation."""
    return obj.__name__ if isinstance(obj, type) else str(obj)
def k(*args):
    """Build a colon-separated redis key from the fragments of *args*."""
    return ":".join(map(to_key, args))
class RedisDatastore(object):
    """Redis-backed datastore with the same interface as Datastore.

    Objects are stored as JSON blobs under ``<Class>:<key>``; parent/child
    membership is kept in two sorted sets scored by interval start
    (``<Parent>:<pkey>:<Child>``) and interval end (same key + ``:end``).
    """
    def __init__(self):
        # NOTE(review): connection parameters are hard-coded; presumably
        # fine for this deployment -- confirm before reuse.
        redis_host = '127.0.0.1'
        redis_port = 6379
        redis_db = 0
        self._rds = redis.StrictRedis(host=redis_host,
            port=redis_port, db=redis_db)
    def _sequence(self):
        # Monotonic key generator shared by all object classes.
        return str(self._rds.incr('sequence.agenda'))
    def put(self, obj):
        """Persist *obj*; for Appointments, reject overlapping intervals
        atomically (WATCH/MULTI) and raise on concurrent modification."""
        if obj.key == None:
            obj.key = self._sequence()
        rkey = k(obj.__class__, obj.key)
        payload = json.dumps(obj.to_dict())
        if issubclass(obj.__class__, ParentkeyDataobjectMixin):
            parent_rkey = k(obj.parent_class, obj.parent_key, obj.__class__)
            # Here begins appointment overlapping control
            if obj.__class__ == Appointment:
                watch_rkey = k(obj.parent_class, obj.parent_key, obj.__class__)
                pipe = self._rds.pipeline(transaction=True)
                pipe.watch(watch_rkey)
                start_rkey = watch_rkey
                end_rkey = k(watch_rkey, "end")
                # Members starting before our end ...
                start_after = self._rds.zrangebyscore(
                    start_rkey, "-Inf", "(%d" % obj.interval.end)
                # ... intersected with members ending after our start
                # gives exactly the set of overlapping appointments.
                end_before = self._rds.zrangebyscore(
                    end_rkey, "(%d" % obj.interval.start, "+Inf")
                overlapping = set(start_after) & set(end_before)
                if overlapping != set():
                    pipe.reset()
                    raise OverlappingIntervalWarning
                try:
                    pipe.multi()
                    pipe.zadd(parent_rkey, obj.interval.start, obj.key)
                    pipe.zadd(k(parent_rkey, "end"), obj.interval.end, obj.key)
                    pipe.execute()
                except redis.WatchError:
                    # Someone touched the watched key between WATCH and
                    # EXEC; the overlap check may be stale.
                    raise ConcurrencyWarning
                finally:
                    pipe.reset()
            else:
                # Non-appointment children need no overlap protection.
                self._rds.zadd(parent_rkey, obj.interval.start, obj.key)
                self._rds.zadd(k(parent_rkey, "end"), obj.interval.end, obj.key)
        if issubclass(obj.__class__, CollectionDataobjectMixin):
            self._set_collection_methods(obj)
        self._rds.set(rkey, payload)
        return obj
    def delete(self, cls, key):
        """Delete the object, unlinking it from its parent's index and
        refusing to delete a non-empty container."""
        rkey = k(cls, key)
        obj = None
        if issubclass(cls, ParentkeyDataobjectMixin):
            obj = self.get(cls, key)
            parent_rkey = k(obj.parent_class, obj.parent_key, cls)
            self._rds.zrem(parent_rkey, key)
            self._rds.zrem(k(parent_rkey, "end"), key)
        if issubclass(cls, CollectionDataobjectMixin):
            if not obj:
                obj = self.get(cls, key)
            collection_rkey = k(cls, key, obj.collection_class)
            count = self._rds.zcard(collection_rkey)
            if count != 0:
                # FIX: raise specific exception
                raise ShiftNotEmptyError
        self._rds.delete(rkey)
        return
    def get(self, cls, key):
        """Load and deserialize the object; raises KeyError when absent."""
        rkey = k(cls, key)
        payload = self._rds.get(rkey)
        if payload == None:
            raise KeyError
        d = json.loads(payload)
        obj = cls.from_dict(d)
        obj.key = key
        if issubclass(obj.__class__, CollectionDataobjectMixin):
            self._set_collection_methods(obj)
        return obj
    def _set_collection_methods(self, obj):
        """Install closure-based child iterators backed by the sorted sets."""
        collection_rkey = k(obj.__class__, obj.key, obj.collection_class)
        def _iter():
            # All children ordered by interval start.
            for res in self._rds.zrangebyscore(collection_rkey, "-inf", "+inf"):
                yield res, self.get(obj.collection_class, res)
        obj.set_iterator(_iter)
        def _filter(start=None, end=None):
            # Same interval-overlap intersection trick as in put().
            start = "-Inf" if start == None else "(%s" % start
            end = "+Inf" if end == None else "(%s" % end
            start_rkey = collection_rkey
            end_rkey = k(collection_rkey, "end")
            start_after = self._rds.zrangebyscore(start_rkey, "-Inf", end)
            end_before = self._rds.zrangebyscore(end_rkey, start, "+Inf")
            for key in set(start_after) & set(end_before):
                yield key, self.get(obj.collection_class, key)
        obj.set_iteritems_filter(_filter)
def ds():
    """Return the process-wide Datastore singleton, creating it lazily on
    first use (cached as an attribute of this function)."""
    try:
        return ds.datastore
    except AttributeError:
        ds.datastore = Datastore()
        return ds.datastore
class AgendaController(object):
    """Thin controller wrapping an Agenda stored in the datastore."""
    def __init__(self, key=None, minimum_length=None):
        # Load an existing agenda by key, or create and persist a new one.
        if key:
            self._agenda = ds().get(Agenda, key)
        else:
            self._agenda = Agenda(minimum_length)
            self._agenda = ds().put(self._agenda)
    @property
    def key(self):
        # Datastore key of the underlying agenda.
        return self._agenda.key
    @property
    def minimum_length(self):
        # Minimum slot length configured on the agenda.
        return self._agenda.minimum_length
    @minimum_length.setter
    def minimum_length(self, value):
        # Persist immediately so other controllers see the new value.
        self._agenda.minimum_length = value
        self._agenda = ds().put(self._agenda)
    def add_shift(self, start, end):
        """Create, persist and return a Shift covering [start, end]."""
        shift = Shift(self.key, start, end)
        shift = ds().put(shift)
        return shift
    def del_shift(self, shift_key):
        """Delete a shift; fails if it still contains appointments."""
        ds().delete(Shift, shift_key)
    def get_shift(self, shift_key):
        """Return the shift stored under *shift_key*."""
        return ds().get(Shift, shift_key)
    def get_shifts_iteritems(self, start=None, end=None):
        """Yield (key, shift) pairs overlapping the optional interval."""
        for key, shift in self._agenda.iteritems_filter(start, end):
            yield (key, shift)
def get_shifts_itervalues(self, start=None, end=None):
for _, shift in self._agenda.iteri |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/db/transaction.py | Python | bsd-3-clause | 10,043 | 0.002589 | """
This module implements a transaction manager that can be used to define
transaction handling in a request or view function. It is used by transaction
control middleware and decorators.
The transaction manager can be in managed or in auto state. Auto state means the
system is using a commit-on-save strategy (actually it's more like
commit-on-change). As soon as the .save() or .delete() (or related) methods are
called, a commit is made.
Managed transactions don't do those commits, but will need some kind of manual
or implicit commits or rollbacks.
"""
from __future__ import with_statement
from functools import wraps
from django.db import connections, DEFAULT_DB_ALIAS
class TransactionManagementError(Exception):
    """
    This exception is thrown when something bad happens with transaction
    management, e.g. unbalanced enter/leave calls or a managed block left
    in an inconsistent state.
    """
    pass
def abort(using=None):
    """Roll back any ongoing transaction and wipe the connection's
    transaction-management state.

    Only for situations where balanced leave_transaction_management()
    calls cannot be made -- e.g. after a request has finished, when the
    transaction state is unknown but the connection must be left clean
    for the next request.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].abort()
def enter_transaction_management(managed=True, using=None):
    """Enter transaction management for the running thread.

    Must be balanced with a matching leave_transaction_management() call,
    since the actual state is kept as a stack.  The state and dirty flag
    carry over from the surrounding block, or from the settings when there
    is no surrounding block (dirty is always false in that case).
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].enter_transaction_management(managed)
def leave_transaction_management(using=None):
    """Leave transaction management for the running thread.

    The dirty flag carries over to the surrounding block, since a commit
    happens at connection level and commits all changes, even ones made
    outside the block.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].leave_transaction_management()
def is_dirty(using=None):
    """Return True when the current transaction has pending changes that
    require a commit to take effect.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    return connections[alias].is_dirty()
def set_dirty(using=None):
    """Mark the current thread's connection as having uncommitted changes.

    Lets a managed block of code decide later whether open changes are
    waiting for a commit.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].set_dirty()
def set_clean(using=None):
    """Reset the dirty flag for the current thread's connection.

    Lets a managed block of code decide later whether a commit or a
    rollback should happen.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].set_clean()
def clean_savepoints(using=None):
    """Delegate savepoint cleanup to the selected connection."""
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].clean_savepoints()
def is_managed(using=None):
    """Return whether the transaction manager is in manual (managed)
    state rather than auto state.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    return connections[alias].is_managed()
def managed(flag=True, using=None):
    """Switch the transaction manager into (or out of) manual state.

    In manual state, managed transactions have to be committed explicitly
    by the user.  If transaction management is switched off while a
    commit/rollback is pending, the data will be committed.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].managed(flag)
def commit_unless_managed(using=None):
    """Commit pending changes, unless the system is in managed
    transaction mode.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].commit_unless_managed()
def rollback_unless_managed(using=None):
    """Roll back pending changes, unless the system is in managed
    transaction mode.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].rollback_unless_managed()
def commit(using=None):
    """Perform the commit itself and reset the dirty flag."""
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].commit()
def rollback(using=None):
    """Perform the rollback itself and reset the dirty flag."""
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].rollback()
def savepoint(using=None):
    """Create a savepoint inside the current transaction, when the
    backend supports and requires it.

    Returns an identifier to be handed to a subsequent
    savepoint_rollback() or savepoint_commit() call.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    return connections[alias].savepoint()
def savepoint_rollback(sid, using=None):
    """Roll back the most recent savepoint, if one exists.

    Does nothing on backends without savepoint support.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
    """Commit the most recent savepoint, if one exists.

    Does nothing on backends without savepoint support.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    connections[alias].savepoint_commit(sid)
##############
# DECORATORS #
##############
class Transaction(object):
    """Dual-purpose decorator / context manager.

    Used with ``with``, it calls ``entering`` before the block and
    ``exiting`` after it (on both success and failure).  Called with a
    function, it returns that function wrapped so the same enter/exit
    pair brackets every invocation.  autocommit, commit_on_success and
    commit_manually supply the concrete entering/exiting behaviour.
    """
    def __init__(self, entering, exiting, using):
        self.entering = entering
        self.exiting = exiting
        self.using = using

    def __enter__(self):
        self.entering(self.using)

    def __exit__(self, exc_type, exc_value, traceback):
        # exc_value is None on a clean exit; exiting() decides what to do.
        self.exiting(exc_value, self.using)

    def __call__(self, func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            # Re-use the context-manager protocol for the decorator case.
            with self:
                return func(*args, **kwargs)
        return wrapped
def _transaction_func(entering, exiting, using):
    """Build a Transaction from an entering function (run at the start of
    the block), an exiting function (run at the end, on both success and
    failure) and ``using``.

    ``using`` may be None (meaning DEFAULT_DB_ALIAS), a database alias,
    or -- because ``@autocommit`` and ``@autocommit('alias')`` are both
    legal forms -- the decorated function itself, indicating that the
    default alias applies and the already-wrapped function should be
    returned directly.
    """
    if using is None:
        using = DEFAULT_DB_ALIAS
    if callable(using):
        # Bare-decorator form: `using` is actually the target function.
        return Transaction(entering, exiting, DEFAULT_DB_ALIAS)(using)
    return Transaction(entering, exiting, using)
def autocommit(using=None):
"""
Decorator that activates commit on save. This is Django's default behavior;
this decorator is useful if you globally activated transaction m |
FedoraScientific/salome-smesh | src/Tools/blocFissure/CasTests/fissureCoude_10.py | Python | lgpl-2.1 | 4,425 | 0.02815 | # -*- coding: utf-8 -*-
from blocFissure.gmu.fissureCoude import fissureCoude
class fissureCoude_10(fissureCoude):
  # test case ASCOU17
  # ---------------------------------------------------------------------------
  def setParamGeometrieSaine(self):
    """
    Geometric parameters of the sound (un-cracked) bent pipe:
    angleCoude: bend angle
    r_cintr: bend radius
    l_tube_p1: length of the first straight leg
    l_tube_p2: length of the second straight leg
    epais: wall thickness
    de: outer diameter
    """
    self.geomParams = dict(angleCoude = 90,
                           r_cintr = 1143,
                           l_tube_p1 = 3200,
                           l_tube_p2 = 3200,
                           epais = 35,
                           de = 762)
  # ---------------------------------------------------------------------------
  def setParamMaillageSain(self):
    # Mesh densities of the sound pipe (elements along legs, through the
    # thickness, along the bend and around the circumference).
    self.meshParams = dict(n_long_p1 = 13,
                           n_ep = 2,
                           n_long_coude = 20,
                           n_circ_g = 20,
                           n_circ_d = 20,
                           n_long_p2 = 13)
  # ---------------------------------------------------------------------------
  def setParamShapeFissure(self):
    """
    Crack parameters for the bent pipe:
    profondeur : depth, 0 < profondeur <= wall thickness
    rayonPipe : radius of the pipe carrying the radial mesh
    lenSegPipe : length of the radial-mesh elements along the crack front (defaults to rayonPipe)
    azimut : between 0 and 360 degrees
    alpha : 0 < alpha < bend angle
    longueur : length; <= 2*profondeur forces an elliptical crack (longueur/profondeur = major/minor axis)
    orientation : 0 deg: longitudinal, 90 deg: circumferential, other values: elliptical cracks only
    lgInfluence : distance around the crack shape to remesh (if 0, taken equal to profondeur; adjust to the mesh)
    elliptique : True: elliptical crack (longueur/profondeur = major/minor axis); False: long crack (constant-depth front with half-circle ends)
    pointIn_x : optional x coordinate of a point inside the solid, near the center of the crack front (same for y, z)
    externe : True: crack on the outer face, False: crack on the inner face
    """
    # logging.info("setParamShapeFissure %s", self.nomCas)
    self.shapeFissureParams = dict(profondeur  = 2.5,
                                   rayonPipe   = 1.5,
                                   lenSegPipe  = 6,
                                   azimut      = 180,
                                   alpha       = 45,
                                   longueur    = 1196,
                                   orientation = 0,
                                   lgInfluence = 30,
                                   elliptique  = False,
                                   externe     = False)
  # ---------------------------------------------------------------------------
  def setParamMaillageFissure(self):
    """
    Crack-mesh parameters for the bent pipe.
    See also setParamShapeFissure, parameters rayonPipe and lenSegPipe.
    nbSegRad = number of rings
    nbSegCercle = number of sectors
    areteFaceFissure = target edge size of the triangles on the crack face.
    """
    self.maillageFissureParams = dict(nomRep        = '.',
                                      nomFicSain    = self.nomCas,
                                      nomFicFissure = 'fissure_' + self.nomCas,
                                      nbsegRad      = 3,
                                      nbsegCercle   = 8,
                                      areteFaceFissure = 2.5)
  # ---------------------------------------------------------------------------
  def setReferencesMaillageFissure(self):
    # Reference element counts used to validate the generated crack mesh.
    self.referencesMaillageFissure = dict(Entity_Node            = 93352,
                                          Entity_Quad_Edge       = 1456,
                                          Entity_Quad_Triangle   = 8934,
                                          Entity_Quad_Quadrangle = 6978,
                                          Entity_Quad_Tetra      = 31147,
                                          Entity_Quad_Hexa       = 6972,
                                          Entity_Quad_Penta      = 1600,
                                          Entity_Quad_Pyramid    = 1696)
|
Cuuuurzel/KiPyCalc | sympy_old/physics/quantum/state.py | Python | mit | 26,583 | 0.001542 | """Dirac notation for states."""
from sympy import Expr, Symbol, Function, integrate, Expr
from sympy import Lambda, oo, conjugate, Tuple, sqrt, cacheit
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.operator import Operator
from sympy.physics.quantum.qexpr import (
QExpr, dispatch_method
)
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# LIGHT VERTICAL BAR
_straight_bracket = u"\u2758"
# MATHEMATICAL LEFT ANGLE BRACKET
_lbracket = u"\u27E8"
_rbracket = u"\u27E9"
# Other options for unicode printing of <, > and | for Dirac notation.
# VERTICAL LINE
# _straight_bracket = u"\u007C"
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
class StateBase(QExpr):
    """Abstract base class for general abstract states in quantum mechanics.

    All other state classes defined will need to inherit from this class. It
    carries the basic structure for all other states such as dual, _eval_dagger
    and label.

    This is an abstract base class and you should not instantiate it directly,
    instead use State.
    """
    @classmethod
    def _operators_to_state(self, ops, **options):
        """ Returns the eigenstate instance for the passed operators.

        This method should be overridden in subclasses. It will handle
        being passed either an Operator instance or set of Operator
        instances. It should return the corresponding state INSTANCE
        or simply raise a NotImplementedError. See cartesian.py for an
        example.
        """
        raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
    def _state_to_operators(self, op_classes, **options):
        """ Returns the operators which this state instance is an
        eigenstate of.

        This method should be overridden in subclasses. It will be
        called on state instances and be passed the operator classes
        that we wish to make into instances. The state instance will
        then transform the classes appropriately, or raise a
        NotImplementedError if it cannot return operator
        instances. See cartesian.py for examples,
        """
        raise NotImplementedError("Cannot map this state to operators. Method not implemented!")
    @property
    def operators(self):
        """Return the operator(s) that this state is an eigenstate of"""
        from operatorset import state_to_operators #import internally to avoid circular import errors
        return state_to_operators(self)
    def _enumerate_state(self, num_states, **options):
        # Subclasses that represent enumerable bases override this.
        raise NotImplementedError("Cannot enumerate this state!")
    def _represent_default_basis(self, **options):
        # Default representation: use the operators this state diagonalizes.
        return self._represent(basis=self.operators)
    #-------------------------------------------------------------------------
    # Dagger/dual
    #-------------------------------------------------------------------------
    @property
    def dual(self):
        """Return the dual state of this one."""
        return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
    @classmethod
    def dual_class(self):
        """Return the class used to construct the dual."""
        raise NotImplementedError(
            'dual_class must be implemented in a subclass'
        )
    def _eval_dagger(self):
        """Compute the dagger of this state using the dual."""
        return self.dual
    #-------------------------------------------------------------------------
    # Printing
    #-------------------------------------------------------------------------
    def _print_contents(self, printer, *args):
        # Plain-text form: label surrounded by the subclass's brackets.
        label = self._print_label(printer, *args)
        return '%s%s%s' % (self.lbracket, label, self.rbracket)
    def _print_contents_pretty(self, printer, *args):
        from sympy.printing.pretty.stringpict import prettyForm
        pform = self._print_label_pretty(printer, *args)
        pform = prettyForm(*pform.left((self.lbracket_pretty)))
        pform = prettyForm(*pform.right((self.rbracket_pretty)))
        return pform
    def _print_contents_latex(self, printer, *args):
        label = self._print_label_latex(printer, *args)
        # The extra {} brackets are needed to get matplotlib's latex
        # renderer to render this properly.
        return '{%s%s%s}' % (self.lbracket_latex, label, self.rbracket_latex)
class KetBase(StateBase):
    """Base class for Kets.

    This class defines the dual property and the brackets for printing. This
    is an abstract base class and you should not instantiate it directly,
    instead use Ket.
    """
    # Bracket characters used by the printing methods of StateBase.
    lbracket = '|'
    rbracket = '>'
    lbracket_pretty = prettyForm(_straight_bracket)
    rbracket_pretty = prettyForm(_rbracket)
    lbracket_latex = r'\left|'
    rbracket_latex = r'\right\rangle '
    @classmethod
    def default_args(self):
        # Label used when a Ket is constructed without arguments.
        return ("psi",)
    @classmethod
    def dual_class(self):
        return BraBase
    def __mul__(self, other):
        """KetBase*other: |k><b| forms an outer product."""
        from sympy.physics.quantum.operator import OuterProduct
        if isinstance(other, BraBase):
            return OuterProduct(self, other)
        else:
            return Expr.__mul__(self, other)
    def __rmul__(self, other):
        """other*KetBase: <b|k> forms an inner product."""
        from sympy.physics.quantum.innerproduct import InnerProduct
        if isinstance(other, BraBase):
            return InnerProduct(other, self)
        else:
            return Expr.__rmul__(self, other)
    #-------------------------------------------------------------------------
    # _eval_* methods
    #-------------------------------------------------------------------------
    def _eval_innerproduct(self, bra, **hints):
        """Evaluate the inner product between this ket and a bra.

        This is called to compute <bra|ket>, where the ket is ``self``.

        This method will dispatch to sub-methods having the format::

            def _eval_innerproduct_BraClass(self, **hints):

        Subclasses should define these methods (one for each BraClass) to
        teach the ket how to take inner products with bras.
        """
        return dispatch_method(self, '_eval_innerproduct', bra, **hints)
    def _apply_operator(self, op, **options):
        """Apply an Operator to this Ket.

        This method will dispatch to methods having the format::

            def _apply_operator_OperatorName(op, **options):

        Subclasses should define these methods (one for each OperatorName) to
        teach the Ket how operators act on it.

        Parameters
        ==========

        op : Operator
            The Operator that is acting on the Ket.
        options : dict
            A dict of key/value pairs that control how the operator is applied
            to the Ket.
        """
        return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dua | l property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = '<'
rbracket = '|'
lbracket_pretty = prettyForm(_lbracket)
rbracket_pretty = prettyForm(_straight_bracket)
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return map(lambda x: x.dual, dual_ |
wclark3/dsge-models | dsf_mfiles/save_results.py | Python | mit | 1,975 | 0.006076 | # argv[1] - file path to main folder (like $HOME/dsge-models)
# argv[2] - name of model (e.g. 'dsf' or 'nk' or 'ca')
from scipy.io import loadmat
from sys import argv
from json import load
TT = 30 # how many periods of results to send
model = argv[2]
fpath = argv[1] + '/' + model + '_mfiles/'
json = ''
#### 1 - load model results
# load results from mat file and convert to numpy lists
#mat = loadmat(fpath + model + '_results.m | at')
#endo_names = mat['M_']['endo_names'].tolist()[0][0]
#endo_simul = mat['oo_']['endo_simul'].tolist()[0][0]
# make string of JSON-looking data out of numpy lists
#for name, simul in zip(end | o_names, endo_simul):
# json += '"' + name.strip() + '":'
# json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
#### 2 - load extra plot vars
# load results from mat file and convert to numpy lists (new format though)
mat = loadmat(fpath + 'plot_vars.mat')
plot_names = mat['plot_vars'].dtype.names
plot_simul = mat['plot_vars'][0][0]
for name, simul in zip(plot_names, plot_simul):
print 'name: ' + name
json += '"' + name.strip() + '":'
json += '[' + ','.join(['%2f' % jj for jj in simul[0:TT]]) + '],'
# write JSON-looking string to file
f = open(model + '_mfiles/' + model + '_results.json', 'w')
f.write('{' + json[:-1] + '}')
f.close()
# pull JSON data into python dict
json_data = open(fpath + model + '_results.json')
data = load(json_data)
json_data.close()
# pull JSON of short+long var names into python dict
json_names = open(fpath + 'json/var_list.json')
names = load(json_names)
json_names.close()
# make string of public directory
pub_fpath = fpath[:fpath[:-1].rfind('/')] + '/public/'
# create csv file to write to
f = open(pub_fpath + model + '_results.csv','w')
for key in data.keys():
#f.write(str(key) + ', ' + str(data[key])[1:-1] + '\n')
f.write(str(names[key]) + ', ' + str(data[key])[1:-1] + '\n')
f.close()
|
zqfan/leetcode | algorithms/172. Factorial Trailing Zeroes/solution.py | Python | gpl-3.0 | 234 | 0 | class Solution(object):
def trailingZer | oes(self, n):
"""
:type n: int
:rtype: int
"""
count_5 = 0
while n > 0:
count_5 += n / 5
n /= 5
return co | unt_5
|
openstack/keystone | keystone/common/rbac_enforcer/enforcer.py | Python | apache-2.0 | 22,030 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import flask
from oslo_log import log
from oslo_policy import opts
from oslo_policy import policy as common_policy
from oslo_utils import strutils
from keystone.common import authorization
from keystone.common import context
from keystone.common import policies
from keystone.common import provider_api
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.i18n import _
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
PROVIDER_APIS = provider_api.ProviderAPIs
_POSSIBLE_TARGET_ACTIONS = frozenset([
rule.name for
rule in policies.list_rules() if not rule.deprecated_for_removal
])
_ENFORCEMENT_CHECK_ATTR = 'keystone:RBAC:enforcement_called'
# TODO(gmann): Remove setting the default value of config policy_file
# once oslo_policy change the default value to 'policy.yaml'.
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
DEFAULT_POLICY_FILE = 'policy.yaml'
opts.set_defaults(CONF, DEFAULT_POLICY_FILE)
class RBACEnforcer(object):
"""Enforce RBAC on API calls."""
__shared_state__ = {}
__ENFORCER = None
ACTION_STORE_ATTR = 'keystone:RBAC:action_name'
# FOR TESTS ONLY
suppress_deprecation_warnings = False
    def __init__(self):
        """Share state between every instance (Borg pattern)."""
        # NOTE(morgan): All Enforcer Instances use the same shared state;
        # BORG pattern.
        self.__dict__ = self.__shared_state__
    def _check_deprecated_rule(self, action):
        """Emit a one-time warning if `action` maps to a deprecated policy.

        A rule counts as deprecated when its name or check string is
        changing, or when it is slated for removal but still overridden in
        the operator's policy file.  The warning is emitted at most once per
        enforcer instance (tracked via ``_warning_emitted``).
        """
        def _name_is_changing(rule):
            # The rule was renamed and the operator still overrides the OLD
            # name in the policy file.
            deprecated_rule = rule.deprecated_rule
            return (deprecated_rule and
                    deprecated_rule.name != rule.name and
                    deprecated_rule.name in self._enforcer.file_rules)
        def _check_str_is_changing(rule):
            # The default check string changed and the operator has NOT
            # pinned the rule in the policy file (so the default applies).
            deprecated_rule = rule.deprecated_rule
            return (deprecated_rule and
                    deprecated_rule.check_str != rule.check_str and
                    rule.name not in self._enforcer.file_rules)
        def _is_deprecated_for_removal(rule):
            # The rule is going away entirely but is still overridden.
            return (rule.deprecated_for_removal and
                    rule.name in self._enforcer.file_rules)
        def _emit_warning():
            # Guarded so operators see a single actionable message rather
            # than one warning per enforcement call.
            if not self._enforcer._warning_emitted:
                LOG.warning("Deprecated policy rules found. Use "
                            "oslopolicy-policy-generator and "
                            "oslopolicy-policy-upgrade to detect and resolve "
                            "deprecated policies in your configuration.")
                self._enforcer._warning_emitted = True
        registered_rule = self._enforcer.registered_rules.get(action)
        if not registered_rule:
            # Unknown action: nothing registered to compare against.
            return
        if (_name_is_changing(registered_rule) or
                _check_str_is_changing(registered_rule) or
                _is_deprecated_for_removal(registered_rule)):
            _emit_warning()
def _enforce(self, credentials, action, target, do_raise=True):
"""Verify that the action is valid on the target in this context.
This method is for cases that exceed the base enforcer
functionality (notably for compatibility with `@protected` style
decorators.
:param credentials: user credentials
:param action: string representing the action to be checked, which
should be colon separated for clarity.
:param target: dictionary representing the object of the action for
object creation this should be a dictionary
representing the location of the object e.g.
{'project_id': object.project_id}
:raises keystone.exception.Forbidden: If verification fails.
Actions should be colon separated for clarity. For example:
* identity:list_users
"""
# Add the exception arguments if asked to do a raise
extra = {}
if do_raise:
extra.update(exc=exception.ForbiddenAction, action=action,
do_raise=do_raise)
try:
result = self._enforcer.enforce(
rule=action, target=target, creds=credentials, **extra)
self._check_deprecated_rule(action)
return result
except common_policy.InvalidScope:
raise exception.ForbiddenAction(action=action)
    def _reset(self):
        """Discard the cached oslo.policy enforcer (TEST-only hook)."""
        # NOTE(morgan): Used for TEST purposes only.
        self.__ENFORCER = None
    @property
    def _enforcer(self):
        """Lazily build and cache the raw oslo.policy enforcer object."""
        # The raw oslo-policy enforcer object
        if self.__ENFORCER is None:
            self.__ENFORCER = common_policy.Enforcer(CONF)
            # NOTE(cmurphy) when running in the keystone server, suppress
            # deprecation warnings for individual policy rules. Instead, we log
            # a single notification at enforcement time indicating the
            # oslo.policy tools the operator can use to detect and resolve
            # deprecated policies. If there is no request context here, that
            # means external tooling such as the oslo.policy tools are running
            # this code, in which case we do want the full deprecation warnings
            # emitted for individual polcy rules.
            if flask.has_request_context():
                self.__ENFORCER.suppress_deprecation_warnings = True
            # NOTE(cmurphy) Tests may explicitly disable these warnings to
            # prevent an explosion of test logs
            if self.suppress_deprecation_warnings:
                self.__ENFORCER.suppress_deprecation_warnings = True
            self.register_rules(self.__ENFORCER)
            # One-shot flag consumed by _check_deprecated_rule's warning.
            self.__ENFORCER._warning_emitted = False
        return self.__ENFORCER
@staticmethod
def _extract_filter_values(filters):
"""Extract filter data from query params for RBAC enforcement."""
filters = filters or []
target = {i: flask.request.args[i] for
i in filters if i in flask.request.args}
if target:
if LOG.logger.getEffectiveLevel() <= log.DEBUG:
LOG.debug(
'RBAC: Adding query filter params (%s)',
', '.join(['%s=%s' % (k, v) for k, v in target.items()]))
return target
@staticmethod
def _extract_member_target_data(member_target_type, member_target):
"""Build some useful target data.
:param member_target_type: what type of target, e.g. 'user'
:type member_target_type: str or None
:param member_target: reference of the target data
:type member_target: dict or None
:returns: constructed target dict or empty dict
:rtype: dict
"""
ret_dict = {}
if ((member_target is not None and member_target_type is None) or
(member_target is None and member_target_type is not None)):
LOG.warning('RBAC: Unknown target type or target reference. '
'Rejecting as unauthorized. '
'(member_target_type=%(target_type)r, '
'member_target=%(target_ref)r)',
{'target_type': member_target_type,
'target_ref': member_target})
# Fast exit.
return ret_dict
if member_target is not None and member_target_type is not None:
ret_dict['target'] = {member_target_type: member_target}
else:
# Try and do some magic loading based upon the resource we've
# matched in our route. This is mostly so we can have a level of
# a |
intel-analytics/analytics-zoo | pyzoo/docs/doc-web.py | Python | apache-2.0 | 942 | 0.001062 | #!/usr/bin/env python
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in complia | nce with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import web
urls = (
' | /(.*)', 'router'
)
app = web.application(urls, globals())
class router:
    """Serves files from the generated Sphinx HTML tree (_build/html)."""
    def GET(self, path):
        """Return the contents of the requested docs file.

        An empty path serves index.html.  Raises IOError for missing files
        (original behavior) and for paths escaping the docs root.
        """
        import os
        if path == '':
            path = 'index.html'
        # SECURITY: `path` comes straight from the URL; normalize and make
        # sure it stays under the docs root so '../' cannot reach arbitrary
        # files on disk.
        base = os.path.join('_build', 'html')
        full = os.path.normpath(os.path.join(base, path))
        if not full.startswith(base + os.sep):
            raise IOError('path outside docs root: %r' % path)
        # Context manager closes the handle (the original leaked it).
        with open(full) as f:
            return f.read()
if __name__ == "__main__":
    # Run the web.py development server when invoked directly.
    app = web.application(urls, globals())
    app.run()
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/readline.py | Python | gpl-2.0 | 6,031 | 0.010778 | # encoding: utf-8
# module readline
# from /usr/lib/python3.4/lib-dynload/readline.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
""" Importing this module enables command line editing using GNU readline. """
# no imports
# NOTE(review): this is an IDE helper stub generated from the C extension;
# bodies are placeholders — only the signatures and docstrings carry meaning.
# functions
def add_history(string: str) -> None: # real signature unknown; restored from __doc__
    """
    add_history(string) -> None
    add a line to the history buffer
    """
    pass
def clear_history() -> None: # real signature unknown; restored from __doc__
    """
    clear_history() -> None
    Clear the current readline history.
    """
    pass
def get_begidx() -> int: # real signature unknown; restored from __doc__
    """
    get_begidx() -> int
    get the beginning index of the readline tab-completion scope
    """
    return 0
def get_completer(): # real signature unknown; restored from __doc__
    """
    get_completer() -> function
    Returns current completer function.
    """
    pass
def get_completer_delims() -> str: # real signature unknown; restored from __doc__
    """
    get_completer_delims() -> string
    get the readline word delimiters for tab-completion
    """
    return ""
def get_completion_type() -> int: # real signature unknown; restored from __doc__
    """
    get_completion_type() -> int
    Get the type of completion being attempted.
    """
    return 0
def get_current_history_length() -> int: # real signature unknown; restored from __doc__
    """
    get_current_history_length() -> integer
    return the current (not the maximum) length of history.
    """
    return 0
def get_endidx() -> int: # real signature unknown; restored from __doc__
    """
    get_endidx() -> int
    get the ending index of the readline tab-completion scope
    """
    return 0
# NOTE(review): the generator dropped the `index` parameter that the
# docstring implies — confirm against the real readline module.
def get_history_item() -> str: # real signature unknown; restored from __doc__
    """
    get_history_item() -> string
    return the current contents of history item at index.
    """
    return ""
def get_history_length() -> int: # real signature unknown; restored from __doc__
    """
    get_history_length() -> int
    return the maximum number of items that will be written to
    the history file.
    """
    return 0
def get_line_buffer() -> str: # real signature unknown; restored from __doc__
    """
    get_line_buffer() -> string
    return the current contents of the line buffer.
    """
    return ""
def insert_text(string: str) -> None: # real signature unknown; restored from __doc__
    """
    insert_text(string) -> None
    Insert text into the command line.
    """
    pass
def parse_and_bind(string: str) -> None: # real signature unknown; restored from __doc__
    """
    parse_and_bind(string) -> None
    Parse and execute single line of a readline init file.
    """
    pass
def read_history_file(filename=None) -> None: # real signature unknown; restored from __doc__
    """
    read_history_file([filename]) -> None
    Load a readline history file.
    The default filename is ~/.history.
    """
    pass
def read_init_file(filename=None) -> None: # real signature unknown; restored from __doc__
    """
    read_init_file([filename]) -> None
    Parse a readline initialization file.
    The default filename is the last filename used.
    """
    pass
def redisplay() -> None: # real signature unknown; restored from __doc__
    """
    redisplay() -> None
    Change what's displayed on the screen to reflect the current
    contents of the line buffer.
    """
    pass
def remove_history_item(pos: int) -> None: # real signature unknown; restored from __doc__
    """
    remove_history_item(pos) -> None
    remove history item given by its position
    """
    pass
def replace_history_item(pos: int, line: str) -> None: # real signature unknown; restored from __doc__
    """
    replace_history_item(pos, line) -> None
    replaces history item given by its position with contents of line
    """
    pass
def set_completer(function=None) -> None: # real signature unknown; restored from __doc__
    """
    set_completer([function]) -> None
    Set or remove the completer function.
    The function is called as function(text, state),
    for state in 0, 1, 2, ..., until it returns a non-string.
    It should return the next possible completion starting with 'text'.
    """
    pass
def set_completer_delims(string: str) -> None: # real signature unknown; restored from __doc__
    """
    set_completer_delims(string) -> None
    set the readline word delimiters for tab-completion
    """
    pass
def set_completion_display_matches_hook(function=None) -> None: # real signature unknown; restored from __doc__
    """
    set_completion_display_matches_hook([function]) -> None
    Set or remove the completion display function.
    The function is called as
    function(substitution, [matches], longest_match_length)
    once each time matches need to be displayed.
    """
    pass
def set_history_length(length: int) -> None: # real signature unknown; restored from __doc__
    """
    set_history_length(length) -> None
    set the maximal number of items which will be written to
    the history file. A negative length is used to inhibit
    history truncation.
    """
    pass
def set_pre_input_hook(function=None) -> None: # real signature unknown; restored from __doc__
    """
    set_pre_input_hook([function]) -> None
    Set or remove the pre_input_hook function.
    The function is called with no arguments after the first prompt
    has been printed and just before readline starts reading input
    characters.
    """
    pass
def set_startup_hook(function=None) -> None: # real signature unknown; restored from __doc__
    """
    set_startup_hook([function]) -> None
    Set or remove the startup_hook function.
    The function is called with no arguments just
    before readline prints the first prompt.
    """
    pass
def write_history_file(filename=None) -> None: # real signature unknown; restored from __doc__
    """
    write_history_file([filename]) -> None
    Save a readline history file.
    The default filename is ~/.history.
    """
    pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
declankenny/XBMC_datasync | resources/lib/functions.py | Python | mit | 2,067 | 0.014998 | import os
import pickle
import xbmc
import urllib2
def getFilesAsDic(FilePath, TrimPath):
    """Map files under FilePath to their modification times.

    Keys are paths with TrimPath removed and the OS separator replaced by
    the '||~~||' marker; values are mtimes.  A non-existent path yields an
    empty dict.
    """
    file_list = {}
    FilePath = xbmc.validatePath(FilePath)

    def _record(full_path):
        # Store one file entry under its trimmed, separator-escaped key.
        mtime = os.path.getmtime(full_path)
        key = full_path.replace(TrimPath, '').replace(os.sep, "||~~||")
        file_list[key] = mtime

    if os.path.isdir(FilePath):
        for root, _dirs, files in os.walk(FilePath):
            for name in files:
                _record(os.path.join(root, name))
    elif os.path.isfile(FilePath):
        _record(FilePath)
    return file_list
def writeFileList(FilePath, TargetPath, TrimPath, Append=0):
    """Pickle the file/mtime dict for FilePath into TargetPath.

    :param Append: 0 overwrites the target, anything else appends another
        pickled dict to it.
    """
    local_dir = os.path.dirname(TargetPath)
    # Guard against a bare filename: os.path.dirname returns '' then, and
    # the original os.makedirs('') call would raise.
    if local_dir and not os.path.exists(local_dir):
        os.makedirs(local_dir)
    mode = 'wb' if Append == 0 else 'ab'
    # Context manager guarantees the handle is closed even if pickling fails
    # (the original leaked it on error).
    with open(TargetPath, mode) as outfile:
        pickle.dump(getFilesAsDic(FilePath, TrimPath), outfile)
def getDicFromURL(URLPath):
    """Fetch and unpickle a dict from URLPath; return {} on any failure."""
    try:
        f = urllib2.urlopen(URLPath)
        try:
            dic = pickle.load(f)
        finally:
            # Close the response even when unpickling fails (the original
            # leaked it on error).
            f.close()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; any network or unpickling error yields the empty dict.
        dic = {}
        xbmc.log("Data Sync Service:: Cannot Connect to Server, please check IP Address and Port Number")
    return dic
def downloadFile(LocalRoot, RemoteRoot, FileName):
    """Download RemoteRoot+FileName to LocalRoot, creating directories.

    :returns: 1 on success, 0 on any download failure.
    """
    local_path = xbmc.validatePath(os.path.join(LocalRoot, FileName))
    local_dir = os.path.dirname(local_path)
    if not os.path.exists(local_dir):
        os.makedirs(local_dir)
    # Remote paths always use forward slashes regardless of local OS.
    URLPath = RemoteRoot + FileName.replace("\\", "/")
    # Context manager replaces the manual open/close; as in the original,
    # a failed download still leaves an empty local file behind.
    with open(local_path, 'wb') as f:
        try:
            f.write(urllib2.urlopen(URLPath).read())
            temp = 1
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still propagates.
            temp = 0
    return temp
thethythy/Mnemopwd | mnemopwd/client/uilayer/uicomponents/InputBox.py | Python | bsd-2-clause | 5,588 | 0.000716 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import curses
import curses.ascii
from .TextEditor import TextEditor
from .SecretTextEditor import SecretTextEditor
from .Component import Component
class InputBox(Component):
    """
    A simple text editor with a border line

    Attributes:
    - value: the output text after edition
    - shortcuts: list of shortcut keys for ending edition
    - secret: is it a secret text?
    """
    def __init__(self, parent, h, w, y, x, shortcuts=None, secret=False,
                 show=True, option=False, colourD=False):
        """Create a input text box.

        :param parent: owning component
        :param h: height of the box (including border)
        :param w: width of the box (including border)
        :param y: top row of the box
        :param x: left column of the box
        :param shortcuts: control keys (letters) that end the edition
        :param secret: when True the typed text is masked
        :param show: draw the box immediately
        :param option: marker flag kept on the instance (read by callers)
        :param colourD: curses attribute used for the border
        """
        Component.__init__(self, parent, h, w, y, x)
        if show:
            self.window.attrset(colourD)
            self.window.border()
            self.window.refresh()
            self.window.attrset(0)
        # One-line editing area inset inside the border.
        self.editorbox = self.window.derwin(1, w - 4, 1, 2)
        if not secret:
            self.editor = TextEditor(self.editorbox)
        else:
            self.editor = SecretTextEditor(self.editorbox)
        self.value = None
        self.shortcuts = shortcuts
        self.option = option
        self.showOrHide = show
        self.focus = False
        self.colourD = colourD
        # Cursor position
        self.cursor_y = 0
        self.cursor_x = 0
    def is_editable(self):
        """This component is editable"""
        return True
    def is_actionable(self):
        """Return True by default (actionable)"""
        return False
    def focus_on(self):
        """See mother class"""
        self.focus = True
        # Blinking underscore marks the insertion point.
        self.editorbox.addstr(self.cursor_y, self.cursor_x, '_', curses.A_BLINK)
        self.editorbox.move(self.cursor_y, self.cursor_x)
        self.editorbox.refresh()
    def focus_off(self):
        """See mother class"""
        self.focus = False
        self.editorbox.move(self.cursor_y, self.cursor_x)
        self.editorbox.clrtoeol()
        self.editorbox.refresh()
    def has_focus(self):
        """Does the component have the focus"""
        return self.focus
    def clear(self):
        """Clean up the editor content"""
        self.value = None
        self.cursor_x = 0
        self.focus_off()
    def update(self, label):
        """Change the content"""
        self.value = label
        self.window.clear()
        self.show()
    def show(self):
        """Show the editor"""
        self.showOrHide = True
        if self.value is not None:
            self.editor.populate(self.value)
            self.cursor_x = len(self.value)
        self.window.attrset(self.colourD)
        self.window.border()
        self.window.refresh()
        self.window.attrset(0)
    def hide(self):
        """Hide the editor"""
        self.showOrHide = False
        self.window.clear()
        self.window.refresh()
    def redraw(self):
        """Redraw the editor"""
        if self.showOrHide:
            if self.value is not None:
                self.editor.populate(self.value)
                self.cursor_x = len(self.value)
            self.window.attrset(self.colourD)
            self.window.border()
            self.window.refresh()
            self.window.attrset(0)
    def _controller_(self, ch):
        """Control if the character is a control key.

        Translates raw key codes for the underlying editor: navigation /
        terminator keys are pushed back and end the edition (NL), some keys
        are ignored (False), DEL becomes backspace, and control characters
        either end the edition (if listed in self.shortcuts) or pass through.
        """
        if ch in [curses.KEY_UP, curses.KEY_DOWN, curses.ascii.TAB,
                  curses.ascii.CR, curses.ascii.ESC, curses.KEY_MOUSE]:
            # Push the key back so the caller can still react to it, then
            # terminate the edition.
            curses.ungetch(ch)
            return curses.ascii.NL
        elif ch in [curses.KEY_RIGHT, curses.KEY_LEFT, curses.KEY_A1,
                    curses.KEY_A3, curses.KEY_C1, curses.KEY_C3]:
            return False
        elif ch in [curses.ascii.DEL]:
            return curses.ascii.BS
        elif curses.ascii.isctrl(ch):
            # ch + 64 maps a control code back to its letter (Ctrl-A -> 'A').
            if chr(ch + 64) in self.shortcuts:
                curses.ungetch(ch)
                return curses.ascii.NL
            else:
                return ch
    def edit(self):
        """Start editing operation"""
        result = self.editor.edit(self._controller_)
        if result != self.value:
            # Empty result means the content was erased.
            if result != "":
                self.value = result
                self.cursor_x = len(result)
            else:
                self.value = None
                self.cursor_x = 0
|
fairyzoro/python | JiYouMCC/0003/0003.py | Python | mit | 176 | 0.008772 | # | -*- coding: utf-8 -*-
# 第 0003 题:将 0001 题生成的 200 个激活码(或者优惠券)保存到 Redis 非关系型数据库中。
# Failed to install Redis locally, so this exercise is skipped.
biosustain/marsi | marsi/alembic/env.py | Python | apache-2.0 | 1,993 | 0 | from __future__ import with_statement
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config, pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymod | el
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
d | ef run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Builds an Engine from the [alembic] config section and binds a live
    connection to the migration context.
    """
    engine = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool,
    )
    with engine.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
        )
        with context.begin_transaction():
            context.run_migrations()
# Alembic invokes this module directly; choose the migration mode based on
# whether a live database connection is available.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
|
jpurma/Kataja | kataja/runner.py | Python | gpl-3.0 | 1,380 | 0.004354 | import os
import subprocess
# This is an example for using Kataja to launch a visualisation from a python script that doesn't use kataja
# structures, but can output bracket trees. Kataja is launched as a separate process so it doesn't stop the
# main script.
def send_to_kataja(tree, image_file=''):
    """Launch Kataja in a detached process to visualise a bracket tree.

    :param tree: bracket-notation tree string passed to Kataja.py
    :param image_file: optional output path forwarded via -image_out
    :returns: the Popen object (or spawnv pid on Windows with P_DETACH)
    """
    args = ['python', 'Kataja.py']
    if image_file:
        args.extend(['-image_out', image_file])
    args.append(tree)
    if os.name == 'posix':
        # setpgrp detaches the child from our process group so it survives
        # and doesn't block the calling script.
        return subprocess.Popen(args, preexec_fn=os.setpgrp,
                                stdout=subprocess.DEVNULL)
    elif os.name == 'nt' and hasattr(os, 'P_DETACH'):
        return os.spawnv(os.P_DETACH, 'python', args)
    # Fix: the original fell through and silently returned None on Windows
    # builds without P_DETACH (and any other OS); plain Popen still works
    # everywhere even if the child is not fully detached.
    return subprocess.Popen(args, stdout=subprocess.DEVNULL)
# python Kataja.py -image_out test.pdf "[ [ A {word} ] [.T did | [.V happen ] ] ]"
# tree = """[.{CP} [.{DP(0)} [.{D'} [.{D} which ] [.{NP} [.{N'} [.N wine ] ] ] ] ] [.{C'} [.C \epsilon [.{VP} [.{DP} [.{D'} [.D the ] [.{NP} [.{N'} [.N que | en ] ] ] ] ] [.{V'} [.V prefers ] [.{DP} t(0) ] ] ] ] ] ]
# """
tree = """[.{FP} {Graham Greene_i} [.{F'} on_j [.{TP} t_i [.{T'} t_j [.{AuxP} t_j [.{PrtP} kirjoittanut_k [.{VP} t_i [.{V'} t_k [.{DP} tämän kirjan ] ] ] ] ] ] ] ] ]
"""
send_to_kataja(tree, 'test.pdf')
print(f"I just sent {tree} to kataja.")
print("thanks, I'm done now!")
|
eternalfame/django_admin_monitoring | setup.py | Python | mit | 1,092 | 0.001832 | import os
from setuptools import setup, find_packages
# Use the README as the long description shown on PyPI.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
    name='django_admin_monitoring',
    version='0.1.3',
    packages=find_packages(),
    include_package_data=True,
    license='MIT License',
    description='A simple Django app that provides ability to monitor such things as user feedback in admin',
    long_description=README,
    url='https://github.com/eternalfame/django_admin_monitoring',
    author='Vyacheslav Sukhenko',
    author_email='eternalfame@mail.ru',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
beni55/SimpleCV | SimpleCV/examples/display/qt-example.py | Python | bsd-3-clause | 2,247 | 0.006231 | #!/usr/bin/env python
'''
This example shows how to display a SimpleCV image in a QT window
the code was taken from the forum post here:
http://help.simplecv.org/question/1866/any-simple-pyqt-sample-regarding-ui-or-display/
Author: Rodrigo gomes
'''
import os
import sys
import signal
from PyQt4 import uic, QtGui, QtCore
from SimpleCV import *
# Compatibility shims: older PyQt4 API-1 exposes QString.fromUtf8 and
# UnicodeUTF8; on API-2 / newer builds these attributes are missing, so fall
# back to plain-string equivalents.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
    """Qt-Designer-style UI holder: a dialog with a single QLabel that the
    Webcam window paints camera frames into."""
    def setupUi(self, Dialog):
        # Fixed-size dialog with one label positioned for the video frame.
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(632, 483)
        self.label = QtGui.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(80, 30, 491, 391))
        self.label.setObjectName(_fromUtf8("label"))
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
class Webcam(QtGui.QMainWindow):
    """Main window that polls a SimpleCV camera and paints each frame into
    the dialog's QLabel via a QTimer."""
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self,parent)
        self.MainWindow = Ui_Dialog()
        self.MainWindow.setupUi(self)
        self.webcam = Camera(0,{ "width": 640, "height": 480 })
        # 1 ms timer: effectively "as fast as the event loop allows".
        self.timer = QtCore.QTimer()
        self.connect(self.timer, QtCore.SIGNAL('timeout()'), self.show_frame)
        self.timer.start(1);
    def show_frame(self):
        """Grab one frame, draw a demo circle, and display it in the label."""
        ipl_image = self.webcam.getImage()
        ipl_image.dl().circle((150, 75), 50, Color.RED, filled = True)
        data = ipl_image.getBitmap().tostring()
        # SimpleCV delivers BGR; rgbSwapped() below converts for Qt display.
        image = QtGui.QImage(data, ipl_image.width, ipl_image.height, 3 * ipl_image.width, QtGui.QImage.Format_RGB888)
        pixmap = QtGui.QPixmap()
        pixmap.convertFromImage(image.rgbSwapped())
        self.MainWindow.label.setPixmap(pixmap)
if __name__ == "__main__":
    # Start the Qt event loop; Webcam's QTimer then drives the frame updates.
    app = QtGui.QApplication(sys.argv)
    webcam = Webcam()
    webcam.show()
    app.exec_()
|
pracedru/pyDesign | PyDesignModel/PyDesignAnalysisItem.py | Python | mit | 5,100 | 0.001373 | from PyQt5.QtCore import *
from PyDesignData.PyDesignObject import *
from PyDesignModel.PyDesignCalcSheetsItem import PyDesignCalcSheetsItem
from PyDesignModel.PyDesignIcons import *
from PyDesignModel.PyDesignMaterialsItem import PyDesignMaterialsItem
from PyDesignModel.PyDesignModelItem import PyDesignModelItem
from PyDesignModel.PyDesignParametersItem import *
from PyDesignModel.PyDesignGeometriesItem import *
from PyDesignModel.PyDesignMeshesItem import *
from PyDesignModel.PyDesignSolversItem import PyDesignSolversItem
__author__ = 'magnus'
class PyDesignAnalysisItem(PyDesignModelItem):
    """Qt model item wrapping a PyDesignAnalysis data object.

    Exposes the analysis name/value through role-based accessors, builds the
    child sub-trees (properties, calc sheets, geometries, meshes, materials,
    solvers) and owns the item's context menu.
    """
    def __init__(self, parent, py_design_analysis):
        """
        :type py_design_analysis: PyDesignAnalysis
        :param parent: parent model item (provides .model)
        :param py_design_analysis: wrapped analysis data object
        :return:
        """
        PyDesignModelItem.__init__(self, parent, parent.model)
        self._data_object = py_design_analysis
        # Role accessors keyed by column/property identifiers.
        self._data_dict[PyDesignNamedObject.NAME] = self.data_name
        self._data_dict[PyDesignCommon.VALUE] = self.data_value
        '''self._data_dict[PDP.size_temp] = self.data_size_temp
        self._data_dict[PDP.medium_type] = self.data_medium_type
        self._data_dict[PDP.size_pres] = self.data_size_pres'''
        self._set_data_dict[PyDesignNamedObject.NAME] = self.set_data_name
        self._icon = get_icon("analysis")
        # Listen for changes on the data object (see on_event).
        py_design_analysis.add_listener(self)
        # Fixed child sub-trees shown under every analysis item.
        self._children.append(PyDesignParametersItem(py_design_analysis.properties, self))
        self._children.append(PyDesignCalcSheetsItem(py_design_analysis, self))
        self._children.append(PyDesignGeometriesItem(py_design_analysis, self))
        self._children.append(PyDesignMeshesItem(py_design_analysis, self))
        self._children.append(PyDesignMaterialsItem(py_design_analysis, self))
        self._children.append(PyDesignSolversItem(py_design_analysis, self))
        self._type = "PyDesignAnalysisModelItem"
        self._context_menu = QMenu()
        add_prop_menu = self._context_menu.addAction("Add property")
        add_prop_menu.triggered.connect(self.on_add_property)
        add_prop_menu = self._context_menu.addAction("Add calculation sheet")
        add_prop_menu.triggered.connect(self.on_add_sheet)
        add_prop_menu = self._context_menu.addAction("Add geometry")
        add_prop_menu.triggered.connect(self.on_add_geometry)
        add_prop_menu = self._context_menu.addAction("Add mesh")
        add_prop_menu.triggered.connect(self.on_add_mesh)
        add_prop_menu = self._context_menu.addAction("Add material")
        add_prop_menu.triggered.connect(self.on_add_material)
        add_prop_menu = self._context_menu.addAction("Delete analysis")
        add_prop_menu.triggered.connect(self.on_delete)
    def on_add_property(self):
        # Delegate to the shared event handler with this analysis' properties.
        PyDesignEventHandlers.on_add_parameter(self._data_object.properties)
    def on_add_sheet(self):
        PyDesignEventHandlers.on_add_sheet(self._data_object)
    def on_add_geometry(self):
        PyDesignEventHandlers.on_add_geometry(self._data_object, None)
    def on_add_mesh(self):
        # Not implemented yet.
        pass
    def on_add_material(self):
        # Not implemented yet.
        pass
    def on_delete(self):
        # Not implemented yet.
        pass
    def data_name(self, int_role):
        """Return the analysis name (display/edit) or its icon (decoration)."""
        if int_role == Qt.DisplayRole or int_role == Qt.EditRole:
            return self._data_object.name
        elif int_role == Qt.DecorationRole:
            return self._icon
        else:
            return None
    def set_data_name(self, int_role, data):
        """Rename the analysis when edited in the view."""
        if int_role == Qt.EditRole:
            self._data_object.name = data
            return True
    def data_value(self, int_role):
        """Return a human-readable label for the analysis type code."""
        if int_role == Qt.DisplayRole:
            # analysis_type codes: 0 = 3D, 1 = 2D, 2 = 2D axis symmetric.
            type_name = "Unknown analysis"
            type_name = "3D analysis" if self._data_object.analysis_type == 0 else type_name
            type_name = "2D analysis" if self._data_object.analysis_type == 1 else type_name
            type_name = "2D analysis axis symmetric" if self._data_object.analysis_type == 2 else type_name
            return type_name
        else:
            return None
    def data_size_pres(self, int_role):
        if int_role == Qt.DisplayRole:
            return self._data_object.size_pres
        else:
            return None
    def data_medium_type(self, int_role):
        if int_role == Qt.DisplayRole:
            return self._data_object.medium_type
        else:
            return None
    @staticmethod
    def item_flags(int_pdp):
        """Only the NAME column is editable; everything is selectable."""
        default_flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled
        if int_pdp == PyDesignNamedObject.NAME:
            return default_flags | Qt.ItemIsEditable
        else:
            return default_flags
    def on_context_menu(self, point):
        self._context_menu.exec_(point)
    def on_event(self, event):
        """
        Data-object change notification: refresh this item in the model.

        :type event: PyDesignEvent
        :param event:
        :return:
        """
        self._model.on_item_changed(self)
        if event.type == PyDesignEvent.EndItemAddedEvent:
            pass
            #new_item = PyDesignParameterItem(self, event.value)
            #self.add_child(new_item)
        return
sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/minors.py | Python | mit | 17,564 | 0.000057 | # minors.py - functions for computing minors of graphs
#
# Copyright 2015 Jeffrey Finkelstein <jeffrey.finkelstein@gmail.com>.
# Copyright 2010 Drew Conway <drew.conway@nyu.edu>
# Copyright 2010 Aric Hagberg <hagberg@lanl.gov>
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""Provides functions for computing minors of a graph."""
from itertools import chain
from itertools import combinations
from itertools import permutations
from itertools import product
import networkx as nx
from networkx import density
from networkx.exception import NetworkXException
from networkx.utils import arbitrary_element
__all__ = ['contracted_edge', 'contracted_nodes',
'identified_nodes', 'quotient_graph']
chaini = chain.from_iterable
def equivalence_classes(iterable, relation):
    """Returns the set of equivalence classes of the given `iterable` under
    the specified equivalence relation.

    `relation` must be a Boolean-valued function that takes two arguments. It
    must represent an equivalence relation (that is, the relation induced by
    the function must be reflexive, symmetric, and transitive).

    The return value is a set of :class:`frozenset` objects. It is a
    partition of the elements of `iterable`; duplicate elements will be
    ignored so it makes the most sense for `iterable` to be a :class:`set`.
    """
    # For simplicity of implementation, we initialize the return value as a
    # list of lists, then convert it to a set of sets at the end of the
    # function.
    blocks = []
    # Determine the equivalence class for each element of the iterable.
    for y in iterable:
        # Each element y must be in *exactly one* equivalence class, so
        # comparing against a single representative of each block suffices.
        #
        # Each block is guaranteed to be non-empty.
        for block in blocks:
            # next(iter(...)) is exactly what networkx.utils.arbitrary_element
            # does; inlined so this function is self-contained.
            x = next(iter(block))
            if relation(x, y):
                block.append(y)
                break
        else:
            # If the element y is not part of any known equivalence class, it
            # must be in its own, so we create a new singleton equivalence
            # class for it.
            blocks.append([y])
    return {frozenset(block) for block in blocks}
def quotient_graph(G, partition, edge_relation=None, node_data=None,
edge_data=None, relabel=False, create_using=None):
"""Returns the quotient graph of `G` under the specified equivalence
relation on nodes.
Parameters
----------
G : NetworkX graph
The graph for which to return the quotient graph with the
specified node relation.
partition : function or list of sets
If a function, this function must represent an equivalence
relation on the nodes of `G`. It must take two arguments *u*
and *v* and return True exactly when *u* and *v* are in the
same equivalence class. The equivalence classes form the nodes
in the returned graph.
If a list of sets, the list must form a valid partition of
the nodes of the graph. That is, each node must be in exactly
one block of the partition.
edge_relation : Boolean function with two arguments
        This function must represent an edge relation on the *blocks* of
        `G` in the partition induced by `node_relation`. It must
        take two arguments, *B* and *C*, each one a set of nodes, and
return True exactly when there should be an edge joining
block *B* to block *C* in the returned graph.
If `edge_relation` is not specified, it is assumed to be the
following relation. Block *B* is related to block *C* if and
only if some node in *B* is adjacent to some node in *C*,
according to the edge set of `G`.
edge_data : function
This function takes two arguments, *B* and *C*, each one a set
of nodes, and must return a dictionary representing the edge
data attributes to set on the edge joining *B* and *C*, should
there be an edge joining *B* and *C* in the quotient graph (if
no such edge occurs in the quotient graph as determined by
`edge_relation`, then the output of this function is ignored).
If the quotient graph would be a multigraph, this function is
not applied, since the edge data from each edge in the graph
`G` appears in the edges of the quotient graph.
node_data : function
This function takes one argument, *B*, a set of nodes in `G`,
and must return a dictionary representing the node data
attributes to set on the node representing *B* in the quotient graph.
If None, the following node attributes will be set:
* 'graph', the subgraph of the graph `G` that this block
represents,
* 'nnodes', the number of nodes in this block,
* 'nedges', the number of edges within this block,
* 'density', the density of the subgraph of `G` that this
block represents.
relabel : bool
If True, relabel the nodes of the quotient graph to be
nonnegative integers. Otherwise, the nodes are identified with
:class:`frozenset` instances representing the blocks given in
`partition`.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
NetworkX graph
The quotient graph of `G` under the equivalence relation
specified by `partition`. If the partition were given as a
list of :class:`set` instances and `relabel` is False,
each node will be a :class:`frozenset` corresponding to the same
:class:`set`.
Raises
------
NetworkXException
If the given partition is not a valid partition of the nodes of
`G`.
Examples
--------
The quotient graph of the complete bipartite graph under the "same
neighbors" equivalence relation is `K_2`. Under this relation, two nodes
are equivalent if they are not adjacent but have the same neighbor set::
>>> import networkx as nx
>>> G = nx.complete_bipartite_graph(2, 3)
>>> same_neighbors = lambda u, v: (u not in G[v] and v not in G[u]
... and G[u] == G[v])
>>> Q = nx.quotient_graph(G, same_neighbors)
>>> K2 = nx.complete_graph(2)
>>> nx.is_isomorphic(Q, K2)
True
The quotient graph of a directed graph under the "same strongly connected
component" equivalence relation is the condensation of the graph (see
:func:`condensation`). This example comes from the Wikipedia article
*`Strongly connected component`_*::
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> edges = ['ab', 'be', 'bf', 'bc', 'cg', 'cd', 'dc', 'dh', 'ea',
... 'ef', 'fg', 'gf', 'hd', 'hf']
>>> G.add_edges_from(tuple(x) for x in edges)
>>> components = list(nx.strongly_connected_components(G))
>>> sorted(sorted(component) for component in components)
[['a', 'b', 'e'], ['c', 'd', 'h'], ['f', 'g']]
>>>
>>> C = nx.condensation(G, components)
>>> component_of = C.graph['mapping']
>>> same_component = lambda u, v: component_of[u] == component_of[v]
>>> Q = nx.quotient_graph(G, same_component)
>>> nx.is_isomorphic(C, Q)
True
Node identification can be represented as the quotient of a graph under the
equivalence relation that places the two nodes in one block and each other
node in its own singleton block::
>>> import networkx as nx
>>> K24 = nx.complete_bipartite_graph(2, 4)
>>> K34 = nx.complete_bipartite_graph(3, 4)
>>> C = nx.contracted_nodes(K34, 1, 2)
>>> nodes = {1, 2}
>>> is_contracted = lambda u, v: u in nodes and v in nodes
>>> Q = nx.quotient_graph(K34, is_contracted)
>>> nx.is_isomorphic(Q, C)
True
>>> nx.is_isomorphic(Q, K24)
True
The blockmodeling technique described in [1]_ can be implemented as a
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.